From 50ac6607845755e594c8a39b9c6a00d1c9b48ea4 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Tue, 25 Jun 2013 19:03:56 -0700 Subject: cfg80211/nl80211: rename packet pattern related structures and enums Currently packet patterns and it's enum/structures are used only for WoWLAN feature. As we intend to reuse them for new feature packet coalesce, they are renamed in this patch. Older names are kept for backward compatibility purpose. Signed-off-by: Amitkumar Karwar Signed-off-by: Bing Zhao Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 1737a3e..1adb803 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -2094,7 +2094,7 @@ static void ath9k_wow_add_pattern(struct ath_softc *sc, { struct ath_hw *ah = sc->sc_ah; struct ath9k_wow_pattern *wow_pattern = NULL; - struct cfg80211_wowlan_trig_pkt_pattern *patterns = wowlan->patterns; + struct cfg80211_pkt_pattern *patterns = wowlan->patterns; int mask_len; s8 i = 0; diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index ef5fa89..0bd631f 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -2298,8 +2298,7 @@ EXPORT_SYMBOL_GPL(mwifiex_del_virtual_intf); #ifdef CONFIG_PM static bool -mwifiex_is_pattern_supported(struct cfg80211_wowlan_trig_pkt_pattern *pat, - s8 *byte_seq) +mwifiex_is_pattern_supported(struct cfg80211_pkt_pattern *pat, s8 *byte_seq) { int j, k, valid_byte_cnt = 0; bool dont_care_byte = false; diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index b8db55c..d1b19c3 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -1315,7 +1315,7 @@ static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl) #ifdef CONFIG_PM static int -wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern *p) +wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p) { int num_fields = 0, in_field = 0, fields_size = 0; int i, pattern_len = 0; @@ -1458,9 +1458,9 @@ void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter, * Allocates an RX filter returned through f * which needs to be freed using rx_filter_free() */ -static int wl1271_convert_wowlan_pattern_to_rx_filter( - struct cfg80211_wowlan_trig_pkt_pattern *p, - struct wl12xx_rx_filter **f) +static int +wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p, + struct wl12xx_rx_filter **f) { int i, j, ret = 0; struct wl12xx_rx_filter *filter; @@ -1562,7 +1562,7 @@ static int wl1271_configure_wowlan(struct wl1271 *wl, /* Translate WoWLAN patterns into filters */ for (i = 0; i < wow->n_patterns; i++) { - struct cfg80211_wowlan_trig_pkt_pattern *p; + struct cfg80211_pkt_pattern *p; struct wl12xx_rx_filter *filter = NULL; p = &wow->patterns[i]; diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 7b0730a..207b079 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1698,7 +1698,7 @@ struct cfg80211_pmksa { }; /** - * struct cfg80211_wowlan_trig_pkt_pattern - packet pattern + * struct cfg80211_pkt_pattern - packet pattern * @mask: bitmask where to match pattern and where to ignore bytes, * one bit per byte, in same format as nl80211 * @pattern: bytes to match where bitmask is 1 @@ -1708,7 +1708,7 @@ struct cfg80211_pmksa { * Internal note: @mask and @pattern are allocated in one chunk of * memory, free @mask only! 
*/ -struct cfg80211_wowlan_trig_pkt_pattern { +struct cfg80211_pkt_pattern { u8 *mask, *pattern; int pattern_len; int pkt_offset; @@ -1770,7 +1770,7 @@ struct cfg80211_wowlan { bool any, disconnect, magic_pkt, gtk_rekey_failure, eap_identity_req, four_way_handshake, rfkill_release; - struct cfg80211_wowlan_trig_pkt_pattern *patterns; + struct cfg80211_pkt_pattern *patterns; struct cfg80211_wowlan_tcp *tcp; int n_patterns; }; diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 861e5eb..de0ce80 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -3060,11 +3060,11 @@ enum nl80211_tx_power_setting { }; /** - * enum nl80211_wowlan_packet_pattern_attr - WoWLAN packet pattern attribute - * @__NL80211_WOWLAN_PKTPAT_INVALID: invalid number for nested attribute - * @NL80211_WOWLAN_PKTPAT_PATTERN: the pattern, values where the mask has + * enum nl80211_packet_pattern_attr - packet pattern attribute + * @__NL80211_PKTPAT_INVALID: invalid number for nested attribute + * @NL80211_PKTPAT_PATTERN: the pattern, values where the mask has * a zero bit are ignored - * @NL80211_WOWLAN_PKTPAT_MASK: pattern mask, must be long enough to have + * @NL80211_PKTPAT_MASK: pattern mask, must be long enough to have * a bit for each byte in the pattern. The lowest-order bit corresponds * to the first byte of the pattern, but the bytes of the pattern are * in a little-endian-like format, i.e. the 9th byte of the pattern @@ -3075,23 +3075,23 @@ enum nl80211_tx_power_setting { * Note that the pattern matching is done as though frames were not * 802.11 frames but 802.3 frames, i.e. the frame is fully unpacked * first (including SNAP header unpacking) and then matched. - * @NL80211_WOWLAN_PKTPAT_OFFSET: packet offset, pattern is matched after + * @NL80211_PKTPAT_OFFSET: packet offset, pattern is matched after * these fixed number of bytes of received packet - * @NUM_NL80211_WOWLAN_PKTPAT: number of attributes - * @MAX_NL80211_WOWLAN_PKTPAT: max attribute number + * @NUM_NL80211_PKTPAT: number of attributes + * @MAX_NL80211_PKTPAT: max attribute number */ -enum nl80211_wowlan_packet_pattern_attr { - __NL80211_WOWLAN_PKTPAT_INVALID, - NL80211_WOWLAN_PKTPAT_MASK, - NL80211_WOWLAN_PKTPAT_PATTERN, - NL80211_WOWLAN_PKTPAT_OFFSET, +enum nl80211_packet_pattern_attr { + __NL80211_PKTPAT_INVALID, + NL80211_PKTPAT_MASK, + NL80211_PKTPAT_PATTERN, + NL80211_PKTPAT_OFFSET, - NUM_NL80211_WOWLAN_PKTPAT, - MAX_NL80211_WOWLAN_PKTPAT = NUM_NL80211_WOWLAN_PKTPAT - 1, + NUM_NL80211_PKTPAT, + MAX_NL80211_PKTPAT = NUM_NL80211_PKTPAT - 1, }; /** - * struct nl80211_wowlan_pattern_support - pattern support information + * struct nl80211_pattern_support - packet pattern support information * @max_patterns: maximum number of patterns supported * @min_pattern_len: minimum length of each pattern * @max_pattern_len: maximum length of each pattern @@ -3101,13 +3101,22 @@ enum nl80211_wowlan_packet_pattern_attr { * that is part of %NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED in the * capability information given by the kernel to userspace. 
*/ -struct nl80211_wowlan_pattern_support { +struct nl80211_pattern_support { __u32 max_patterns; __u32 min_pattern_len; __u32 max_pattern_len; __u32 max_pkt_offset; } __attribute__((packed)); +/* only for backward compatibility */ +#define __NL80211_WOWLAN_PKTPAT_INVALID __NL80211_PKTPAT_INVALID +#define NL80211_WOWLAN_PKTPAT_MASK NL80211_PKTPAT_MASK +#define NL80211_WOWLAN_PKTPAT_PATTERN NL80211_PKTPAT_PATTERN +#define NL80211_WOWLAN_PKTPAT_OFFSET NL80211_PKTPAT_OFFSET +#define NUM_NL80211_WOWLAN_PKTPAT NUM_NL80211_PKTPAT +#define MAX_NL80211_WOWLAN_PKTPAT MAX_NL80211_PKTPAT +#define nl80211_wowlan_pattern_support nl80211_pattern_support + /** * enum nl80211_wowlan_triggers - WoWLAN trigger definitions * @__NL80211_WOWLAN_TRIG_INVALID: invalid number for nested attributes @@ -3127,7 +3136,7 @@ struct nl80211_wowlan_pattern_support { * pattern matching is done after the packet is converted to the MSDU. * * In %NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED, it is a binary attribute - * carrying a &struct nl80211_wowlan_pattern_support. + * carrying a &struct nl80211_pattern_support. * * When reporting wakeup. it is a u32 attribute containing the 0-based * index of the pattern that caused the wakeup, in the patterns passed @@ -3284,7 +3293,7 @@ struct nl80211_wowlan_tcp_data_token_feature { * @NL80211_WOWLAN_TCP_WAKE_PAYLOAD: wake packet payload, for advertising a * u32 attribute holding the maximum length * @NL80211_WOWLAN_TCP_WAKE_MASK: Wake packet payload mask, not used for - * feature advertising. The mask works like @NL80211_WOWLAN_PKTPAT_MASK + * feature advertising. The mask works like @NL80211_PKTPAT_MASK * but on the TCP payload only. * @NUM_NL80211_WOWLAN_TCP: number of TCP attributes * @MAX_NL80211_WOWLAN_TCP: highest attribute number diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 1cc47ac..a044762 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -974,7 +974,7 @@ static int nl80211_send_wowlan(struct sk_buff *msg, return -ENOBUFS; if (dev->wiphy.wowlan->n_patterns) { - struct nl80211_wowlan_pattern_support pat = { + struct nl80211_pattern_support pat = { .max_patterns = dev->wiphy.wowlan->n_patterns, .min_pattern_len = dev->wiphy.wowlan->pattern_min_len, .max_pattern_len = dev->wiphy.wowlan->pattern_max_len, @@ -7591,12 +7591,11 @@ static int nl80211_send_wowlan_patterns(struct sk_buff *msg, if (!nl_pat) return -ENOBUFS; pat_len = wowlan->patterns[i].pattern_len; - if (nla_put(msg, NL80211_WOWLAN_PKTPAT_MASK, - DIV_ROUND_UP(pat_len, 8), + if (nla_put(msg, NL80211_PKTPAT_MASK, DIV_ROUND_UP(pat_len, 8), wowlan->patterns[i].mask) || - nla_put(msg, NL80211_WOWLAN_PKTPAT_PATTERN, - pat_len, wowlan->patterns[i].pattern) || - nla_put_u32(msg, NL80211_WOWLAN_PKTPAT_OFFSET, + nla_put(msg, NL80211_PKTPAT_PATTERN, pat_len, + wowlan->patterns[i].pattern) || + nla_put_u32(msg, NL80211_PKTPAT_OFFSET, wowlan->patterns[i].pkt_offset)) return -ENOBUFS; nla_nest_end(msg, nl_pat); @@ -7937,7 +7936,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) struct nlattr *pat; int n_patterns = 0; int rem, pat_len, mask_len, pkt_offset; - struct nlattr *pat_tb[NUM_NL80211_WOWLAN_PKTPAT]; + struct nlattr *pat_tb[NUM_NL80211_PKTPAT]; nla_for_each_nested(pat, tb[NL80211_WOWLAN_TRIG_PKT_PATTERN], rem) @@ -7956,26 +7955,25 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) nla_for_each_nested(pat, tb[NL80211_WOWLAN_TRIG_PKT_PATTERN], rem) { - nla_parse(pat_tb, MAX_NL80211_WOWLAN_PKTPAT, - nla_data(pat), nla_len(pat), NULL); + 
nla_parse(pat_tb, MAX_NL80211_PKTPAT, nla_data(pat), + nla_len(pat), NULL); err = -EINVAL; - if (!pat_tb[NL80211_WOWLAN_PKTPAT_MASK] || - !pat_tb[NL80211_WOWLAN_PKTPAT_PATTERN]) + if (!pat_tb[NL80211_PKTPAT_MASK] || + !pat_tb[NL80211_PKTPAT_PATTERN]) goto error; - pat_len = nla_len(pat_tb[NL80211_WOWLAN_PKTPAT_PATTERN]); + pat_len = nla_len(pat_tb[NL80211_PKTPAT_PATTERN]); mask_len = DIV_ROUND_UP(pat_len, 8); - if (nla_len(pat_tb[NL80211_WOWLAN_PKTPAT_MASK]) != - mask_len) + if (nla_len(pat_tb[NL80211_PKTPAT_MASK]) != mask_len) goto error; if (pat_len > wowlan->pattern_max_len || pat_len < wowlan->pattern_min_len) goto error; - if (!pat_tb[NL80211_WOWLAN_PKTPAT_OFFSET]) + if (!pat_tb[NL80211_PKTPAT_OFFSET]) pkt_offset = 0; else pkt_offset = nla_get_u32( - pat_tb[NL80211_WOWLAN_PKTPAT_OFFSET]); + pat_tb[NL80211_PKTPAT_OFFSET]); if (pkt_offset > wowlan->max_pkt_offset) goto error; new_triggers.patterns[i].pkt_offset = pkt_offset; @@ -7989,11 +7987,11 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) new_triggers.patterns[i].pattern = new_triggers.patterns[i].mask + mask_len; memcpy(new_triggers.patterns[i].mask, - nla_data(pat_tb[NL80211_WOWLAN_PKTPAT_MASK]), + nla_data(pat_tb[NL80211_PKTPAT_MASK]), mask_len); new_triggers.patterns[i].pattern_len = pat_len; memcpy(new_triggers.patterns[i].pattern, - nla_data(pat_tb[NL80211_WOWLAN_PKTPAT_PATTERN]), + nla_data(pat_tb[NL80211_PKTPAT_PATTERN]), pat_len); i++; } -- cgit v0.10.2 From 803768f54ef84cb4aaac0b51274b11b31885588c Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Fri, 28 Jun 2013 10:39:58 +0200 Subject: nl80211: enable HT overrides for ibss Signed-off-by: Simon Wunderlich Signed-off-by: Mathias Kretschmer Signed-off-by: Johannes Berg diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 207b079..79c2277 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1592,6 +1592,9 @@ struct cfg80211_disassoc_request { * user space. Otherwise, port is marked authorized by default. * @basic_rates: bitmap of basic rates to use when creating the IBSS * @mcast_rate: per-band multicast rate index + 1 (0: disabled) + * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask + * will be used in ht_capa. Un-supported values will be ignored. + * @ht_capa_mask: The bits of ht_capa which are to be used. 
*/ struct cfg80211_ibss_params { u8 *ssid; @@ -1605,6 +1608,8 @@ struct cfg80211_ibss_params { bool privacy; bool control_port; int mcast_rate[IEEE80211_NUM_BANDS]; + struct ieee80211_ht_cap ht_capa; + struct ieee80211_ht_cap ht_capa_mask; }; /** diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index a044762..0492478 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -6346,6 +6346,19 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info) return err; } + if (info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) + memcpy(&ibss.ht_capa_mask, + nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]), + sizeof(ibss.ht_capa_mask)); + + if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) { + if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) + return -EINVAL; + memcpy(&ibss.ht_capa, + nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]), + sizeof(ibss.ht_capa)); + } + if (info->attrs[NL80211_ATTR_MCAST_RATE] && !nl80211_parse_mcast_rate(rdev, ibss.mcast_rate, nla_get_u32(info->attrs[NL80211_ATTR_MCAST_RATE]))) -- cgit v0.10.2 From 822854b0b1d8f893279ca717f3d084aa5fcd9ff5 Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Fri, 28 Jun 2013 10:39:59 +0200 Subject: mac80211: enable HT overrides for ibss Signed-off-by: Simon Wunderlich Signed-off-by: Mathias Kretschmer Signed-off-by: Johannes Berg diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c index f83534f..529bf58 100644 --- a/net/mac80211/ht.c +++ b/net/mac80211/ht.c @@ -19,13 +19,14 @@ #include "ieee80211_i.h" #include "rate.h" -static void __check_htcap_disable(struct ieee80211_sub_if_data *sdata, +static void __check_htcap_disable(struct ieee80211_ht_cap *ht_capa, + struct ieee80211_ht_cap *ht_capa_mask, struct ieee80211_sta_ht_cap *ht_cap, u16 flag) { __le16 le_flag = cpu_to_le16(flag); - if (sdata->u.mgd.ht_capa_mask.cap_info & le_flag) { - if (!(sdata->u.mgd.ht_capa.cap_info & le_flag)) + if (ht_capa_mask->cap_info & le_flag) { + if (!(ht_capa->cap_info & le_flag)) ht_cap->cap &= ~flag; } } @@ -33,13 +34,30 @@ static void __check_htcap_disable(struct ieee80211_sub_if_data *sdata, void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata, struct ieee80211_sta_ht_cap *ht_cap) { - u8 *scaps = (u8 *)(&sdata->u.mgd.ht_capa.mcs.rx_mask); - u8 *smask = (u8 *)(&sdata->u.mgd.ht_capa_mask.mcs.rx_mask); + struct ieee80211_ht_cap *ht_capa, *ht_capa_mask; + u8 *scaps, *smask; int i; if (!ht_cap->ht_supported) return; + switch (sdata->vif.type) { + case NL80211_IFTYPE_STATION: + ht_capa = &sdata->u.mgd.ht_capa; + ht_capa_mask = &sdata->u.mgd.ht_capa_mask; + break; + case NL80211_IFTYPE_ADHOC: + ht_capa = &sdata->u.ibss.ht_capa; + ht_capa_mask = &sdata->u.ibss.ht_capa_mask; + break; + default: + WARN_ON_ONCE(1); + return; + } + + scaps = (u8 *)(&ht_capa->mcs.rx_mask); + smask = (u8 *)(&ht_capa_mask->mcs.rx_mask); + /* NOTE: If you add more over-rides here, update register_hw * ht_capa_mod_msk logic in main.c as well. * And, if this method can ever change ht_cap.ht_supported, fix @@ -55,28 +73,32 @@ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata, } /* Force removal of HT-40 capabilities? 
*/ - __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_SUP_WIDTH_20_40); - __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_SGI_40); + __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap, + IEEE80211_HT_CAP_SUP_WIDTH_20_40); + __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap, + IEEE80211_HT_CAP_SGI_40); /* Allow user to disable SGI-20 (SGI-40 is handled above) */ - __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_SGI_20); + __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap, + IEEE80211_HT_CAP_SGI_20); /* Allow user to disable the max-AMSDU bit. */ - __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_MAX_AMSDU); + __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap, + IEEE80211_HT_CAP_MAX_AMSDU); /* Allow user to decrease AMPDU factor */ - if (sdata->u.mgd.ht_capa_mask.ampdu_params_info & + if (ht_capa_mask->ampdu_params_info & IEEE80211_HT_AMPDU_PARM_FACTOR) { - u8 n = sdata->u.mgd.ht_capa.ampdu_params_info - & IEEE80211_HT_AMPDU_PARM_FACTOR; + u8 n = ht_capa->ampdu_params_info & + IEEE80211_HT_AMPDU_PARM_FACTOR; if (n < ht_cap->ampdu_factor) ht_cap->ampdu_factor = n; } /* Allow the user to increase AMPDU density. */ - if (sdata->u.mgd.ht_capa_mask.ampdu_params_info & + if (ht_capa_mask->ampdu_params_info & IEEE80211_HT_AMPDU_PARM_DENSITY) { - u8 n = (sdata->u.mgd.ht_capa.ampdu_params_info & + u8 n = (ht_capa->ampdu_params_info & IEEE80211_HT_AMPDU_PARM_DENSITY) >> IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT; if (n > ht_cap->ampdu_density) @@ -112,7 +134,8 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata, * we advertised a restricted capability set to. Override * our own capabilities and then use those below. */ - if (sdata->vif.type == NL80211_IFTYPE_STATION && + if ((sdata->vif.type == NL80211_IFTYPE_STATION || + sdata->vif.type == NL80211_IFTYPE_ADHOC) && !test_sta_flag(sta, WLAN_STA_TDLS_PEER)) ieee80211_apply_htcap_overrides(sdata, &own_cap); diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index ea7b9c2..7f290a8 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -179,8 +179,12 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, chandef.width != NL80211_CHAN_WIDTH_5 && chandef.width != NL80211_CHAN_WIDTH_10 && sband->ht_cap.ht_supported) { - pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap, - sband->ht_cap.cap); + struct ieee80211_sta_ht_cap ht_cap; + + memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap)); + ieee80211_apply_htcap_overrides(sdata, &ht_cap); + + pos = ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap); /* * Note: According to 802.11n-2009 9.13.3.1, HT Protection * field and RIFS Mode are reserved in IBSS mode, therefore @@ -1051,6 +1055,11 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, memcpy(sdata->u.ibss.ssid, params->ssid, params->ssid_len); sdata->u.ibss.ssid_len = params->ssid_len; + memcpy(&sdata->u.ibss.ht_capa, ¶ms->ht_capa, + sizeof(sdata->u.ibss.ht_capa)); + memcpy(&sdata->u.ibss.ht_capa_mask, ¶ms->ht_capa_mask, + sizeof(sdata->u.ibss.ht_capa_mask)); + /* * 802.11n-2009 9.13.3.1: In an IBSS, the HT Protection field is * reserved, but an HT STA shall protect HT transmissions as though @@ -1131,6 +1140,11 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) presp = rcu_dereference_protected(ifibss->presp, lockdep_is_held(&sdata->wdev.mtx)); RCU_INIT_POINTER(sdata->u.ibss.presp, NULL); + + /* on the next join, re-program HT parameters */ + memset(&ifibss->ht_capa, 0, sizeof(ifibss->ht_capa)); + memset(&ifibss->ht_capa_mask, 0, 
sizeof(ifibss->ht_capa_mask)); + sdata->vif.bss_conf.ibss_joined = false; sdata->vif.bss_conf.ibss_creator = false; sdata->vif.bss_conf.enable_beacon = false; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 8412a30..683751a 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -509,6 +509,9 @@ struct ieee80211_if_ibss { /* probe response/beacon for IBSS */ struct beacon_data __rcu *presp; + struct ieee80211_ht_cap ht_capa; /* configured ht-cap over-rides */ + struct ieee80211_ht_cap ht_capa_mask; /* Valid parts of ht_capa */ + spinlock_t incomplete_lock; struct list_head incomplete_stations; -- cgit v0.10.2 From ad24b0da9ecec433091f74596843703e4cf5da77 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 5 Jul 2013 11:53:28 +0200 Subject: wireless: indent kernel-doc with tabs Almost everywhere tabs are used to indent continuation lines, replace the few places that use spaces. Signed-off-by: Johannes Berg diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 79c2277..4940960 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -490,7 +490,7 @@ enum survey_info_flags { * @channel: the channel this survey record reports, mandatory * @filled: bitflag of flags from &enum survey_info_flags * @noise: channel noise in dBm. This and all following fields are - * optional + * optional * @channel_time: amount of time in ms the radio spent on the channel * @channel_time_busy: amount of time the primary channel was sensed busy * @channel_time_ext_busy: amount of time the extension channel was sensed busy @@ -546,9 +546,9 @@ struct cfg80211_crypto_settings { /** * struct cfg80211_beacon_data - beacon data * @head: head portion of beacon (before TIM IE) - * or %NULL if not changed + * or %NULL if not changed * @tail: tail portion of beacon (after TIM IE) - * or %NULL if not changed + * or %NULL if not changed * @head_len: length of @head * @tail_len: length of @tail * @beacon_ies: extra information element(s) to add into Beacon frames or %NULL @@ -764,7 +764,7 @@ int cfg80211_check_station_change(struct wiphy *wiphy, * @STATION_INFO_PLINK_STATE: @plink_state filled * @STATION_INFO_SIGNAL: @signal filled * @STATION_INFO_TX_BITRATE: @txrate fields are filled - * (tx_bitrate, tx_bitrate_flags and tx_bitrate_mcs) + * (tx_bitrate, tx_bitrate_flags and tx_bitrate_mcs) * @STATION_INFO_RX_PACKETS: @rx_packets filled with 32-bit value * @STATION_INFO_TX_PACKETS: @tx_packets filled with 32-bit value * @STATION_INFO_TX_RETRIES: @tx_retries filled @@ -1509,7 +1509,7 @@ enum cfg80211_assoc_req_flags { * @prev_bssid: previous BSSID, if not %NULL use reassociate frame * @flags: See &enum cfg80211_assoc_req_flags * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask - * will be used in ht_capa. Un-supported values will be ignored. + * will be used in ht_capa. Un-supported values will be ignored. * @ht_capa_mask: The bits of ht_capa which are to be used. * @vht_capa: VHT capability override * @vht_capa_mask: VHT capability mask indicating which fields to use @@ -1593,7 +1593,7 @@ struct cfg80211_disassoc_request { * @basic_rates: bitmap of basic rates to use when creating the IBSS * @mcast_rate: per-band multicast rate index + 1 (0: disabled) * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask - * will be used in ht_capa. Un-supported values will be ignored. + * will be used in ht_capa. Un-supported values will be ignored. * @ht_capa_mask: The bits of ht_capa which are to be used. 
*/ struct cfg80211_ibss_params { @@ -1635,9 +1635,9 @@ struct cfg80211_ibss_params { * @key: WEP key for shared key authentication * @flags: See &enum cfg80211_assoc_req_flags * @bg_scan_period: Background scan period in seconds - * or -1 to indicate that default value is to be used. + * or -1 to indicate that default value is to be used. * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask - * will be used in ht_capa. Un-supported values will be ignored. + * will be used in ht_capa. Un-supported values will be ignored. * @ht_capa_mask: The bits of ht_capa which are to be used. * @vht_capa: VHT Capability overrides * @vht_capa_mask: The bits of vht_capa which are to be used. diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 5b7a3da..7c5dc78 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1004,11 +1004,11 @@ enum ieee80211_smps_mode { * @radar_enabled: whether radar detection is enabled * * @long_frame_max_tx_count: Maximum number of transmissions for a "long" frame - * (a frame not RTS protected), called "dot11LongRetryLimit" in 802.11, - * but actually means the number of transmissions not the number of retries + * (a frame not RTS protected), called "dot11LongRetryLimit" in 802.11, + * but actually means the number of transmissions not the number of retries * @short_frame_max_tx_count: Maximum number of transmissions for a "short" - * frame, called "dot11ShortRetryLimit" in 802.11, but actually means the - * number of transmissions not the number of retries + * frame, called "dot11ShortRetryLimit" in 802.11, but actually means the + * number of transmissions not the number of retries * * @smps_mode: spatial multiplexing powersave mode; note that * %IEEE80211_SMPS_STATIC is used when the device is not @@ -1092,7 +1092,7 @@ enum ieee80211_vif_flags { * be off when it is %NULL there can still be races and packets could be * processed after it switches back to %NULL. * @debugfs_dir: debugfs dentry, can be used by drivers to create own per - * interface debug files. Note that it will be NULL for the virtual + * interface debug files. Note that it will be NULL for the virtual * monitor interface (if that is requested.) * @drv_priv: data area for driver use, will always be aligned to * sizeof(void *). @@ -1425,10 +1425,10 @@ struct ieee80211_tx_control { * the stack. * * @IEEE80211_HW_CONNECTION_MONITOR: - * The hardware performs its own connection monitoring, including - * periodic keep-alives to the AP and probing the AP on beacon loss. - * When this flag is set, signaling beacon-loss will cause an immediate - * change to disassociated state. + * The hardware performs its own connection monitoring, including + * periodic keep-alives to the AP and probing the AP on beacon loss. + * When this flag is set, signaling beacon-loss will cause an immediate + * change to disassociated state. * * @IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC: * This device needs to get data from beacon before association (i.e. @@ -1526,10 +1526,10 @@ enum ieee80211_hw_flags { * @channel_change_time: time (in microseconds) it takes to change channels. * * @max_signal: Maximum value for signal (rssi) in RX information, used - * only when @IEEE80211_HW_SIGNAL_UNSPEC or @IEEE80211_HW_SIGNAL_DB + * only when @IEEE80211_HW_SIGNAL_UNSPEC or @IEEE80211_HW_SIGNAL_DB * * @max_listen_interval: max listen interval in units of beacon interval - * that HW supports + * that HW supports * * @queues: number of available hardware transmit queues for * data packets. 
WMM/QoS requires at least four, these @@ -2443,7 +2443,7 @@ enum ieee80211_roc_type { * The callback can sleep. * * @set_tsf: Set the TSF timer to the specified value in the firmware/hardware. - * Currently, this is only used for IBSS mode debugging. Is not a + * Currently, this is only used for IBSS mode debugging. Is not a * required function. * The callback can sleep. * -- cgit v0.10.2 From a144f378a489b5900c028425cb0d0a7a3fc8c1c1 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 3 Jul 2013 13:34:02 +0200 Subject: mac80211: add per-chain signal information to radiotap When per-chain signal information is available, don't add the antenna field once but instead add a radiotap namespace for each chain containing the chain/antenna number and the signal strength on that chain. Signed-off-by: Johannes Berg Signed-off-by: Johannes Berg diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 23dbcfc..c9f2ca4 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -87,11 +87,13 @@ ieee80211_rx_radiotap_space(struct ieee80211_local *local, int len; /* always present fields */ - len = sizeof(struct ieee80211_radiotap_header) + 9; + len = sizeof(struct ieee80211_radiotap_header) + 8; - /* allocate extra bitmap */ + /* allocate extra bitmaps */ if (status->vendor_radiotap_len) len += 4; + if (status->chains) + len += 4 * hweight8(status->chains); if (ieee80211_have_rx_timestamp(status)) { len = ALIGN(len, 8); @@ -100,6 +102,10 @@ ieee80211_rx_radiotap_space(struct ieee80211_local *local, if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) len += 1; + /* antenna field, if we don't have per-chain info */ + if (!status->chains) + len += 1; + /* padding for RX_FLAGS if necessary */ len = ALIGN(len, 2); @@ -116,6 +122,11 @@ ieee80211_rx_radiotap_space(struct ieee80211_local *local, len += 12; } + if (status->chains) { + /* antenna and antenna signal fields */ + len += 2 * hweight8(status->chains); + } + if (status->vendor_radiotap_len) { if (WARN_ON_ONCE(status->vendor_radiotap_align == 0)) status->vendor_radiotap_align = 1; @@ -145,8 +156,11 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_radiotap_header *rthdr; unsigned char *pos; + __le32 *it_present; + u32 it_present_val; u16 rx_flags = 0; - int mpdulen; + int mpdulen, chain; + unsigned long chains = status->chains; mpdulen = skb->len; if (!(has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS))) @@ -154,25 +168,39 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len); memset(rthdr, 0, rtap_len); + it_present = &rthdr->it_present; /* radiotap header, set always present flags */ - rthdr->it_present = - cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | - (1 << IEEE80211_RADIOTAP_CHANNEL) | - (1 << IEEE80211_RADIOTAP_ANTENNA) | - (1 << IEEE80211_RADIOTAP_RX_FLAGS)); rthdr->it_len = cpu_to_le16(rtap_len + status->vendor_radiotap_len); + it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) | + BIT(IEEE80211_RADIOTAP_CHANNEL) | + BIT(IEEE80211_RADIOTAP_RX_FLAGS); + + if (!status->chains) + it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA); - pos = (unsigned char *)(rthdr + 1); + for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) { + it_present_val |= + BIT(IEEE80211_RADIOTAP_EXT) | + BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE); + put_unaligned_le32(it_present_val, it_present); + it_present++; + it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) | + 
BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL); + } if (status->vendor_radiotap_len) { - rthdr->it_present |= - cpu_to_le32(BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE)) | - cpu_to_le32(BIT(IEEE80211_RADIOTAP_EXT)); - put_unaligned_le32(status->vendor_radiotap_bitmap, pos); - pos += 4; + it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) | + BIT(IEEE80211_RADIOTAP_EXT); + put_unaligned_le32(it_present_val, it_present); + it_present++; + it_present_val = status->vendor_radiotap_bitmap; } + put_unaligned_le32(it_present_val, it_present); + + pos = (void *)(it_present + 1); + /* the order of the following fields is important */ /* IEEE80211_RADIOTAP_TSFT */ @@ -242,9 +270,11 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */ - /* IEEE80211_RADIOTAP_ANTENNA */ - *pos = status->antenna; - pos++; + if (!status->chains) { + /* IEEE80211_RADIOTAP_ANTENNA */ + *pos = status->antenna; + pos++; + } /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */ @@ -341,6 +371,11 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, pos += 2; } + for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) { + *pos++ = status->chain_signal[chain]; + *pos++ = chain; + } + if (status->vendor_radiotap_len) { /* ensure 2 byte alignment for the vendor field as required */ if ((pos - (u8 *)rthdr) & 1) -- cgit v0.10.2 From be29b99a9b51b0338eea3c66a58de53bbd01de24 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Fri, 28 Jun 2013 11:51:26 -0700 Subject: cfg80211/nl80211: Add packet coalesce support In most cases, host that receives IPv4 and IPv6 multicast/broadcast packets does not do anything with these packets. Therefore the reception of these unwanted packets causes unnecessary processing and power consumption. Packet coalesce feature helps to reduce number of received interrupts to host by buffering these packets in firmware/hardware for some predefined time. Received interrupt will be generated when one of the following events occur. a) Expiration of hardware timer whose expiration time is set to maximum coalescing delay of matching coalesce rule. b) Coalescing buffer in hardware reaches it's limit. c) Packet doesn't match any of the configured coalesce rules. This patch adds set/get configuration support for packet coalesce. User needs to configure following parameters for creating a coalesce rule. a) Maximum coalescing delay b) List of packet patterns which needs to be matched c) Condition for coalescence. pattern 'match' or 'no match' Multiple such rules can be created. This feature needs to be advertised during driver initialization. Drivers are supposed to do required firmware/hardware settings based on user configuration. Signed-off-by: Amitkumar Karwar Signed-off-by: Bing Zhao [fix kernel-doc, change free function, fix copy/paste error] Signed-off-by: Johannes Berg diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 4940960..071ed23 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1781,6 +1781,35 @@ struct cfg80211_wowlan { }; /** + * struct cfg80211_coalesce_rules - Coalesce rule parameters + * + * This structure defines coalesce rule for the device. + * @delay: maximum coalescing delay in msecs. + * @condition: condition for packet coalescence. + * see &enum nl80211_coalesce_condition. 
+ * @patterns: array of packet patterns + * @n_patterns: number of patterns + */ +struct cfg80211_coalesce_rules { + int delay; + enum nl80211_coalesce_condition condition; + struct cfg80211_pkt_pattern *patterns; + int n_patterns; +}; + +/** + * struct cfg80211_coalesce - Packet coalescing settings + * + * This structure defines coalescing settings. + * @rules: array of coalesce rules + * @n_rules: number of rules + */ +struct cfg80211_coalesce { + struct cfg80211_coalesce_rules *rules; + int n_rules; +}; + +/** * struct cfg80211_wowlan_wakeup - wakeup report * @disconnect: woke up by getting disconnected * @magic_pkt: woke up by receiving magic packet @@ -2076,6 +2105,7 @@ struct cfg80211_update_ft_ies_params { * driver can take the most appropriate actions. * @crit_proto_stop: Indicates critical protocol no longer needs increased link * reliability. This operation can not fail. + * @set_coalesce: Set coalesce parameters. */ struct cfg80211_ops { int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); @@ -2311,6 +2341,8 @@ struct cfg80211_ops { u16 duration); void (*crit_proto_stop)(struct wiphy *wiphy, struct wireless_dev *wdev); + int (*set_coalesce)(struct wiphy *wiphy, + struct cfg80211_coalesce *coalesce); }; /* @@ -2537,6 +2569,25 @@ struct wiphy_wowlan_support { }; /** + * struct wiphy_coalesce_support - coalesce support data + * @n_rules: maximum number of coalesce rules + * @max_delay: maximum supported coalescing delay in msecs + * @n_patterns: number of supported patterns in a rule + * (see nl80211.h for the pattern definition) + * @pattern_max_len: maximum length of each pattern + * @pattern_min_len: minimum length of each pattern + * @max_pkt_offset: maximum Rx packet offset + */ +struct wiphy_coalesce_support { + int n_rules; + int max_delay; + int n_patterns; + int pattern_max_len; + int pattern_min_len; + int max_pkt_offset; +}; + +/** * struct wiphy - wireless hardware description * @reg_notifier: the driver's regulatory notification callback, * note that if your driver uses wiphy_apply_custom_regulatory() @@ -2646,6 +2697,7 @@ struct wiphy_wowlan_support { * 802.11-2012 8.4.2.29 for the defined fields. * @extended_capabilities_mask: mask of the valid values * @extended_capabilities_len: length of the extended capabilities + * @coalesce: packet coalescing support information */ struct wiphy { /* assign these fields before you register the wiphy */ @@ -2755,6 +2807,8 @@ struct wiphy { const struct iw_handler_def *wext; #endif + const struct wiphy_coalesce_support *coalesce; + char priv[0] __aligned(NETDEV_ALIGN); }; diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index de0ce80..5abc54d 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -126,6 +126,31 @@ */ /** + * DOC: packet coalesce support + * + * In most cases, host that receives IPv4 and IPv6 multicast/broadcast + * packets does not do anything with these packets. Therefore the + * reception of these unwanted packets causes unnecessary processing + * and power consumption. + * + * Packet coalesce feature helps to reduce number of received interrupts + * to host by buffering these packets in firmware/hardware for some + * predefined time. Received interrupt will be generated when one of the + * following events occur. + * a) Expiration of hardware timer whose expiration time is set to maximum + * coalescing delay of matching coalesce rule. + * b) Coalescing buffer in hardware reaches it's limit. 
+ * c) Packet doesn't match any of the configured coalesce rules. + * + * User needs to configure following parameters for creating a coalesce + * rule. + * a) Maximum coalescing delay + * b) List of packet patterns which needs to be matched + * c) Condition for coalescence. pattern 'match' or 'no match' + * Multiple such rules can be created. + */ + +/** * enum nl80211_commands - supported nl80211 commands * * @NL80211_CMD_UNSPEC: unspecified command to catch errors @@ -648,6 +673,9 @@ * @NL80211_CMD_CRIT_PROTOCOL_STOP: Indicates the connection reliability can * return back to normal. * + * @NL80211_CMD_GET_COALESCE: Get currently supported coalesce rules. + * @NL80211_CMD_SET_COALESCE: Configure coalesce rules or clear existing rules. + * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use */ @@ -810,6 +838,9 @@ enum nl80211_commands { NL80211_CMD_CRIT_PROTOCOL_START, NL80211_CMD_CRIT_PROTOCOL_STOP, + NL80211_CMD_GET_COALESCE, + NL80211_CMD_SET_COALESCE, + /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ @@ -1436,6 +1467,8 @@ enum nl80211_commands { * allowed to be used with the first @NL80211_CMD_SET_STATION command to * update a TDLS peer STA entry. * + * @NL80211_ATTR_COALESCE_RULE: Coalesce rule information. + * * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use */ @@ -1736,6 +1769,8 @@ enum nl80211_attrs { NL80211_ATTR_PEER_AID, + NL80211_ATTR_COALESCE_RULE, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -3098,8 +3133,10 @@ enum nl80211_packet_pattern_attr { * @max_pkt_offset: maximum Rx packet offset * * This struct is carried in %NL80211_WOWLAN_TRIG_PKT_PATTERN when - * that is part of %NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED in the - * capability information given by the kernel to userspace. + * that is part of %NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED or in + * %NL80211_ATTR_COALESCE_RULE_PKT_PATTERN when that is part of + * %NL80211_ATTR_COALESCE_RULE in the capability information given + * by the kernel to userspace. */ struct nl80211_pattern_support { __u32 max_patterns; @@ -3318,6 +3355,55 @@ enum nl80211_wowlan_tcp_attrs { }; /** + * struct nl80211_coalesce_rule_support - coalesce rule support information + * @max_rules: maximum number of rules supported + * @pat: packet pattern support information + * @max_delay: maximum supported coalescing delay in msecs + * + * This struct is carried in %NL80211_ATTR_COALESCE_RULE in the + * capability information given by the kernel to userspace. + */ +struct nl80211_coalesce_rule_support { + __u32 max_rules; + struct nl80211_pattern_support pat; + __u32 max_delay; +} __attribute__((packed)); + +/** + * enum nl80211_attr_coalesce_rule - coalesce rule attribute + * @__NL80211_COALESCE_RULE_INVALID: invalid number for nested attribute + * @NL80211_ATTR_COALESCE_RULE_DELAY: delay in msecs used for packet coalescing + * @NL80211_ATTR_COALESCE_RULE_CONDITION: condition for packet coalescence, + * see &enum nl80211_coalesce_condition. 
+ * @NL80211_ATTR_COALESCE_RULE_PKT_PATTERN: packet offset, pattern is matched + * after these fixed number of bytes of received packet + * @NUM_NL80211_ATTR_COALESCE_RULE: number of attributes + * @NL80211_ATTR_COALESCE_RULE_MAX: max attribute number + */ +enum nl80211_attr_coalesce_rule { + __NL80211_COALESCE_RULE_INVALID, + NL80211_ATTR_COALESCE_RULE_DELAY, + NL80211_ATTR_COALESCE_RULE_CONDITION, + NL80211_ATTR_COALESCE_RULE_PKT_PATTERN, + + /* keep last */ + NUM_NL80211_ATTR_COALESCE_RULE, + NL80211_ATTR_COALESCE_RULE_MAX = NUM_NL80211_ATTR_COALESCE_RULE - 1 +}; + +/** + * enum nl80211_coalesce_condition - coalesce rule conditions + * @NL80211_COALESCE_CONDITION_MATCH: coalaesce Rx packets when patterns + * in a rule are matched. + * @NL80211_COALESCE_CONDITION_NO_MATCH: coalesce Rx packets when patterns + * in a rule are not matched. + */ +enum nl80211_coalesce_condition { + NL80211_COALESCE_CONDITION_MATCH, + NL80211_COALESCE_CONDITION_NO_MATCH +}; + +/** * enum nl80211_iface_limit_attrs - limit attributes * @NL80211_IFACE_LIMIT_UNSPEC: (reserved) * @NL80211_IFACE_LIMIT_MAX: maximum number of interfaces that diff --git a/net/wireless/core.c b/net/wireless/core.c index 4f9f216..389a3f2 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -462,6 +462,14 @@ int wiphy_register(struct wiphy *wiphy) return -EINVAL; #endif + if (WARN_ON(wiphy->coalesce && + (!wiphy->coalesce->n_rules || + !wiphy->coalesce->n_patterns) && + (!wiphy->coalesce->pattern_min_len || + wiphy->coalesce->pattern_min_len > + wiphy->coalesce->pattern_max_len))) + return -EINVAL; + if (WARN_ON(wiphy->ap_sme_capa && !(wiphy->flags & WIPHY_FLAG_HAVE_AP_SME))) return -EINVAL; @@ -668,6 +676,7 @@ void wiphy_unregister(struct wiphy *wiphy) rdev_set_wakeup(rdev, false); #endif cfg80211_rdev_free_wowlan(rdev); + cfg80211_rdev_free_coalesce(rdev); } EXPORT_SYMBOL(wiphy_unregister); diff --git a/net/wireless/core.h b/net/wireless/core.h index a6b45bf..9ad43c6 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -79,6 +79,8 @@ struct cfg80211_registered_device { /* netlink port which started critical protocol (0 means not started) */ u32 crit_proto_nlportid; + struct cfg80211_coalesce *coalesce; + /* must be last because of the way we do wiphy_priv(), * and it should at least be aligned to NETDEV_ALIGN */ struct wiphy wiphy __aligned(NETDEV_ALIGN); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 0492478..6dca5a7 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -403,6 +403,14 @@ nl80211_wowlan_tcp_policy[NUM_NL80211_WOWLAN_TCP] = { [NL80211_WOWLAN_TCP_WAKE_MASK] = { .len = 1 }, }; +/* policy for coalesce rule attributes */ +static const struct nla_policy +nl80211_coalesce_policy[NUM_NL80211_ATTR_COALESCE_RULE] = { + [NL80211_ATTR_COALESCE_RULE_DELAY] = { .type = NLA_U32 }, + [NL80211_ATTR_COALESCE_RULE_CONDITION] = { .type = NLA_U32 }, + [NL80211_ATTR_COALESCE_RULE_PKT_PATTERN] = { .type = NLA_NESTED }, +}; + /* policy for GTK rekey offload attributes */ static const struct nla_policy nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = { @@ -995,6 +1003,27 @@ static int nl80211_send_wowlan(struct sk_buff *msg, } #endif +static int nl80211_send_coalesce(struct sk_buff *msg, + struct cfg80211_registered_device *dev) +{ + struct nl80211_coalesce_rule_support rule; + + if (!dev->wiphy.coalesce) + return 0; + + rule.max_rules = dev->wiphy.coalesce->n_rules; + rule.max_delay = dev->wiphy.coalesce->max_delay; + rule.pat.max_patterns = dev->wiphy.coalesce->n_patterns; + 
rule.pat.min_pattern_len = dev->wiphy.coalesce->pattern_min_len; + rule.pat.max_pattern_len = dev->wiphy.coalesce->pattern_max_len; + rule.pat.max_pkt_offset = dev->wiphy.coalesce->max_pkt_offset; + + if (nla_put(msg, NL80211_ATTR_COALESCE_RULE, sizeof(rule), &rule)) + return -ENOBUFS; + + return 0; +} + static int nl80211_send_band_rateinfo(struct sk_buff *msg, struct ieee80211_supported_band *sband) { @@ -1513,6 +1542,12 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev, dev->wiphy.vht_capa_mod_mask)) goto nla_put_failure; + state->split_start++; + break; + case 10: + if (nl80211_send_coalesce(msg, dev)) + goto nla_put_failure; + /* done */ state->split_start = 0; break; @@ -8043,6 +8078,264 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) } #endif +static int nl80211_send_coalesce_rules(struct sk_buff *msg, + struct cfg80211_registered_device *rdev) +{ + struct nlattr *nl_pats, *nl_pat, *nl_rule, *nl_rules; + int i, j, pat_len; + struct cfg80211_coalesce_rules *rule; + + if (!rdev->coalesce->n_rules) + return 0; + + nl_rules = nla_nest_start(msg, NL80211_ATTR_COALESCE_RULE); + if (!nl_rules) + return -ENOBUFS; + + for (i = 0; i < rdev->coalesce->n_rules; i++) { + nl_rule = nla_nest_start(msg, i + 1); + if (!nl_rule) + return -ENOBUFS; + + rule = &rdev->coalesce->rules[i]; + if (nla_put_u32(msg, NL80211_ATTR_COALESCE_RULE_DELAY, + rule->delay)) + return -ENOBUFS; + + if (nla_put_u32(msg, NL80211_ATTR_COALESCE_RULE_CONDITION, + rule->condition)) + return -ENOBUFS; + + nl_pats = nla_nest_start(msg, + NL80211_ATTR_COALESCE_RULE_PKT_PATTERN); + if (!nl_pats) + return -ENOBUFS; + + for (j = 0; j < rule->n_patterns; j++) { + nl_pat = nla_nest_start(msg, j + 1); + if (!nl_pat) + return -ENOBUFS; + pat_len = rule->patterns[j].pattern_len; + if (nla_put(msg, NL80211_PKTPAT_MASK, + DIV_ROUND_UP(pat_len, 8), + rule->patterns[j].mask) || + nla_put(msg, NL80211_PKTPAT_PATTERN, pat_len, + rule->patterns[j].pattern) || + nla_put_u32(msg, NL80211_PKTPAT_OFFSET, + rule->patterns[j].pkt_offset)) + return -ENOBUFS; + nla_nest_end(msg, nl_pat); + } + nla_nest_end(msg, nl_pats); + nla_nest_end(msg, nl_rule); + } + nla_nest_end(msg, nl_rules); + + return 0; +} + +static int nl80211_get_coalesce(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev = info->user_ptr[0]; + struct sk_buff *msg; + void *hdr; + + if (!rdev->wiphy.coalesce) + return -EOPNOTSUPP; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, + NL80211_CMD_GET_COALESCE); + if (!hdr) + goto nla_put_failure; + + if (rdev->coalesce && nl80211_send_coalesce_rules(msg, rdev)) + goto nla_put_failure; + + genlmsg_end(msg, hdr); + return genlmsg_reply(msg, info); + +nla_put_failure: + nlmsg_free(msg); + return -ENOBUFS; +} + +void cfg80211_rdev_free_coalesce(struct cfg80211_registered_device *rdev) +{ + struct cfg80211_coalesce *coalesce = rdev->coalesce; + int i, j; + struct cfg80211_coalesce_rules *rule; + + if (!coalesce) + return; + + for (i = 0; i < coalesce->n_rules; i++) { + rule = &coalesce->rules[i]; + for (j = 0; j < rule->n_patterns; j++) + kfree(rule->patterns[j].mask); + kfree(rule->patterns); + } + kfree(coalesce->rules); + kfree(coalesce); + rdev->coalesce = NULL; +} + +static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev, + struct nlattr *rule, + struct cfg80211_coalesce_rules *new_rule) +{ + int err, i; + const struct 
wiphy_coalesce_support *coalesce = rdev->wiphy.coalesce; + struct nlattr *tb[NUM_NL80211_ATTR_COALESCE_RULE], *pat; + int rem, pat_len, mask_len, pkt_offset, n_patterns = 0; + struct nlattr *pat_tb[NUM_NL80211_PKTPAT]; + + err = nla_parse(tb, NL80211_ATTR_COALESCE_RULE_MAX, nla_data(rule), + nla_len(rule), nl80211_coalesce_policy); + if (err) + return err; + + if (tb[NL80211_ATTR_COALESCE_RULE_DELAY]) + new_rule->delay = + nla_get_u32(tb[NL80211_ATTR_COALESCE_RULE_DELAY]); + if (new_rule->delay > coalesce->max_delay) + return -EINVAL; + + if (tb[NL80211_ATTR_COALESCE_RULE_CONDITION]) + new_rule->condition = + nla_get_u32(tb[NL80211_ATTR_COALESCE_RULE_CONDITION]); + if (new_rule->condition != NL80211_COALESCE_CONDITION_MATCH && + new_rule->condition != NL80211_COALESCE_CONDITION_NO_MATCH) + return -EINVAL; + + if (!tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN]) + return -EINVAL; + + nla_for_each_nested(pat, tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN], + rem) + n_patterns++; + if (n_patterns > coalesce->n_patterns) + return -EINVAL; + + new_rule->patterns = kcalloc(n_patterns, sizeof(new_rule->patterns[0]), + GFP_KERNEL); + if (!new_rule->patterns) + return -ENOMEM; + + new_rule->n_patterns = n_patterns; + i = 0; + + nla_for_each_nested(pat, tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN], + rem) { + nla_parse(pat_tb, MAX_NL80211_PKTPAT, nla_data(pat), + nla_len(pat), NULL); + if (!pat_tb[NL80211_PKTPAT_MASK] || + !pat_tb[NL80211_PKTPAT_PATTERN]) + return -EINVAL; + pat_len = nla_len(pat_tb[NL80211_PKTPAT_PATTERN]); + mask_len = DIV_ROUND_UP(pat_len, 8); + if (nla_len(pat_tb[NL80211_PKTPAT_MASK]) != mask_len) + return -EINVAL; + if (pat_len > coalesce->pattern_max_len || + pat_len < coalesce->pattern_min_len) + return -EINVAL; + + if (!pat_tb[NL80211_PKTPAT_OFFSET]) + pkt_offset = 0; + else + pkt_offset = nla_get_u32(pat_tb[NL80211_PKTPAT_OFFSET]); + if (pkt_offset > coalesce->max_pkt_offset) + return -EINVAL; + new_rule->patterns[i].pkt_offset = pkt_offset; + + new_rule->patterns[i].mask = + kmalloc(mask_len + pat_len, GFP_KERNEL); + if (!new_rule->patterns[i].mask) + return -ENOMEM; + new_rule->patterns[i].pattern = + new_rule->patterns[i].mask + mask_len; + memcpy(new_rule->patterns[i].mask, + nla_data(pat_tb[NL80211_PKTPAT_MASK]), mask_len); + new_rule->patterns[i].pattern_len = pat_len; + memcpy(new_rule->patterns[i].pattern, + nla_data(pat_tb[NL80211_PKTPAT_PATTERN]), pat_len); + i++; + } + + return 0; +} + +static int nl80211_set_coalesce(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev = info->user_ptr[0]; + const struct wiphy_coalesce_support *coalesce = rdev->wiphy.coalesce; + struct cfg80211_coalesce new_coalesce = {}; + struct cfg80211_coalesce *n_coalesce; + int err, rem_rule, n_rules = 0, i, j; + struct nlattr *rule; + struct cfg80211_coalesce_rules *tmp_rule; + + if (!rdev->wiphy.coalesce || !rdev->ops->set_coalesce) + return -EOPNOTSUPP; + + if (!info->attrs[NL80211_ATTR_COALESCE_RULE]) { + cfg80211_rdev_free_coalesce(rdev); + rdev->ops->set_coalesce(&rdev->wiphy, NULL); + return 0; + } + + nla_for_each_nested(rule, info->attrs[NL80211_ATTR_COALESCE_RULE], + rem_rule) + n_rules++; + if (n_rules > coalesce->n_rules) + return -EINVAL; + + new_coalesce.rules = kcalloc(n_rules, sizeof(new_coalesce.rules[0]), + GFP_KERNEL); + if (!new_coalesce.rules) + return -ENOMEM; + + new_coalesce.n_rules = n_rules; + i = 0; + + nla_for_each_nested(rule, info->attrs[NL80211_ATTR_COALESCE_RULE], + rem_rule) { + err = nl80211_parse_coalesce_rule(rdev, rule, + 
&new_coalesce.rules[i]); + if (err) + goto error; + + i++; + } + + err = rdev->ops->set_coalesce(&rdev->wiphy, &new_coalesce); + if (err) + goto error; + + n_coalesce = kmemdup(&new_coalesce, sizeof(new_coalesce), GFP_KERNEL); + if (!n_coalesce) { + err = -ENOMEM; + goto error; + } + cfg80211_rdev_free_coalesce(rdev); + rdev->coalesce = n_coalesce; + + return 0; +error: + for (i = 0; i < new_coalesce.n_rules; i++) { + tmp_rule = &new_coalesce.rules[i]; + for (j = 0; j < tmp_rule->n_patterns; j++) + kfree(tmp_rule->patterns[j].mask); + kfree(tmp_rule->patterns); + } + kfree(new_coalesce.rules); + + return err; +} + static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; @@ -9050,6 +9343,21 @@ static struct genl_ops nl80211_ops[] = { .flags = GENL_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, + }, + { + .cmd = NL80211_CMD_GET_COALESCE, + .doit = nl80211_get_coalesce, + .policy = nl80211_policy, + .internal_flags = NL80211_FLAG_NEED_WIPHY | + NL80211_FLAG_NEED_RTNL, + }, + { + .cmd = NL80211_CMD_SET_COALESCE, + .doit = nl80211_set_coalesce, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = NL80211_FLAG_NEED_WIPHY | + NL80211_FLAG_NEED_RTNL, } }; diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h index a4073e8..44341bf 100644 --- a/net/wireless/nl80211.h +++ b/net/wireless/nl80211.h @@ -74,4 +74,6 @@ nl80211_radar_notify(struct cfg80211_registered_device *rdev, enum nl80211_radar_event event, struct net_device *netdev, gfp_t gfp); +void cfg80211_rdev_free_coalesce(struct cfg80211_registered_device *rdev); + #endif /* __NET_WIRELESS_NL80211_H */ -- cgit v0.10.2 From dcd6eac1f3b5fa1df11dfa99da0cf75b76cfef97 Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Mon, 8 Jul 2013 16:55:49 +0200 Subject: nl80211: add scan width to bss and scan request structs To allow scanning and working with 5 MHz and 10 MHz BSS, extend the inform bss commands and add wrappers to take 5 and 10 MHz bss into account. Signed-off-by: Simon Wunderlich Signed-off-by: Mathias Kretschmer Signed-off-by: Johannes Berg diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 071ed23..bae8614 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1285,6 +1285,7 @@ struct cfg80211_ssid { * @n_ssids: number of SSIDs * @channels: channels to scan on. 
* @n_channels: total number of channels to scan + * @scan_width: channel width for scanning * @ie: optional information element(s) to add into Probe Request or %NULL * @ie_len: length of ie in octets * @flags: bit field of flags controlling operation @@ -1300,6 +1301,7 @@ struct cfg80211_scan_request { struct cfg80211_ssid *ssids; int n_ssids; u32 n_channels; + enum nl80211_bss_scan_width scan_width; const u8 *ie; size_t ie_len; u32 flags; @@ -1333,6 +1335,7 @@ struct cfg80211_match_set { * @ssids: SSIDs to scan for (passed in the probe_reqs in active scans) * @n_ssids: number of SSIDs * @n_channels: total number of channels to scan + * @scan_width: channel width for scanning * @interval: interval between each scheduled scan cycle * @ie: optional information element(s) to add into Probe Request or %NULL * @ie_len: length of ie in octets @@ -1352,6 +1355,7 @@ struct cfg80211_sched_scan_request { struct cfg80211_ssid *ssids; int n_ssids; u32 n_channels; + enum nl80211_bss_scan_width scan_width; u32 interval; const u8 *ie; size_t ie_len; @@ -1403,6 +1407,7 @@ struct cfg80211_bss_ies { * for use in scan results and similar. * * @channel: channel this BSS is on + * @scan_width: width of the control channel * @bssid: BSSID of the BSS * @beacon_interval: the beacon interval as from the frame * @capability: the capability field in host byte order @@ -1424,6 +1429,7 @@ struct cfg80211_bss_ies { */ struct cfg80211_bss { struct ieee80211_channel *channel; + enum nl80211_bss_scan_width scan_width; const struct cfg80211_bss_ies __rcu *ies; const struct cfg80211_bss_ies __rcu *beacon_ies; @@ -3438,10 +3444,11 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy); void cfg80211_sched_scan_stopped(struct wiphy *wiphy); /** - * cfg80211_inform_bss_frame - inform cfg80211 of a received BSS frame + * cfg80211_inform_bss_width_frame - inform cfg80211 of a received BSS frame * * @wiphy: the wiphy reporting the BSS * @channel: The channel the frame was received on + * @scan_width: width of the control channel * @mgmt: the management frame (probe response or beacon) * @len: length of the management frame * @signal: the signal strength, type depends on the wiphy's signal_type @@ -3454,16 +3461,29 @@ void cfg80211_sched_scan_stopped(struct wiphy *wiphy); * Or %NULL on error. */ struct cfg80211_bss * __must_check +cfg80211_inform_bss_width_frame(struct wiphy *wiphy, + struct ieee80211_channel *channel, + enum nl80211_bss_scan_width scan_width, + struct ieee80211_mgmt *mgmt, size_t len, + s32 signal, gfp_t gfp); + +static inline struct cfg80211_bss * __must_check cfg80211_inform_bss_frame(struct wiphy *wiphy, struct ieee80211_channel *channel, struct ieee80211_mgmt *mgmt, size_t len, - s32 signal, gfp_t gfp); + s32 signal, gfp_t gfp) +{ + return cfg80211_inform_bss_width_frame(wiphy, channel, + NL80211_BSS_CHAN_WIDTH_20, + mgmt, len, signal, gfp); +} /** * cfg80211_inform_bss - inform cfg80211 of a new BSS * * @wiphy: the wiphy reporting the BSS * @channel: The channel the frame was received on + * @scan_width: width of the control channel * @bssid: the BSSID of the BSS * @tsf: the TSF sent by the peer in the beacon/probe response (or 0) * @capability: the capability field sent by the peer @@ -3480,11 +3500,26 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy, * Or %NULL on error. 
*/ struct cfg80211_bss * __must_check +cfg80211_inform_bss_width(struct wiphy *wiphy, + struct ieee80211_channel *channel, + enum nl80211_bss_scan_width scan_width, + const u8 *bssid, u64 tsf, u16 capability, + u16 beacon_interval, const u8 *ie, size_t ielen, + s32 signal, gfp_t gfp); + +static inline struct cfg80211_bss * __must_check cfg80211_inform_bss(struct wiphy *wiphy, struct ieee80211_channel *channel, const u8 *bssid, u64 tsf, u16 capability, u16 beacon_interval, const u8 *ie, size_t ielen, - s32 signal, gfp_t gfp); + s32 signal, gfp_t gfp) +{ + return cfg80211_inform_bss_width(wiphy, channel, + NL80211_BSS_CHAN_WIDTH_20, + bssid, tsf, capability, + beacon_interval, ie, ielen, signal, + gfp); +} struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy, struct ieee80211_channel *channel, @@ -3530,6 +3565,19 @@ void cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *bss); */ void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *bss); +static inline enum nl80211_bss_scan_width +cfg80211_chandef_to_scan_width(const struct cfg80211_chan_def *chandef) +{ + switch (chandef->width) { + case NL80211_CHAN_WIDTH_5: + return NL80211_BSS_CHAN_WIDTH_5; + case NL80211_CHAN_WIDTH_10: + return NL80211_BSS_CHAN_WIDTH_10; + default: + return NL80211_BSS_CHAN_WIDTH_20; + } +} + /** * cfg80211_rx_mlme_mgmt - notification of processed MLME management frame * @dev: network device diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 5abc54d..eb68735 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -2808,6 +2808,21 @@ enum nl80211_chan_width { }; /** + * enum nl80211_bss_scan_width - control channel width for a BSS + * + * These values are used with the %NL80211_BSS_CHAN_WIDTH attribute. 
+ * + * @NL80211_BSS_CHAN_WIDTH_20: control channel is 20 MHz wide or compatible + * @NL80211_BSS_CHAN_WIDTH_10: control channel is 10 MHz wide + * @NL80211_BSS_CHAN_WIDTH_5: control channel is 5 MHz wide + */ +enum nl80211_bss_scan_width { + NL80211_BSS_CHAN_WIDTH_20, + NL80211_BSS_CHAN_WIDTH_10, + NL80211_BSS_CHAN_WIDTH_5, +}; + +/** * enum nl80211_bss - netlink attributes for a BSS * * @__NL80211_BSS_INVALID: invalid @@ -2831,6 +2846,8 @@ enum nl80211_chan_width { * @NL80211_BSS_BEACON_IES: binary attribute containing the raw information * elements from a Beacon frame (bin); not present if no Beacon frame has * yet been received + * @NL80211_BSS_CHAN_WIDTH: channel width of the control channel + * (u32, enum nl80211_bss_scan_width) * @__NL80211_BSS_AFTER_LAST: internal * @NL80211_BSS_MAX: highest BSS attribute */ @@ -2847,6 +2864,7 @@ enum nl80211_bss { NL80211_BSS_STATUS, NL80211_BSS_SEEN_MS_AGO, NL80211_BSS_BEACON_IES, + NL80211_BSS_CHAN_WIDTH, /* keep last */ __NL80211_BSS_AFTER_LAST, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 6dca5a7..ef4c312c 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -5674,6 +5674,7 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb, goto nla_put_failure; if (nla_put_u16(msg, NL80211_BSS_CAPABILITY, res->capability) || nla_put_u32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq) || + nla_put_u32(msg, NL80211_BSS_CHAN_WIDTH, res->scan_width) || nla_put_u32(msg, NL80211_BSS_SEEN_MS_AGO, jiffies_to_msecs(jiffies - intbss->ts))) goto nla_put_failure; diff --git a/net/wireless/scan.c b/net/wireless/scan.c index ae8c186..ad1e406 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -651,6 +651,8 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev, continue; if (bss->pub.channel != new->pub.channel) continue; + if (bss->pub.scan_width != new->pub.scan_width) + continue; if (rcu_access_pointer(bss->pub.beacon_ies)) continue; ies = rcu_access_pointer(bss->pub.ies); @@ -870,11 +872,12 @@ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen, /* Returned bss is reference counted and must be cleaned up appropriately. */ struct cfg80211_bss* -cfg80211_inform_bss(struct wiphy *wiphy, - struct ieee80211_channel *channel, - const u8 *bssid, u64 tsf, u16 capability, - u16 beacon_interval, const u8 *ie, size_t ielen, - s32 signal, gfp_t gfp) +cfg80211_inform_bss_width(struct wiphy *wiphy, + struct ieee80211_channel *channel, + enum nl80211_bss_scan_width scan_width, + const u8 *bssid, u64 tsf, u16 capability, + u16 beacon_interval, const u8 *ie, size_t ielen, + s32 signal, gfp_t gfp) { struct cfg80211_bss_ies *ies; struct cfg80211_internal_bss tmp = {}, *res; @@ -892,6 +895,7 @@ cfg80211_inform_bss(struct wiphy *wiphy, memcpy(tmp.pub.bssid, bssid, ETH_ALEN); tmp.pub.channel = channel; + tmp.pub.scan_width = scan_width; tmp.pub.signal = signal; tmp.pub.beacon_interval = beacon_interval; tmp.pub.capability = capability; @@ -924,14 +928,15 @@ cfg80211_inform_bss(struct wiphy *wiphy, /* cfg80211_bss_update gives us a referenced result */ return &res->pub; } -EXPORT_SYMBOL(cfg80211_inform_bss); +EXPORT_SYMBOL(cfg80211_inform_bss_width); /* Returned bss is reference counted and must be cleaned up appropriately. 
*/ struct cfg80211_bss * -cfg80211_inform_bss_frame(struct wiphy *wiphy, - struct ieee80211_channel *channel, - struct ieee80211_mgmt *mgmt, size_t len, - s32 signal, gfp_t gfp) +cfg80211_inform_bss_width_frame(struct wiphy *wiphy, + struct ieee80211_channel *channel, + enum nl80211_bss_scan_width scan_width, + struct ieee80211_mgmt *mgmt, size_t len, + s32 signal, gfp_t gfp) { struct cfg80211_internal_bss tmp = {}, *res; struct cfg80211_bss_ies *ies; @@ -941,7 +946,8 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy, BUILD_BUG_ON(offsetof(struct ieee80211_mgmt, u.probe_resp.variable) != offsetof(struct ieee80211_mgmt, u.beacon.variable)); - trace_cfg80211_inform_bss_frame(wiphy, channel, mgmt, len, signal); + trace_cfg80211_inform_bss_width_frame(wiphy, channel, scan_width, mgmt, + len, signal); if (WARN_ON(!mgmt)) return NULL; @@ -976,6 +982,7 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy, memcpy(tmp.pub.bssid, mgmt->bssid, ETH_ALEN); tmp.pub.channel = channel; + tmp.pub.scan_width = scan_width; tmp.pub.signal = signal; tmp.pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int); tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info); @@ -991,7 +998,7 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy, /* cfg80211_bss_update gives us a referenced result */ return &res->pub; } -EXPORT_SYMBOL(cfg80211_inform_bss_frame); +EXPORT_SYMBOL(cfg80211_inform_bss_width_frame); void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *pub) { diff --git a/net/wireless/trace.h b/net/wireless/trace.h index e1534baf..09af6eb 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -2391,26 +2391,30 @@ TRACE_EVENT(cfg80211_get_bss, __entry->capa_mask, __entry->capa_val) ); -TRACE_EVENT(cfg80211_inform_bss_frame, +TRACE_EVENT(cfg80211_inform_bss_width_frame, TP_PROTO(struct wiphy *wiphy, struct ieee80211_channel *channel, + enum nl80211_bss_scan_width scan_width, struct ieee80211_mgmt *mgmt, size_t len, s32 signal), - TP_ARGS(wiphy, channel, mgmt, len, signal), + TP_ARGS(wiphy, channel, scan_width, mgmt, len, signal), TP_STRUCT__entry( WIPHY_ENTRY CHAN_ENTRY + __field(enum nl80211_bss_scan_width, scan_width) __dynamic_array(u8, mgmt, len) __field(s32, signal) ), TP_fast_assign( WIPHY_ASSIGN; CHAN_ASSIGN(channel); + __entry->scan_width = scan_width; if (mgmt) memcpy(__get_dynamic_array(mgmt), mgmt, len); __entry->signal = signal; ), - TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT "signal: %d", - WIPHY_PR_ARG, CHAN_PR_ARG, __entry->signal) + TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT "(scan_width: %d) signal: %d", + WIPHY_PR_ARG, CHAN_PR_ARG, __entry->scan_width, + __entry->signal) ); DECLARE_EVENT_CLASS(cfg80211_bss_evt, -- cgit v0.10.2 From 3de805cf965d69c8d3d7d69368d5fd2c925a2d5a Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Mon, 8 Jul 2013 16:55:50 +0200 Subject: mac80211/rc80211: add chandef to rate initialization 5 and 10 MHz support needs to know the current operating channel width, add the chandef to the rate control API. 
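For illustration only: a hypothetical rate-control algorithm picking up the new argument could look like the sketch below. Only the callback signature comes from the rate_control_ops change in this patch; the example_ name and the body are made up.

        #include <net/mac80211.h>

        /* hypothetical rate_init callback; signature matches the updated
         * struct rate_control_ops, everything else is illustrative */
        static void example_rate_init(void *priv,
                                      struct ieee80211_supported_band *sband,
                                      struct cfg80211_chan_def *chandef,
                                      struct ieee80211_sta *sta, void *priv_sta)
        {
                int shift = 0;

                /* map the operating width to the 1 << shift divisor used
                 * elsewhere in this series */
                if (chandef->width == NL80211_CHAN_WIDTH_10)
                        shift = 1;
                else if (chandef->width == NL80211_CHAN_WIDTH_5)
                        shift = 2;

                pr_debug("rate init for %pM, bandwidth divisor %d\n",
                         sta->addr, 1 << shift);
        }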
Signed-off-by: Simon Wunderlich Signed-off-by: Mathias Kretschmer Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c index 7eb1f4b..a3c4ca0 100644 --- a/drivers/net/wireless/ath/ath9k/rc.c +++ b/drivers/net/wireless/ath/ath9k/rc.c @@ -1275,6 +1275,7 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband, } static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband, + struct cfg80211_chan_def *chandef, struct ieee80211_sta *sta, void *priv_sta) { struct ath_softc *sc = priv; @@ -1313,6 +1314,7 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband, } static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband, + struct cfg80211_chan_def *chandef, struct ieee80211_sta *sta, void *priv_sta, u32 changed) { diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c index fe31590..aea667b 100644 --- a/drivers/net/wireless/iwlegacy/3945-rs.c +++ b/drivers/net/wireless/iwlegacy/3945-rs.c @@ -887,6 +887,7 @@ il3945_remove_debugfs(void *il, void *il_sta) */ static void il3945_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband, + struct cfg80211_chan_def *chandef, struct ieee80211_sta *sta, void *il_sta) { } diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c index ed3c42a..3ccbaf7 100644 --- a/drivers/net/wireless/iwlegacy/4965-rs.c +++ b/drivers/net/wireless/iwlegacy/4965-rs.c @@ -2803,6 +2803,7 @@ il4965_rs_remove_debugfs(void *il, void *il_sta) */ static void il4965_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband, + struct cfg80211_chan_def *chandef, struct ieee80211_sta *sta, void *il_sta) { } diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c index 1b69394..91eb09b 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/iwlwifi/dvm/rs.c @@ -3319,7 +3319,8 @@ static void rs_remove_debugfs(void *priv, void *priv_sta) * station is added we ignore it. */ static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband, - struct ieee80211_sta *sta, void *priv_sta) + struct cfg80211_chan_def *chandef, + struct ieee80211_sta *sta, void *priv_sta) { } static struct rate_control_ops rs_ops = { diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index b328a98..376ea21 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@ -3159,8 +3159,9 @@ static void rs_remove_debugfs(void *mvm, void *mvm_sta) * station is added we ignore it. 
*/ static void rs_rate_init_stub(void *mvm_r, - struct ieee80211_supported_band *sband, - struct ieee80211_sta *sta, void *mvm_sta) + struct ieee80211_supported_band *sband, + struct cfg80211_chan_def *chandef, + struct ieee80211_sta *sta, void *mvm_sta) { } static struct rate_control_ops rs_mvm_ops = { diff --git a/drivers/net/wireless/rtlwifi/rc.c b/drivers/net/wireless/rtlwifi/rc.c index f9f059d..a98acef 100644 --- a/drivers/net/wireless/rtlwifi/rc.c +++ b/drivers/net/wireless/rtlwifi/rc.c @@ -218,6 +218,7 @@ static void rtl_tx_status(void *ppriv, static void rtl_rate_init(void *ppriv, struct ieee80211_supported_band *sband, + struct cfg80211_chan_def *chandef, struct ieee80211_sta *sta, void *priv_sta) { } diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 7c5dc78..fe5fee8 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -4204,8 +4204,10 @@ struct rate_control_ops { void *(*alloc_sta)(void *priv, struct ieee80211_sta *sta, gfp_t gfp); void (*rate_init)(void *priv, struct ieee80211_supported_band *sband, + struct cfg80211_chan_def *chandef, struct ieee80211_sta *sta, void *priv_sta); void (*rate_update)(void *priv, struct ieee80211_supported_band *sband, + struct cfg80211_chan_def *chandef, struct ieee80211_sta *sta, void *priv_sta, u32 changed); void (*free_sta)(void *priv, struct ieee80211_sta *sta, diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h index d35a5dd..5dedc56 100644 --- a/net/mac80211/rate.h +++ b/net/mac80211/rate.h @@ -66,11 +66,12 @@ static inline void rate_control_rate_init(struct sta_info *sta) } sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band]; - rcu_read_unlock(); ieee80211_sta_set_rx_nss(sta); - ref->ops->rate_init(ref->priv, sband, ista, priv_sta); + ref->ops->rate_init(ref->priv, sband, &chanctx_conf->def, ista, + priv_sta); + rcu_read_unlock(); set_sta_flag(sta, WLAN_STA_RATE_CONTROL); } @@ -81,10 +82,21 @@ static inline void rate_control_rate_update(struct ieee80211_local *local, struct rate_control_ref *ref = local->rate_ctrl; struct ieee80211_sta *ista = &sta->sta; void *priv_sta = sta->rate_ctrl_priv; + struct ieee80211_chanctx_conf *chanctx_conf; + + if (ref && ref->ops->rate_update) { + rcu_read_lock(); - if (ref && ref->ops->rate_update) - ref->ops->rate_update(ref->priv, sband, ista, - priv_sta, changed); + chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf); + if (WARN_ON(!chanctx_conf)) { + rcu_read_unlock(); + return; + } + + ref->ops->rate_update(ref->priv, sband, &chanctx_conf->def, + ista, priv_sta, changed); + rcu_read_unlock(); + } drv_sta_rc_update(local, sta->sdata, &sta->sta, changed); } diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c index ac7ef54..5b25966 100644 --- a/net/mac80211/rc80211_minstrel.c +++ b/net/mac80211/rc80211_minstrel.c @@ -417,7 +417,8 @@ init_sample_table(struct minstrel_sta_info *mi) static void minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband, - struct ieee80211_sta *sta, void *priv_sta) + struct cfg80211_chan_def *chandef, + struct ieee80211_sta *sta, void *priv_sta) { struct minstrel_sta_info *mi = priv_sta; struct minstrel_priv *mp = priv; diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 5b2d301..5256297 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -836,6 +836,7 @@ minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, static void minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band 
*sband, + struct cfg80211_chan_def *chandef, struct ieee80211_sta *sta, void *priv_sta) { struct minstrel_priv *mp = priv; @@ -931,22 +932,25 @@ use_legacy: memset(&msp->legacy, 0, sizeof(msp->legacy)); msp->legacy.r = msp->ratelist; msp->legacy.sample_table = msp->sample_table; - return mac80211_minstrel.rate_init(priv, sband, sta, &msp->legacy); + return mac80211_minstrel.rate_init(priv, sband, chandef, sta, + &msp->legacy); } static void minstrel_ht_rate_init(void *priv, struct ieee80211_supported_band *sband, + struct cfg80211_chan_def *chandef, struct ieee80211_sta *sta, void *priv_sta) { - minstrel_ht_update_caps(priv, sband, sta, priv_sta); + minstrel_ht_update_caps(priv, sband, chandef, sta, priv_sta); } static void minstrel_ht_rate_update(void *priv, struct ieee80211_supported_band *sband, + struct cfg80211_chan_def *chandef, struct ieee80211_sta *sta, void *priv_sta, u32 changed) { - minstrel_ht_update_caps(priv, sband, sta, priv_sta); + minstrel_ht_update_caps(priv, sband, chandef, sta, priv_sta); } static void * diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c index 502d3ec..958fad0 100644 --- a/net/mac80211/rc80211_pid_algo.c +++ b/net/mac80211/rc80211_pid_algo.c @@ -293,6 +293,7 @@ rate_control_pid_get_rate(void *priv, struct ieee80211_sta *sta, static void rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband, + struct cfg80211_chan_def *chandef, struct ieee80211_sta *sta, void *priv_sta) { struct rc_pid_sta_info *spinfo = priv_sta; -- cgit v0.10.2 From 438b61b77082e70d2a408cc77b8c5faac312e940 Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Mon, 8 Jul 2013 16:55:51 +0200 Subject: mac80211: fix timing for 5 MHz and 10 MHz channels according to IEEE 802.11-2012 section 18, various timings change when using 5 MHz and 10 MHz. Reflect this by using a "shift" when calculating durations. 
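For orientation, a self-contained user-space sketch of the clause 18 OFDM airtime with the shift applied to the timing constants. It is not the kernel helper: it ignores SIFS and the 2.4 GHz signal extension, and it assumes the rate passed in is the nominal 20 MHz bitrate in 100 kbps units, so the bits carried per symbol do not depend on the width.

        #include <stdio.h>

        static unsigned int ofdm_tx_time_usec(unsigned int len_bytes,
                                              unsigned int rate_100kbps,
                                              unsigned int shift)
        {
                /* timing constants scale by 2 for 10 MHz (shift = 1) and
                 * by 4 for 5 MHz (shift = 2) */
                unsigned int t_preamble = 16 << shift;
                unsigned int t_signal = 4 << shift;
                unsigned int t_sym = 4 << shift;
                /* SERVICE (16) + PSDU + FCS + tail (6) bits */
                unsigned int n_bits = 16 + 8 * len_bytes + 6;
                /* data bits per symbol from the nominal 20 MHz rate */
                unsigned int bits_per_sym = rate_100kbps * 4 / 10;
                unsigned int n_sym = (n_bits + bits_per_sym - 1) / bits_per_sym;

                return t_preamble + t_signal + n_sym * t_sym;
        }

        int main(void)
        {
                /* a 1500 byte frame at 6 Mbit/s takes roughly 2 ms on
                 * 20 MHz, 4 ms on 10 MHz and 8 ms on 5 MHz */
                printf("20 MHz: %u us\n", ofdm_tx_time_usec(1500, 60, 0));
                printf("10 MHz: %u us\n", ofdm_tx_time_usec(1500, 60, 1));
                printf(" 5 MHz: %u us\n", ofdm_tx_time_usec(1500, 60, 2));
                return 0;
        }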
Signed-off-by: Simon Wunderlich Signed-off-by: Mathias Kretschmer Signed-off-by: Johannes Berg diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 683751a..338e7ca 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -812,6 +812,34 @@ ieee80211_get_sdata_band(struct ieee80211_sub_if_data *sdata) return band; } +static inline int +ieee80211_chandef_get_shift(struct cfg80211_chan_def *chandef) +{ + switch (chandef->width) { + case NL80211_CHAN_WIDTH_5: + return 2; + case NL80211_CHAN_WIDTH_10: + return 1; + default: + return 0; + } +} + +static inline int +ieee80211_vif_get_shift(struct ieee80211_vif *vif) +{ + struct ieee80211_chanctx_conf *chanctx_conf; + int shift = 0; + + rcu_read_lock(); + chanctx_conf = rcu_dereference(vif->chanctx_conf); + if (chanctx_conf) + shift = ieee80211_chandef_get_shift(&chanctx_conf->def); + rcu_read_unlock(); + + return shift; +} + enum sdata_queue_type { IEEE80211_SDATA_QUEUE_TYPE_FRAME = 0, IEEE80211_SDATA_QUEUE_AGG_START = 1, @@ -1468,7 +1496,8 @@ extern void *mac80211_wiphy_privid; /* for wiphy privid */ u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, enum nl80211_iftype type); int ieee80211_frame_duration(enum ieee80211_band band, size_t len, - int rate, int erp, int short_preamble); + int rate, int erp, int short_preamble, + int shift); void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx, struct ieee80211_hdr *hdr, const u8 *tsc, gfp_t gfp); diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c index 5b25966..a507376 100644 --- a/net/mac80211/rc80211_minstrel.c +++ b/net/mac80211/rc80211_minstrel.c @@ -382,14 +382,18 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta, static void calc_rate_durations(enum ieee80211_band band, struct minstrel_rate *d, - struct ieee80211_rate *rate) + struct ieee80211_rate *rate, + struct cfg80211_chan_def *chandef) { int erp = !!(rate->flags & IEEE80211_RATE_ERP_G); + int shift = ieee80211_chandef_get_shift(chandef); d->perfect_tx_time = ieee80211_frame_duration(band, 1200, - rate->bitrate, erp, 1); + DIV_ROUND_UP(rate->bitrate, 1 << shift), erp, 1, + shift); d->ack_time = ieee80211_frame_duration(band, 10, - rate->bitrate, erp, 1); + DIV_ROUND_UP(rate->bitrate, 1 << shift), erp, 1, + shift); } static void @@ -425,14 +429,17 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband, struct ieee80211_rate *ctl_rate; unsigned int i, n = 0; unsigned int t_slot = 9; /* FIXME: get real slot time */ + u32 rate_flags; mi->sta = sta; mi->lowest_rix = rate_lowest_index(sband, sta); ctl_rate = &sband->bitrates[mi->lowest_rix]; mi->sp_ack_dur = ieee80211_frame_duration(sband->band, 10, ctl_rate->bitrate, - !!(ctl_rate->flags & IEEE80211_RATE_ERP_G), 1); + !!(ctl_rate->flags & IEEE80211_RATE_ERP_G), 1, + ieee80211_chandef_get_shift(chandef)); + rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef); memset(mi->max_tp_rate, 0, sizeof(mi->max_tp_rate)); mi->max_prob_rate = 0; @@ -441,15 +448,22 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband, unsigned int tx_time = 0, tx_time_cts = 0, tx_time_rtscts = 0; unsigned int tx_time_single; unsigned int cw = mp->cw_min; + int shift; if (!rate_supported(sta, sband->band, i)) continue; + if ((rate_flags & sband->bitrates[i].flags) != rate_flags) + continue; + n++; memset(mr, 0, sizeof(*mr)); mr->rix = i; - mr->bitrate = sband->bitrates[i].bitrate / 5; - calc_rate_durations(sband->band, mr, &sband->bitrates[i]); + shift = 
ieee80211_chandef_get_shift(chandef); + mr->bitrate = DIV_ROUND_UP(sband->bitrates[i].bitrate, + (1 << shift) * 5); + calc_rate_durations(sband->band, mr, &sband->bitrates[i], + chandef); /* calculate maximum number of retransmissions before * fallback (based on maximum segment size) */ @@ -547,6 +561,7 @@ minstrel_init_cck_rates(struct minstrel_priv *mp) { static const int bitrates[4] = { 10, 20, 55, 110 }; struct ieee80211_supported_band *sband; + u32 rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef); int i, j; sband = mp->hw->wiphy->bands[IEEE80211_BAND_2GHZ]; @@ -559,6 +574,9 @@ minstrel_init_cck_rates(struct minstrel_priv *mp) if (rate->flags & IEEE80211_RATE_ERP_G) continue; + if ((rate_flags & sband->bitrates[i].flags) != rate_flags) + continue; + for (j = 0; j < ARRAY_SIZE(bitrates); j++) { if (rate->bitrate != bitrates[j]) continue; diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 5256297..7475a7a 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -862,8 +862,9 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, mi->sta = sta; mi->stats_update = jiffies; - ack_dur = ieee80211_frame_duration(sband->band, 10, 60, 1, 1); - mi->overhead = ieee80211_frame_duration(sband->band, 0, 60, 1, 1) + ack_dur; + ack_dur = ieee80211_frame_duration(sband->band, 10, 60, 1, 1, 0); + mi->overhead = ieee80211_frame_duration(sband->band, 0, 60, 1, 1, 0); + mi->overhead += ack_dur; mi->overhead_rtscts = mi->overhead + 2 * ack_dur; mi->avg_ampdu_len = MINSTREL_FRAC(1, 1); diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 4105d0c..3523daa 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -40,7 +40,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, struct sk_buff *skb, int group_addr, int next_frag_len) { - int rate, mrate, erp, dur, i; + int rate, mrate, erp, dur, i, shift; struct ieee80211_rate *txrate; struct ieee80211_local *local = tx->local; struct ieee80211_supported_band *sband; @@ -153,6 +153,8 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, rate = mrate; } + shift = ieee80211_vif_get_shift(&tx->sdata->vif); + /* Don't calculate ACKs for QoS Frames with NoAck Policy set */ if (ieee80211_is_data_qos(hdr->frame_control) && *(ieee80211_get_qos_ctl(hdr)) & IEEE80211_QOS_CTL_ACK_POLICY_NOACK) @@ -162,7 +164,8 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up * to closest integer */ dur = ieee80211_frame_duration(sband->band, 10, rate, erp, - tx->sdata->vif.bss_conf.use_short_preamble); + tx->sdata->vif.bss_conf.use_short_preamble, + shift); if (next_frag_len) { /* Frame is fragmented: duration increases with time needed to @@ -171,7 +174,8 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, /* next fragment */ dur += ieee80211_frame_duration(sband->band, next_frag_len, txrate->bitrate, erp, - tx->sdata->vif.bss_conf.use_short_preamble); + tx->sdata->vif.bss_conf.use_short_preamble, + shift); } return cpu_to_le16(dur); diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 2265445..61856e1 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -107,7 +107,8 @@ void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx) } int ieee80211_frame_duration(enum ieee80211_band band, size_t len, - int rate, int erp, int short_preamble) + int rate, int erp, int short_preamble, + int shift) { int dur; @@ -118,6 +119,9 @@ int 
ieee80211_frame_duration(enum ieee80211_band band, size_t len, * * rate is in 100 kbps, so divident is multiplied by 10 in the * DIV_ROUND_UP() operations. + * + * shift may be 2 for 5 MHz channels or 1 for 10 MHz channels, and + * is assumed to be 0 otherwise. */ if (band == IEEE80211_BAND_5GHZ || erp) { @@ -130,15 +134,21 @@ int ieee80211_frame_duration(enum ieee80211_band band, size_t len, * TXTIME = T_PREAMBLE + T_SIGNAL + T_SYM x N_SYM + Signal Ext * * T_SYM = 4 usec - * 802.11a - 17.5.2: aSIFSTime = 16 usec + * 802.11a - 18.5.2: aSIFSTime = 16 usec * 802.11g - 19.8.4: aSIFSTime = 10 usec + * signal ext = 6 usec */ dur = 16; /* SIFS + signal ext */ - dur += 16; /* 17.3.2.3: T_PREAMBLE = 16 usec */ - dur += 4; /* 17.3.2.3: T_SIGNAL = 4 usec */ + dur += 16; /* IEEE 802.11-2012 18.3.2.4: T_PREAMBLE = 16 usec */ + dur += 4; /* IEEE 802.11-2012 18.3.2.4: T_SIGNAL = 4 usec */ dur += 4 * DIV_ROUND_UP((16 + 8 * (len + 4) + 6) * 10, 4 * rate); /* T_SYM x N_SYM */ + + /* IEEE 802.11-2012 18.3.2.4: all values above are: + * * times 4 for 5 MHz + * * times 2 for 10 MHz + */ + dur *= 1 << shift; } else { /* * 802.11b or 802.11g with 802.11b compatibility: @@ -168,7 +178,7 @@ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, { struct ieee80211_sub_if_data *sdata; u16 dur; - int erp; + int erp, shift = 0; bool short_preamble = false; erp = 0; @@ -177,10 +187,11 @@ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, short_preamble = sdata->vif.bss_conf.use_short_preamble; if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) erp = rate->flags & IEEE80211_RATE_ERP_G; + shift = ieee80211_vif_get_shift(vif); } dur = ieee80211_frame_duration(band, frame_len, rate->bitrate, erp, - short_preamble); + short_preamble, shift); return cpu_to_le16(dur); } @@ -194,7 +205,7 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw, struct ieee80211_rate *rate; struct ieee80211_sub_if_data *sdata; bool short_preamble; - int erp; + int erp, shift = 0; u16 dur; struct ieee80211_supported_band *sband; @@ -210,17 +221,18 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw, short_preamble = sdata->vif.bss_conf.use_short_preamble; if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) erp = rate->flags & IEEE80211_RATE_ERP_G; + shift = ieee80211_vif_get_shift(vif); } /* CTS duration */ dur = ieee80211_frame_duration(sband->band, 10, rate->bitrate, - erp, short_preamble); + erp, short_preamble, shift); /* Data frame duration */ dur += ieee80211_frame_duration(sband->band, frame_len, rate->bitrate, - erp, short_preamble); + erp, short_preamble, shift); /* ACK duration */ dur += ieee80211_frame_duration(sband->band, 10, rate->bitrate, - erp, short_preamble); + erp, short_preamble, shift); return cpu_to_le16(dur); } @@ -235,7 +247,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, struct ieee80211_rate *rate; struct ieee80211_sub_if_data *sdata; bool short_preamble; - int erp; + int erp, shift = 0; u16 dur; struct ieee80211_supported_band *sband; @@ -250,15 +262,16 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, short_preamble = sdata->vif.bss_conf.use_short_preamble; if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) erp = rate->flags & IEEE80211_RATE_ERP_G; + shift = ieee80211_vif_get_shift(vif); } /* Data frame duration */ dur = ieee80211_frame_duration(sband->band, frame_len, rate->bitrate, - erp, short_preamble); + erp, short_preamble, shift); if (!(frame_txctl->flags & IEEE80211_TX_CTL_NO_ACK)) { /* ACK duration */ dur += ieee80211_frame_duration(sband->band, 
10, rate->bitrate, - erp, short_preamble); + erp, short_preamble, shift); } return cpu_to_le16(dur); -- cgit v0.10.2 From a5e70697d0c4836e69c60de92db27eac9ae71e05 Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Mon, 8 Jul 2013 16:55:52 +0200 Subject: mac80211: add radiotap flag and handling for 5/10 MHz Wireshark already defines radiotap channel flags for 5 and 10 MHz, so just use them in Linux radiotap too. Furthermore, add rx status flags to allow drivers to report when they received data on 5 or 10 MHz channels. Signed-off-by: Simon Wunderlich Signed-off-by: Mathias Kretschmer Signed-off-by: Johannes Berg diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h index c6d07cb..8b5b714 100644 --- a/include/net/ieee80211_radiotap.h +++ b/include/net/ieee80211_radiotap.h @@ -230,6 +230,10 @@ enum ieee80211_radiotap_type { #define IEEE80211_CHAN_PASSIVE 0x0200 /* Only passive scan allowed */ #define IEEE80211_CHAN_DYN 0x0400 /* Dynamic CCK-OFDM channel */ #define IEEE80211_CHAN_GFSK 0x0800 /* GFSK channel (FHSS PHY) */ +#define IEEE80211_CHAN_GSM 0x1000 /* GSM (900 MHz) */ +#define IEEE80211_CHAN_STURBO 0x2000 /* Static Turbo */ +#define IEEE80211_CHAN_HALF 0x4000 /* Half channel (10 MHz wide) */ +#define IEEE80211_CHAN_QUARTER 0x8000 /* Quarter channel (5 MHz wide) */ /* For IEEE80211_RADIOTAP_FLAGS */ #define IEEE80211_RADIOTAP_F_CFP 0x01 /* sent/received diff --git a/include/net/mac80211.h b/include/net/mac80211.h index fe5fee8..3124036 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -811,6 +811,8 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) * @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC * is stored in the @ampdu_delimiter_crc field) * @RX_FLAG_STBC_MASK: STBC 2 bit bitmask. 
1 - Nss=1, 2 - Nss=2, 3 - Nss=3 + * @RX_FLAG_10MHZ: 10 MHz (half channel) was used + * @RX_FLAG_5MHZ: 5 MHz (quarter channel) was used */ enum mac80211_rx_flags { RX_FLAG_MMIC_ERROR = BIT(0), @@ -839,6 +841,8 @@ enum mac80211_rx_flags { RX_FLAG_80P80MHZ = BIT(24), RX_FLAG_160MHZ = BIT(25), RX_FLAG_STBC_MASK = BIT(26) | BIT(27), + RX_FLAG_10MHZ = BIT(28), + RX_FLAG_5MHZ = BIT(29), }; #define RX_FLAG_STBC_SHIFT 26 diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index c9f2ca4..deeeca1 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -159,6 +159,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, __le32 *it_present; u32 it_present_val; u16 rx_flags = 0; + u16 channel_flags = 0; int mpdulen, chain; unsigned long chains = status->chains; @@ -243,20 +244,22 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, /* IEEE80211_RADIOTAP_CHANNEL */ put_unaligned_le16(status->freq, pos); pos += 2; + if (status->flag & RX_FLAG_10MHZ) + channel_flags |= IEEE80211_CHAN_HALF; + else if (status->flag & RX_FLAG_5MHZ) + channel_flags |= IEEE80211_CHAN_QUARTER; + if (status->band == IEEE80211_BAND_5GHZ) - put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ, - pos); + channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ; else if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) - put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ, - pos); + channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ; else if (rate && rate->flags & IEEE80211_RATE_ERP_G) - put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ, - pos); + channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ; else if (rate) - put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ, - pos); + channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ; else - put_unaligned_le16(IEEE80211_CHAN_2GHZ, pos); + channel_flags |= IEEE80211_CHAN_2GHZ; + put_unaligned_le16(channel_flags, pos); pos += 2; /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */ -- cgit v0.10.2 From 2103dec14792be2c2194a454630b01120d30e5cb Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Mon, 8 Jul 2013 16:55:53 +0200 Subject: mac80211: select and adjust bitrates according to channel mode The various components accessing the bitrates table must consider the used channel bandwidth to select only available rates or calculate the bitrate correctly. There are some rates in reduced bandwidth modes which can't be represented as multiples of 500kbps, like 2.25 MBit/s in 5 MHz mode. The standard suggests rounding up to the next multiple of 500kbps, so just do that in mac80211 as well. 
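The rounding is easiest to see with a number: 9 Mbit/s on a 5 MHz channel is really 2.25 Mbit/s, which has no exact 500 kbps encoding, so the DIV_ROUND_UP(bitrate, 5 * (1 << shift)) expression used throughout this patch encodes it as 5, i.e. 2.5 Mbit/s. A stand-alone sketch (user space, with a local DIV_ROUND_UP macro):

        #include <stdio.h>

        #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

        int main(void)
        {
                int bitrate = 90; /* 9.0 Mbit/s at 20 MHz, in units of 100 kbps */
                int shift = 2;    /* 5 MHz channel: the real rate is 2.25 Mbit/s */

                /* old encoding, only valid for full-width channels */
                printf("20 MHz supported-rates value: %d\n", bitrate / 5);
                /* new encoding: scale by the width and round up (2.25 -> 2.5 Mbit/s) */
                printf(" 5 MHz supported-rates value: %d\n",
                       DIV_ROUND_UP(bitrate, 5 * (1 << shift)));
                return 0;
        }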
Signed-off-by: Simon Wunderlich Signed-off-by: Mathias Kretschmer [make rate unsigned in ieee80211_add_tx_radiotap_header(), squash fix] Signed-off-by: Johannes Berg diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 8184d12..b82fff6 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -395,9 +395,13 @@ void sta_set_rate_info_tx(struct sta_info *sta, rinfo->nss = ieee80211_rate_get_vht_nss(rate); } else { struct ieee80211_supported_band *sband; + int shift = ieee80211_vif_get_shift(&sta->sdata->vif); + u16 brate; + sband = sta->local->hw.wiphy->bands[ ieee80211_get_sdata_band(sta->sdata)]; - rinfo->legacy = sband->bitrates[rate->idx].bitrate; + brate = sband->bitrates[rate->idx].bitrate; + rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift); } if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) rinfo->flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH; @@ -422,11 +426,13 @@ void sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo) rinfo->mcs = sta->last_rx_rate_idx; } else { struct ieee80211_supported_band *sband; + int shift = ieee80211_vif_get_shift(&sta->sdata->vif); + u16 brate; sband = sta->local->hw.wiphy->bands[ ieee80211_get_sdata_band(sta->sdata)]; - rinfo->legacy = - sband->bitrates[sta->last_rx_rate_idx].bitrate; + brate = sband->bitrates[sta->last_rx_rate_idx].bitrate; + rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift); } if (sta->last_rx_rate_flag & RX_FLAG_40MHZ) @@ -1190,8 +1196,6 @@ static int sta_apply_parameters(struct ieee80211_local *local, struct station_parameters *params) { int ret = 0; - u32 rates; - int i, j; struct ieee80211_supported_band *sband; struct ieee80211_sub_if_data *sdata = sta->sdata; enum ieee80211_band band = ieee80211_get_sdata_band(sdata); @@ -1284,16 +1288,10 @@ static int sta_apply_parameters(struct ieee80211_local *local, sta->listen_interval = params->listen_interval; if (params->supported_rates) { - rates = 0; - - for (i = 0; i < params->supported_rates_len; i++) { - int rate = (params->supported_rates[i] & 0x7f) * 5; - for (j = 0; j < sband->n_bitrates; j++) { - if (sband->bitrates[j].bitrate == rate) - rates |= BIT(j); - } - } - sta->sta.supp_rates[band] = rates; + ieee80211_parse_bitrates(&sdata->vif.bss_conf.chandef, + sband, params->supported_rates, + params->supported_rates_len, + &sta->sta.supp_rates[band]); } if (params->ht_capa) @@ -1956,18 +1954,11 @@ static int ieee80211_change_bss(struct wiphy *wiphy, } if (params->basic_rates) { - int i, j; - u32 rates = 0; - struct ieee80211_supported_band *sband = wiphy->bands[band]; - - for (i = 0; i < params->basic_rates_len; i++) { - int rate = (params->basic_rates[i] & 0x7f) * 5; - for (j = 0; j < sband->n_bitrates; j++) { - if (sband->bitrates[j].bitrate == rate) - rates |= BIT(j); - } - } - sdata->vif.bss_conf.basic_rates = rates; + ieee80211_parse_bitrates(&sdata->vif.bss_conf.chandef, + wiphy->bands[band], + params->basic_rates, + params->basic_rates_len, + &sdata->vif.bss_conf.basic_rates); changed |= BSS_CHANGED_BASIC_RATES; } diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 7f290a8..272a3b3 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -43,16 +43,17 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct ieee80211_local *local = sdata->local; - int rates, i; + int rates_n = 0, i, ri; struct ieee80211_mgmt *mgmt; u8 *pos; struct ieee80211_supported_band *sband; struct cfg80211_bss *bss; - u32 bss_change; + u32 bss_change, rate_flags, rates = 0, rates_added = 0; u8 
supp_rates[IEEE80211_MAX_SUPP_RATES]; struct cfg80211_chan_def chandef; struct beacon_data *presp; int frame_len; + int shift; sdata_assert_lock(sdata); @@ -99,6 +100,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, memcpy(ifibss->bssid, bssid, ETH_ALEN); sband = local->hw.wiphy->bands[chan->band]; + shift = ieee80211_vif_get_shift(&sdata->vif); /* Build IBSS probe response */ frame_len = sizeof(struct ieee80211_hdr_3addr) + @@ -134,15 +136,29 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, memcpy(pos, ifibss->ssid, ifibss->ssid_len); pos += ifibss->ssid_len; - rates = min_t(int, 8, sband->n_bitrates); + rate_flags = ieee80211_chandef_rate_flags(&chandef); + for (i = 0; i < sband->n_bitrates; i++) { + if ((rate_flags & sband->bitrates[i].flags) != rate_flags) + continue; + + rates |= BIT(i); + rates_n++; + } + *pos++ = WLAN_EID_SUPP_RATES; - *pos++ = rates; - for (i = 0; i < rates; i++) { - int rate = sband->bitrates[i].bitrate; + *pos++ = min_t(int, 8, rates_n); + for (ri = 0; ri < sband->n_bitrates; ri++) { + int rate = DIV_ROUND_UP(sband->bitrates[ri].bitrate, + 5 * (1 << shift)); u8 basic = 0; - if (basic_rates & BIT(i)) + if (!(rates & BIT(ri))) + continue; + + if (basic_rates & BIT(ri)) basic = 0x80; - *pos++ = basic | (u8) (rate / 5); + *pos++ = basic | (u8) rate; + if (++rates_added == 8) + break; } if (sband->band == IEEE80211_BAND_2GHZ) { @@ -157,15 +173,20 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, *pos++ = 0; *pos++ = 0; - if (sband->n_bitrates > 8) { + /* put the remaining rates in WLAN_EID_EXT_SUPP_RATES */ + if (rates_n > 8) { *pos++ = WLAN_EID_EXT_SUPP_RATES; - *pos++ = sband->n_bitrates - 8; - for (i = 8; i < sband->n_bitrates; i++) { - int rate = sband->bitrates[i].bitrate; + *pos++ = rates_n - 8; + for (; ri < sband->n_bitrates; ri++) { + int rate = DIV_ROUND_UP(sband->bitrates[ri].bitrate, + 5 * (1 << shift)); u8 basic = 0; - if (basic_rates & BIT(i)) + if (!(rates & BIT(ri))) + continue; + + if (basic_rates & BIT(ri)) basic = 0x80; - *pos++ = basic | (u8) (rate / 5); + *pos++ = basic | (u8) rate; } } @@ -244,7 +265,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, sdata->vif.bss_conf.ibss_creator = creator; ieee80211_bss_info_change_notify(sdata, bss_change); - ieee80211_sta_def_wmm_params(sdata, sband->n_bitrates, supp_rates); + ieee80211_sta_def_wmm_params(sdata, rates, supp_rates); ifibss->state = IEEE80211_IBSS_MLME_JOINED; mod_timer(&ifibss->timer, @@ -268,6 +289,8 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, u16 beacon_int = cbss->beacon_interval; const struct cfg80211_bss_ies *ies; u64 tsf; + u32 rate_flags; + int shift; sdata_assert_lock(sdata); @@ -275,15 +298,24 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, beacon_int = 10; sband = sdata->local->hw.wiphy->bands[cbss->channel->band]; + rate_flags = ieee80211_chandef_rate_flags(&sdata->u.ibss.chandef); + shift = ieee80211_vif_get_shift(&sdata->vif); basic_rates = 0; for (i = 0; i < bss->supp_rates_len; i++) { - int rate = (bss->supp_rates[i] & 0x7f) * 5; + int rate = bss->supp_rates[i] & 0x7f; bool is_basic = !!(bss->supp_rates[i] & 0x80); for (j = 0; j < sband->n_bitrates; j++) { - if (sband->bitrates[j].bitrate == rate) { + int brate; + if ((rate_flags & sband->bitrates[j].flags) + != rate_flags) + continue; + + brate = DIV_ROUND_UP(sband->bitrates[j].bitrate, + 5 * (1 << shift)); + if (brate == rate) { if (is_basic) 
basic_rates |= BIT(j); break; @@ -465,7 +497,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, sta = sta_info_get(sdata, mgmt->sa); if (elems->supp_rates) { - supp_rates = ieee80211_sta_get_rates(local, elems, + supp_rates = ieee80211_sta_get_rates(sdata, elems, band, NULL); if (sta) { u32 prev_rates; @@ -589,7 +621,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, "beacon TSF higher than local TSF - IBSS merge with BSSID %pM\n", mgmt->bssid); ieee80211_sta_join_ibss(sdata, bss); - supp_rates = ieee80211_sta_get_rates(local, elems, band, NULL); + supp_rates = ieee80211_sta_get_rates(sdata, elems, band, NULL); ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, supp_rates); rcu_read_unlock(); @@ -1024,6 +1056,9 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, struct cfg80211_ibss_params *params) { u32 changed = 0; + u32 rate_flags; + struct ieee80211_supported_band *sband; + int i; if (params->bssid) { memcpy(sdata->u.ibss.bssid, params->bssid, ETH_ALEN); @@ -1034,6 +1069,14 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, sdata->u.ibss.privacy = params->privacy; sdata->u.ibss.control_port = params->control_port; sdata->u.ibss.basic_rates = params->basic_rates; + + /* fix basic_rates if channel does not support these rates */ + rate_flags = ieee80211_chandef_rate_flags(¶ms->chandef); + sband = sdata->local->hw.wiphy->bands[params->chandef.chan->band]; + for (i = 0; i < sband->n_bitrates; i++) { + if ((rate_flags & sband->bitrates[i].flags) != rate_flags) + sdata->u.ibss.basic_rates &= ~BIT(i); + } memcpy(sdata->vif.bss_conf.mcast_rate, params->mcast_rate, sizeof(params->mcast_rate)); diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 338e7ca..6596da6 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1601,7 +1601,7 @@ void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, size_t buffer_len, const u8 *ie, size_t ie_len, enum ieee80211_band band, u32 rate_mask, - u8 channel); + struct cfg80211_chan_def *chandef); struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, u32 ratemask, struct ieee80211_channel *chan, @@ -1617,7 +1617,7 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, const size_t supp_rates_len, const u8 *supp_rates); -u32 ieee80211_sta_get_rates(struct ieee80211_local *local, +u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata, struct ieee802_11_elems *elems, enum ieee80211_band band, u32 *basic_rates); int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata, @@ -1634,6 +1634,9 @@ u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, u16 prot_mode); u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, u32 cap); +int ieee80211_parse_bitrates(struct cfg80211_chan_def *chandef, + const struct ieee80211_supported_band *sband, + const u8 *srates, int srates_len, u32 *rates); int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, bool need_basic, enum ieee80211_band band); diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 447f41b..e536f22 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -62,7 +62,6 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata, struct ieee802_11_elems *ie) { struct 
ieee80211_if_mesh *ifmsh = &sdata->u.mesh; - struct ieee80211_local *local = sdata->local; u32 basic_rates = 0; struct cfg80211_chan_def sta_chan_def; @@ -85,7 +84,7 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata, (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth))) return false; - ieee80211_sta_get_rates(local, ie, ieee80211_get_sdata_band(sdata), + ieee80211_sta_get_rates(sdata, ie, ieee80211_get_sdata_band(sdata), &basic_rates); if (sdata->vif.bss_conf.basic_rates != basic_rates) diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 02c05fa..6b65d50 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -379,7 +379,7 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata, u32 rates, basic_rates = 0, changed = 0; sband = local->hw.wiphy->bands[band]; - rates = ieee80211_sta_get_rates(local, elems, band, &basic_rates); + rates = ieee80211_sta_get_rates(sdata, elems, band, &basic_rates); spin_lock_bh(&sta->lock); sta->last_rx = jiffies; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index ae31968..f7552c2 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -478,27 +478,6 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata, /* frame sending functions */ -static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len, - struct ieee80211_supported_band *sband, - u32 *rates) -{ - int i, j, count; - *rates = 0; - count = 0; - for (i = 0; i < supp_rates_len; i++) { - int rate = (supp_rates[i] & 0x7F) * 5; - - for (j = 0; j < sband->n_bitrates; j++) - if (sband->bitrates[j].bitrate == rate) { - *rates |= BIT(j); - count++; - break; - } - } - - return count; -} - static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, u8 ap_ht_param, struct ieee80211_supported_band *sband, @@ -617,12 +596,12 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) struct ieee80211_mgmt *mgmt; u8 *pos, qos_info; size_t offset = 0, noffset; - int i, count, rates_len, supp_rates_len; + int i, count, rates_len, supp_rates_len, shift; u16 capab; struct ieee80211_supported_band *sband; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_channel *chan; - u32 rates = 0; + u32 rate_flags, rates = 0; sdata_assert_lock(sdata); @@ -633,8 +612,10 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) return; } chan = chanctx_conf->def.chan; + rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def); rcu_read_unlock(); sband = local->hw.wiphy->bands[chan->band]; + shift = ieee80211_vif_get_shift(&sdata->vif); if (assoc_data->supp_rates_len) { /* @@ -643,17 +624,24 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) * in the association request (e.g. D-Link DAP 1353 in * b-only mode)... */ - rates_len = ieee80211_compatible_rates(assoc_data->supp_rates, - assoc_data->supp_rates_len, - sband, &rates); + rates_len = ieee80211_parse_bitrates(&chanctx_conf->def, sband, + assoc_data->supp_rates, + assoc_data->supp_rates_len, + &rates); } else { /* * In case AP not provide any supported rates information * before association, we send information element(s) with * all rates that we support. 
*/ - rates = ~0; - rates_len = sband->n_bitrates; + rates_len = 0; + for (i = 0; i < sband->n_bitrates; i++) { + if ((rate_flags & sband->bitrates[i].flags) + != rate_flags) + continue; + rates |= BIT(i); + rates_len++; + } } skb = alloc_skb(local->hw.extra_tx_headroom + @@ -730,8 +718,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) count = 0; for (i = 0; i < sband->n_bitrates; i++) { if (BIT(i) & rates) { - int rate = sband->bitrates[i].bitrate; - *pos++ = (u8) (rate / 5); + int rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, + 5 * (1 << shift)); + *pos++ = (u8) rate; if (++count == 8) break; } @@ -744,8 +733,10 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) for (i++; i < sband->n_bitrates; i++) { if (BIT(i) & rates) { - int rate = sband->bitrates[i].bitrate; - *pos++ = (u8) (rate / 5); + int rate; + rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, + 5 * (1 << shift)); + *pos++ = (u8) rate; } } } @@ -2432,15 +2423,16 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband, u8 *supp_rates, unsigned int supp_rates_len, u32 *rates, u32 *basic_rates, bool *have_higher_than_11mbit, - int *min_rate, int *min_rate_index) + int *min_rate, int *min_rate_index, + int shift, u32 rate_flags) { int i, j; for (i = 0; i < supp_rates_len; i++) { - int rate = (supp_rates[i] & 0x7f) * 5; + int rate = supp_rates[i] & 0x7f; bool is_basic = !!(supp_rates[i] & 0x80); - if (rate > 110) + if ((rate * 5 * (1 << shift)) > 110) *have_higher_than_11mbit = true; /* @@ -2456,12 +2448,20 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband, continue; for (j = 0; j < sband->n_bitrates; j++) { - if (sband->bitrates[j].bitrate == rate) { + struct ieee80211_rate *br; + int brate; + + br = &sband->bitrates[j]; + if ((rate_flags & br->flags) != rate_flags) + continue; + + brate = DIV_ROUND_UP(br->bitrate, (1 << shift) * 5); + if (brate == rate) { *rates |= BIT(j); if (is_basic) *basic_rates |= BIT(j); - if (rate < *min_rate) { - *min_rate = rate; + if ((rate * 5) < *min_rate) { + *min_rate = rate * 5; *min_rate_index = j; } break; @@ -3884,27 +3884,40 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, if (!new_sta) return -ENOMEM; } - if (new_sta) { u32 rates = 0, basic_rates = 0; bool have_higher_than_11mbit; int min_rate = INT_MAX, min_rate_index = -1; + struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_supported_band *sband; const struct cfg80211_bss_ies *ies; + int shift; + u32 rate_flags; sband = local->hw.wiphy->bands[cbss->channel->band]; err = ieee80211_prep_channel(sdata, cbss); if (err) { sta_info_free(local, new_sta); - return err; + return -EINVAL; } + shift = ieee80211_vif_get_shift(&sdata->vif); + + rcu_read_lock(); + chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); + if (WARN_ON(!chanctx_conf)) { + rcu_read_unlock(); + return -EINVAL; + } + rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def); + rcu_read_unlock(); ieee80211_get_rates(sband, bss->supp_rates, bss->supp_rates_len, &rates, &basic_rates, &have_higher_than_11mbit, - &min_rate, &min_rate_index); + &min_rate, &min_rate_index, + shift, rate_flags); /* * This used to be a workaround for basic rates missing diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index 30d58d2..ba63ac8 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c @@ -232,37 +232,28 @@ static void rc_send_low_broadcast(s8 *idx, u32 basic_rates, /* could not find a basic rate; use original selection */ } -static inline s8 
-rate_lowest_non_cck_index(struct ieee80211_supported_band *sband, - struct ieee80211_sta *sta) +static void __rate_control_send_low(struct ieee80211_hw *hw, + struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, + struct ieee80211_tx_info *info) { int i; + u32 rate_flags = + ieee80211_chandef_rate_flags(&hw->conf.chandef); + if ((sband->band == IEEE80211_BAND_2GHZ) && + (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)) + rate_flags |= IEEE80211_RATE_ERP_G; + + info->control.rates[0].idx = 0; for (i = 0; i < sband->n_bitrates; i++) { - struct ieee80211_rate *srate = &sband->bitrates[i]; - if ((srate->bitrate == 10) || (srate->bitrate == 20) || - (srate->bitrate == 55) || (srate->bitrate == 110)) + if (!rate_supported(sta, sband->band, i)) continue; - if (rate_supported(sta, sband->band, i)) - return i; + info->control.rates[0].idx = i; + break; } - - /* No matching rate found */ - return 0; -} - -static void __rate_control_send_low(struct ieee80211_hw *hw, - struct ieee80211_supported_band *sband, - struct ieee80211_sta *sta, - struct ieee80211_tx_info *info) -{ - if ((sband->band != IEEE80211_BAND_2GHZ) || - !(info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)) - info->control.rates[0].idx = rate_lowest_index(sband, sta); - else - info->control.rates[0].idx = - rate_lowest_non_cck_index(sband, sta); + WARN_ON_ONCE(i == sband->n_bitrates); info->control.rates[0].count = (info->flags & IEEE80211_TX_CTL_NO_ACK) ? @@ -585,6 +576,7 @@ static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata, u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN]; bool has_mcs_mask; u32 mask; + u32 rate_flags; int i; /* @@ -594,6 +586,12 @@ static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata, */ mask = sdata->rc_rateidx_mask[info->band]; has_mcs_mask = sdata->rc_has_mcs_mask[info->band]; + rate_flags = + ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef); + for (i = 0; i < sband->n_bitrates; i++) + if ((rate_flags & sband->bitrates[i].flags) != rate_flags) + mask &= ~BIT(i); + if (mask == (1 << sband->n_bitrates) - 1 && !has_mcs_mask) return; diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index deeeca1..0ac7512 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -236,8 +236,13 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, */ *pos = 0; } else { + int shift = 0; rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE); - *pos = rate->bitrate / 5; + if (status->flag & RX_FLAG_10MHZ) + shift = 1; + else if (status->flag & RX_FLAG_5MHZ) + shift = 2; + *pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift)); } pos++; diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 1b122a7..819d095 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -204,10 +204,29 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb) ieee80211_rx_bss_put(local, bss); } +static void +ieee80211_prepare_scan_chandef(struct cfg80211_chan_def *chandef, + enum nl80211_bss_scan_width scan_width) +{ + memset(chandef, 0, sizeof(*chandef)); + switch (scan_width) { + case NL80211_BSS_CHAN_WIDTH_5: + chandef->width = NL80211_CHAN_WIDTH_5; + break; + case NL80211_BSS_CHAN_WIDTH_10: + chandef->width = NL80211_CHAN_WIDTH_10; + break; + default: + chandef->width = NL80211_CHAN_WIDTH_20_NOHT; + break; + } +} + /* return false if no more work */ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local) { struct cfg80211_scan_request *req = local->scan_req; + struct cfg80211_chan_def chandef; enum ieee80211_band band; int i, ielen, n_chans; @@ 
-229,11 +248,12 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local) } while (!n_chans); local->hw_scan_req->n_channels = n_chans; + ieee80211_prepare_scan_chandef(&chandef, req->scan_width); ielen = ieee80211_build_preq_ies(local, (u8 *)local->hw_scan_req->ie, local->hw_scan_ies_bufsize, req->ie, req->ie_len, band, - req->rates[band], 0); + req->rates[band], &chandef); local->hw_scan_req->ie_len = ielen; local->hw_scan_req->no_cck = req->no_cck; @@ -912,6 +932,7 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata, { struct ieee80211_local *local = sdata->local; struct ieee80211_sched_scan_ies sched_scan_ies = {}; + struct cfg80211_chan_def chandef; int ret, i, iebufsz; iebufsz = 2 + IEEE80211_MAX_SSID_LEN + @@ -939,10 +960,12 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata, goto out_free; } + ieee80211_prepare_scan_chandef(&chandef, req->scan_width); + sched_scan_ies.len[i] = ieee80211_build_preq_ies(local, sched_scan_ies.ie[i], iebufsz, req->ie, req->ie_len, - i, (u32) -1, 0); + i, (u32) -1, &chandef); } ret = drv_sched_scan_start(local, sdata, req, &sched_scan_ies); diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 4343920..6ad4c14 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -252,9 +252,10 @@ static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info) return len; } -static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band - *sband, struct sk_buff *skb, - int retry_count, int rtap_len) +static void +ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band *sband, + struct sk_buff *skb, int retry_count, + int rtap_len, int shift) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; @@ -280,8 +281,11 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band /* IEEE80211_RADIOTAP_RATE */ if (info->status.rates[0].idx >= 0 && !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS)) { + u16 rate; + rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE); - *pos = sband->bitrates[info->status.rates[0].idx].bitrate / 5; + rate = sband->bitrates[info->status.rates[0].idx].bitrate; + *pos = DIV_ROUND_UP(rate, 5 * (1 << shift)); /* padding for tx flags */ pos += 2; } @@ -424,6 +428,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) bool acked; struct ieee80211_bar *bar; int rtap_len; + int shift = 0; for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { if ((info->flags & IEEE80211_TX_CTL_AMPDU) && @@ -458,6 +463,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) if (!ether_addr_equal(hdr->addr2, sta->sdata->vif.addr)) continue; + shift = ieee80211_vif_get_shift(&sta->sdata->vif); + if (info->flags & IEEE80211_TX_STATUS_EOSP) clear_sta_flag(sta, WLAN_STA_SP); @@ -624,7 +631,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) dev_kfree_skb(skb); return; } - ieee80211_add_tx_radiotap_header(sband, skb, retry_count, rtap_len); + ieee80211_add_tx_radiotap_header(sband, skb, retry_count, rtap_len, + shift); /* XXX: is this sufficient for BPF? 
*/ skb_set_mac_header(skb, 0); diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 3523daa..f82301b 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -40,12 +40,22 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, struct sk_buff *skb, int group_addr, int next_frag_len) { - int rate, mrate, erp, dur, i, shift; + int rate, mrate, erp, dur, i, shift = 0; struct ieee80211_rate *txrate; struct ieee80211_local *local = tx->local; struct ieee80211_supported_band *sband; struct ieee80211_hdr *hdr; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_chanctx_conf *chanctx_conf; + u32 rate_flags = 0; + + rcu_read_lock(); + chanctx_conf = rcu_dereference(tx->sdata->vif.chanctx_conf); + if (chanctx_conf) { + shift = ieee80211_chandef_get_shift(&chanctx_conf->def); + rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def); + } + rcu_read_unlock(); /* assume HW handles this */ if (tx->rate.flags & IEEE80211_TX_RC_MCS) @@ -122,8 +132,11 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, if (r->bitrate > txrate->bitrate) break; + if ((rate_flags & r->flags) != rate_flags) + continue; + if (tx->sdata->vif.bss_conf.basic_rates & BIT(i)) - rate = r->bitrate; + rate = DIV_ROUND_UP(r->bitrate, 1 << shift); switch (sband->band) { case IEEE80211_BAND_2GHZ: { @@ -150,11 +163,9 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, if (rate == -1) { /* No matching basic rate found; use highest suitable mandatory * PHY rate */ - rate = mrate; + rate = DIV_ROUND_UP(mrate, 1 << shift); } - shift = ieee80211_vif_get_shift(&tx->sdata->vif); - /* Don't calculate ACKs for QoS Frames with NoAck Policy set */ if (ieee80211_is_data_qos(hdr->frame_control) && *(ieee80211_get_qos_ctl(hdr)) & IEEE80211_QOS_CTL_ACK_POLICY_NOACK) diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 61856e1..1e45891c 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -141,14 +141,18 @@ int ieee80211_frame_duration(enum ieee80211_band band, size_t len, dur = 16; /* SIFS + signal ext */ dur += 16; /* IEEE 802.11-2012 18.3.2.4: T_PREAMBLE = 16 usec */ dur += 4; /* IEEE 802.11-2012 18.3.2.4: T_SIGNAL = 4 usec */ - dur += 4 * DIV_ROUND_UP((16 + 8 * (len + 4) + 6) * 10, - 4 * rate); /* T_SYM x N_SYM */ /* IEEE 802.11-2012 18.3.2.4: all values above are: * * times 4 for 5 MHz * * times 2 for 10 MHz */ dur *= 1 << shift; + + /* rates should already consider the channel bandwidth, + * don't apply divisor again. 
+ */ + dur += 4 * DIV_ROUND_UP((16 + 8 * (len + 4) + 6) * 10, + 4 * rate); /* T_SYM x N_SYM */ } else { /* * 802.11b or 802.11g with 802.11b compatibility: @@ -205,7 +209,7 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw, struct ieee80211_rate *rate; struct ieee80211_sub_if_data *sdata; bool short_preamble; - int erp, shift = 0; + int erp, shift = 0, bitrate; u16 dur; struct ieee80211_supported_band *sband; @@ -224,14 +228,16 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw, shift = ieee80211_vif_get_shift(vif); } + bitrate = DIV_ROUND_UP(rate->bitrate, 1 << shift); + /* CTS duration */ - dur = ieee80211_frame_duration(sband->band, 10, rate->bitrate, + dur = ieee80211_frame_duration(sband->band, 10, bitrate, erp, short_preamble, shift); /* Data frame duration */ - dur += ieee80211_frame_duration(sband->band, frame_len, rate->bitrate, + dur += ieee80211_frame_duration(sband->band, frame_len, bitrate, erp, short_preamble, shift); /* ACK duration */ - dur += ieee80211_frame_duration(sband->band, 10, rate->bitrate, + dur += ieee80211_frame_duration(sband->band, 10, bitrate, erp, short_preamble, shift); return cpu_to_le16(dur); @@ -247,7 +253,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, struct ieee80211_rate *rate; struct ieee80211_sub_if_data *sdata; bool short_preamble; - int erp, shift = 0; + int erp, shift = 0, bitrate; u16 dur; struct ieee80211_supported_band *sband; @@ -265,12 +271,14 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, shift = ieee80211_vif_get_shift(vif); } + bitrate = DIV_ROUND_UP(rate->bitrate, 1 << shift); + /* Data frame duration */ - dur = ieee80211_frame_duration(sband->band, frame_len, rate->bitrate, + dur = ieee80211_frame_duration(sband->band, frame_len, bitrate, erp, short_preamble, shift); if (!(frame_txctl->flags & IEEE80211_TX_CTL_NO_ACK)) { /* ACK duration */ - dur += ieee80211_frame_duration(sband->band, 10, rate->bitrate, + dur += ieee80211_frame_duration(sband->band, 10, bitrate, erp, short_preamble, shift); } @@ -1175,7 +1183,7 @@ void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, size_t buffer_len, const u8 *ie, size_t ie_len, enum ieee80211_band band, u32 rate_mask, - u8 channel) + struct cfg80211_chan_def *chandef) { struct ieee80211_supported_band *sband; u8 *pos = buffer, *end = buffer + buffer_len; @@ -1184,16 +1192,26 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, u8 rates[32]; int num_rates; int ext_rates_len; + int shift; + u32 rate_flags; sband = local->hw.wiphy->bands[band]; if (WARN_ON_ONCE(!sband)) return 0; + rate_flags = ieee80211_chandef_rate_flags(chandef); + shift = ieee80211_chandef_get_shift(chandef); + num_rates = 0; for (i = 0; i < sband->n_bitrates; i++) { if ((BIT(i) & rate_mask) == 0) continue; /* skip rate */ - rates[num_rates++] = (u8) (sband->bitrates[i].bitrate / 5); + if ((rate_flags & sband->bitrates[i].flags) != rate_flags) + continue; + + rates[num_rates++] = + (u8) DIV_ROUND_UP(sband->bitrates[i].bitrate, + (1 << shift) * 5); } supp_rates_len = min_t(int, num_rates, 8); @@ -1233,12 +1251,13 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, pos += ext_rates_len; } - if (channel && sband->band == IEEE80211_BAND_2GHZ) { + if (chandef->chan && sband->band == IEEE80211_BAND_2GHZ) { if (end - pos < 3) goto out_err; *pos++ = WLAN_EID_DS_PARAMS; *pos++ = 1; - *pos++ = channel; + *pos++ = ieee80211_frequency_to_channel( + 
chandef->chan->center_freq); } /* insert custom IEs that go before HT */ @@ -1303,9 +1322,9 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, bool directed) { struct ieee80211_local *local = sdata->local; + struct cfg80211_chan_def chandef; struct sk_buff *skb; struct ieee80211_mgmt *mgmt; - u8 chan_no; int ies_len; /* @@ -1313,10 +1332,11 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, * in order to maximize the chance that we get a response. Some * badly-behaved APs don't respond when this parameter is included. */ + chandef.width = sdata->vif.bss_conf.chandef.width; if (directed) - chan_no = 0; + chandef.chan = NULL; else - chan_no = ieee80211_frequency_to_channel(chan->center_freq); + chandef.chan = chan; skb = ieee80211_probereq_get(&local->hw, &sdata->vif, ssid, ssid_len, 100 + ie_len); @@ -1326,7 +1346,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, ies_len = ieee80211_build_preq_ies(local, skb_tail_pointer(skb), skb_tailroom(skb), ie, ie_len, chan->band, - ratemask, chan_no); + ratemask, &chandef); skb_put(skb, ies_len); if (dst) { @@ -1360,16 +1380,19 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, } } -u32 ieee80211_sta_get_rates(struct ieee80211_local *local, +u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata, struct ieee802_11_elems *elems, enum ieee80211_band band, u32 *basic_rates) { struct ieee80211_supported_band *sband; struct ieee80211_rate *bitrates; size_t num_rates; - u32 supp_rates; - int i, j; - sband = local->hw.wiphy->bands[band]; + u32 supp_rates, rate_flags; + int i, j, shift; + sband = sdata->local->hw.wiphy->bands[band]; + + rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef); + shift = ieee80211_vif_get_shift(&sdata->vif); if (WARN_ON(!sband)) return 1; @@ -1394,7 +1417,15 @@ u32 ieee80211_sta_get_rates(struct ieee80211_local *local, continue; for (j = 0; j < num_rates; j++) { - if (bitrates[j].bitrate == own_rate) { + int brate; + if ((rate_flags & sband->bitrates[j].flags) + != rate_flags) + continue; + + brate = DIV_ROUND_UP(sband->bitrates[j].bitrate, + 1 << shift); + + if (brate == own_rate) { supp_rates |= BIT(j); if (basic_rates && is_basic) *basic_rates |= BIT(j); @@ -2017,18 +2048,56 @@ void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan, cfg80211_chandef_create(chandef, control_chan, channel_type); } +int ieee80211_parse_bitrates(struct cfg80211_chan_def *chandef, + const struct ieee80211_supported_band *sband, + const u8 *srates, int srates_len, u32 *rates) +{ + u32 rate_flags = ieee80211_chandef_rate_flags(chandef); + int shift = ieee80211_chandef_get_shift(chandef); + struct ieee80211_rate *br; + int brate, rate, i, j, count = 0; + + *rates = 0; + + for (i = 0; i < srates_len; i++) { + rate = srates[i] & 0x7f; + + for (j = 0; j < sband->n_bitrates; j++) { + br = &sband->bitrates[j]; + if ((rate_flags & br->flags) != rate_flags) + continue; + + brate = DIV_ROUND_UP(br->bitrate, (1 << shift) * 5); + if (brate == rate) { + *rates |= BIT(j); + count++; + break; + } + } + } + return count; +} + int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, bool need_basic, enum ieee80211_band band) { struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband; - int rate; + int rate, shift; u8 i, rates, *pos; u32 basic_rates = sdata->vif.bss_conf.basic_rates; + u32 rate_flags; + shift = 
ieee80211_vif_get_shift(&sdata->vif); + rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef); sband = local->hw.wiphy->bands[band]; - rates = sband->n_bitrates; + rates = 0; + for (i = 0; i < sband->n_bitrates; i++) { + if ((rate_flags & sband->bitrates[i].flags) != rate_flags) + continue; + rates++; + } if (rates > 8) rates = 8; @@ -2040,10 +2109,15 @@ int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata, *pos++ = rates; for (i = 0; i < rates; i++) { u8 basic = 0; + if ((rate_flags & sband->bitrates[i].flags) != rate_flags) + continue; + if (need_basic && basic_rates & BIT(i)) basic = 0x80; rate = sband->bitrates[i].bitrate; - *pos++ = basic | (u8) (rate / 5); + rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, + 5 * (1 << shift)); + *pos++ = basic | (u8) rate; } return 0; @@ -2055,12 +2129,22 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata, { struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband; - int rate; + int rate, skip, shift; u8 i, exrates, *pos; u32 basic_rates = sdata->vif.bss_conf.basic_rates; + u32 rate_flags; + + rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef); + shift = ieee80211_vif_get_shift(&sdata->vif); sband = local->hw.wiphy->bands[band]; - exrates = sband->n_bitrates; + exrates = 0; + for (i = 0; i < sband->n_bitrates; i++) { + if ((rate_flags & sband->bitrates[i].flags) != rate_flags) + continue; + exrates++; + } + if (exrates > 8) exrates -= 8; else @@ -2073,12 +2157,19 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata, pos = skb_put(skb, exrates + 2); *pos++ = WLAN_EID_EXT_SUPP_RATES; *pos++ = exrates; + skip = 0; for (i = 8; i < sband->n_bitrates; i++) { u8 basic = 0; + if ((rate_flags & sband->bitrates[i].flags) + != rate_flags) + continue; + if (skip++ < 8) + continue; if (need_basic && basic_rates & BIT(i)) basic = 0x80; - rate = sband->bitrates[i].bitrate; - *pos++ = basic | (u8) (rate / 5); + rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, + 5 * (1 << shift)); + *pos++ = basic | (u8) rate; } } return 0; @@ -2162,9 +2253,17 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local, ri.flags |= RATE_INFO_FLAGS_SHORT_GI; } else { struct ieee80211_supported_band *sband; + int shift = 0; + int bitrate; + + if (status->flag & RX_FLAG_10MHZ) + shift = 1; + if (status->flag & RX_FLAG_5MHZ) + shift = 2; sband = local->hw.wiphy->bands[status->band]; - ri.legacy = sband->bitrates[status->rate_idx].bitrate; + bitrate = sband->bitrates[status->rate_idx].bitrate; + ri.legacy = DIV_ROUND_UP(bitrate, (1 << shift)); } rate = cfg80211_calculate_bitrate(&ri); -- cgit v0.10.2 From 74608aca4d82e2855b1a6a2cd60331d1a98f2d6a Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Mon, 8 Jul 2013 16:55:54 +0200 Subject: cfg80211/mac80211: get mandatory rates based on scan width Mandatory rates for 5 and 10 MHz are different from the rates used for 20 MHz in 2.4 GHz mode, as they use OFDM only. 
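For orientation, a minimal caller-side sketch (the wrapper name is illustrative, not part of the patch) of how the scan width is derived from a channel definition before requesting the mandatory rate set, mirroring the ibss.c and mesh.c callers in the hunks below:

static u32 example_mandatory_rates(struct ieee80211_supported_band *sband,
				   struct cfg80211_chan_def *chandef)
{
	enum nl80211_bss_scan_width scan_width =
		cfg80211_chandef_to_scan_width(chandef);

	/* On 2.4 GHz, a 5/10 MHz scan width selects the OFDM-only
	 * (802.11g) mandatory set while 20 MHz keeps the 802.11b set;
	 * 5 GHz always uses the 802.11a mandatory set. */
	return ieee80211_mandatory_rates(sband, scan_width);
}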
Signed-off-by: Simon Wunderlich Signed-off-by: Mathias Kretschmer Signed-off-by: Johannes Berg diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index bae8614..88f1563 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -3128,11 +3128,13 @@ ieee80211_get_response_rate(struct ieee80211_supported_band *sband, /** * ieee80211_mandatory_rates - get mandatory rates for a given band * @sband: the band to look for rates in + * @scan_width: width of the control channel * * This function returns a bitmap of the mandatory rates for the given * band, bits are set according to the rate position in the bitrates array. */ -u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband); +u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband, + enum nl80211_bss_scan_width scan_width); /* * Radiotap parsing functions -- for controlled injection support diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 272a3b3..299603e 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -371,6 +371,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid, struct sta_info *sta; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_supported_band *sband; + enum nl80211_bss_scan_width scan_width; int band; /* @@ -399,6 +400,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid, if (WARN_ON_ONCE(!chanctx_conf)) return NULL; band = chanctx_conf->def.chan->band; + scan_width = cfg80211_chandef_to_scan_width(&chanctx_conf->def); rcu_read_unlock(); sta = sta_info_alloc(sdata, addr, GFP_KERNEL); @@ -412,7 +414,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid, /* make sure mandatory rates are always added */ sband = local->hw.wiphy->bands[band]; sta->sta.supp_rates[band] = supp_rates | - ieee80211_mandatory_rates(sband); + ieee80211_mandatory_rates(sband, scan_width); return ieee80211_ibss_finish_sta(sta); } @@ -476,6 +478,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, u64 beacon_timestamp, rx_timestamp; u32 supp_rates = 0; enum ieee80211_band band = rx_status->band; + enum nl80211_bss_scan_width scan_width; struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band]; bool rates_updated = false; @@ -504,9 +507,15 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, prev_rates = sta->sta.supp_rates[band]; /* make sure mandatory rates are always added */ - sta->sta.supp_rates[band] = supp_rates | - ieee80211_mandatory_rates(sband); + scan_width = NL80211_BSS_CHAN_WIDTH_20; + if (rx_status->flag & RX_FLAG_5MHZ) + scan_width = NL80211_BSS_CHAN_WIDTH_5; + if (rx_status->flag & RX_FLAG_10MHZ) + scan_width = NL80211_BSS_CHAN_WIDTH_10; + sta->sta.supp_rates[band] = supp_rates | + ieee80211_mandatory_rates(sband, + scan_width); if (sta->sta.supp_rates[band] != prev_rates) { ibss_dbg(sdata, "updated supp_rates set for %pM based on beacon/probe_resp (0x%x -> 0x%x)\n", @@ -640,6 +649,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata, struct sta_info *sta; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_supported_band *sband; + enum nl80211_bss_scan_width scan_width; int band; /* @@ -665,6 +675,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata, return; } band = chanctx_conf->def.chan->band; + scan_width = cfg80211_chandef_to_scan_width(&chanctx_conf->def); rcu_read_unlock(); sta = sta_info_alloc(sdata, addr, GFP_ATOMIC); @@ -676,7 +687,7 @@ void 
ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata, /* make sure mandatory rates are always added */ sband = local->hw.wiphy->bands[band]; sta->sta.supp_rates[band] = supp_rates | - ieee80211_mandatory_rates(sband); + ieee80211_mandatory_rates(sband, scan_width); spin_lock(&ifibss->incomplete_lock); list_add(&sta->list, &ifibss->incomplete_stations); diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c index 30c4920..0553fd4 100644 --- a/net/wireless/mesh.c +++ b/net/wireless/mesh.c @@ -167,9 +167,12 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev, * basic rates */ if (!setup->basic_rates) { + enum nl80211_bss_scan_width scan_width; struct ieee80211_supported_band *sband = rdev->wiphy.bands[setup->chandef.chan->band]; - setup->basic_rates = ieee80211_mandatory_rates(sband); + scan_width = cfg80211_chandef_to_scan_width(&setup->chandef); + setup->basic_rates = ieee80211_mandatory_rates(sband, + scan_width); } if (!cfg80211_reg_can_beacon(&rdev->wiphy, &setup->chandef)) diff --git a/net/wireless/util.c b/net/wireless/util.c index 74458b7..ce090c1 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -33,7 +33,8 @@ ieee80211_get_response_rate(struct ieee80211_supported_band *sband, } EXPORT_SYMBOL(ieee80211_get_response_rate); -u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband) +u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband, + enum nl80211_bss_scan_width scan_width) { struct ieee80211_rate *bitrates; u32 mandatory_rates = 0; @@ -43,10 +44,15 @@ u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband) if (WARN_ON(!sband)) return 1; - if (sband->band == IEEE80211_BAND_2GHZ) - mandatory_flag = IEEE80211_RATE_MANDATORY_B; - else + if (sband->band == IEEE80211_BAND_2GHZ) { + if (scan_width == NL80211_BSS_CHAN_WIDTH_5 || + scan_width == NL80211_BSS_CHAN_WIDTH_10) + mandatory_flag = IEEE80211_RATE_MANDATORY_G; + else + mandatory_flag = IEEE80211_RATE_MANDATORY_B; + } else { mandatory_flag = IEEE80211_RATE_MANDATORY_A; + } bitrates = sband->bitrates; for (i = 0; i < sband->n_bitrates; i++) -- cgit v0.10.2 From 0430c883470d0c9a23661ea9f02c56b1d91cf93c Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Mon, 8 Jul 2013 16:55:55 +0200 Subject: cfg80211/mac80211: use reduced txpower for 5 and 10 MHz Some regulations (like germany, but also FCC) express their transmission power limit in dBm/MHz or mW/MHz. To cope with that and be on the safe side, reduce the maximum power to half (10 MHz) or quarter (5 MHz) when operating on these reduced bandwidth channels. Signed-off-by: Simon Wunderlich Signed-off-by: Mathias Kretschmer Signed-off-by: Johannes Berg diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 88f1563..aeaf6df 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -461,6 +461,33 @@ ieee80211_chandef_rate_flags(struct cfg80211_chan_def *chandef) } /** + * ieee80211_chandef_max_power - maximum transmission power for the chandef + * + * In some regulations, the transmit power may depend on the configured channel + * bandwidth which may be defined as dBm/MHz. This function returns the actual + * max_power for non-standard (20 MHz) channels. 
+ * + * @chandef: channel definition for the channel + * + * Returns: maximum allowed transmission power in dBm for the chandef + */ +static inline int +ieee80211_chandef_max_power(struct cfg80211_chan_def *chandef) +{ + switch (chandef->width) { + case NL80211_CHAN_WIDTH_5: + return min(chandef->chan->max_reg_power - 6, + chandef->chan->max_power); + case NL80211_CHAN_WIDTH_10: + return min(chandef->chan->max_reg_power - 3, + chandef->chan->max_power); + default: + break; + } + return chandef->chan->max_power; +} + +/** * enum survey_info_flags - survey information flags * * @SURVEY_INFO_NOISE_DBM: noise (in dBm) was filled in diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index cc11759..4c41c11 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -54,7 +54,7 @@ bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata) return false; } - power = chanctx_conf->def.chan->max_power; + power = ieee80211_chandef_max_power(&chanctx_conf->def); rcu_read_unlock(); if (sdata->user_power_level != IEEE80211_UNSET_POWER_LEVEL) diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 091088a..6da98c7 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -151,7 +151,7 @@ static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local) changed |= IEEE80211_CONF_CHANGE_SMPS; } - power = chandef.chan->max_power; + power = ieee80211_chandef_max_power(&chandef); rcu_read_lock(); list_for_each_entry_rcu(sdata, &local->interfaces, list) { diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index f7552c2..211246b 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -747,7 +747,8 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) *pos++ = WLAN_EID_PWR_CAPABILITY; *pos++ = 2; *pos++ = 0; /* min tx power */ - *pos++ = chan->max_power; /* max tx power */ + /* max tx power */ + *pos++ = ieee80211_chandef_max_power(&chanctx_conf->def); /* 2. supported channels */ /* TODO: get this in reg domain format */ -- cgit v0.10.2 From 7ca15a0ae865067aac8d36e27e0acbe4a8f1e70a Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Mon, 8 Jul 2013 16:55:56 +0200 Subject: mac80211: allow scanning for 5/10 MHz channels in IBSS Use a chandef instead of just the channel for scanning, and enable 5/10 Mhz scanning for IBSS mode. Also reporting is changed to the new inform_bss functions. 
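For orientation, a condensed sketch (function name illustrative) of the reporting side added in the scan.c hunk below: the scan width is recovered from the RX status flags and handed to the width-aware inform call instead of cfg80211_inform_bss_frame():

static struct cfg80211_bss *
example_report_scan_result(struct wiphy *wiphy,
			   struct ieee80211_channel *channel,
			   struct ieee80211_rx_status *rx_status,
			   struct ieee80211_mgmt *mgmt, size_t len, s32 signal)
{
	enum nl80211_bss_scan_width scan_width = NL80211_BSS_CHAN_WIDTH_20;

	if (rx_status->flag & RX_FLAG_5MHZ)
		scan_width = NL80211_BSS_CHAN_WIDTH_5;
	if (rx_status->flag & RX_FLAG_10MHZ)
		scan_width = NL80211_BSS_CHAN_WIDTH_10;

	return cfg80211_inform_bss_width_frame(wiphy, channel, scan_width,
					       mgmt, len, signal, GFP_ATOMIC);
}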
Signed-off-by: Simon Wunderlich Signed-off-by: Mathias Kretschmer Signed-off-by: Johannes Berg diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 299603e..8fa1c27 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -51,6 +51,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, u32 bss_change, rate_flags, rates = 0, rates_added = 0; u8 supp_rates[IEEE80211_MAX_SUPP_RATES]; struct cfg80211_chan_def chandef; + enum nl80211_bss_scan_width scan_width; struct beacon_data *presp; int frame_len; int shift; @@ -271,8 +272,10 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, mod_timer(&ifibss->timer, round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL)); - bss = cfg80211_inform_bss_frame(local->hw.wiphy, chan, - mgmt, presp->head_len, 0, GFP_KERNEL); + scan_width = cfg80211_chandef_to_scan_width(&chandef); + bss = cfg80211_inform_bss_width_frame(local->hw.wiphy, chan, + scan_width, mgmt, + presp->head_len, 0, GFP_KERNEL); cfg80211_put_bss(local->hw.wiphy, bss); netif_carrier_on(sdata->dev); cfg80211_ibss_joined(sdata->dev, ifibss->bssid, GFP_KERNEL); @@ -726,6 +729,7 @@ static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata) static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + enum nl80211_bss_scan_width scan_width; sdata_assert_lock(sdata); @@ -747,8 +751,9 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata) sdata_info(sdata, "No active IBSS STAs - trying to scan for other IBSS networks with same SSID (merge)\n"); + scan_width = cfg80211_chandef_to_scan_width(&ifibss->chandef); ieee80211_request_ibss_scan(sdata, ifibss->ssid, ifibss->ssid_len, - NULL); + NULL, scan_width); } static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata) @@ -798,6 +803,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata) struct cfg80211_bss *cbss; struct ieee80211_channel *chan = NULL; const u8 *bssid = NULL; + enum nl80211_bss_scan_width scan_width; int active_ibss; u16 capability; @@ -846,8 +852,10 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata) IEEE80211_SCAN_INTERVAL)) { sdata_info(sdata, "Trigger new scan to find an IBSS to join\n"); + scan_width = cfg80211_chandef_to_scan_width(&ifibss->chandef); ieee80211_request_ibss_scan(sdata, ifibss->ssid, - ifibss->ssid_len, chan); + ifibss->ssid_len, chan, + scan_width); } else { int interval = IEEE80211_SCAN_INTERVAL; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 6596da6..23a191b 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1057,7 +1057,7 @@ struct ieee80211_local { struct cfg80211_ssid scan_ssid; struct cfg80211_scan_request *int_scan_req; struct cfg80211_scan_request *scan_req, *hw_scan_req; - struct ieee80211_channel *scan_channel; + struct cfg80211_chan_def scan_chandef; enum ieee80211_band hw_scan_band; int scan_channel_idx; int scan_ies_len; @@ -1337,7 +1337,8 @@ void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, void ieee80211_scan_work(struct work_struct *work); int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata, const u8 *ssid, u8 ssid_len, - struct ieee80211_channel *chan); + struct ieee80211_channel *chan, + enum nl80211_bss_scan_width scan_width); int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata, struct cfg80211_scan_request *req); void ieee80211_scan_cancel(struct ieee80211_local *local); 
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 6da98c7..25eb35b 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -102,17 +102,8 @@ static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local) offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL; - if (local->scan_channel) { - chandef.chan = local->scan_channel; - /* If scanning on oper channel, use whatever channel-type - * is currently in use. - */ - if (chandef.chan == local->_oper_chandef.chan) { - chandef = local->_oper_chandef; - } else { - chandef.width = NL80211_CHAN_WIDTH_20_NOHT; - chandef.center_freq1 = chandef.chan->center_freq; - } + if (local->scan_chandef.chan) { + chandef = local->scan_chandef; } else if (local->tmp_channel) { chandef.chan = local->tmp_channel; chandef.width = NL80211_CHAN_WIDTH_20_NOHT; diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 819d095..08afe74 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -66,6 +66,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local, struct cfg80211_bss *cbss; struct ieee80211_bss *bss; int clen, srlen; + enum nl80211_bss_scan_width scan_width; s32 signal = 0; if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) @@ -73,8 +74,15 @@ ieee80211_bss_info_update(struct ieee80211_local *local, else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC) signal = (rx_status->signal * 100) / local->hw.max_signal; - cbss = cfg80211_inform_bss_frame(local->hw.wiphy, channel, - mgmt, len, signal, GFP_ATOMIC); + scan_width = NL80211_BSS_CHAN_WIDTH_20; + if (rx_status->flag & RX_FLAG_5MHZ) + scan_width = NL80211_BSS_CHAN_WIDTH_5; + if (rx_status->flag & RX_FLAG_10MHZ) + scan_width = NL80211_BSS_CHAN_WIDTH_10; + + cbss = cfg80211_inform_bss_width_frame(local->hw.wiphy, channel, + scan_width, mgmt, len, signal, + GFP_ATOMIC); if (!cbss) return NULL; @@ -300,7 +308,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted, rcu_assign_pointer(local->scan_sdata, NULL); local->scanning = 0; - local->scan_channel = NULL; + local->scan_chandef.chan = NULL; /* Set power back to normal operating levels. */ ieee80211_hw_config(local, 0); @@ -635,11 +643,34 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local, { int skip; struct ieee80211_channel *chan; + enum nl80211_bss_scan_width oper_scan_width; skip = 0; chan = local->scan_req->channels[local->scan_channel_idx]; - local->scan_channel = chan; + local->scan_chandef.chan = chan; + local->scan_chandef.center_freq1 = chan->center_freq; + local->scan_chandef.center_freq2 = 0; + switch (local->scan_req->scan_width) { + case NL80211_BSS_CHAN_WIDTH_5: + local->scan_chandef.width = NL80211_CHAN_WIDTH_5; + break; + case NL80211_BSS_CHAN_WIDTH_10: + local->scan_chandef.width = NL80211_CHAN_WIDTH_10; + break; + case NL80211_BSS_CHAN_WIDTH_20: + /* If scanning on oper channel, use whatever channel-type + * is currently in use. 
+ */ + oper_scan_width = cfg80211_chandef_to_scan_width( + &local->_oper_chandef); + if (chan == local->_oper_chandef.chan && + oper_scan_width == local->scan_req->scan_width) + local->scan_chandef = local->_oper_chandef; + else + local->scan_chandef.width = NL80211_CHAN_WIDTH_20_NOHT; + break; + } if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL)) skip = 1; @@ -679,7 +710,7 @@ static void ieee80211_scan_state_suspend(struct ieee80211_local *local, unsigned long *next_delay) { /* switch back to the operating channel */ - local->scan_channel = NULL; + local->scan_chandef.chan = NULL; ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); /* disable PS */ @@ -821,7 +852,8 @@ int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata, int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata, const u8 *ssid, u8 ssid_len, - struct ieee80211_channel *chan) + struct ieee80211_channel *chan, + enum nl80211_bss_scan_width scan_width) { struct ieee80211_local *local = sdata->local; int ret = -EBUSY; @@ -871,6 +903,7 @@ int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata, local->int_scan_req->ssids = &local->scan_ssid; local->int_scan_req->n_ssids = 1; + local->int_scan_req->scan_width = scan_width; memcpy(local->int_scan_req->ssids[0].ssid, ssid, IEEE80211_MAX_SSID_LEN); local->int_scan_req->ssids[0].ssid_len = ssid_len; -- cgit v0.10.2 From 4b42aab1e8fcac2e9b835a7d93bf30dc9a032faa Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Mon, 8 Jul 2013 16:55:57 +0200 Subject: mac80211: return if IBSS chandef can not be used This was originally designed to fail when a 40+/40- mode can not be used, but basic modes (such as 5/10/20 MHz) must be handled with an error. Signed-off-by: Simon Wunderlich Signed-off-by: Johannes Berg diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 8fa1c27..449702d 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -85,6 +85,14 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, chandef = ifibss->chandef; if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) { + if (chandef.width == NL80211_CHAN_WIDTH_5 || + chandef.width == NL80211_CHAN_WIDTH_10 || + chandef.width == NL80211_CHAN_WIDTH_20_NOHT || + chandef.width == NL80211_CHAN_WIDTH_20) { + sdata_info(sdata, + "Failed to join IBSS, beacons forbidden\n"); + return; + } chandef.width = NL80211_CHAN_WIDTH_20; chandef.center_freq1 = chan->center_freq; } -- cgit v0.10.2 From bf3726457276c8773ec97da30c6459caf512b22f Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Mon, 8 Jul 2013 16:55:58 +0200 Subject: nl80211: allow 5 and 10 MHz channels for IBSS Whether the wiphy supports it or not is already checked, so what is left is to enable these channel types. 
Signed-off-by: Simon Wunderlich Signed-off-by: Mathias Kretschmer Signed-off-by: Johannes Berg diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index ef4c312c..03d4ef9 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -6355,6 +6355,8 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info) return -EINVAL; switch (ibss.chandef.width) { + case NL80211_CHAN_WIDTH_5: + case NL80211_CHAN_WIDTH_10: case NL80211_CHAN_WIDTH_20_NOHT: break; case NL80211_CHAN_WIDTH_20: -- cgit v0.10.2 From 2ec9c1f67ab1f58b5bf5ac19e4b61b9f75c83a04 Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Thu, 11 Jul 2013 18:07:46 +0200 Subject: mac80211: fix regression when initializing ibss wmm params There appear to be two regressions in ibss.c when calling ieee80211_sta_def_wmm_params(): * the second argument should be a rate length, not a rate array. This was introduced by my commit "mac80211: select and adjust bitrates according to channel mode" * the third argument is not initialized (anymore), making further checks within this function useless. Since ieee80211_sta_def_wmm_params() is only used by ibss anyway, remove the function entirely and handle the operating mode decision immediately. Signed-off-by: Simon Wunderlich Signed-off-by: Johannes Berg diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 449702d..83197c3 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -49,9 +49,9 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, struct ieee80211_supported_band *sband; struct cfg80211_bss *bss; u32 bss_change, rate_flags, rates = 0, rates_added = 0; - u8 supp_rates[IEEE80211_MAX_SUPP_RATES]; struct cfg80211_chan_def chandef; enum nl80211_bss_scan_width scan_width; + bool have_higher_than_11mbit = false; struct beacon_data *presp; int frame_len; int shift; @@ -149,6 +149,8 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, for (i = 0; i < sband->n_bitrates; i++) { if ((rate_flags & sband->bitrates[i].flags) != rate_flags) continue; + if (sband->bitrates[i].bitrate > 110) + have_higher_than_11mbit = true; rates |= BIT(i); rates_n++; @@ -270,11 +272,17 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, sdata->vif.bss_conf.use_short_slot = chan->band == IEEE80211_BAND_5GHZ; bss_change |= BSS_CHANGED_ERP_SLOT; + /* cf. 
IEEE 802.11 9.2.12 */ + if (chan->band == IEEE80211_BAND_2GHZ && have_higher_than_11mbit) + sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE; + else + sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE; + sdata->vif.bss_conf.ibss_joined = true; sdata->vif.bss_conf.ibss_creator = creator; ieee80211_bss_info_change_notify(sdata, bss_change); - ieee80211_sta_def_wmm_params(sdata, rates, supp_rates); + ieee80211_set_wmm_default(sdata, true); ifibss->state = IEEE80211_IBSS_MLME_JOINED; mod_timer(&ifibss->timer, diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 23a191b..3d32df1 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1615,9 +1615,6 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, u32 ratemask, bool directed, u32 tx_flags, struct ieee80211_channel *channel, bool scan); -void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, - const size_t supp_rates_len, - const u8 *supp_rates); u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata, struct ieee802_11_elems *elems, enum ieee80211_band band, u32 *basic_rates); diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 1e45891c..d23c5a7 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1073,32 +1073,6 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata, } } -void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, - const size_t supp_rates_len, - const u8 *supp_rates) -{ - struct ieee80211_chanctx_conf *chanctx_conf; - int i, have_higher_than_11mbit = 0; - - /* cf. IEEE 802.11 9.2.12 */ - for (i = 0; i < supp_rates_len; i++) - if ((supp_rates[i] & 0x7f) * 5 > 110) - have_higher_than_11mbit = 1; - - rcu_read_lock(); - chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); - - if (chanctx_conf && - chanctx_conf->def.chan->band == IEEE80211_BAND_2GHZ && - have_higher_than_11mbit) - sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE; - else - sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE; - rcu_read_unlock(); - - ieee80211_set_wmm_default(sdata, true); -} - void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, u16 transaction, u16 auth_alg, u16 status, const u8 *extra, size_t extra_len, const u8 *da, -- cgit v0.10.2 From 93c75786e8d8e8e245a479209a84b19292d08e7e Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Thu, 11 Jul 2013 20:29:49 +0200 Subject: mac80211: fix off-by-one regression in ibss beacon generation There is an off-by-one error in the beacon generation for the ibss mode, falsely a rate the extended supported rates which was already added to supported rates, messing up the beacon. This was introduced by commit "mac80211: select and adjust bitrates according to channel mode". 
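Schematically (element headers omitted, and rate_byte() is a hypothetical helper used only to keep the sketch short), the rate IEs are built in two passes, and the second pass must resume at the first rate the first pass did not emit; the ri++ added before the break in the hunk below restores exactly that invariant:

static u8 *example_fill_rate_ies(struct ieee80211_supported_band *sband,
				 u32 rates, u32 basic_rates, u8 *pos)
{
	int ri, added;

	/* Supported Rates element: at most 8 entries */
	for (ri = 0, added = 0; ri < sband->n_bitrates && added < 8; ri++) {
		if (!(rates & BIT(ri)))
			continue;
		*pos++ = rate_byte(sband, ri, basic_rates); /* hypothetical */
		added++;
	}

	/* Extended Supported Rates element: before the fix, 'ri' was left
	 * pointing at the 8th, already emitted rate, so that rate showed
	 * up in both elements and the rest of the element was shifted. */
	for (; ri < sband->n_bitrates; ri++) {
		if (!(rates & BIT(ri)))
			continue;
		*pos++ = rate_byte(sband, ri, basic_rates);
	}
	return pos;
}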
Signed-off-by: Simon Wunderlich Signed-off-by: Johannes Berg diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 83197c3..5e6836c 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -168,8 +168,10 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, if (basic_rates & BIT(ri)) basic = 0x80; *pos++ = basic | (u8) rate; - if (++rates_added == 8) + if (++rates_added == 8) { + ri++; /* continue at next rate for EXT_SUPP_RATES */ break; + } } if (sband->band == IEEE80211_BAND_2GHZ) { -- cgit v0.10.2 From b60e527a729cbb9c88b49cc10738a13328cd1ed2 Mon Sep 17 00:00:00 2001 From: Chun-Yeow Yeoh Date: Fri, 12 Jul 2013 18:55:22 +0800 Subject: mac80211: set forwarding in mesh capability info Set the Forwarding bit in Mesh Capability Info according to dot11MeshForwarding as defined in IEEE 802.11-2012 section 8.4.2.100.8. Signed-off-by: Chun-Yeow Yeoh Signed-off-by: Johannes Berg diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index e536f22..885a5f6 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -273,7 +273,9 @@ int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata, neighbors = min_t(int, neighbors, IEEE80211_MAX_MESH_PEERINGS); *pos++ = neighbors << 1; /* Mesh capability */ - *pos = IEEE80211_MESHCONF_CAPAB_FORWARDING; + *pos = 0x00; + *pos |= ifmsh->mshcfg.dot11MeshForwarding ? + IEEE80211_MESHCONF_CAPAB_FORWARDING : 0x00; *pos |= ifmsh->accepting_plinks ? IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00; /* Mesh PS mode. See IEEE802.11-2012 8.4.2.100.8 */ -- cgit v0.10.2 From fe0f3cd2a6a1f16fcaffec3e69e918bcf40ec76d Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sun, 14 Jul 2013 23:37:14 +0300 Subject: mac80211_hwsim: use ieee80211_free_txskb These are only strange error cases, so it's not really all that important, but the driver really should use ieee80211_free_txskb() instead of just dev_kfree_skb(). Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index cb34c78..11adae8 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -867,7 +867,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, if (WARN_ON(skb->len < 10)) { /* Should not happen; just a sanity check for addr1 use */ - dev_kfree_skb(skb); + ieee80211_free_txskb(hw, skb); return; } @@ -884,13 +884,13 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, } if (WARN(!channel, "TX w/o channel - queue = %d\n", txi->hw_queue)) { - dev_kfree_skb(skb); + ieee80211_free_txskb(hw, skb); return; } if (data->idle && !data->tmp_chan) { wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n"); - dev_kfree_skb(skb); + ieee80211_free_txskb(hw, skb); return; } -- cgit v0.10.2 From 2f5286e819b16f5dbf793f97f841333eedab5d68 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sun, 14 Jul 2013 23:38:33 +0300 Subject: mac80211_hwsim: claim uAPSD support Since mac80211 does everything, we can just claim it. 
Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 11adae8..0a439f8 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2309,7 +2309,8 @@ static int __init init_mac80211_hwsim(void) hw->flags |= IEEE80211_HW_SUPPORTS_RC_TABLE; hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS | - WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; + WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | + WIPHY_FLAG_AP_UAPSD; /* ask mac80211 to reserve space for magic */ hw->vif_data_size = sizeof(struct hwsim_vif_priv); -- cgit v0.10.2 From 5fd91aac790d6817dc99032d2774bc88f6ee4805 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sun, 14 Jul 2013 23:42:08 +0300 Subject: mac80211_hwsim: claim active monitor support It seems to actually work this way already, so we may need to do some work to make monitor interfaces be _not_ active in hwsim instead. Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 0a439f8..7b2a622 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2311,6 +2311,7 @@ static int __init init_mac80211_hwsim(void) hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS | WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | WIPHY_FLAG_AP_UAPSD; + hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR; /* ask mac80211 to reserve space for magic */ hw->vif_data_size = sizeof(struct hwsim_vif_priv); -- cgit v0.10.2 From c82b5a74cc739385db6e4275fe504a0e9469bf01 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sun, 14 Jul 2013 23:39:20 +0300 Subject: mac80211: make active monitor injection work w/ HW queue When a driver (like hwsim) uses HW queue control an active monitor vif needs to be used for the queues, make the code do that. Otherwise we'd bail out and drop the frames. Signed-off-by: Johannes Berg diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index f82301b..be4d3ca 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1272,6 +1272,10 @@ static bool __ieee80211_tx(struct ieee80211_local *local, switch (sdata->vif.type) { case NL80211_IFTYPE_MONITOR: + if (sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE) { + vif = &sdata->vif; + break; + } sdata = rcu_dereference(local->monitor_sdata); if (sdata) { vif = &sdata->vif; -- cgit v0.10.2 From 313b0a294f8cc92be4387186e8c9eef59c1c198a Mon Sep 17 00:00:00 2001 From: Inbal Hacohen Date: Mon, 24 Jun 2013 10:35:53 +0300 Subject: iwlwifi: move dump_fh into common code This means it can be shared for different transport layers in the future. 
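As a usage sketch (wrapper name illustrative): with the helper living in iwl-io.c, any transport can either log the FH registers on an error path with iwl_dump_fh(trans, NULL), or request a kmalloc'd text dump for debugfs, as in the hunks below:

static ssize_t example_fh_reg_read(struct iwl_trans *trans,
				   char __user *user_buf, size_t count,
				   loff_t *ppos)
{
	char *buf = NULL;
	ssize_t ret;
	int pos;

	ret = pos = iwl_dump_fh(trans, &buf);	/* allocates and fills buf */
	if (buf) {
		ret = simple_read_from_buffer(user_buf, count, ppos,
					      buf, pos);
		kfree(buf);
	}
	return ret;
}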
Signed-off-by: Inbal Hacohen Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c index 305c81f..dfa4d2e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-io.c +++ b/drivers/net/wireless/iwlwifi/iwl-io.c @@ -33,6 +33,8 @@ #include "iwl-io.h" #include "iwl-csr.h" #include "iwl-debug.h" +#include "iwl-fh.h" +#include "iwl-csr.h" #define IWL_POLL_INTERVAL 10 /* microseconds */ @@ -166,3 +168,68 @@ void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask) } } IWL_EXPORT_SYMBOL(iwl_clear_bits_prph); + +static const char *get_fh_string(int cmd) +{ +#define IWL_CMD(x) case x: return #x + switch (cmd) { + IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG); + IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG); + IWL_CMD(FH_RSCSR_CHNL0_WPTR); + IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG); + IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG); + IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG); + IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV); + IWL_CMD(FH_TSSR_TX_STATUS_REG); + IWL_CMD(FH_TSSR_TX_ERROR_REG); + default: + return "UNKNOWN"; + } +#undef IWL_CMD +} + +int iwl_dump_fh(struct iwl_trans *trans, char **buf) +{ + int i; + static const u32 fh_tbl[] = { + FH_RSCSR_CHNL0_STTS_WPTR_REG, + FH_RSCSR_CHNL0_RBDCB_BASE_REG, + FH_RSCSR_CHNL0_WPTR, + FH_MEM_RCSR_CHNL0_CONFIG_REG, + FH_MEM_RSSR_SHARED_CTRL_REG, + FH_MEM_RSSR_RX_STATUS_REG, + FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV, + FH_TSSR_TX_STATUS_REG, + FH_TSSR_TX_ERROR_REG + }; + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (buf) { + int pos = 0; + size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40; + + *buf = kmalloc(bufsz, GFP_KERNEL); + if (!*buf) + return -ENOMEM; + + pos += scnprintf(*buf + pos, bufsz - pos, + "FH register values:\n"); + + for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) + pos += scnprintf(*buf + pos, bufsz - pos, + " %34s: 0X%08x\n", + get_fh_string(fh_tbl[i]), + iwl_read_direct32(trans, fh_tbl[i])); + + return pos; + } +#endif + + IWL_ERR(trans, "FH register values:\n"); + for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) + IWL_ERR(trans, " %34s: 0X%08x\n", + get_fh_string(fh_tbl[i]), + iwl_read_direct32(trans, fh_tbl[i])); + + return 0; +} diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h index fd9f5b9..63d10ec0 100644 --- a/drivers/net/wireless/iwlwifi/iwl-io.h +++ b/drivers/net/wireless/iwlwifi/iwl-io.h @@ -77,4 +77,7 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs, u32 bits, u32 mask); void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask); +/* Error handling */ +int iwl_dump_fh(struct iwl_trans *trans, char **buf); + #endif diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h index b654dcd..fa22639 100644 --- a/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/iwlwifi/pcie/internal.h @@ -392,7 +392,6 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans); /***************************************************** * Error handling ******************************************************/ -int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf); void iwl_pcie_dump_csr(struct iwl_trans *trans); /***************************************************** diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index fd848cd..5fdb4ee 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c @@ -793,7 +793,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) } iwl_pcie_dump_csr(trans); - 
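A stand-alone illustration (not driver code) of why the declarations were redundant: the static handlers are always defined before the *_FILE_OPS macro for the same name is expanded, so the designated initializer can reference them directly:

static ssize_t iwl_dbgfs_example_read(struct file *file,
				      char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	return 0;	/* placeholder body */
}

#define DEBUGFS_READ_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
};

/* expands to an ops struct that points at the function defined above */
DEBUGFS_READ_FILE_OPS(example);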
iwl_pcie_dump_fh(trans, NULL); + iwl_dump_fh(trans, NULL); set_bit(STATUS_FW_ERROR, &trans_pcie->status); clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 826c156..82194e8 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -1033,71 +1033,6 @@ static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg, spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); } -static const char *get_fh_string(int cmd) -{ -#define IWL_CMD(x) case x: return #x - switch (cmd) { - IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG); - IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG); - IWL_CMD(FH_RSCSR_CHNL0_WPTR); - IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG); - IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG); - IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG); - IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV); - IWL_CMD(FH_TSSR_TX_STATUS_REG); - IWL_CMD(FH_TSSR_TX_ERROR_REG); - default: - return "UNKNOWN"; - } -#undef IWL_CMD -} - -int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf) -{ - int i; - static const u32 fh_tbl[] = { - FH_RSCSR_CHNL0_STTS_WPTR_REG, - FH_RSCSR_CHNL0_RBDCB_BASE_REG, - FH_RSCSR_CHNL0_WPTR, - FH_MEM_RCSR_CHNL0_CONFIG_REG, - FH_MEM_RSSR_SHARED_CTRL_REG, - FH_MEM_RSSR_RX_STATUS_REG, - FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV, - FH_TSSR_TX_STATUS_REG, - FH_TSSR_TX_ERROR_REG - }; - -#ifdef CONFIG_IWLWIFI_DEBUGFS - if (buf) { - int pos = 0; - size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40; - - *buf = kmalloc(bufsz, GFP_KERNEL); - if (!*buf) - return -ENOMEM; - - pos += scnprintf(*buf + pos, bufsz - pos, - "FH register values:\n"); - - for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) - pos += scnprintf(*buf + pos, bufsz - pos, - " %34s: 0X%08x\n", - get_fh_string(fh_tbl[i]), - iwl_read_direct32(trans, fh_tbl[i])); - - return pos; - } -#endif - - IWL_ERR(trans, "FH register values:\n"); - for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) - IWL_ERR(trans, " %34s: 0X%08x\n", - get_fh_string(fh_tbl[i]), - iwl_read_direct32(trans, fh_tbl[i])); - - return 0; -} - static const char *get_csr_string(int cmd) { #define IWL_CMD(x) case x: return #x @@ -1390,7 +1325,7 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file, int pos = 0; ssize_t ret = -EFAULT; - ret = pos = iwl_pcie_dump_fh(trans, &buf); + ret = pos = iwl_dump_fh(trans, &buf); if (buf) { ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); -- cgit v0.10.2 From 2be01fa8f5cd6feffb6c50dae20d846dad8b37ba Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 26 Jun 2013 13:46:20 +0200 Subject: iwlwifi: remove forward debugfs function declarations There's no need to have 'forward' debugfs function declarations as part of the macros because the macros are always used after the static functions are defined already, so remove them. 
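Call-site effect, as sketched from the sta.c and tt.c hunks below; the LQ command is no longer threaded through, the function picks it up from mvmsta->lq_sta.lq itself:

	/* before */
	ret = iwl_mvm_tx_protection(mvm, &mvmsta->lq_sta.lq, mvmsta, true);

	/* after */
	ret = iwl_mvm_tx_protection(mvm, mvmsta, true);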
Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c index d532948..d94f8ab 100644 --- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c +++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c @@ -69,19 +69,7 @@ } while (0) /* file operation */ -#define DEBUGFS_READ_FUNC(name) \ -static ssize_t iwl_dbgfs_##name##_read(struct file *file, \ - char __user *user_buf, \ - size_t count, loff_t *ppos); - -#define DEBUGFS_WRITE_FUNC(name) \ -static ssize_t iwl_dbgfs_##name##_write(struct file *file, \ - const char __user *user_buf, \ - size_t count, loff_t *ppos); - - #define DEBUGFS_READ_FILE_OPS(name) \ - DEBUGFS_READ_FUNC(name); \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .read = iwl_dbgfs_##name##_read, \ .open = simple_open, \ @@ -89,7 +77,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \ }; #define DEBUGFS_WRITE_FILE_OPS(name) \ - DEBUGFS_WRITE_FUNC(name); \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .write = iwl_dbgfs_##name##_write, \ .open = simple_open, \ @@ -98,8 +85,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \ #define DEBUGFS_READ_WRITE_FILE_OPS(name) \ - DEBUGFS_READ_FUNC(name); \ - DEBUGFS_WRITE_FUNC(name); \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .write = iwl_dbgfs_##name##_write, \ .read = iwl_dbgfs_##name##_read, \ diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 82194e8..bc908d3 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -1113,18 +1113,7 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans) } while (0) /* file operation */ -#define DEBUGFS_READ_FUNC(name) \ -static ssize_t iwl_dbgfs_##name##_read(struct file *file, \ - char __user *user_buf, \ - size_t count, loff_t *ppos); - -#define DEBUGFS_WRITE_FUNC(name) \ -static ssize_t iwl_dbgfs_##name##_write(struct file *file, \ - const char __user *user_buf, \ - size_t count, loff_t *ppos); - #define DEBUGFS_READ_FILE_OPS(name) \ - DEBUGFS_READ_FUNC(name); \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .read = iwl_dbgfs_##name##_read, \ .open = simple_open, \ @@ -1132,7 +1121,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \ }; #define DEBUGFS_WRITE_FILE_OPS(name) \ - DEBUGFS_WRITE_FUNC(name); \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .write = iwl_dbgfs_##name##_write, \ .open = simple_open, \ @@ -1140,8 +1128,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \ }; #define DEBUGFS_READ_WRITE_FILE_OPS(name) \ - DEBUGFS_READ_FUNC(name); \ - DEBUGFS_WRITE_FUNC(name); \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .write = iwl_dbgfs_##name##_write, \ .read = iwl_dbgfs_##name##_read, \ -- cgit v0.10.2 From e126b5d9c58870c0866357c951b4da9ed005f364 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 28 Jun 2013 13:39:18 +0200 Subject: iwlwifi: mvm: remove unneeded argument from iwl_mvm_tx_protection() The LQ command argument isn't needed, it's always taken from the station struct that's already passed, remove the argument. 
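In sketch form, the two pieces from the hunks below: the 7000-family base params opt in, and the transport applies the L0S/L1/CLKPM disable workaround only when the flag is not set:

	/* iwl-7000.c */
	.pcie_l1_allowed = true,

	/* pcie/trans.c */
	if (!cfg->base_params->pcie_l1_allowed)
		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
					     PCIE_LINK_STATE_L1 |
					     PCIE_LINK_STATE_CLKPM);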
Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index b328a98..3856c28 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@ -3193,13 +3193,14 @@ void iwl_mvm_rate_control_unregister(void) * iwl_mvm_tx_protection - Gets LQ command, change it to enable/disable * Tx protection, according to this rquest and previous requests, * and send the LQ command. - * @lq: The LQ command * @mvmsta: The station * @enable: Enable Tx protection? */ -int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, - struct iwl_mvm_sta *mvmsta, bool enable) +int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, + bool enable) { + struct iwl_lq_cmd *lq = &mvmsta->lq_sta.lq; + lockdep_assert_held(&mvm->mutex); if (enable) { diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h index cff4f6d..29d699a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/iwlwifi/mvm/rs.h @@ -404,7 +404,7 @@ extern void iwl_mvm_rate_control_unregister(void); struct iwl_mvm_sta; -int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, - struct iwl_mvm_sta *mvmsta, bool enable); +int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, + bool enable); #endif /* __rs__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c index 62fe520..0321bd3 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/iwlwifi/mvm/sta.c @@ -807,8 +807,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, * method for HT traffic * this function also sends the LQ command */ - return iwl_mvm_tx_protection(mvm, &mvmsta->lq_sta.lq, - mvmsta, true); + return iwl_mvm_tx_protection(mvm, mvmsta, true); /* * TODO: remove the TLC_RTS flag when we tear down the last * AGG session (agg_tids_count in DVM) diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c index d6ae7f1..f5ba185 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/iwlwifi/mvm/tt.c @@ -391,8 +391,7 @@ static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable) mvmsta = (void *)sta->drv_priv; if (enable == mvmsta->tt_tx_protection) continue; - err = iwl_mvm_tx_protection(mvm, &mvmsta->lq_sta.lq, - mvmsta, enable); + err = iwl_mvm_tx_protection(mvm, mvmsta, enable); if (err) { IWL_ERR(mvm, "Failed to %s Tx protection\n", enable ? "enable" : "disable"); -- cgit v0.10.2 From f2532b04b2ecde7c71ff30d0b26bd6d6166c4bf6 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Tue, 2 Jul 2013 15:47:29 +0300 Subject: iwlwifi: pcie: don't disable L1 for newest NICs In newest NICs (7000 family and up), L1 is supported, so avoid to disable it. 
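In outline (condensed from the ops.c hunk below), the error handler now dumps the SRAM data section as well, but only when automatic firmware restart is disabled:

	iwl_mvm_dump_nic_error_log(mvm);
	if (!iwlwifi_mod_params.restart_fw)
		iwl_mvm_dump_sram(mvm);		/* new: hex dump of SRAM */
	iwl_mvm_nic_restart(mvm);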
Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index 22b7fa5..7f61674 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c @@ -99,6 +99,7 @@ static const struct iwl_base_params iwl7000_base_params = { .wd_timeout = IWL_LONG_WD_TIMEOUT, .max_event_log_size = 512, .shadow_reg_enable = true, + .pcie_l1_allowed = true, }; static const struct iwl_ht_params iwl7000_ht_params = { diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h index 83b9ff6..87fe955 100644 --- a/drivers/net/wireless/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/iwlwifi/iwl-config.h @@ -152,6 +152,7 @@ struct iwl_base_params { unsigned int wd_timeout; u32 max_event_log_size; const bool shadow_reg_enable; + const bool pcie_l1_allowed; }; /* diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index bc908d3..cec0c89 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -1418,10 +1418,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, spin_lock_init(&trans_pcie->reg_lock); init_waitqueue_head(&trans_pcie->ucode_write_waitq); - /* W/A - seems to solve weird behavior. We need to remove this if we - * don't want to stay in L1 all the time. This wastes a lot of power */ - pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | - PCIE_LINK_STATE_CLKPM); + if (!cfg->base_params->pcie_l1_allowed) { + /* + * W/A - seems to solve weird behavior. We need to remove this + * if we don't want to stay in L1 all the time. This wastes a + * lot of power. + */ + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | + PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_CLKPM); + } if (pci_enable_device(pdev)) { err = -ENODEV; -- cgit v0.10.2 From 961de6a5ee568881321cf97e826cc37e6f9bcf84 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 4 Jul 2013 18:00:08 +0200 Subject: iwlwifi: pcie: make unused queue warning more readable Use the return value of WARN_ONCE() and add a message with the queue ID that's getting used. Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index c47c921..134f7a1 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -1619,10 +1619,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, txq = &trans_pcie->txq[txq_id]; q = &txq->q; - if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) { - WARN_ON_ONCE(1); + if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used), + "TX on unused queue %d\n", txq_id)) return -EINVAL; - } spin_lock(&txq->lock); -- cgit v0.10.2 From 0c393d4eac31912ad6ea362d4f9bf78aa1fe9a69 Mon Sep 17 00:00:00 2001 From: Matti Gottlieb Date: Thu, 4 Jul 2013 14:47:30 +0300 Subject: iwlwifi: mvm: sram hex dump on NIC error Add sram dump on NIC error for debug improvement. 
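A sketch of how the two implementations are expected to be selected at init time (the mvm->pm_ops member is an assumption here; the exact hook-up lives in ops.c and power.c, which are not part of the hunks quoted below): firmware advertising the uAPSD TLV flag gets the new MAC_PM_POWER_TABLE (0xA9) path, everything else stays on the legacy POWER_TABLE_CMD (0x77) path in power_legacy.c:

	/* assumption: mvm->pm_ops is the dispatch point for power code */
	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD)
		mvm->pm_ops = &pm_mac_ops;	/* new API, power.c */
	else
		mvm->pm_ops = &pm_legacy_ops;	/* legacy API, power_legacy.c */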
Signed-off-by: Matti Gottlieb Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index d40d7db..3aaecbc 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -524,6 +524,7 @@ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band); u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx); void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm); +void iwl_mvm_dump_sram(struct iwl_mvm *mvm); u8 first_antenna(u8 mask); u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx); diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index af79a14..0a4eb27 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -678,6 +678,8 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode) struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); iwl_mvm_dump_nic_error_log(mvm); + if (!iwlwifi_mod_params.restart_fw) + iwl_mvm_dump_sram(mvm); iwl_mvm_nic_restart(mvm); } diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c index 1e13328..a9c3574 100644 --- a/drivers/net/wireless/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/iwlwifi/mvm/utils.c @@ -453,6 +453,29 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler); } +void iwl_mvm_dump_sram(struct iwl_mvm *mvm) +{ + const struct fw_img *img; + int ofs, len = 0; + u8 *buf; + + if (!mvm->ucode_loaded) + return; + + img = &mvm->fw->img[mvm->cur_ucode]; + ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; + len = img->sec[IWL_UCODE_SECTION_DATA].len; + + buf = kzalloc(len, GFP_KERNEL); + if (!buf) + return; + + iwl_trans_read_mem_bytes(mvm->trans, ofs, buf, len); + iwl_print_hex_error(mvm->trans, buf, len); + + kfree(buf); +} + /** * iwl_mvm_send_lq_cmd() - Send link quality command * @init: This command is sent as part of station initialization right -- cgit v0.10.2 From e811ada7a6a3f720c178ba29998ce9f9685f9df3 Mon Sep 17 00:00:00 2001 From: Alexander Bondar Date: Sun, 10 Mar 2013 15:29:44 +0200 Subject: iwlwifi: mvm: Upgrade to a new power management uAPSD API Change power management implementation to support new host-device API containing uAPSD parameters. Verify FW support for this new API. Use the new power table command (0xA9) to configure power management. Use the legacy command (0x77) if FW does not support the new API. New file power_legacy.c is introduced for legacy implementation. Signed-off-by: Alexander Bondar Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h index f844d5c..f26f126 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw.h @@ -74,6 +74,7 @@ * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w). * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P. 
* @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS + * @IWL_UCODE_TLV_FLAGS_UAPSD: This uCode image supports uAPSD */ enum iwl_ucode_tlv_flag { IWL_UCODE_TLV_FLAGS_PAN = BIT(0), @@ -81,6 +82,7 @@ enum iwl_ucode_tlv_flag { IWL_UCODE_TLV_FLAGS_MFP = BIT(2), IWL_UCODE_TLV_FLAGS_P2P = BIT(3), IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4), + IWL_UCODE_TLV_FLAGS_UAPSD = BIT(6), }; /* The default calibrate table size if not specified by firmware file */ diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile index ff856e5..6d73817 100644 --- a/drivers/net/wireless/iwlwifi/mvm/Makefile +++ b/drivers/net/wireless/iwlwifi/mvm/Makefile @@ -2,7 +2,7 @@ obj-$(CONFIG_IWLMVM) += iwlmvm.o iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o iwlmvm-y += scan.o time-event.o rs.o -iwlmvm-y += power.o bt-coex.o +iwlmvm-y += power.o power_legacy.o bt-coex.o iwlmvm-y += led.o tt.o iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o iwlmvm-$(CONFIG_PM_SLEEP) += d3.o diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c index e56ed2a..5d669da 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c @@ -424,40 +424,11 @@ static ssize_t iwl_dbgfs_pm_params_read(struct file *file, struct ieee80211_vif *vif = file->private_data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->dbgfs_data; - struct iwl_powertable_cmd cmd = {}; char buf[256]; int bufsz = sizeof(buf); - int pos = 0; + int pos; - iwl_mvm_power_build_cmd(mvm, vif, &cmd); - - pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n", - (cmd.flags & - cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ? - 0 : 1); - pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n", - le32_to_cpu(cmd.skip_dtim_periods)); - pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n", - iwlmvm_mod_params.power_scheme); - pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n", - le16_to_cpu(cmd.flags)); - pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n", - cmd.keep_alive_seconds); - - if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) { - pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n", - (cmd.flags & - cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ? - 1 : 0); - pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n", - le32_to_cpu(cmd.rx_data_timeout)); - pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n", - le32_to_cpu(cmd.tx_data_timeout)); - if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK)) - pos += scnprintf(buf+pos, bufsz-pos, - "lprx_rssi_threshold = %d\n", - le32_to_cpu(cmd.lprx_rssi_threshold)); - } + pos = iwl_mvm_power_dbgfs_read(mvm, vif, buf, bufsz); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h index a6da359..300211d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h @@ -79,6 +79,10 @@ * '1' Driver enables PM (use rest of parameters) * @POWER_FLAGS_SKIP_OVER_DTIM_MSK: '0' PM have to walk up every DTIM, * '1' PM could sleep over DTIM till listen Interval. + * @POWER_FLAGS_SNOOZE_ENA_MSK: Enable snoozing only if uAPSD is enabled and all + * access categories are both delivery and trigger enabled. 
+ * @POWER_FLAGS_BT_SCO_ENA: Enable BT SCO coex only if uAPSD and + * PBW Snoozing enabled * @POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask * @POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable. */ @@ -86,6 +90,8 @@ enum iwl_power_flags { POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0), POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK = BIT(1), POWER_FLAGS_SKIP_OVER_DTIM_MSK = BIT(2), + POWER_FLAGS_SNOOZE_ENA_MSK = BIT(5), + POWER_FLAGS_BT_SCO_ENA = BIT(8), POWER_FLAGS_ADVANCE_PM_ENA_MSK = BIT(9), POWER_FLAGS_LPRX_ENA_MSK = BIT(11), }; @@ -93,7 +99,8 @@ enum iwl_power_flags { #define IWL_POWER_VEC_SIZE 5 /** - * struct iwl_powertable_cmd - Power Table Command + * struct iwl_powertable_cmd - legacy power command. Beside old API support this + * is used also with a new power API for device wide power settings. * POWER_TABLE_CMD = 0x77 (command, has simple generic response) * * @flags: Power table command flags from POWER_FLAGS_* @@ -125,6 +132,72 @@ struct iwl_powertable_cmd { } __packed; /** + * struct iwl_mac_power_cmd - New power command containing uAPSD support + * MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response) + * @id_and_color: MAC contex identifier + * @flags: Power table command flags from POWER_FLAGS_* + * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec. + * Minimum allowed:- 3 * DTIM. Keep alive period must be + * set regardless of power scheme or current power state. + * FW use this value also when PM is disabled. + * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to + * PSM transition - legacy PM + * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to + * PSM transition - legacy PM + * @sleep_interval: not in use + * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag + * is set. For example, if it is required to skip over + * one DTIM, this value need to be set to 2 (DTIM periods). + * @rx_data_timeout_uapsd: Minimum time (usec) from last Rx packet for AM to + * PSM transition - uAPSD + * @tx_data_timeout_uapsd: Minimum time (usec) from last Tx packet for AM to + * PSM transition - uAPSD + * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled. + * Default: 80dbm + * @num_skip_dtim: Number of DTIMs to skip if Skip over DTIM flag is set + * @snooze_interval: TBD + * @snooze_window: TBD + * @snooze_step: TBD + * @qndp_tid: TID client shall use for uAPSD QNDP triggers + * @uapsd_ac_flags: Set trigger-enabled and delivery-enabled indication for + * each corresponding AC. + * Use IEEE80211_WMM_IE_STA_QOSINFO_AC* for correct values. + * @uapsd_max_sp: Use IEEE80211_WMM_IE_STA_QOSINFO_SP_* for correct + * values. 
+ * @heavy_traffic_thr_tx_pkts: TX threshold measured in number of packets + * @heavy_traffic_thr_rx_pkts: RX threshold measured in number of packets + * @heavy_traffic_thr_tx_load: TX threshold measured in load's percentage + * @heavy_traffic_thr_rx_load: RX threshold measured in load's percentage + * @limited_ps_threshold: +*/ +struct iwl_mac_power_cmd { + /* CONTEXT_DESC_API_T_VER_1 */ + __le32 id_and_color; + + /* CLIENT_PM_POWER_TABLE_S_VER_1 */ + __le16 flags; + __le16 keep_alive_seconds; + __le32 rx_data_timeout; + __le32 tx_data_timeout; + __le32 rx_data_timeout_uapsd; + __le32 tx_data_timeout_uapsd; + u8 lprx_rssi_threshold; + u8 skip_dtim_periods; + __le16 snooze_interval; + __le16 snooze_window; + u8 snooze_step; + u8 qndp_tid; + u8 uapsd_ac_flags; + u8 uapsd_max_sp; + u8 heavy_traffic_threshold_tx_packets; + u8 heavy_traffic_threshold_rx_packets; + u8 heavy_traffic_threshold_tx_percentage; + u8 heavy_traffic_threshold_rx_percentage; + u8 limited_ps_threshold; + u8 reserved; +} __packed; + +/** * struct iwl_beacon_filter_cmd * REPLY_BEACON_FILTERING_CMD = 0xd2 (command) * @id_and_color: MAC contex identifier diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h index cbfb3be..44614f5 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h @@ -136,7 +136,7 @@ enum { CALIB_RES_NOTIF_PHY_DB = 0x6b, /* PHY_DB_CMD = 0x6c, */ - /* Power */ + /* Power - legacy power table command */ POWER_TABLE_CMD = 0x77, /* Thermal Throttling*/ @@ -166,6 +166,9 @@ enum { MISSED_BEACONS_NOTIFICATION = 0xa2, + /* Power - new power table command */ + MAC_PM_POWER_TABLE = 0xa9, + REPLY_RX_PHY_CMD = 0xc0, REPLY_RX_MPDU_CMD = 0xc1, BA_NOTIF = 0xc5, diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index e08683b..2d07605 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -774,9 +774,14 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, if (ret) IWL_ERR(mvm, "failed to update quotas\n"); } - ret = iwl_mvm_power_update_mode(mvm, vif); - if (ret) - IWL_ERR(mvm, "failed to update power mode\n"); + if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD)) { + /* Workaround for FW bug, otherwise FW disables device + * power save upon disassociation + */ + ret = iwl_mvm_power_update_mode(mvm, vif); + if (ret) + IWL_ERR(mvm, "failed to update power mode\n"); + } } else if (changes & BSS_CHANGED_BEACON_INFO) { /* * We received a beacon _after_ association so diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index 3aaecbc..caa6a175 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -91,6 +91,9 @@ enum iwl_mvm_tx_fifo { }; extern struct ieee80211_ops iwl_mvm_hw_ops; +extern const struct iwl_mvm_power_ops pm_legacy_ops; +extern const struct iwl_mvm_power_ops pm_mac_ops; + /** * struct iwl_mvm_mod_params - module parameters for iwlmvm * @init_dbg: if true, then the NIC won't be stopped if the INIT fw asserted. 
@@ -150,6 +153,17 @@ enum iwl_power_scheme { #define IWL_CONN_MAX_LISTEN_INTERVAL 70 +struct iwl_mvm_power_ops { + int (*power_update_mode)(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); + int (*power_disable)(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +#ifdef CONFIG_IWLWIFI_DEBUGFS + int (*power_dbgfs_read)(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + char *buf, int bufsz); +#endif +}; + + #ifdef CONFIG_IWLWIFI_DEBUGFS enum iwl_dbgfs_pm_mask { MVM_DEBUGFS_PM_KEEP_ALIVE = BIT(0), @@ -163,7 +177,7 @@ enum iwl_dbgfs_pm_mask { }; struct iwl_dbgfs_pm { - u8 keep_alive_seconds; + u16 keep_alive_seconds; u32 rx_data_timeout; u32 tx_data_timeout; bool skip_over_dtim; @@ -481,6 +495,8 @@ struct iwl_mvm { /* Thermal Throttling and CTkill */ struct iwl_mvm_tt_mgmt thermal_throttle; s32 temperature; /* Celsius */ + + const struct iwl_mvm_power_ops *pm_ops; }; /* Extract MVM priv from op_mode and _hw */ @@ -660,10 +676,26 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, u8 flags, bool init); /* power managment */ -int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_powertable_cmd *cmd); +static inline int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + return mvm->pm_ops->power_update_mode(mvm, vif); +} + +static inline int iwl_mvm_power_disable(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + return mvm->pm_ops->power_disable(mvm, vif); +} + +#ifdef CONFIG_IWLWIFI_DEBUGFS +static inline int iwl_mvm_power_dbgfs_read(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + char *buf, int bufsz) +{ + return mvm->pm_ops->power_dbgfs_read(mvm, vif, buf, bufsz); +} +#endif int iwl_mvm_leds_init(struct iwl_mvm *mvm); void iwl_mvm_leds_exit(struct iwl_mvm *mvm); @@ -707,6 +739,10 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm, + struct iwl_beacon_filter_cmd *cmd); +int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, bool enable); /* SMPS */ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index 0a4eb27..fa1e1ce 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -301,6 +301,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = { CMD(MCAST_FILTER_CMD), CMD(REPLY_BEACON_FILTERING_CMD), CMD(REPLY_THERMAL_MNG_BACKOFF), + CMD(MAC_PM_POWER_TABLE), }; #undef CMD @@ -431,6 +432,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, if (err) goto out_unregister; + if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD) + mvm->pm_ops = &pm_mac_ops; + else + mvm->pm_ops = &pm_legacy_ops; + return op_mode; out_unregister: diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c index e7ca965..644bf47 100644 --- a/drivers/net/wireless/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/iwlwifi/mvm/power.c @@ -75,8 +75,8 @@ #define POWER_KEEP_ALIVE_PERIOD_SEC 25 -static int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm, - struct iwl_beacon_filter_cmd *cmd) +int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm, + struct 
iwl_beacon_filter_cmd *cmd) { int ret; @@ -106,8 +106,8 @@ static int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm, return ret; } -static int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, bool enable) +int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, bool enable) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_beacon_filter_cmd cmd = { @@ -124,13 +124,14 @@ static int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm, } static void iwl_mvm_power_log(struct iwl_mvm *mvm, - struct iwl_powertable_cmd *cmd) + struct iwl_mac_power_cmd *cmd) { IWL_DEBUG_POWER(mvm, - "Sending power table command for power level %d, flags = 0x%X\n", - iwlmvm_mod_params.power_scheme, + "Sending power table command on mac id 0x%X for power level %d, flags = 0x%X\n", + cmd->id_and_color, iwlmvm_mod_params.power_scheme, le16_to_cpu(cmd->flags)); - IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n", cmd->keep_alive_seconds); + IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n", + le16_to_cpu(cmd->keep_alive_seconds)); if (cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) { IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n", @@ -139,15 +140,16 @@ static void iwl_mvm_power_log(struct iwl_mvm *mvm, le32_to_cpu(cmd->tx_data_timeout)); if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n", - le32_to_cpu(cmd->skip_dtim_periods)); + cmd->skip_dtim_periods); if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK)) IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n", - le32_to_cpu(cmd->lprx_rssi_threshold)); + cmd->lprx_rssi_threshold); } } -void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_powertable_cmd *cmd) +static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mac_power_cmd *cmd) { struct ieee80211_hw *hw = mvm->hw; struct ieee80211_chanctx_conf *chanctx_conf; @@ -158,19 +160,26 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif __maybe_unused = iwl_mvm_vif_from_mac80211(vif); + cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, + mvmvif->color)); + dtimper = hw->conf.ps_dtim_period ?: 1; + /* * Regardless of power management state the driver must set * keep alive period. FW will use it for sending keep alive NDPs - * immediately after association. + * immediately after association. 
Check that keep alive period + * is at least 3 * DTIM */ - cmd->keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC; + dtimper_msec = dtimper * vif->bss_conf.beacon_int; + keep_alive = max_t(int, 3 * dtimper_msec, + MSEC_PER_SEC * POWER_KEEP_ALIVE_PERIOD_SEC); + keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC); + cmd->keep_alive_seconds = cpu_to_le16(keep_alive); if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) return; cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK); - if (!vif->bss_conf.assoc) - cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK); #ifdef CONFIG_IWLWIFI_DEBUGFS if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF && @@ -186,12 +195,9 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, (vif->bss_conf.beacon_rate->bitrate == 10 || vif->bss_conf.beacon_rate->bitrate == 60)) { cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK); - cmd->lprx_rssi_threshold = - cpu_to_le32(POWER_LPRX_RSSI_THRESHOLD); + cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD; } - dtimper = hw->conf.ps_dtim_period ?: 1; - /* Check if radar detection is required on current channel */ rcu_read_lock(); chanctx_conf = rcu_dereference(vif->chanctx_conf); @@ -207,16 +213,9 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP || mvm->cur_ucode == IWL_UCODE_WOWLAN)) { cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); - cmd->skip_dtim_periods = cpu_to_le32(3); + cmd->skip_dtim_periods = 3; } - /* Check that keep alive period is at least 3 * DTIM */ - dtimper_msec = dtimper * vif->bss_conf.beacon_int; - keep_alive = max_t(int, 3 * dtimper_msec, - MSEC_PER_SEC * cmd->keep_alive_seconds); - keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC); - cmd->keep_alive_seconds = keep_alive; - if (mvm->cur_ucode != IWL_UCODE_WOWLAN) { cmd->rx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC); cmd->tx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC); @@ -227,7 +226,8 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, #ifdef CONFIG_IWLWIFI_DEBUGFS if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE) - cmd->keep_alive_seconds = mvmvif->dbgfs_pm.keep_alive_seconds; + cmd->keep_alive_seconds = + cpu_to_le16(mvmvif->dbgfs_pm.keep_alive_seconds); if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) { if (mvmvif->dbgfs_pm.skip_over_dtim) cmd->flags |= @@ -243,8 +243,7 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, cmd->tx_data_timeout = cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout); if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS) - cmd->skip_dtim_periods = - cpu_to_le32(mvmvif->dbgfs_pm.skip_dtim_periods); + cmd->skip_dtim_periods = mvmvif->dbgfs_pm.skip_dtim_periods; if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) { if (mvmvif->dbgfs_pm.lprx_ena) cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK); @@ -252,16 +251,16 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK); } if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD) - cmd->lprx_rssi_threshold = - cpu_to_le32(mvmvif->dbgfs_pm.lprx_rssi_threshold); + cmd->lprx_rssi_threshold = mvmvif->dbgfs_pm.lprx_rssi_threshold; #endif /* CONFIG_IWLWIFI_DEBUGFS */ } -int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +static int iwl_mvm_power_mac_update_mode(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) { int ret; bool 
ba_enable; - struct iwl_powertable_cmd cmd = {}; + struct iwl_mac_power_cmd cmd = {}; if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) return 0; @@ -280,7 +279,7 @@ int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif) iwl_mvm_power_build_cmd(mvm, vif, &cmd); iwl_mvm_power_log(mvm, &cmd); - ret = iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC, + ret = iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_SYNC, sizeof(cmd), &cmd); if (ret) return ret; @@ -291,15 +290,19 @@ int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif) return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable); } -int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +static int iwl_mvm_power_mac_disable(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) { - struct iwl_powertable_cmd cmd = {}; + struct iwl_mac_power_cmd cmd = {}; struct iwl_mvm_vif *mvmvif __maybe_unused = iwl_mvm_vif_from_mac80211(vif); if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) return 0; + cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, + mvmvif->color)); + if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM) cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK); @@ -310,11 +313,50 @@ int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif) #endif iwl_mvm_power_log(mvm, &cmd); - return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_ASYNC, + return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_ASYNC, sizeof(cmd), &cmd); } #ifdef CONFIG_IWLWIFI_DEBUGFS +static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, char *buf, + int bufsz) +{ + struct iwl_mac_power_cmd cmd = {}; + int pos = 0; + + iwl_mvm_power_build_cmd(mvm, vif, &cmd); + + pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n", + (cmd.flags & + cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ? + 0 : 1); + pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n", + cmd.skip_dtim_periods); + pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n", + iwlmvm_mod_params.power_scheme); + pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n", + le16_to_cpu(cmd.flags)); + pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n", + le16_to_cpu(cmd.keep_alive_seconds)); + + if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) { + pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n", + (cmd.flags & + cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ? 
+ 1 : 0); + pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n", + le32_to_cpu(cmd.rx_data_timeout)); + pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n", + le32_to_cpu(cmd.tx_data_timeout)); + if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK)) + pos += scnprintf(buf+pos, bufsz-pos, + "lprx_rssi_threshold = %d\n", + cmd.lprx_rssi_threshold); + } + return pos; +} + void iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, struct iwl_beacon_filter_cmd *cmd) @@ -382,3 +424,11 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, return ret; } + +const struct iwl_mvm_power_ops pm_mac_ops = { + .power_update_mode = iwl_mvm_power_mac_update_mode, + .power_disable = iwl_mvm_power_mac_disable, +#ifdef CONFIG_IWLWIFI_DEBUGFS + .power_dbgfs_read = iwl_mvm_power_mac_dbgfs_read, +#endif +}; diff --git a/drivers/net/wireless/iwlwifi/mvm/power_legacy.c b/drivers/net/wireless/iwlwifi/mvm/power_legacy.c new file mode 100644 index 0000000..2ce79ba --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/power_legacy.c @@ -0,0 +1,319 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#include +#include +#include +#include + +#include + +#include "iwl-debug.h" +#include "mvm.h" +#include "iwl-modparams.h" +#include "fw-api-power.h" + +#define POWER_KEEP_ALIVE_PERIOD_SEC 25 + +static void iwl_mvm_power_log(struct iwl_mvm *mvm, + struct iwl_powertable_cmd *cmd) +{ + IWL_DEBUG_POWER(mvm, + "Sending power table command for power level %d, flags = 0x%X\n", + iwlmvm_mod_params.power_scheme, + le16_to_cpu(cmd->flags)); + IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n", cmd->keep_alive_seconds); + + if (cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) { + IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n", + le32_to_cpu(cmd->rx_data_timeout)); + IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n", + le32_to_cpu(cmd->tx_data_timeout)); + if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) + IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n", + le32_to_cpu(cmd->skip_dtim_periods)); + if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK)) + IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n", + le32_to_cpu(cmd->lprx_rssi_threshold)); + } +} + +static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_powertable_cmd *cmd) +{ + struct ieee80211_hw *hw = mvm->hw; + struct ieee80211_chanctx_conf *chanctx_conf; + struct ieee80211_channel *chan; + int dtimper, dtimper_msec; + int keep_alive; + bool radar_detect = false; + struct iwl_mvm_vif *mvmvif __maybe_unused = + iwl_mvm_vif_from_mac80211(vif); + + /* + * Regardless of power management state the driver must set + * keep alive period. FW will use it for sending keep alive NDPs + * immediately after association. 
+ */ + cmd->keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC; + + if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) + return; + + cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK); + if (!vif->bss_conf.assoc) + cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK); + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF && + mvmvif->dbgfs_pm.disable_power_off) + cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK); +#endif + if (!vif->bss_conf.ps) + return; + + cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK); + + if (vif->bss_conf.beacon_rate && + (vif->bss_conf.beacon_rate->bitrate == 10 || + vif->bss_conf.beacon_rate->bitrate == 60)) { + cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK); + cmd->lprx_rssi_threshold = + cpu_to_le32(POWER_LPRX_RSSI_THRESHOLD); + } + + dtimper = hw->conf.ps_dtim_period ?: 1; + + /* Check if radar detection is required on current channel */ + rcu_read_lock(); + chanctx_conf = rcu_dereference(vif->chanctx_conf); + WARN_ON(!chanctx_conf); + if (chanctx_conf) { + chan = chanctx_conf->def.chan; + radar_detect = chan->flags & IEEE80211_CHAN_RADAR; + } + rcu_read_unlock(); + + /* Check skip over DTIM conditions */ + if (!radar_detect && (dtimper <= 10) && + (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP || + mvm->cur_ucode == IWL_UCODE_WOWLAN)) { + cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); + cmd->skip_dtim_periods = cpu_to_le32(3); + } + + /* Check that keep alive period is at least 3 * DTIM */ + dtimper_msec = dtimper * vif->bss_conf.beacon_int; + keep_alive = max_t(int, 3 * dtimper_msec, + MSEC_PER_SEC * cmd->keep_alive_seconds); + keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC); + cmd->keep_alive_seconds = keep_alive; + + if (mvm->cur_ucode != IWL_UCODE_WOWLAN) { + cmd->rx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC); + cmd->tx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC); + } else { + cmd->rx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC); + cmd->tx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC); + } + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE) + cmd->keep_alive_seconds = mvmvif->dbgfs_pm.keep_alive_seconds; + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) { + if (mvmvif->dbgfs_pm.skip_over_dtim) + cmd->flags |= + cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); + else + cmd->flags &= + cpu_to_le16(~POWER_FLAGS_SKIP_OVER_DTIM_MSK); + } + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_RX_DATA_TIMEOUT) + cmd->rx_data_timeout = + cpu_to_le32(mvmvif->dbgfs_pm.rx_data_timeout); + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_TX_DATA_TIMEOUT) + cmd->tx_data_timeout = + cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout); + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS) + cmd->skip_dtim_periods = + cpu_to_le32(mvmvif->dbgfs_pm.skip_dtim_periods); + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) { + if (mvmvif->dbgfs_pm.lprx_ena) + cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK); + else + cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK); + } + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD) + cmd->lprx_rssi_threshold = + cpu_to_le32(mvmvif->dbgfs_pm.lprx_rssi_threshold); +#endif /* CONFIG_IWLWIFI_DEBUGFS */ +} + +static int iwl_mvm_power_legacy_update_mode(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + int ret; + bool ba_enable; + struct iwl_powertable_cmd cmd = {}; + + if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) + return 0; + + /* + * TODO: 
The following vif_count verification is temporary condition. + * Avoid power mode update if more than one interface is currently + * active. Remove this condition when FW will support power management + * on multiple MACs. + */ + IWL_DEBUG_POWER(mvm, "Currently %d interfaces active\n", + mvm->vif_count); + if (mvm->vif_count > 1) + return 0; + + iwl_mvm_power_build_cmd(mvm, vif, &cmd); + iwl_mvm_power_log(mvm, &cmd); + + ret = iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC, + sizeof(cmd), &cmd); + if (ret) + return ret; + + ba_enable = !!(cmd.flags & + cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)); + + return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable); +} + +static int iwl_mvm_power_legacy_disable(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_powertable_cmd cmd = {}; + struct iwl_mvm_vif *mvmvif __maybe_unused = + iwl_mvm_vif_from_mac80211(vif); + + if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) + return 0; + + if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM) + cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK); + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF && + mvmvif->dbgfs_pm.disable_power_off) + cmd.flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK); +#endif + iwl_mvm_power_log(mvm, &cmd); + + return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_ASYNC, + sizeof(cmd), &cmd); +} + +#ifdef CONFIG_IWLWIFI_DEBUGFS +static int iwl_mvm_power_legacy_dbgfs_read(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, char *buf, + int bufsz) +{ + struct iwl_powertable_cmd cmd = {}; + int pos = 0; + + iwl_mvm_power_build_cmd(mvm, vif, &cmd); + + pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n", + (cmd.flags & + cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ? + 0 : 1); + pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n", + le32_to_cpu(cmd.skip_dtim_periods)); + pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n", + iwlmvm_mod_params.power_scheme); + pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n", + le16_to_cpu(cmd.flags)); + pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n", + cmd.keep_alive_seconds); + + if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) { + pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n", + (cmd.flags & + cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ? + 1 : 0); + pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n", + le32_to_cpu(cmd.rx_data_timeout)); + pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n", + le32_to_cpu(cmd.tx_data_timeout)); + if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK)) + pos += scnprintf(buf+pos, bufsz-pos, + "lprx_rssi_threshold = %d\n", + le32_to_cpu(cmd.lprx_rssi_threshold)); + } + return pos; +} +#endif + +const struct iwl_mvm_power_ops pm_legacy_ops = { + .power_update_mode = iwl_mvm_power_legacy_update_mode, + .power_disable = iwl_mvm_power_legacy_disable, +#ifdef CONFIG_IWLWIFI_DEBUGFS + .power_dbgfs_read = iwl_mvm_power_legacy_dbgfs_read, +#endif +}; -- cgit v0.10.2 From 41069b4631b98646b530bdbd545b7574faccecf9 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Tue, 25 Jun 2013 21:49:19 +0300 Subject: iwlwifi: mvm: better handle several several vifs in BT Coex When there one vif on 5GHz associating, it would clear all the BT Coex constraints. This can't work if there is another vif on 2.4GHz. Fix that. 
Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c index dbd622a..9195779 100644 --- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c @@ -384,6 +384,10 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, smps_mode = IEEE80211_SMPS_AUTOMATIC; + /* non associated BSSes aren't to be considered */ + if (!vif->bss_conf.assoc) + return; + if (band != IEEE80211_BAND_2GHZ) { iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode); @@ -588,23 +592,5 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, void iwl_mvm_bt_coex_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { - struct ieee80211_chanctx_conf *chanctx_conf; - enum ieee80211_band band; - - rcu_read_lock(); - chanctx_conf = rcu_dereference(vif->chanctx_conf); - if (chanctx_conf && chanctx_conf->def.chan) - band = chanctx_conf->def.chan->band; - else - band = -1; - rcu_read_unlock(); - - /* if we are in 2GHz we will get a notification from the fw */ - if (band == IEEE80211_BAND_2GHZ) - return; - - /* else, we can remove all the constraints */ - memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); - iwl_mvm_bt_coex_notif_handle(mvm); } diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 2d07605..30319e0 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -761,7 +761,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, IWL_ERR(mvm, "failed to update quotas\n"); return; } - iwl_mvm_bt_coex_vif_assoc(mvm, vif); iwl_mvm_configure_mcast_filter(mvm, vif); } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) { /* remove AP station now that the MAC is unassoc */ @@ -782,6 +781,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, if (ret) IWL_ERR(mvm, "failed to update power mode\n"); } + iwl_mvm_bt_coex_vif_assoc(mvm, vif); } else if (changes & BSS_CHANGED_BEACON_INFO) { /* * We received a beacon _after_ association so -- cgit v0.10.2 From 8e0366f9c7ceefaa1c9fdb9529d23011d655d49c Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Tue, 25 Jun 2013 21:57:08 +0300 Subject: iwlwifi: mvm: fix the ACK / CTS kill mask upon RSSI event If a vif's RSSI gets good enough, we can enable reduced Tx power. If so, we need to update the ACK / CTS kill mask accordingly. Since the auditing for the interfaces was bad, we enabled reduced Tx power, but didn't update the ACK / CTS kill mask. This is harmless since the firmware is most likely to discard this setting anyway, but it is a good practice to update it. Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c index 9195779..3a48cd9 100644 --- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c @@ -527,6 +527,8 @@ static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac, lockdep_is_held(&mvm->mutex)); mvmsta = (void *)sta->drv_priv; + data->num_bss_ifaces++; + /* * This interface doesn't support reduced Tx power (because of low * RSSI probably), then set bt_kill_msk to default values. 
-- cgit v0.10.2 From 03e304e4e7ace25a532bfed51c7737d4aec768cd Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Wed, 26 Jun 2013 15:54:34 +0300 Subject: iwlwifi: mvm: don't allocate BT_COEX cmd on stack This command will change and be much bigger. Prepare to that by stop allocating on the stack. Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c index 3a48cd9..0fad98b 100644 --- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c @@ -220,66 +220,87 @@ static const __le32 iwl_single_shared_ant_lookup[BT_COEX_LUT_SIZE] = { int iwl_send_bt_init_conf(struct iwl_mvm *mvm) { - struct iwl_bt_coex_cmd cmd = { - .max_kill = 5, - .bt3_time_t7_value = 1, - .bt3_prio_sample_time = 2, - .bt3_timer_t2_value = 0xc, + struct iwl_bt_coex_cmd *bt_cmd; + struct iwl_host_cmd cmd = { + .id = BT_CONFIG, + .len = { sizeof(*bt_cmd), }, + .dataflags = { IWL_HCMD_DFL_NOCOPY, }, + .flags = CMD_SYNC, }; int ret; - cmd.flags = iwlwifi_mod_params.bt_coex_active ? + /* go to CALIB state in internal BT-Coex state machine */ + ret = iwl_send_bt_env(mvm, BT_COEX_ENV_OPEN, + BT_COEX_PRIO_TBL_EVT_INIT_CALIB2); + if (ret) + return ret; + + ret = iwl_send_bt_env(mvm, BT_COEX_ENV_CLOSE, + BT_COEX_PRIO_TBL_EVT_INIT_CALIB2); + if (ret) + return ret; + + bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL); + if (!bt_cmd) + return -ENOMEM; + cmd.data[0] = bt_cmd; + + bt_cmd->max_kill = 5; + bt_cmd->bt3_time_t7_value = 1; + bt_cmd->bt3_prio_sample_time = 2; + bt_cmd->bt3_timer_t2_value = 0xc; + + bt_cmd->flags = iwlwifi_mod_params.bt_coex_active ? BT_COEX_NW : BT_COEX_DISABLE; - cmd.flags |= BT_CH_PRIMARY_EN | BT_SYNC_2_BT_DISABLE; + bt_cmd->flags |= BT_CH_PRIMARY_EN | BT_SYNC_2_BT_DISABLE; - cmd.valid_bit_msk = cpu_to_le16(BT_VALID_ENABLE | - BT_VALID_BT_PRIO_BOOST | - BT_VALID_MAX_KILL | - BT_VALID_3W_TMRS | - BT_VALID_KILL_ACK | - BT_VALID_KILL_CTS | - BT_VALID_REDUCED_TX_POWER | - BT_VALID_LUT); + bt_cmd->valid_bit_msk = cpu_to_le16(BT_VALID_ENABLE | + BT_VALID_BT_PRIO_BOOST | + BT_VALID_MAX_KILL | + BT_VALID_3W_TMRS | + BT_VALID_KILL_ACK | + BT_VALID_KILL_CTS | + BT_VALID_REDUCED_TX_POWER | + BT_VALID_LUT); if (mvm->cfg->bt_shared_single_ant) - memcpy(&cmd.decision_lut, iwl_single_shared_ant_lookup, + memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant_lookup, sizeof(iwl_single_shared_ant_lookup)); else if (is_loose_coex()) - memcpy(&cmd.decision_lut, iwl_loose_lookup, + memcpy(&bt_cmd->decision_lut, iwl_loose_lookup, sizeof(iwl_tight_lookup)); else - memcpy(&cmd.decision_lut, iwl_tight_lookup, + memcpy(&bt_cmd->decision_lut, iwl_tight_lookup, sizeof(iwl_tight_lookup)); - cmd.bt_prio_boost = cpu_to_le32(IWL_BT_DEFAULT_BOOST); - cmd.kill_ack_msk = + bt_cmd->bt_prio_boost = cpu_to_le32(IWL_BT_DEFAULT_BOOST); + bt_cmd->kill_ack_msk = cpu_to_le32(iwl_bt_ack_kill_msk[BT_KILL_MSK_DEFAULT]); - cmd.kill_cts_msk = + bt_cmd->kill_cts_msk = cpu_to_le32(iwl_bt_cts_kill_msk[BT_KILL_MSK_DEFAULT]); memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); - /* go to CALIB state in internal BT-Coex state machine */ - ret = iwl_send_bt_env(mvm, BT_COEX_ENV_OPEN, - BT_COEX_PRIO_TBL_EVT_INIT_CALIB2); - if (ret) - return ret; - - ret = iwl_send_bt_env(mvm, BT_COEX_ENV_CLOSE, - BT_COEX_PRIO_TBL_EVT_INIT_CALIB2); - if (ret) - return ret; + ret = iwl_mvm_send_cmd(mvm, &cmd); - return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, CMD_SYNC, - sizeof(cmd), &cmd); + kfree(bt_cmd); + return ret; } static int 
iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm, bool reduced_tx_power) { enum iwl_bt_kill_msk bt_kill_msk; - struct iwl_bt_coex_cmd cmd = {}; + struct iwl_bt_coex_cmd *bt_cmd; struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif; + struct iwl_host_cmd cmd = { + .id = BT_CONFIG, + .data[0] = &bt_cmd, + .len = { sizeof(*bt_cmd), }, + .dataflags = { IWL_HCMD_DFL_NOCOPY, }, + .flags = CMD_SYNC, + }; + int ret = 0; lockdep_assert_held(&mvm->mutex); @@ -308,24 +329,40 @@ static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm, return 0; mvm->bt_kill_msk = bt_kill_msk; - cmd.kill_ack_msk = cpu_to_le32(iwl_bt_ack_kill_msk[bt_kill_msk]); - cmd.kill_cts_msk = cpu_to_le32(iwl_bt_cts_kill_msk[bt_kill_msk]); - cmd.valid_bit_msk = cpu_to_le16(BT_VALID_KILL_ACK | BT_VALID_KILL_CTS); + + bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL); + if (!bt_cmd) + return -ENOMEM; + cmd.data[0] = bt_cmd; + + bt_cmd->kill_ack_msk = cpu_to_le32(iwl_bt_ack_kill_msk[bt_kill_msk]); + bt_cmd->kill_cts_msk = cpu_to_le32(iwl_bt_cts_kill_msk[bt_kill_msk]); + bt_cmd->valid_bit_msk = + cpu_to_le16(BT_VALID_KILL_ACK | BT_VALID_KILL_CTS); IWL_DEBUG_COEX(mvm, "bt_kill_msk = %d\n", bt_kill_msk); - return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, CMD_SYNC, - sizeof(cmd), &cmd); + + ret = iwl_mvm_send_cmd(mvm, &cmd); + + kfree(bt_cmd); + return ret; } static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable) { - struct iwl_bt_coex_cmd cmd = { - .valid_bit_msk = cpu_to_le16(BT_VALID_REDUCED_TX_POWER), - .bt_reduced_tx_power = sta_id, + struct iwl_bt_coex_cmd *bt_cmd; + /* Send ASYNC since this can be sent from an atomic context */ + struct iwl_host_cmd cmd = { + .id = BT_CONFIG, + .len = { sizeof(*bt_cmd), }, + .dataflags = { IWL_HCMD_DFL_DUP, }, + .flags = CMD_ASYNC, }; + struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; + int ret; /* This can happen if the station has been removed right now */ if (sta_id == IWL_MVM_STATION_COUNT) @@ -339,17 +376,26 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, if (mvmsta->bt_reduced_txpower == enable) return 0; + bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC); + if (!bt_cmd) + return -ENOMEM; + cmd.data[0] = bt_cmd; + + bt_cmd->valid_bit_msk = cpu_to_le16(BT_VALID_REDUCED_TX_POWER), + bt_cmd->bt_reduced_tx_power = sta_id; + if (enable) - cmd.bt_reduced_tx_power |= BT_REDUCED_TX_POWER_BIT; + bt_cmd->bt_reduced_tx_power |= BT_REDUCED_TX_POWER_BIT; IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n", enable ? "en" : "dis", sta_id); mvmsta->bt_reduced_txpower = enable; - /* Send ASYNC since this can be sent from an atomic context */ - return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, CMD_ASYNC, - sizeof(cmd), &cmd); + ret = iwl_mvm_send_cmd(mvm, &cmd); + + kfree(bt_cmd); + return ret; } struct iwl_bt_iterator_data { -- cgit v0.10.2 From aafee33423e78e86becda986b24a5819a7d16ff7 Mon Sep 17 00:00:00 2001 From: Dragos Foianu Date: Wed, 17 Jul 2013 12:25:38 +0100 Subject: net/irda: fixed style issues in irttp Applied error fixes suggested by checpatch.pl Signed-off-by: Dragos Foianu Signed-off-by: David S. 
Miller diff --git a/net/irda/irttp.c b/net/irda/irttp.c index ae43c62..85372cf 100644 --- a/net/irda/irttp.c +++ b/net/irda/irttp.c @@ -75,7 +75,7 @@ static pi_minor_info_t pi_minor_call_table[] = { { NULL, 0 }, /* 0x00 */ { irttp_param_max_sdu_size, PV_INTEGER | PV_BIG_ENDIAN } /* 0x01 */ }; -static pi_major_info_t pi_major_call_table[] = {{ pi_minor_call_table, 2 }}; +static pi_major_info_t pi_major_call_table[] = { { pi_minor_call_table, 2 } }; static pi_param_info_t param_info = { pi_major_call_table, 1, 0x0f, 4 }; /************************ GLOBAL PROCEDURES ************************/ @@ -205,7 +205,7 @@ static void irttp_todo_expired(unsigned long data) */ static void irttp_flush_queues(struct tsap_cb *self) { - struct sk_buff* skb; + struct sk_buff *skb; IRDA_DEBUG(4, "%s()\n", __func__); @@ -400,7 +400,7 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify) /* The IrLMP spec (IrLMP 1.1 p10) says that we have the right to * use only 0x01-0x6F. Of course, we can use LSAP_ANY as well. * JeanII */ - if((stsap_sel != LSAP_ANY) && + if ((stsap_sel != LSAP_ANY) && ((stsap_sel < 0x01) || (stsap_sel >= 0x70))) { IRDA_DEBUG(0, "%s(), invalid tsap!\n", __func__); return NULL; @@ -427,7 +427,7 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify) ttp_notify.data_indication = irttp_data_indication; ttp_notify.udata_indication = irttp_udata_indication; ttp_notify.flow_indication = irttp_flow_indication; - if(notify->status_indication != NULL) + if (notify->status_indication != NULL) ttp_notify.status_indication = irttp_status_indication; ttp_notify.instance = self; strncpy(ttp_notify.name, notify->name, NOTIFY_MAX_NAME); @@ -639,8 +639,7 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb) */ if ((self->tx_max_sdu_size != 0) && (self->tx_max_sdu_size != TTP_SAR_UNBOUND) && - (skb->len > self->tx_max_sdu_size)) - { + (skb->len > self->tx_max_sdu_size)) { IRDA_ERROR("%s: SAR enabled, but data is larger than TxMaxSduSize!\n", __func__); ret = -EMSGSIZE; @@ -733,8 +732,7 @@ static void irttp_run_tx_queue(struct tsap_cb *self) * poll us through irttp_flow_indication() - Jean II */ while ((self->send_credit > 0) && (!irlmp_lap_tx_queue_full(self->lsap)) && - (skb = skb_dequeue(&self->tx_queue))) - { + (skb = skb_dequeue(&self->tx_queue))) { /* * Since we can transmit and receive frames concurrently, * the code below is a critical region and we must assure that @@ -798,8 +796,7 @@ static void irttp_run_tx_queue(struct tsap_cb *self) * where we can spend a bit of time doing stuff. - Jean II */ if ((self->tx_sdu_busy) && (skb_queue_len(&self->tx_queue) < TTP_TX_LOW_THRESHOLD) && - (!self->close_pend)) - { + (!self->close_pend)) { if (self->notify.flow_indication) self->notify.flow_indication(self->notify.instance, self, FLOW_START); @@ -892,7 +889,7 @@ static int irttp_udata_indication(void *instance, void *sap, /* Just pass data to layer above */ if (self->notify.udata_indication) { err = self->notify.udata_indication(self->notify.instance, - self,skb); + self, skb); /* Same comment as in irttp_do_data_indication() */ if (!err) return 0; @@ -1057,7 +1054,7 @@ static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow) * to do that. Jean II */ /* If we need to send disconnect. 
try to do it now */ - if(self->disconnect_pend) + if (self->disconnect_pend) irttp_start_todo_timer(self, 0); } @@ -1116,7 +1113,7 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel, IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -EBADR;); if (self->connected) { - if(userdata) + if (userdata) dev_kfree_skb(userdata); return -EISCONN; } @@ -1137,7 +1134,7 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel, * headers */ IRDA_ASSERT(skb_headroom(userdata) >= TTP_MAX_HEADER, - { dev_kfree_skb(userdata); return -1; } ); + { dev_kfree_skb(userdata); return -1; }); } /* Initialize connection parameters */ @@ -1157,7 +1154,7 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel, * Give away max 127 credits for now */ if (n > 127) { - self->avail_credit=n-127; + self->avail_credit = n - 127; n = 127; } @@ -1166,10 +1163,10 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel, /* SAR enabled? */ if (max_sdu_size > 0) { IRDA_ASSERT(skb_headroom(tx_skb) >= (TTP_MAX_HEADER + TTP_SAR_HEADER), - { dev_kfree_skb(tx_skb); return -1; } ); + { dev_kfree_skb(tx_skb); return -1; }); /* Insert SAR parameters */ - frame = skb_push(tx_skb, TTP_HEADER+TTP_SAR_HEADER); + frame = skb_push(tx_skb, TTP_HEADER + TTP_SAR_HEADER); frame[0] = TTP_PARAMETERS | n; frame[1] = 0x04; /* Length */ @@ -1386,7 +1383,7 @@ int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size, * headers */ IRDA_ASSERT(skb_headroom(userdata) >= TTP_MAX_HEADER, - { dev_kfree_skb(userdata); return -1; } ); + { dev_kfree_skb(userdata); return -1; }); } self->avail_credit = 0; @@ -1409,10 +1406,10 @@ int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size, /* SAR enabled? */ if (max_sdu_size > 0) { IRDA_ASSERT(skb_headroom(tx_skb) >= (TTP_MAX_HEADER + TTP_SAR_HEADER), - { dev_kfree_skb(tx_skb); return -1; } ); + { dev_kfree_skb(tx_skb); return -1; }); /* Insert TTP header with SAR parameters */ - frame = skb_push(tx_skb, TTP_HEADER+TTP_SAR_HEADER); + frame = skb_push(tx_skb, TTP_HEADER + TTP_SAR_HEADER); frame[0] = TTP_PARAMETERS | n; frame[1] = 0x04; /* Length */ @@ -1522,7 +1519,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata, * function may be called from various context, like user, timer * for following a disconnect_indication() (i.e. net_bh). * Jean II */ - if(test_and_set_bit(0, &self->disconnect_pend)) { + if (test_and_set_bit(0, &self->disconnect_pend)) { IRDA_DEBUG(0, "%s(), disconnect already pending\n", __func__); if (userdata) @@ -1627,7 +1624,7 @@ static void irttp_disconnect_indication(void *instance, void *sap, * Jean II */ /* No need to notify the client if has already tried to disconnect */ - if(self->notify.disconnect_indication) + if (self->notify.disconnect_indication) self->notify.disconnect_indication(self->notify.instance, self, reason, skb); else @@ -1738,8 +1735,7 @@ static void irttp_run_rx_queue(struct tsap_cb *self) * This is the last fragment, so time to reassemble! */ if ((self->rx_sdu_size <= self->rx_max_sdu_size) || - (self->rx_max_sdu_size == TTP_SAR_UNBOUND)) - { + (self->rx_max_sdu_size == TTP_SAR_UNBOUND)) { /* * A little optimizing. Only queue the fragment if * there are other fragments. Since if this is the @@ -1860,7 +1856,7 @@ static int irttp_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "dtsap_sel: %02x\n", self->dtsap_sel); seq_printf(seq, " connected: %s, ", - self->connected? "TRUE":"FALSE"); + self->connected ? 
"TRUE" : "FALSE"); seq_printf(seq, "avail credit: %d, ", self->avail_credit); seq_printf(seq, "remote credit: %d, ", @@ -1876,9 +1872,9 @@ static int irttp_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "rx_queue len: %u\n", skb_queue_len(&self->rx_queue)); seq_printf(seq, " tx_sdu_busy: %s, ", - self->tx_sdu_busy? "TRUE":"FALSE"); + self->tx_sdu_busy ? "TRUE" : "FALSE"); seq_printf(seq, "rx_sdu_busy: %s\n", - self->rx_sdu_busy? "TRUE":"FALSE"); + self->rx_sdu_busy ? "TRUE" : "FALSE"); seq_printf(seq, " max_seg_size: %u, ", self->max_seg_size); seq_printf(seq, "tx_max_sdu_size: %u, ", -- cgit v0.10.2 From 6e3d6774a7d59e33bebeb0ef66888bc7dbfed4c7 Mon Sep 17 00:00:00 2001 From: Alexandru Juncu Date: Thu, 18 Jul 2013 14:36:48 +0300 Subject: mISDN: replace sum of bitmasks with OR operation. Suggested by coccinelle and manually verified. Signed-off-by: Alexandru Juncu Signed-off-by: David S. Miller diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index a7e4939..7f910c7 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c @@ -1307,11 +1307,11 @@ mode_hfcpci(struct bchannel *bch, int bc, int protocol) } if (fifo2 & 2) { hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B2; - hc->hw.int_m1 &= ~(HFCPCI_INTS_B2TRANS + + hc->hw.int_m1 &= ~(HFCPCI_INTS_B2TRANS | HFCPCI_INTS_B2REC); } else { hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B1; - hc->hw.int_m1 &= ~(HFCPCI_INTS_B1TRANS + + hc->hw.int_m1 &= ~(HFCPCI_INTS_B1TRANS | HFCPCI_INTS_B1REC); } #ifdef REVERSE_BITORDER @@ -1346,14 +1346,14 @@ mode_hfcpci(struct bchannel *bch, int bc, int protocol) if (fifo2 & 2) { hc->hw.fifo_en |= HFCPCI_FIFOEN_B2; if (!tics) - hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS + + hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS | HFCPCI_INTS_B2REC); hc->hw.ctmt |= 2; hc->hw.conn &= ~0x18; } else { hc->hw.fifo_en |= HFCPCI_FIFOEN_B1; if (!tics) - hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS + + hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS | HFCPCI_INTS_B1REC); hc->hw.ctmt |= 1; hc->hw.conn &= ~0x03; @@ -1375,14 +1375,14 @@ mode_hfcpci(struct bchannel *bch, int bc, int protocol) if (fifo2 & 2) { hc->hw.last_bfifo_cnt[1] = 0; hc->hw.fifo_en |= HFCPCI_FIFOEN_B2; - hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS + + hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS | HFCPCI_INTS_B2REC); hc->hw.ctmt &= ~2; hc->hw.conn &= ~0x18; } else { hc->hw.last_bfifo_cnt[0] = 0; hc->hw.fifo_en |= HFCPCI_FIFOEN_B1; - hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS + + hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS | HFCPCI_INTS_B1REC); hc->hw.ctmt &= ~1; hc->hw.conn &= ~0x03; -- cgit v0.10.2 From 0887a576a17965732270b2f8d37821fc02ef2feb Mon Sep 17 00:00:00 2001 From: Amit Uttamchandani Date: Thu, 18 Jul 2013 17:45:22 -0700 Subject: net/velocity: add poll controller function for velocity nic Add poll controller function for velocity nic. Signed-off-by: Amit Uttamchandani Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 1d6dc41..ef77631 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -2376,6 +2376,23 @@ out_0: return ret; } +#ifdef CONFIG_NET_POLL_CONTROLLER +/** + * velocity_poll_controller - Velocity Poll controller function + * @dev: network device + * + * + * Used by NETCONSOLE and other diagnostic tools to allow network I/P + * with interrupts disabled. 
+ */ +static void velocity_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + velocity_intr(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + /** * velocity_mii_ioctl - MII ioctl handler * @dev: network device @@ -2641,6 +2658,9 @@ static const struct net_device_ops velocity_netdev_ops = { .ndo_do_ioctl = velocity_ioctl, .ndo_vlan_rx_add_vid = velocity_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = velocity_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = velocity_poll_controller, +#endif }; /** -- cgit v0.10.2 From 3266d73237a7441a30d64a97327738e70f1b5254 Mon Sep 17 00:00:00 2001 From: Shuah Khan Date: Wed, 3 Jul 2013 10:47:10 -0600 Subject: wireless: Convert mwifiex/pcie to dev_pm_ops from legacy pm_ops Convert the mwifiex/pci driver to use dev_pm_ops for power management and remove Legacy PM handling. This change re-uses existing suspend and resume interfaces for dev_pm_ops, and changes CONFIG_PM ifdefs to CONFIG_PM_SLEEP as the driver does not support run-time PM. Signed-off-by: Shuah Khan Acked-by: Bing Zhao Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c index 20c9c4c..4a57eb4 100644 --- a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/mwifiex/pcie.c @@ -76,7 +76,7 @@ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter) return false; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP /* * Kernel needs to suspend all functions separately. Therefore all * registered functions must have drivers with suspend and resume @@ -85,11 +85,12 @@ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter) * If already not suspended, this function allocates and sends a host * sleep activate request to the firmware and turns off the traffic. */ -static int mwifiex_pcie_suspend(struct pci_dev *pdev, pm_message_t state) +static int mwifiex_pcie_suspend(struct device *dev) { struct mwifiex_adapter *adapter; struct pcie_service_card *card; int hs_actived; + struct pci_dev *pdev = to_pci_dev(dev); if (pdev) { card = (struct pcie_service_card *) pci_get_drvdata(pdev); @@ -120,10 +121,11 @@ static int mwifiex_pcie_suspend(struct pci_dev *pdev, pm_message_t state) * If already not resumed, this function turns on the traffic and * sends a host sleep cancel request to the firmware. 
*/ -static int mwifiex_pcie_resume(struct pci_dev *pdev) +static int mwifiex_pcie_resume(struct device *dev) { struct mwifiex_adapter *adapter; struct pcie_service_card *card; + struct pci_dev *pdev = to_pci_dev(dev); if (pdev) { card = (struct pcie_service_card *) pci_get_drvdata(pdev); @@ -211,9 +213,9 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev) wait_for_completion(&adapter->fw_load); if (user_rmmod) { -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP if (adapter->is_suspended) - mwifiex_pcie_resume(pdev); + mwifiex_pcie_resume(&pdev->dev); #endif for (i = 0; i < adapter->priv_num; i++) @@ -249,16 +251,22 @@ static DEFINE_PCI_DEVICE_TABLE(mwifiex_ids) = { MODULE_DEVICE_TABLE(pci, mwifiex_ids); +#ifdef CONFIG_PM_SLEEP +/* Power Management Hooks */ +static SIMPLE_DEV_PM_OPS(mwifiex_pcie_pm_ops, mwifiex_pcie_suspend, + mwifiex_pcie_resume); +#endif + /* PCI Device Driver */ static struct pci_driver __refdata mwifiex_pcie = { .name = "mwifiex_pcie", .id_table = mwifiex_ids, .probe = mwifiex_pcie_probe, .remove = mwifiex_pcie_remove, -#ifdef CONFIG_PM - /* Power Management Hooks */ - .suspend = mwifiex_pcie_suspend, - .resume = mwifiex_pcie_resume, +#ifdef CONFIG_PM_SLEEP + .driver = { + .pm = &mwifiex_pcie_pm_ops, + }, #endif }; -- cgit v0.10.2 From 74e13060f11dbf4028b810e34c359f64929415f3 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 3 Jul 2013 20:55:38 +0200 Subject: ath9k: make rfkill configurable When the platform doesn't have rfkill support, i.e. nothing is connected to the rfkill GPIO, there's little value in polling the GPIO. Add a Kconfig option to allow disabling the polling in ath9k. Signed-off-by: Johannes Berg Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig index d491a31..c91bc61 100644 --- a/drivers/net/wireless/ath/ath9k/Kconfig +++ b/drivers/net/wireless/ath/ath9k/Kconfig @@ -96,6 +96,16 @@ config ATH9K_LEGACY_RATE_CONTROL has to be passed to mac80211 using the module parameter, ieee80211_default_rc_algo. +config ATH9K_RFKILL + bool "Atheros ath9k rfkill support" if EXPERT + depends on ATH9K + depends on RFKILL=y || RFKILL=ATH9K + default y + help + Say Y to have ath9k poll the RF-Kill GPIO every couple of + seconds. Turn off to save power, but enable it if you have + a platform that can toggle the RF-Kill GPIO. + config ATH9K_HTC tristate "Atheros HTC based wireless cards support" depends on USB && MAC80211 diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 4ca0cb0..e420c6b 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -2504,7 +2504,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) else pCap->rts_aggr_limit = (8 * 1024); -#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) +#ifdef CONFIG_ATH9K_RFKILL ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT); if (ah->rfsilent & EEP_RFSILENT_ENABLED) { ah->rfkill_gpio = -- cgit v0.10.2 From a3835e9fa72956a9a348079f8161e6a5057b5c88 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Thu, 4 Jul 2013 12:59:47 +0530 Subject: ath9k: Fix RTS threshold Currently, RTS threshold is not handled for HT. Handle user-specified threshold values for both aggregated and unaggregated frames. Use the wiphy's threshold parameter for now, it can be made per-VIF later on. Signed-off-by: Sujith Manoharan Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index c59ae43..e347d8ce 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -999,7 +999,7 @@ void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop) } static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, - struct ath_tx_info *info, int len) + struct ath_tx_info *info, int len, bool rts) { struct ath_hw *ah = sc->sc_ah; struct sk_buff *skb; @@ -1008,6 +1008,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, const struct ieee80211_rate *rate; struct ieee80211_hdr *hdr; struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu); + u32 rts_thresh = sc->hw->wiphy->rts_threshold; int i; u8 rix = 0; @@ -1030,7 +1031,17 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, rix = rates[i].idx; info->rates[i].Tries = rates[i].count; - if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) { + /* + * Handle RTS threshold for unaggregated HT frames. + */ + if (bf_isampdu(bf) && !bf_isaggr(bf) && + (rates[i].flags & IEEE80211_TX_RC_MCS) && + unlikely(rts_thresh != (u32) -1)) { + if (!rts_thresh || (len > rts_thresh)) + rts = true; + } + + if (rts || rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) { info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS; info->flags |= ATH9K_TXDESC_RTSENA; } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { @@ -1123,6 +1134,8 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf, struct ath_hw *ah = sc->sc_ah; struct ath_buf *bf_first = NULL; struct ath_tx_info info; + u32 rts_thresh = sc->hw->wiphy->rts_threshold; + bool rts = false; memset(&info, 0, sizeof(info)); info.is_first = true; @@ -1159,7 +1172,22 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf, info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S; - ath_buf_set_rate(sc, bf, &info, len); + /* + * mac80211 doesn't handle RTS threshold for HT because + * the decision has to be taken based on AMPDU length + * and aggregation is done entirely inside ath9k. + * Set the RTS/CTS flag for the first subframe based + * on the threshold. + */ + if (aggr && (bf == bf_first) && + unlikely(rts_thresh != (u32) -1)) { + /* + * "len" is the size of the entire AMPDU. + */ + if (!rts_thresh || (len > rts_thresh)) + rts = true; + } + ath_buf_set_rate(sc, bf, &info, len, rts); } info.buf_addr[0] = bf->bf_buf_addr; @@ -2142,7 +2170,7 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif, bf->bf_lastbf = bf; ath_set_rates(vif, NULL, bf); - ath_buf_set_rate(sc, bf, &info, fi->framelen); + ath_buf_set_rate(sc, bf, &info, fi->framelen, false); duration += info.rates[0].PktDuration; if (bf_tail) bf_tail->bf_next = bf; -- cgit v0.10.2 From 8951b79a4e6d8228babf56ae79a345e4abc5ac82 Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Mon, 8 Jul 2013 11:25:52 +0200 Subject: rt2x00: rt2800lib: introduce rt2800_eeprom_word enum The patch converts the EEPROM_* word address defines into new enum values. The new enum type will be used by new functions which will be introduced in subsequent changes. The patch contains no functional changes. Signed-off-by: Gabor Juhos Acked-by: Stanislaw Gruszka Acked-by: Gertjan van Wingerde Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h index d78c495..0647039 100644 --- a/drivers/net/wireless/rt2x00/rt2800.h +++ b/drivers/net/wireless/rt2x00/rt2800.h @@ -2206,28 +2206,59 @@ struct mac_iveiv_entry { * The wordsize of the EEPROM is 16 bits. */ -/* - * Chip ID - */ -#define EEPROM_CHIP_ID 0x0000 +enum rt2800_eeprom_word { + EEPROM_CHIP_ID = 0x0000, + EEPROM_VERSION = 0x0001, + EEPROM_MAC_ADDR_0 = 0x0002, + EEPROM_MAC_ADDR_1 = 0x0003, + EEPROM_MAC_ADDR_2 = 0x0004, + EEPROM_NIC_CONF0 = 0x001a, + EEPROM_NIC_CONF1 = 0x001b, + EEPROM_FREQ = 0x001d, + EEPROM_LED_AG_CONF = 0x001e, + EEPROM_LED_ACT_CONF = 0x001f, + EEPROM_LED_POLARITY = 0x0020, + EEPROM_NIC_CONF2 = 0x0021, + EEPROM_LNA = 0x0022, + EEPROM_RSSI_BG = 0x0023, + EEPROM_RSSI_BG2 = 0x0024, + EEPROM_TXMIXER_GAIN_BG = 0x0024, /* overlaps with RSSI_BG2 */ + EEPROM_RSSI_A = 0x0025, + EEPROM_RSSI_A2 = 0x0026, + EEPROM_TXMIXER_GAIN_A = 0x0026, /* overlaps with RSSI_A2 */ + EEPROM_EIRP_MAX_TX_POWER = 0x0027, + EEPROM_TXPOWER_DELTA = 0x0028, + EEPROM_TXPOWER_BG1 = 0x0029, + EEPROM_TXPOWER_BG2 = 0x0030, + EEPROM_TSSI_BOUND_BG1 = 0x0037, + EEPROM_TSSI_BOUND_BG2 = 0x0038, + EEPROM_TSSI_BOUND_BG3 = 0x0039, + EEPROM_TSSI_BOUND_BG4 = 0x003a, + EEPROM_TSSI_BOUND_BG5 = 0x003b, + EEPROM_TXPOWER_A1 = 0x003c, + EEPROM_TXPOWER_A2 = 0x0053, + EEPROM_TSSI_BOUND_A1 = 0x006a, + EEPROM_TSSI_BOUND_A2 = 0x006b, + EEPROM_TSSI_BOUND_A3 = 0x006c, + EEPROM_TSSI_BOUND_A4 = 0x006d, + EEPROM_TSSI_BOUND_A5 = 0x006e, + EEPROM_TXPOWER_BYRATE = 0x006f, + EEPROM_BBP_START = 0x0078, +}; /* * EEPROM Version */ -#define EEPROM_VERSION 0x0001 #define EEPROM_VERSION_FAE FIELD16(0x00ff) #define EEPROM_VERSION_VERSION FIELD16(0xff00) /* * HW MAC address. */ -#define EEPROM_MAC_ADDR_0 0x0002 #define EEPROM_MAC_ADDR_BYTE0 FIELD16(0x00ff) #define EEPROM_MAC_ADDR_BYTE1 FIELD16(0xff00) -#define EEPROM_MAC_ADDR_1 0x0003 #define EEPROM_MAC_ADDR_BYTE2 FIELD16(0x00ff) #define EEPROM_MAC_ADDR_BYTE3 FIELD16(0xff00) -#define EEPROM_MAC_ADDR_2 0x0004 #define EEPROM_MAC_ADDR_BYTE4 FIELD16(0x00ff) #define EEPROM_MAC_ADDR_BYTE5 FIELD16(0xff00) @@ -2237,7 +2268,6 @@ struct mac_iveiv_entry { * TXPATH: 1: 1T, 2: 2T, 3: 3T * RF_TYPE: RFIC type */ -#define EEPROM_NIC_CONF0 0x001a #define EEPROM_NIC_CONF0_RXPATH FIELD16(0x000f) #define EEPROM_NIC_CONF0_TXPATH FIELD16(0x00f0) #define EEPROM_NIC_CONF0_RF_TYPE FIELD16(0x0f00) @@ -2261,7 +2291,6 @@ struct mac_iveiv_entry { * BT_COEXIST: 0: disable, 1: enable * DAC_TEST: 0: disable, 1: enable */ -#define EEPROM_NIC_CONF1 0x001b #define EEPROM_NIC_CONF1_HW_RADIO FIELD16(0x0001) #define EEPROM_NIC_CONF1_EXTERNAL_TX_ALC FIELD16(0x0002) #define EEPROM_NIC_CONF1_EXTERNAL_LNA_2G FIELD16(0x0004) @@ -2281,7 +2310,6 @@ struct mac_iveiv_entry { /* * EEPROM frequency */ -#define EEPROM_FREQ 0x001d #define EEPROM_FREQ_OFFSET FIELD16(0x00ff) #define EEPROM_FREQ_LED_MODE FIELD16(0x7f00) #define EEPROM_FREQ_LED_POLARITY FIELD16(0x1000) @@ -2298,9 +2326,6 @@ struct mac_iveiv_entry { * POLARITY_GPIO_4: Polarity GPIO4 setting. * LED_MODE: Led mode. 
*/ -#define EEPROM_LED_AG_CONF 0x001e -#define EEPROM_LED_ACT_CONF 0x001f -#define EEPROM_LED_POLARITY 0x0020 #define EEPROM_LED_POLARITY_RDY_BG FIELD16(0x0001) #define EEPROM_LED_POLARITY_RDY_A FIELD16(0x0002) #define EEPROM_LED_POLARITY_ACT FIELD16(0x0004) @@ -2317,7 +2342,6 @@ struct mac_iveiv_entry { * TX_STREAM: 0: Reserved, 1: 1 Stream, 2: 2 Stream * CRYSTAL: 00: Reserved, 01: One crystal, 10: Two crystal, 11: Reserved */ -#define EEPROM_NIC_CONF2 0x0021 #define EEPROM_NIC_CONF2_RX_STREAM FIELD16(0x000f) #define EEPROM_NIC_CONF2_TX_STREAM FIELD16(0x00f0) #define EEPROM_NIC_CONF2_CRYSTAL FIELD16(0x0600) @@ -2325,54 +2349,46 @@ struct mac_iveiv_entry { /* * EEPROM LNA */ -#define EEPROM_LNA 0x0022 #define EEPROM_LNA_BG FIELD16(0x00ff) #define EEPROM_LNA_A0 FIELD16(0xff00) /* * EEPROM RSSI BG offset */ -#define EEPROM_RSSI_BG 0x0023 #define EEPROM_RSSI_BG_OFFSET0 FIELD16(0x00ff) #define EEPROM_RSSI_BG_OFFSET1 FIELD16(0xff00) /* * EEPROM RSSI BG2 offset */ -#define EEPROM_RSSI_BG2 0x0024 #define EEPROM_RSSI_BG2_OFFSET2 FIELD16(0x00ff) #define EEPROM_RSSI_BG2_LNA_A1 FIELD16(0xff00) /* * EEPROM TXMIXER GAIN BG offset (note overlaps with EEPROM RSSI BG2). */ -#define EEPROM_TXMIXER_GAIN_BG 0x0024 #define EEPROM_TXMIXER_GAIN_BG_VAL FIELD16(0x0007) /* * EEPROM RSSI A offset */ -#define EEPROM_RSSI_A 0x0025 #define EEPROM_RSSI_A_OFFSET0 FIELD16(0x00ff) #define EEPROM_RSSI_A_OFFSET1 FIELD16(0xff00) /* * EEPROM RSSI A2 offset */ -#define EEPROM_RSSI_A2 0x0026 #define EEPROM_RSSI_A2_OFFSET2 FIELD16(0x00ff) #define EEPROM_RSSI_A2_LNA_A2 FIELD16(0xff00) /* * EEPROM TXMIXER GAIN A offset (note overlaps with EEPROM RSSI A2). */ -#define EEPROM_TXMIXER_GAIN_A 0x0026 #define EEPROM_TXMIXER_GAIN_A_VAL FIELD16(0x0007) /* * EEPROM EIRP Maximum TX power values(unit: dbm) */ -#define EEPROM_EIRP_MAX_TX_POWER 0x0027 #define EEPROM_EIRP_MAX_TX_POWER_2GHZ FIELD16(0x00ff) #define EEPROM_EIRP_MAX_TX_POWER_5GHZ FIELD16(0xff00) @@ -2383,7 +2399,6 @@ struct mac_iveiv_entry { * TYPE: 1: Plus the delta value, 0: minus the delta value * ENABLE: enable tx power compensation for 40BW */ -#define EEPROM_TXPOWER_DELTA 0x0028 #define EEPROM_TXPOWER_DELTA_VALUE_2G FIELD16(0x003f) #define EEPROM_TXPOWER_DELTA_TYPE_2G FIELD16(0x0040) #define EEPROM_TXPOWER_DELTA_ENABLE_2G FIELD16(0x0080) @@ -2394,8 +2409,6 @@ struct mac_iveiv_entry { /* * EEPROM TXPOWER 802.11BG */ -#define EEPROM_TXPOWER_BG1 0x0029 -#define EEPROM_TXPOWER_BG2 0x0030 #define EEPROM_TXPOWER_BG_SIZE 7 #define EEPROM_TXPOWER_BG_1 FIELD16(0x00ff) #define EEPROM_TXPOWER_BG_2 FIELD16(0xff00) @@ -2407,7 +2420,6 @@ struct mac_iveiv_entry { * MINUS3: If the actual TSSI is below this boundary, tx power needs to be * reduced by (agc_step * -3) */ -#define EEPROM_TSSI_BOUND_BG1 0x0037 #define EEPROM_TSSI_BOUND_BG1_MINUS4 FIELD16(0x00ff) #define EEPROM_TSSI_BOUND_BG1_MINUS3 FIELD16(0xff00) @@ -2418,7 +2430,6 @@ struct mac_iveiv_entry { * MINUS1: If the actual TSSI is below this boundary, tx power needs to be * reduced by (agc_step * -1) */ -#define EEPROM_TSSI_BOUND_BG2 0x0038 #define EEPROM_TSSI_BOUND_BG2_MINUS2 FIELD16(0x00ff) #define EEPROM_TSSI_BOUND_BG2_MINUS1 FIELD16(0xff00) @@ -2428,7 +2439,6 @@ struct mac_iveiv_entry { * PLUS1: If the actual TSSI is above this boundary, tx power needs to be * increased by (agc_step * 1) */ -#define EEPROM_TSSI_BOUND_BG3 0x0039 #define EEPROM_TSSI_BOUND_BG3_REF FIELD16(0x00ff) #define EEPROM_TSSI_BOUND_BG3_PLUS1 FIELD16(0xff00) @@ -2439,7 +2449,6 @@ struct mac_iveiv_entry { * PLUS3: If the actual TSSI is above this boundary, tx 
power needs to be * increased by (agc_step * 3) */ -#define EEPROM_TSSI_BOUND_BG4 0x003a #define EEPROM_TSSI_BOUND_BG4_PLUS2 FIELD16(0x00ff) #define EEPROM_TSSI_BOUND_BG4_PLUS3 FIELD16(0xff00) @@ -2449,15 +2458,12 @@ struct mac_iveiv_entry { * increased by (agc_step * 4) * AGC_STEP: Temperature compensation step. */ -#define EEPROM_TSSI_BOUND_BG5 0x003b #define EEPROM_TSSI_BOUND_BG5_PLUS4 FIELD16(0x00ff) #define EEPROM_TSSI_BOUND_BG5_AGC_STEP FIELD16(0xff00) /* * EEPROM TXPOWER 802.11A */ -#define EEPROM_TXPOWER_A1 0x003c -#define EEPROM_TXPOWER_A2 0x0053 #define EEPROM_TXPOWER_A_SIZE 6 #define EEPROM_TXPOWER_A_1 FIELD16(0x00ff) #define EEPROM_TXPOWER_A_2 FIELD16(0xff00) @@ -2469,7 +2475,6 @@ struct mac_iveiv_entry { * MINUS3: If the actual TSSI is below this boundary, tx power needs to be * reduced by (agc_step * -3) */ -#define EEPROM_TSSI_BOUND_A1 0x006a #define EEPROM_TSSI_BOUND_A1_MINUS4 FIELD16(0x00ff) #define EEPROM_TSSI_BOUND_A1_MINUS3 FIELD16(0xff00) @@ -2480,7 +2485,6 @@ struct mac_iveiv_entry { * MINUS1: If the actual TSSI is below this boundary, tx power needs to be * reduced by (agc_step * -1) */ -#define EEPROM_TSSI_BOUND_A2 0x006b #define EEPROM_TSSI_BOUND_A2_MINUS2 FIELD16(0x00ff) #define EEPROM_TSSI_BOUND_A2_MINUS1 FIELD16(0xff00) @@ -2490,7 +2494,6 @@ struct mac_iveiv_entry { * PLUS1: If the actual TSSI is above this boundary, tx power needs to be * increased by (agc_step * 1) */ -#define EEPROM_TSSI_BOUND_A3 0x006c #define EEPROM_TSSI_BOUND_A3_REF FIELD16(0x00ff) #define EEPROM_TSSI_BOUND_A3_PLUS1 FIELD16(0xff00) @@ -2501,7 +2504,6 @@ struct mac_iveiv_entry { * PLUS3: If the actual TSSI is above this boundary, tx power needs to be * increased by (agc_step * 3) */ -#define EEPROM_TSSI_BOUND_A4 0x006d #define EEPROM_TSSI_BOUND_A4_PLUS2 FIELD16(0x00ff) #define EEPROM_TSSI_BOUND_A4_PLUS3 FIELD16(0xff00) @@ -2511,14 +2513,12 @@ struct mac_iveiv_entry { * increased by (agc_step * 4) * AGC_STEP: Temperature compensation step. */ -#define EEPROM_TSSI_BOUND_A5 0x006e #define EEPROM_TSSI_BOUND_A5_PLUS4 FIELD16(0x00ff) #define EEPROM_TSSI_BOUND_A5_AGC_STEP FIELD16(0xff00) /* * EEPROM TXPOWER by rate: tx power per tx rate for HT20 mode */ -#define EEPROM_TXPOWER_BYRATE 0x006f #define EEPROM_TXPOWER_BYRATE_SIZE 9 #define EEPROM_TXPOWER_BYRATE_RATE0 FIELD16(0x000f) @@ -2529,7 +2529,6 @@ struct mac_iveiv_entry { /* * EEPROM BBP. */ -#define EEPROM_BBP_START 0x0078 #define EEPROM_BBP_SIZE 16 #define EEPROM_BBP_VALUE FIELD16(0x00ff) #define EEPROM_BBP_REG_ID FIELD16(0xff00) -- cgit v0.10.2 From 3e38d3daf881a78ac13e93504a8ac5777040797e Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Mon, 8 Jul 2013 11:25:53 +0200 Subject: rt2x00: rt2800lib: introduce local EEPROM access functions The patch adds rt2800 specific functions for EEPROM data access and changes the code to use these instead of the generic rt2x00_eeprom_* variants. To avoid functional changes, the new functions are wrappers around the corresponding generic rt2x00_eeprom_* routines for now. Functional changes will be implemented in additional patches. Signed-off-by: Gabor Juhos Acked-by: Stanislaw Gruszka Acked-by: Gertjan van Wingerde Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 1f80ea5..522f0b1 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -221,6 +221,24 @@ static void rt2800_rf_write(struct rt2x00_dev *rt2x00dev, mutex_unlock(&rt2x00dev->csr_mutex); } +static void *rt2800_eeprom_addr(struct rt2x00_dev *rt2x00dev, + const enum rt2800_eeprom_word word) +{ + return rt2x00_eeprom_addr(rt2x00dev, word); +} + +static void rt2800_eeprom_read(struct rt2x00_dev *rt2x00dev, + const enum rt2800_eeprom_word word, u16 *data) +{ + rt2x00_eeprom_read(rt2x00dev, word, data); +} + +static void rt2800_eeprom_write(struct rt2x00_dev *rt2x00dev, + const enum rt2800_eeprom_word word, u16 data) +{ + rt2x00_eeprom_write(rt2x00dev, word, data); +} + static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev) { u32 reg; @@ -609,16 +627,16 @@ static int rt2800_agc_to_rssi(struct rt2x00_dev *rt2x00dev, u32 rxwi_w2) u8 offset2; if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) { - rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &eeprom); offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET0); offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET1); - rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom); offset2 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG2_OFFSET2); } else { - rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &eeprom); offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A_OFFSET0); offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A_OFFSET1); - rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom); offset2 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A2_OFFSET2); } @@ -890,6 +908,9 @@ const struct rt2x00debug rt2800_rt2x00debug = { .word_count = CSR_REG_SIZE / sizeof(u32), }, .eeprom = { + /* NOTE: The local EEPROM access functions can't + * be used here, use the generic versions instead. + */ .read = rt2x00_eeprom_read, .write = rt2x00_eeprom_write, .word_base = EEPROM_BASE, @@ -1547,7 +1568,7 @@ static void rt2800_config_3572bt_ant(struct rt2x00_dev *rt2x00dev) led_r_mode = rt2x00_get_field32(reg, LED_CFG_LED_POLAR) ? 
0 : 3; if (led_g_mode != rt2x00_get_field32(reg, LED_CFG_G_LED_MODE) || led_r_mode != rt2x00_get_field32(reg, LED_CFG_R_LED_MODE)) { - rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom); led_ctrl = rt2x00_get_field16(eeprom, EEPROM_FREQ_LED_MODE); if (led_ctrl == 0 || led_ctrl > 0x40) { rt2x00_set_field32(®, LED_CFG_G_LED_MODE, led_g_mode); @@ -1622,7 +1643,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant) rt2x00_rt(rt2x00dev, RT3090) || rt2x00_rt(rt2x00dev, RT3352) || rt2x00_rt(rt2x00dev, RT3390)) { - rt2x00_eeprom_read(rt2x00dev, + rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY)) @@ -1659,16 +1680,16 @@ static void rt2800_config_lna_gain(struct rt2x00_dev *rt2x00dev, short lna_gain; if (libconf->rf.channel <= 14) { - rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom); lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_BG); } else if (libconf->rf.channel <= 64) { - rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom); lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_A0); } else if (libconf->rf.channel <= 128) { - rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom); lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG2_LNA_A1); } else { - rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom); lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_A2_LNA_A2); } @@ -2798,62 +2819,62 @@ static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev) * Example TSSI bounds 0xF0 0xD0 0xB5 0xA0 0x88 0x45 0x25 0x15 0x00 */ if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) { - rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG1, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG1, &eeprom); tssi_bounds[0] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_BG1_MINUS4); tssi_bounds[1] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_BG1_MINUS3); - rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG2, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG2, &eeprom); tssi_bounds[2] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_BG2_MINUS2); tssi_bounds[3] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_BG2_MINUS1); - rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG3, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG3, &eeprom); tssi_bounds[4] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_BG3_REF); tssi_bounds[5] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_BG3_PLUS1); - rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG4, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG4, &eeprom); tssi_bounds[6] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_BG4_PLUS2); tssi_bounds[7] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_BG4_PLUS3); - rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG5, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG5, &eeprom); tssi_bounds[8] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_BG5_PLUS4); step = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_BG5_AGC_STEP); } else { - rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A1, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A1, &eeprom); tssi_bounds[0] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_A1_MINUS4); tssi_bounds[1] = rt2x00_get_field16(eeprom, 
EEPROM_TSSI_BOUND_A1_MINUS3); - rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A2, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A2, &eeprom); tssi_bounds[2] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_A2_MINUS2); tssi_bounds[3] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_A2_MINUS1); - rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A3, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A3, &eeprom); tssi_bounds[4] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_A3_REF); tssi_bounds[5] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_A3_PLUS1); - rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A4, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A4, &eeprom); tssi_bounds[6] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_A4_PLUS2); tssi_bounds[7] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_A4_PLUS3); - rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A5, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A5, &eeprom); tssi_bounds[8] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_A5_PLUS4); @@ -2899,7 +2920,7 @@ static int rt2800_get_txpower_bw_comp(struct rt2x00_dev *rt2x00dev, u8 comp_type; int comp_value = 0; - rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_DELTA, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_TXPOWER_DELTA, &eeprom); /* * HT40 compensation not required. @@ -2974,12 +2995,12 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b, * .11b data rate need add additional 4dbm * when calculating eirp txpower. */ - rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + 1, + rt2800_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + 1, &eeprom); criterion = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0); - rt2x00_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER, + rt2800_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER, &eeprom); if (band == IEEE80211_BAND_2GHZ) @@ -3080,7 +3101,7 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev, rt2800_register_read(rt2x00dev, offset, ®); /* read the next four txpower values */ - rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i, + rt2800_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i, &eeprom); is_rate_b = i ? 
0 : 1; @@ -3129,7 +3150,7 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev, rt2x00_set_field32(®, TX_PWR_CFG_RATE3, txpower); /* read the next four txpower values */ - rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i + 1, + rt2800_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i + 1, &eeprom); is_rate_b = 0; @@ -3528,7 +3549,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) || rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) { - rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, + &eeprom); if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_DAC_TEST)) rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x0000002c); @@ -3989,7 +4011,7 @@ static void rt2800_disable_unused_dac_adc(struct rt2x00_dev *rt2x00dev) u8 value; rt2800_bbp_read(rt2x00dev, 138, &value); - rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1) value |= 0x20; if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1) @@ -4402,7 +4424,7 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev) rt2800_disable_unused_dac_adc(rt2x00dev); - rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY); ant = (div_mode == 3) ? 1 : 0; @@ -4488,7 +4510,7 @@ static void rt2800_init_bbp_5592(struct rt2x00_dev *rt2x00dev) rt2800_bbp4_mac_if_ctrl(rt2x00dev); - rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY); ant = (div_mode == 3) ? 
1 : 0; rt2800_bbp_read(rt2x00dev, 152, &value); @@ -4557,7 +4579,7 @@ static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) } for (i = 0; i < EEPROM_BBP_SIZE; i++) { - rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom); if (eeprom != 0xffff && eeprom != 0x0000) { reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); @@ -4728,7 +4750,7 @@ static void rt2800_normal_mode_setup_3xxx(struct rt2x00_dev *rt2x00dev) if (rt2x00_rt(rt2x00dev, RT3090)) { /* Turn off unused DAC1 and ADC1 to reduce power consumption */ rt2800_bbp_read(rt2x00dev, 138, &bbp); - rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1) rt2x00_set_field8(&bbp, BBP138_RX_ADC1, 0); if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1) @@ -4778,7 +4800,7 @@ static void rt2800_normal_mode_setup_5xxx(struct rt2x00_dev *rt2x00dev) /* Turn off unused DAC1 and ADC1 to reduce power consumption */ rt2800_bbp_read(rt2x00dev, 138, ®); - rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1) rt2x00_set_field8(®, BBP138_RX_ADC1, 0); if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1) @@ -4884,7 +4906,8 @@ static void rt2800_init_rfcsr_30xx(struct rt2x00_dev *rt2x00dev) rt2x00_set_field32(®, LDO_CFG0_BGSEL, 1); if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E)) { - rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, + &eeprom); if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_DAC_TEST)) rt2x00_set_field32(®, LDO_CFG0_LDO_CORE_VLEVEL, 3); else @@ -5456,15 +5479,15 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev) /* * Initialize LED control */ - rt2x00_eeprom_read(rt2x00dev, EEPROM_LED_AG_CONF, &word); + rt2800_eeprom_read(rt2x00dev, EEPROM_LED_AG_CONF, &word); rt2800_mcu_request(rt2x00dev, MCU_LED_AG_CONF, 0xff, word & 0xff, (word >> 8) & 0xff); - rt2x00_eeprom_read(rt2x00dev, EEPROM_LED_ACT_CONF, &word); + rt2800_eeprom_read(rt2x00dev, EEPROM_LED_ACT_CONF, &word); rt2800_mcu_request(rt2x00dev, MCU_LED_ACT_CONF, 0xff, word & 0xff, (word >> 8) & 0xff); - rt2x00_eeprom_read(rt2x00dev, EEPROM_LED_POLARITY, &word); + rt2800_eeprom_read(rt2x00dev, EEPROM_LED_POLARITY, &word); rt2800_mcu_request(rt2x00dev, MCU_LED_LED_POLARITY, 0xff, word & 0xff, (word >> 8) & 0xff); @@ -5578,18 +5601,18 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev) /* * Start validation of the data that has been read. 
*/ - mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); + mac = rt2800_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); if (!is_valid_ether_addr(mac)) { eth_random_addr(mac); rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", mac); } - rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &word); + rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &word); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RXPATH, 2); rt2x00_set_field16(&word, EEPROM_NIC_CONF0_TXPATH, 1); rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RF_TYPE, RF2820); - rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC_CONF0, word); + rt2800_eeprom_write(rt2x00dev, EEPROM_NIC_CONF0, word); rt2x00_eeprom_dbg(rt2x00dev, "Antenna: 0x%04x\n", word); } else if (rt2x00_rt(rt2x00dev, RT2860) || rt2x00_rt(rt2x00dev, RT2872)) { @@ -5598,10 +5621,10 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev) */ if (rt2x00_get_field16(word, EEPROM_NIC_CONF0_RXPATH) > 2) rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RXPATH, 2); - rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC_CONF0, word); + rt2800_eeprom_write(rt2x00dev, EEPROM_NIC_CONF0, word); } - rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &word); + rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &word); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_NIC_CONF1_HW_RADIO, 0); rt2x00_set_field16(&word, EEPROM_NIC_CONF1_EXTERNAL_TX_ALC, 0); @@ -5618,24 +5641,24 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev) rt2x00_set_field16(&word, EEPROM_NIC_CONF1_INTERNAL_TX_ALC, 0); rt2x00_set_field16(&word, EEPROM_NIC_CONF1_BT_COEXIST, 0); rt2x00_set_field16(&word, EEPROM_NIC_CONF1_DAC_TEST, 0); - rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC_CONF1, word); + rt2800_eeprom_write(rt2x00dev, EEPROM_NIC_CONF1, word); rt2x00_eeprom_dbg(rt2x00dev, "NIC: 0x%04x\n", word); } - rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word); + rt2800_eeprom_read(rt2x00dev, EEPROM_FREQ, &word); if ((word & 0x00ff) == 0x00ff) { rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0); - rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word); + rt2800_eeprom_write(rt2x00dev, EEPROM_FREQ, word); rt2x00_eeprom_dbg(rt2x00dev, "Freq: 0x%04x\n", word); } if ((word & 0xff00) == 0xff00) { rt2x00_set_field16(&word, EEPROM_FREQ_LED_MODE, LED_MODE_TXRX_ACTIVITY); rt2x00_set_field16(&word, EEPROM_FREQ_LED_POLARITY, 0); - rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word); - rt2x00_eeprom_write(rt2x00dev, EEPROM_LED_AG_CONF, 0x5555); - rt2x00_eeprom_write(rt2x00dev, EEPROM_LED_ACT_CONF, 0x2221); - rt2x00_eeprom_write(rt2x00dev, EEPROM_LED_POLARITY, 0xa9f8); + rt2800_eeprom_write(rt2x00dev, EEPROM_FREQ, word); + rt2800_eeprom_write(rt2x00dev, EEPROM_LED_AG_CONF, 0x5555); + rt2800_eeprom_write(rt2x00dev, EEPROM_LED_ACT_CONF, 0x2221); + rt2800_eeprom_write(rt2x00dev, EEPROM_LED_POLARITY, 0xa9f8); rt2x00_eeprom_dbg(rt2x00dev, "Led Mode: 0x%04x\n", word); } @@ -5644,17 +5667,17 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev) * lna0 as correct value. Note that EEPROM_LNA * is never validated. 
*/ - rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &word); + rt2800_eeprom_read(rt2x00dev, EEPROM_LNA, &word); default_lna_gain = rt2x00_get_field16(word, EEPROM_LNA_A0); - rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &word); + rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &word); if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET0)) > 10) rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET0, 0); if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET1)) > 10) rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET1, 0); - rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_BG, word); + rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_BG, word); - rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &word); + rt2800_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &word); if ((word & 0x00ff) != 0x00ff) { drv_data->txmixer_gain_24g = rt2x00_get_field16(word, EEPROM_TXMIXER_GAIN_BG_VAL); @@ -5662,16 +5685,16 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev) drv_data->txmixer_gain_24g = 0; } - rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &word); + rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &word); if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG2_OFFSET2)) > 10) rt2x00_set_field16(&word, EEPROM_RSSI_BG2_OFFSET2, 0); if (rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0x00 || rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0xff) rt2x00_set_field16(&word, EEPROM_RSSI_BG2_LNA_A1, default_lna_gain); - rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_BG2, word); + rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_BG2, word); - rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_A, &word); + rt2800_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_A, &word); if ((word & 0x00ff) != 0x00ff) { drv_data->txmixer_gain_5g = rt2x00_get_field16(word, EEPROM_TXMIXER_GAIN_A_VAL); @@ -5679,21 +5702,21 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev) drv_data->txmixer_gain_5g = 0; } - rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &word); + rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &word); if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET0)) > 10) rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET0, 0); if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET1)) > 10) rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET1, 0); - rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A, word); + rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_A, word); - rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &word); + rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &word); if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A2_OFFSET2)) > 10) rt2x00_set_field16(&word, EEPROM_RSSI_A2_OFFSET2, 0); if (rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0x00 || rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0xff) rt2x00_set_field16(&word, EEPROM_RSSI_A2_LNA_A2, default_lna_gain); - rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word); + rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word); return 0; } @@ -5707,7 +5730,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) /* * Read EEPROM word for configuration. 
*/ - rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); /* * Identify RF chipset by EEPROM value @@ -5717,7 +5740,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) if (rt2x00_rt(rt2x00dev, RT3290) || rt2x00_rt(rt2x00dev, RT5390) || rt2x00_rt(rt2x00dev, RT5392)) - rt2x00_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &rf); + rt2800_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &rf); else rf = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE); @@ -5757,7 +5780,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) rt2x00dev->default_ant.rx_chain_num = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH); - rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); if (rt2x00_rt(rt2x00dev, RT3070) || rt2x00_rt(rt2x00dev, RT3090) || @@ -5810,7 +5833,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) /* * Read frequency offset and RF programming sequence. */ - rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom); rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET); /* @@ -5827,7 +5850,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) /* * Check if support EIRP tx power limit feature. */ - rt2x00_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER, &eeprom); if (rt2x00_get_field16(eeprom, EEPROM_EIRP_MAX_TX_POWER_2GHZ) < EIRP_MAX_TX_POWER_LIMIT) @@ -6148,7 +6171,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev); SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, - rt2x00_eeprom_addr(rt2x00dev, + rt2800_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0)); /* @@ -6164,7 +6187,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) rt2x00dev->hw->max_report_rates = 7; rt2x00dev->hw->max_rate_tries = 1; - rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); + rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); /* * Initialize hw_mode information. @@ -6264,8 +6287,8 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) spec->channels_info = info; - default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1); - default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2); + default_power1 = rt2800_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1); + default_power2 = rt2800_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2); for (i = 0; i < 14; i++) { info[i].default_power1 = default_power1[i]; @@ -6273,8 +6296,10 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) } if (spec->num_channels > 14) { - default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1); - default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2); + default_power1 = rt2800_eeprom_addr(rt2x00dev, + EEPROM_TXPOWER_A1); + default_power2 = rt2800_eeprom_addr(rt2x00dev, + EEPROM_TXPOWER_A2); for (i = 14; i < spec->num_channels; i++) { info[i].default_power1 = default_power1[i - 14]; -- cgit v0.10.2 From 022138ca93f016374d5d3f69c070c75596c5ecac Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Mon, 8 Jul 2013 11:25:54 +0200 Subject: rt2x00: rt2800lib: introduce rt2800_eeprom_read_from_array helper Add a new helper function and use that for reading single elements of various arrays in the EEPROM. The patch does not change the current behaviour, but it allows to use sequential values for the rt2800_eeprom_word enums. 
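As a rough standalone illustration of what such an array-read helper boils down to — a base word plus an element offset — the sketch below may help; the names and the shadow buffer are hypothetical and are not part of the rt2x00 code:

    #include <stdint.h>

    /* Hypothetical sketch only, not driver code: a 16-bit EEPROM shadow
     * and an array-element read in the spirit of
     * rt2800_eeprom_read_from_array(), i.e. "word at (base + offset)".
     */
    static uint16_t eeprom_shadow[256];    /* size chosen only for the example */

    static uint16_t example_read_from_array(unsigned int base, unsigned int offset)
    {
            /* e.g. base = EEPROM_TXPOWER_BYRATE, offset = i */
            return eeprom_shadow[base + offset];
    }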
The conversion will be implemented in a subsequent change. Signed-off-by: Gabor Juhos Acked-by: Stanislaw Gruszka Acked-by: Gertjan van Wingerde Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 522f0b1..b59772a 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -239,6 +239,14 @@ static void rt2800_eeprom_write(struct rt2x00_dev *rt2x00dev, rt2x00_eeprom_write(rt2x00dev, word, data); } +static void rt2800_eeprom_read_from_array(struct rt2x00_dev *rt2x00dev, + const enum rt2800_eeprom_word array, + unsigned int offset, + u16 *data) +{ + rt2x00_eeprom_read(rt2x00dev, array + offset, data); +} + static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev) { u32 reg; @@ -2995,8 +3003,8 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b, * .11b data rate need add additional 4dbm * when calculating eirp txpower. */ - rt2800_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + 1, - &eeprom); + rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE, + 1, &eeprom); criterion = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0); @@ -3101,8 +3109,8 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev, rt2800_register_read(rt2x00dev, offset, ®); /* read the next four txpower values */ - rt2800_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i, - &eeprom); + rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE, + i, &eeprom); is_rate_b = i ? 0 : 1; /* @@ -3150,8 +3158,8 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev, rt2x00_set_field32(®, TX_PWR_CFG_RATE3, txpower); /* read the next four txpower values */ - rt2800_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i + 1, - &eeprom); + rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE, + i + 1, &eeprom); is_rate_b = 0; /* @@ -4579,7 +4587,8 @@ static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) } for (i = 0; i < EEPROM_BBP_SIZE; i++) { - rt2800_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom); + rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_BBP_START, i, + &eeprom); if (eeprom != 0xffff && eeprom != 0x0000) { reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); -- cgit v0.10.2 From 379448fe34e289fdcc473399d4f6cac19e757fb8 Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Mon, 8 Jul 2013 11:25:55 +0200 Subject: rt2x00: rt2800lib: introduce rt2800_eeprom_word_index helper Instead of assign the offset value to the enum directly use a new helper function to convert a rt2800_eeprom_word enum into an index of the rt2x00_dev->eeprom array. The patch does not change the existing behaviour, but makes it possible to add support for three-chain devices which are using a different EEPROM layout. Signed-off-by: Gabor Juhos Acked-by: Gertjan van Wingerde Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h index 0647039..bc5c695 100644 --- a/drivers/net/wireless/rt2x00/rt2800.h +++ b/drivers/net/wireless/rt2x00/rt2800.h @@ -2207,43 +2207,45 @@ struct mac_iveiv_entry { */ enum rt2800_eeprom_word { - EEPROM_CHIP_ID = 0x0000, - EEPROM_VERSION = 0x0001, - EEPROM_MAC_ADDR_0 = 0x0002, - EEPROM_MAC_ADDR_1 = 0x0003, - EEPROM_MAC_ADDR_2 = 0x0004, - EEPROM_NIC_CONF0 = 0x001a, - EEPROM_NIC_CONF1 = 0x001b, - EEPROM_FREQ = 0x001d, - EEPROM_LED_AG_CONF = 0x001e, - EEPROM_LED_ACT_CONF = 0x001f, - EEPROM_LED_POLARITY = 0x0020, - EEPROM_NIC_CONF2 = 0x0021, - EEPROM_LNA = 0x0022, - EEPROM_RSSI_BG = 0x0023, - EEPROM_RSSI_BG2 = 0x0024, - EEPROM_TXMIXER_GAIN_BG = 0x0024, /* overlaps with RSSI_BG2 */ - EEPROM_RSSI_A = 0x0025, - EEPROM_RSSI_A2 = 0x0026, - EEPROM_TXMIXER_GAIN_A = 0x0026, /* overlaps with RSSI_A2 */ - EEPROM_EIRP_MAX_TX_POWER = 0x0027, - EEPROM_TXPOWER_DELTA = 0x0028, - EEPROM_TXPOWER_BG1 = 0x0029, - EEPROM_TXPOWER_BG2 = 0x0030, - EEPROM_TSSI_BOUND_BG1 = 0x0037, - EEPROM_TSSI_BOUND_BG2 = 0x0038, - EEPROM_TSSI_BOUND_BG3 = 0x0039, - EEPROM_TSSI_BOUND_BG4 = 0x003a, - EEPROM_TSSI_BOUND_BG5 = 0x003b, - EEPROM_TXPOWER_A1 = 0x003c, - EEPROM_TXPOWER_A2 = 0x0053, - EEPROM_TSSI_BOUND_A1 = 0x006a, - EEPROM_TSSI_BOUND_A2 = 0x006b, - EEPROM_TSSI_BOUND_A3 = 0x006c, - EEPROM_TSSI_BOUND_A4 = 0x006d, - EEPROM_TSSI_BOUND_A5 = 0x006e, - EEPROM_TXPOWER_BYRATE = 0x006f, - EEPROM_BBP_START = 0x0078, + EEPROM_CHIP_ID = 0, + EEPROM_VERSION, + EEPROM_MAC_ADDR_0, + EEPROM_MAC_ADDR_1, + EEPROM_MAC_ADDR_2, + EEPROM_NIC_CONF0, + EEPROM_NIC_CONF1, + EEPROM_FREQ, + EEPROM_LED_AG_CONF, + EEPROM_LED_ACT_CONF, + EEPROM_LED_POLARITY, + EEPROM_NIC_CONF2, + EEPROM_LNA, + EEPROM_RSSI_BG, + EEPROM_RSSI_BG2, + EEPROM_TXMIXER_GAIN_BG, + EEPROM_RSSI_A, + EEPROM_RSSI_A2, + EEPROM_TXMIXER_GAIN_A, + EEPROM_EIRP_MAX_TX_POWER, + EEPROM_TXPOWER_DELTA, + EEPROM_TXPOWER_BG1, + EEPROM_TXPOWER_BG2, + EEPROM_TSSI_BOUND_BG1, + EEPROM_TSSI_BOUND_BG2, + EEPROM_TSSI_BOUND_BG3, + EEPROM_TSSI_BOUND_BG4, + EEPROM_TSSI_BOUND_BG5, + EEPROM_TXPOWER_A1, + EEPROM_TXPOWER_A2, + EEPROM_TSSI_BOUND_A1, + EEPROM_TSSI_BOUND_A2, + EEPROM_TSSI_BOUND_A3, + EEPROM_TSSI_BOUND_A4, + EEPROM_TSSI_BOUND_A5, + EEPROM_TXPOWER_BYRATE, + EEPROM_BBP_START, + /* New values must be added before this */ + EEPROM_WORD_COUNT }; /* diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index b59772a..41a34de4a 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -221,22 +221,98 @@ static void rt2800_rf_write(struct rt2x00_dev *rt2x00dev, mutex_unlock(&rt2x00dev->csr_mutex); } +static const unsigned int rt2800_eeprom_map[EEPROM_WORD_COUNT] = { + [EEPROM_CHIP_ID] = 0x0000, + [EEPROM_VERSION] = 0x0001, + [EEPROM_MAC_ADDR_0] = 0x0002, + [EEPROM_MAC_ADDR_1] = 0x0003, + [EEPROM_MAC_ADDR_2] = 0x0004, + [EEPROM_NIC_CONF0] = 0x001a, + [EEPROM_NIC_CONF1] = 0x001b, + [EEPROM_FREQ] = 0x001d, + [EEPROM_LED_AG_CONF] = 0x001e, + [EEPROM_LED_ACT_CONF] = 0x001f, + [EEPROM_LED_POLARITY] = 0x0020, + [EEPROM_NIC_CONF2] = 0x0021, + [EEPROM_LNA] = 0x0022, + [EEPROM_RSSI_BG] = 0x0023, + [EEPROM_RSSI_BG2] = 0x0024, + [EEPROM_TXMIXER_GAIN_BG] = 0x0024, /* overlaps with RSSI_BG2 */ + [EEPROM_RSSI_A] = 0x0025, + [EEPROM_RSSI_A2] = 0x0026, + [EEPROM_TXMIXER_GAIN_A] = 0x0026, /* overlaps with RSSI_A2 */ + [EEPROM_EIRP_MAX_TX_POWER] = 0x0027, + [EEPROM_TXPOWER_DELTA] = 0x0028, + [EEPROM_TXPOWER_BG1] = 0x0029, + [EEPROM_TXPOWER_BG2] = 
0x0030, + [EEPROM_TSSI_BOUND_BG1] = 0x0037, + [EEPROM_TSSI_BOUND_BG2] = 0x0038, + [EEPROM_TSSI_BOUND_BG3] = 0x0039, + [EEPROM_TSSI_BOUND_BG4] = 0x003a, + [EEPROM_TSSI_BOUND_BG5] = 0x003b, + [EEPROM_TXPOWER_A1] = 0x003c, + [EEPROM_TXPOWER_A2] = 0x0053, + [EEPROM_TSSI_BOUND_A1] = 0x006a, + [EEPROM_TSSI_BOUND_A2] = 0x006b, + [EEPROM_TSSI_BOUND_A3] = 0x006c, + [EEPROM_TSSI_BOUND_A4] = 0x006d, + [EEPROM_TSSI_BOUND_A5] = 0x006e, + [EEPROM_TXPOWER_BYRATE] = 0x006f, + [EEPROM_BBP_START] = 0x0078, +}; + +static unsigned int rt2800_eeprom_word_index(struct rt2x00_dev *rt2x00dev, + const enum rt2800_eeprom_word word) +{ + const unsigned int *map; + unsigned int index; + + if (WARN_ONCE(word >= EEPROM_WORD_COUNT, + "%s: invalid EEPROM word %d\n", + wiphy_name(rt2x00dev->hw->wiphy), word)) + return 0; + + map = rt2800_eeprom_map; + index = map[word]; + + /* Index 0 is valid only for EEPROM_CHIP_ID. + * Otherwise it means that the offset of the + * given word is not initialized in the map, + * or that the field is not usable on the + * actual chipset. + */ + WARN_ONCE(word != EEPROM_CHIP_ID && index == 0, + "%s: invalid access of EEPROM word %d\n", + wiphy_name(rt2x00dev->hw->wiphy), word); + + return index; +} + static void *rt2800_eeprom_addr(struct rt2x00_dev *rt2x00dev, const enum rt2800_eeprom_word word) { - return rt2x00_eeprom_addr(rt2x00dev, word); + unsigned int index; + + index = rt2800_eeprom_word_index(rt2x00dev, word); + return rt2x00_eeprom_addr(rt2x00dev, index); } static void rt2800_eeprom_read(struct rt2x00_dev *rt2x00dev, const enum rt2800_eeprom_word word, u16 *data) { - rt2x00_eeprom_read(rt2x00dev, word, data); + unsigned int index; + + index = rt2800_eeprom_word_index(rt2x00dev, word); + rt2x00_eeprom_read(rt2x00dev, index, data); } static void rt2800_eeprom_write(struct rt2x00_dev *rt2x00dev, const enum rt2800_eeprom_word word, u16 data) { - rt2x00_eeprom_write(rt2x00dev, word, data); + unsigned int index; + + index = rt2800_eeprom_word_index(rt2x00dev, word); + rt2x00_eeprom_write(rt2x00dev, index, data); } static void rt2800_eeprom_read_from_array(struct rt2x00_dev *rt2x00dev, @@ -244,7 +320,10 @@ static void rt2800_eeprom_read_from_array(struct rt2x00_dev *rt2x00dev, unsigned int offset, u16 *data) { - rt2x00_eeprom_read(rt2x00dev, array + offset, data); + unsigned int index; + + index = rt2800_eeprom_word_index(rt2x00dev, array); + rt2x00_eeprom_read(rt2x00dev, index + offset, data); } static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev) -- cgit v0.10.2 From fa31d157f83ef71b6530aacf0400bafe7816acbd Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Mon, 8 Jul 2013 11:25:56 +0200 Subject: rt2x00: rt2800lib: add EEPROM map for the RT3593 chipset Three-chain devices are using a different EEPROM layout than the rest of the chipsets. Add a new map which describes the new layout and use that for the RT3593 chipset. The index values has been computed from the EEPROM_EXT_* defines, which can be found in the 'include/chip/rt3593.h' file in the Ralink DPO_RT5572_LinuxSTA_2.6.0.1_20120629 driver. Signed-off-by: Gabor Juhos Acked-by: Stanislaw Gruszka Acked-by: Gertjan van Wingerde Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h index bc5c695..9216834 100644 --- a/drivers/net/wireless/rt2x00/rt2800.h +++ b/drivers/net/wireless/rt2x00/rt2800.h @@ -2244,6 +2244,12 @@ enum rt2800_eeprom_word { EEPROM_TSSI_BOUND_A5, EEPROM_TXPOWER_BYRATE, EEPROM_BBP_START, + + /* IDs for extended EEPROM format used by three-chain devices */ + EEPROM_EXT_LNA2, + EEPROM_EXT_TXPOWER_BG3, + EEPROM_EXT_TXPOWER_A3, + /* New values must be added before this */ EEPROM_WORD_COUNT }; diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 41a34de4a..d325ca2 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -261,6 +261,48 @@ static const unsigned int rt2800_eeprom_map[EEPROM_WORD_COUNT] = { [EEPROM_BBP_START] = 0x0078, }; +static const unsigned int rt2800_eeprom_map_ext[EEPROM_WORD_COUNT] = { + [EEPROM_CHIP_ID] = 0x0000, + [EEPROM_VERSION] = 0x0001, + [EEPROM_MAC_ADDR_0] = 0x0002, + [EEPROM_MAC_ADDR_1] = 0x0003, + [EEPROM_MAC_ADDR_2] = 0x0004, + [EEPROM_NIC_CONF0] = 0x001a, + [EEPROM_NIC_CONF1] = 0x001b, + [EEPROM_NIC_CONF2] = 0x001c, + [EEPROM_EIRP_MAX_TX_POWER] = 0x0020, + [EEPROM_FREQ] = 0x0022, + [EEPROM_LED_AG_CONF] = 0x0023, + [EEPROM_LED_ACT_CONF] = 0x0024, + [EEPROM_LED_POLARITY] = 0x0025, + [EEPROM_LNA] = 0x0026, + [EEPROM_EXT_LNA2] = 0x0027, + [EEPROM_RSSI_BG] = 0x0028, + [EEPROM_TXPOWER_DELTA] = 0x0028, /* Overlaps with RSSI_BG */ + [EEPROM_RSSI_BG2] = 0x0029, + [EEPROM_TXMIXER_GAIN_BG] = 0x0029, /* Overlaps with RSSI_BG2 */ + [EEPROM_RSSI_A] = 0x002a, + [EEPROM_RSSI_A2] = 0x002b, + [EEPROM_TXMIXER_GAIN_A] = 0x002b, /* Overlaps with RSSI_A2 */ + [EEPROM_TXPOWER_BG1] = 0x0030, + [EEPROM_TXPOWER_BG2] = 0x0037, + [EEPROM_EXT_TXPOWER_BG3] = 0x003e, + [EEPROM_TSSI_BOUND_BG1] = 0x0045, + [EEPROM_TSSI_BOUND_BG2] = 0x0046, + [EEPROM_TSSI_BOUND_BG3] = 0x0047, + [EEPROM_TSSI_BOUND_BG4] = 0x0048, + [EEPROM_TSSI_BOUND_BG5] = 0x0049, + [EEPROM_TXPOWER_A1] = 0x004b, + [EEPROM_TXPOWER_A2] = 0x0065, + [EEPROM_EXT_TXPOWER_A3] = 0x007f, + [EEPROM_TSSI_BOUND_A1] = 0x009a, + [EEPROM_TSSI_BOUND_A2] = 0x009b, + [EEPROM_TSSI_BOUND_A3] = 0x009c, + [EEPROM_TSSI_BOUND_A4] = 0x009d, + [EEPROM_TSSI_BOUND_A5] = 0x009e, + [EEPROM_TXPOWER_BYRATE] = 0x00a0, +}; + static unsigned int rt2800_eeprom_word_index(struct rt2x00_dev *rt2x00dev, const enum rt2800_eeprom_word word) { @@ -272,7 +314,11 @@ static unsigned int rt2800_eeprom_word_index(struct rt2x00_dev *rt2x00dev, wiphy_name(rt2x00dev->hw->wiphy), word)) return 0; - map = rt2800_eeprom_map; + if (rt2x00_rt(rt2x00dev, RT3593)) + map = rt2800_eeprom_map_ext; + else + map = rt2800_eeprom_map; + index = map[word]; /* Index 0 is valid only for EEPROM_CHIP_ID. -- cgit v0.10.2 From 1706d15d82f4a579119b419cd673987af60f1d9b Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Mon, 8 Jul 2013 16:08:16 +0200 Subject: rt2x00: rt2800lib: add MAC register initialization for RT3593 Based on the Ralink DPO_RT5572_LinuxSTA_2.6.0.1_20120629 driver. Reference: NICInitRT3593MacRegisters in chips/rt3593.c Signed-off-by: Gabor Juhos Acked-by: Stanislaw Gruszka Acked-by: Gertjan van Wingerde Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h index 9216834..02bc80d 100644 --- a/drivers/net/wireless/rt2x00/rt2800.h +++ b/drivers/net/wireless/rt2x00/rt2800.h @@ -88,6 +88,7 @@ #define REV_RT3071E 0x0211 #define REV_RT3090E 0x0211 #define REV_RT3390E 0x0211 +#define REV_RT3593E 0x0211 #define REV_RT5390F 0x0502 #define REV_RT5390R 0x1502 #define REV_RT5592C 0x0221 diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index d325ca2..bfeca29 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -3714,6 +3714,23 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) } else if (rt2x00_rt(rt2x00dev, RT3572)) { rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400); rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); + } else if (rt2x00_rt(rt2x00dev, RT3593)) { + rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000402); + rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000); + if (rt2x00_rt_rev_lt(rt2x00dev, RT3593, REV_RT3593E)) { + rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, + &eeprom); + if (rt2x00_get_field16(eeprom, + EEPROM_NIC_CONF1_DAC_TEST)) + rt2800_register_write(rt2x00dev, TX_SW_CFG2, + 0x0000001f); + else + rt2800_register_write(rt2x00dev, TX_SW_CFG2, + 0x0000000f); + } else { + rt2800_register_write(rt2x00dev, TX_SW_CFG2, + 0x00000000); + } } else if (rt2x00_rt(rt2x00dev, RT5390) || rt2x00_rt(rt2x00dev, RT5392) || rt2x00_rt(rt2x00dev, RT5592)) { -- cgit v0.10.2 From b189a1814135bc52f516ca61a1fa161914d57a54 Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Mon, 8 Jul 2013 16:08:17 +0200 Subject: rt2x00: rt2800lib: add BBP register initialization for RT3593 Based on the Ralink DPO_RT5572_LinuxSTA_2.6.0.1_20120629 driver. References: NICInitRT3593BbpRegisters in chips/rt3593.c NICInitBBP in common/rtmp_init.c NICInitAsicFromEEPROM in common/rtmp_init.c Signed-off-by: Gabor Juhos Acked-by: Stanislaw Gruszka Acked-by: Gertjan van Wingerde Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index bfeca29..53ebdb9 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -4504,6 +4504,22 @@ static void rt2800_init_bbp_3572(struct rt2x00_dev *rt2x00dev) rt2800_disable_unused_dac_adc(rt2x00dev); } +static void rt2800_init_bbp_3593(struct rt2x00_dev *rt2x00dev) +{ + rt2800_init_bbp_early(rt2x00dev); + + rt2800_bbp_write(rt2x00dev, 79, 0x13); + rt2800_bbp_write(rt2x00dev, 80, 0x05); + rt2800_bbp_write(rt2x00dev, 81, 0x33); + rt2800_bbp_write(rt2x00dev, 137, 0x0f); + + rt2800_bbp_write(rt2x00dev, 84, 0x19); + + /* Enable DC filter */ + if (rt2x00_rt_rev_gte(rt2x00dev, RT3593, REV_RT3593E)) + rt2800_bbp_write(rt2x00dev, 103, 0xc0); +} + static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev) { int ant, div_mode; @@ -4719,6 +4735,9 @@ static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) case RT3572: rt2800_init_bbp_3572(rt2x00dev); break; + case RT3593: + rt2800_init_bbp_3593(rt2x00dev); + return; case RT5390: case RT5392: rt2800_init_bbp_53xx(rt2x00dev); -- cgit v0.10.2 From ab7078ac3d920e0d49b17e92f327f3ada25600e8 Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Mon, 8 Jul 2013 16:08:18 +0200 Subject: rt2x00: rt2800lib: add RFCSR register initialization for RT3593 Based on the Ralink DPO_RT5572_LinuxSTA_2.6.0.1_20120629 driver. 
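(Aside, for illustration only: the RT3593 support relies on the per-chipset EEPROM map indirection introduced above. The standalone sketch below is not driver code; only the two EEPROM_FREQ offsets, 0x001d and 0x0022, are taken from the maps in the earlier patches, everything else is hypothetical.)

    #include <stdio.h>

    enum example_word { WORD_FREQ, WORD_COUNT };

    static const unsigned int map_std[WORD_COUNT] = { [WORD_FREQ] = 0x001d };
    static const unsigned int map_ext[WORD_COUNT] = { [WORD_FREQ] = 0x0022 };

    int main(void)
    {
            int is_rt3593 = 1;    /* pretend the probed chip is an RT3593 */
            const unsigned int *map = is_rt3593 ? map_ext : map_std;

            /* Same symbolic word, different physical offset per layout. */
            printf("EEPROM_FREQ word offset: 0x%04x\n", map[WORD_FREQ]);
            return 0;
    }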
References: NICInitRT3593RFRegisters in chips/rt3593.c RT3593LoadRFNormalModeSetup in chips/rt3593.c Signed-off-by: Gabor Juhos Acked-by: Gertjan van Wingerde Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h index 02bc80d..47aceae 100644 --- a/drivers/net/wireless/rt2x00/rt2800.h +++ b/drivers/net/wireless/rt2x00/rt2800.h @@ -2093,6 +2093,10 @@ struct mac_iveiv_entry { #define RFCSR17_R FIELD8(0x20) #define RFCSR17_CODE FIELD8(0x7f) +/* RFCSR 18 */ +#define RFCSR18_XO_TUNE_BYPASS FIELD8(0x40) + + /* * RFCSR 20: */ @@ -2174,6 +2178,12 @@ struct mac_iveiv_entry { */ #define RFCSR50_TX FIELD8(0x3f) #define RFCSR50_EP FIELD8(0xc0) +/* bits for RT3593*/ +#define RFCSR50_TX_LO2_EN FIELD8(0x10) + +/* RFCSR 51 */ +/* bits for RT3593*/ +#define RFCSR51_BITS24 FIELD8(0x1c) /* * RF registers diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 53ebdb9..b650569 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -4963,6 +4963,42 @@ static void rt2800_normal_mode_setup_3xxx(struct rt2x00_dev *rt2x00dev) } } +static void rt2800_normal_mode_setup_3593(struct rt2x00_dev *rt2x00dev) +{ + struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; + u8 rfcsr; + u8 tx_gain; + + rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR50_TX_LO2_EN, 0); + rt2800_rfcsr_write(rt2x00dev, 50, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 51, &rfcsr); + tx_gain = rt2x00_get_field8(drv_data->txmixer_gain_24g, + RFCSR17_TXMIXER_GAIN); + rt2x00_set_field8(&rfcsr, RFCSR51_BITS24, tx_gain); + rt2800_rfcsr_write(rt2x00dev, 51, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 38, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR38_RX_LO1_EN, 0); + rt2800_rfcsr_write(rt2x00dev, 38, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 39, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR39_RX_LO2_EN, 0); + rt2800_rfcsr_write(rt2x00dev, 39, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1); + rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1); + rt2800_rfcsr_write(rt2x00dev, 1, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR30_RX_VCM, 2); + rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); + + /* TODO: enable stream mode */ +} + static void rt2800_normal_mode_setup_5xxx(struct rt2x00_dev *rt2x00dev) { u8 reg; @@ -5345,6 +5381,89 @@ static void rt2800_init_rfcsr_3572(struct rt2x00_dev *rt2x00dev) rt2800_normal_mode_setup_3xxx(rt2x00dev); } +static void rt2800_init_rfcsr_3593(struct rt2x00_dev *rt2x00dev) +{ + struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; + u32 reg; + u8 rfcsr; + + /* Disable GPIO #4 and #7 function for LAN PE control */ + rt2800_register_read(rt2x00dev, GPIO_SWITCH, ®); + rt2x00_set_field32(®, GPIO_SWITCH_4, 0); + rt2x00_set_field32(®, GPIO_SWITCH_7, 0); + rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg); + + /* Initialize default register values */ + rt2800_rfcsr_write(rt2x00dev, 1, 0x03); + rt2800_rfcsr_write(rt2x00dev, 3, 0x80); + rt2800_rfcsr_write(rt2x00dev, 5, 0x00); + rt2800_rfcsr_write(rt2x00dev, 6, 0x40); + rt2800_rfcsr_write(rt2x00dev, 8, 0xf1); + rt2800_rfcsr_write(rt2x00dev, 9, 0x02); + rt2800_rfcsr_write(rt2x00dev, 10, 0xd3); + rt2800_rfcsr_write(rt2x00dev, 11, 0x40); + rt2800_rfcsr_write(rt2x00dev, 12, 0x4e); + rt2800_rfcsr_write(rt2x00dev, 13, 0x12); + rt2800_rfcsr_write(rt2x00dev, 18, 0x40); + rt2800_rfcsr_write(rt2x00dev, 22, 0x20); + 
rt2800_rfcsr_write(rt2x00dev, 30, 0x10); + rt2800_rfcsr_write(rt2x00dev, 31, 0x80); + rt2800_rfcsr_write(rt2x00dev, 32, 0x78); + rt2800_rfcsr_write(rt2x00dev, 33, 0x3b); + rt2800_rfcsr_write(rt2x00dev, 34, 0x3c); + rt2800_rfcsr_write(rt2x00dev, 35, 0xe0); + rt2800_rfcsr_write(rt2x00dev, 38, 0x86); + rt2800_rfcsr_write(rt2x00dev, 39, 0x23); + rt2800_rfcsr_write(rt2x00dev, 44, 0xd3); + rt2800_rfcsr_write(rt2x00dev, 45, 0xbb); + rt2800_rfcsr_write(rt2x00dev, 46, 0x60); + rt2800_rfcsr_write(rt2x00dev, 49, 0x8e); + rt2800_rfcsr_write(rt2x00dev, 50, 0x86); + rt2800_rfcsr_write(rt2x00dev, 51, 0x75); + rt2800_rfcsr_write(rt2x00dev, 52, 0x45); + rt2800_rfcsr_write(rt2x00dev, 53, 0x18); + rt2800_rfcsr_write(rt2x00dev, 54, 0x18); + rt2800_rfcsr_write(rt2x00dev, 55, 0x18); + rt2800_rfcsr_write(rt2x00dev, 56, 0xdb); + rt2800_rfcsr_write(rt2x00dev, 57, 0x6e); + + /* Initiate calibration */ + /* TODO: use rt2800_rf_init_calibration ? */ + rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1); + rt2800_rfcsr_write(rt2x00dev, 2, rfcsr); + + rt2800_adjust_freq_offset(rt2x00dev); + + rt2800_rfcsr_read(rt2x00dev, 18, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR18_XO_TUNE_BYPASS, 1); + rt2800_rfcsr_write(rt2x00dev, 18, rfcsr); + + rt2800_register_read(rt2x00dev, LDO_CFG0, ®); + rt2x00_set_field32(®, LDO_CFG0_LDO_CORE_VLEVEL, 3); + rt2x00_set_field32(®, LDO_CFG0_BGSEL, 1); + rt2800_register_write(rt2x00dev, LDO_CFG0, reg); + usleep_range(1000, 1500); + rt2800_register_read(rt2x00dev, LDO_CFG0, ®); + rt2x00_set_field32(®, LDO_CFG0_LDO_CORE_VLEVEL, 0); + rt2800_register_write(rt2x00dev, LDO_CFG0, reg); + + /* Set initial values for RX filter calibration */ + drv_data->calibration_bw20 = 0x1f; + drv_data->calibration_bw40 = 0x2f; + + /* Save BBP 25 & 26 values for later use in channel switching */ + rt2800_bbp_read(rt2x00dev, 25, &drv_data->bbp25); + rt2800_bbp_read(rt2x00dev, 26, &drv_data->bbp26); + + rt2800_led_open_drain_enable(rt2x00dev); + rt2800_normal_mode_setup_3593(rt2x00dev); + + /* TODO: post BBP initialization */ + + /* TODO: enable stream mode support */ +} + static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev) { rt2800_rf_init_calibration(rt2x00dev, 2); @@ -5573,6 +5692,9 @@ static void rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) case RT3572: rt2800_init_rfcsr_3572(rt2x00dev); break; + case RT3593: + rt2800_init_rfcsr_3593(rt2x00dev); + break; case RT5390: rt2800_init_rfcsr_5390(rt2x00dev); break; -- cgit v0.10.2 From d63f7e8ca560dc9a76a15c323cb9cba14b25f430 Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Mon, 8 Jul 2013 16:08:19 +0200 Subject: rt2x00: rt2800lib: add BBP post initialization for RT3593 Based on the Ralink DPO_RT5572_LinuxSTA_2.6.0.1_20120629 driver. Reference: RT3593_PostBBPInitialization in chips/rt3553.c Signed-off-by: Gabor Juhos Acked-by: Stanislaw Gruszka Acked-by: Gertjan van Wingerde Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index b650569..7b5c593 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -5381,6 +5381,52 @@ static void rt2800_init_rfcsr_3572(struct rt2x00_dev *rt2x00dev) rt2800_normal_mode_setup_3xxx(rt2x00dev); } +static void rt3593_post_bbp_init(struct rt2x00_dev *rt2x00dev) +{ + u8 bbp; + bool txbf_enabled = false; /* FIXME */ + + rt2800_bbp_read(rt2x00dev, 105, &bbp); + if (rt2x00dev->default_ant.rx_chain_num == 1) + rt2x00_set_field8(&bbp, BBP105_MLD, 0); + else + rt2x00_set_field8(&bbp, BBP105_MLD, 1); + rt2800_bbp_write(rt2x00dev, 105, bbp); + + rt2800_bbp4_mac_if_ctrl(rt2x00dev); + + rt2800_bbp_write(rt2x00dev, 92, 0x02); + rt2800_bbp_write(rt2x00dev, 82, 0x82); + rt2800_bbp_write(rt2x00dev, 106, 0x05); + rt2800_bbp_write(rt2x00dev, 104, 0x92); + rt2800_bbp_write(rt2x00dev, 88, 0x90); + rt2800_bbp_write(rt2x00dev, 148, 0xc8); + rt2800_bbp_write(rt2x00dev, 47, 0x48); + rt2800_bbp_write(rt2x00dev, 120, 0x50); + + if (txbf_enabled) + rt2800_bbp_write(rt2x00dev, 163, 0xbd); + else + rt2800_bbp_write(rt2x00dev, 163, 0x9d); + + /* SNR mapping */ + rt2800_bbp_write(rt2x00dev, 142, 6); + rt2800_bbp_write(rt2x00dev, 143, 160); + rt2800_bbp_write(rt2x00dev, 142, 7); + rt2800_bbp_write(rt2x00dev, 143, 161); + rt2800_bbp_write(rt2x00dev, 142, 8); + rt2800_bbp_write(rt2x00dev, 143, 162); + + /* ADC/DAC control */ + rt2800_bbp_write(rt2x00dev, 31, 0x08); + + /* RX AGC energy lower bound in log2 */ + rt2800_bbp_write(rt2x00dev, 68, 0x0b); + + /* FIXME: BBP 105 owerwrite? */ + rt2800_bbp_write(rt2x00dev, 105, 0x04); +} + static void rt2800_init_rfcsr_3593(struct rt2x00_dev *rt2x00dev) { struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; @@ -5459,7 +5505,7 @@ static void rt2800_init_rfcsr_3593(struct rt2x00_dev *rt2x00dev) rt2800_led_open_drain_enable(rt2x00dev); rt2800_normal_mode_setup_3593(rt2x00dev); - /* TODO: post BBP initialization */ + rt3593_post_bbp_init(rt2x00dev); /* TODO: enable stream mode support */ } -- cgit v0.10.2 From 34542ff5a665061d548c3f860807df341f718adf Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Mon, 8 Jul 2013 16:08:20 +0200 Subject: rt2x00: rt2800lib: add TX power configuration for RT3593 Based on the Ralink DPO_RT5572_LinuxSTA_2.6.0.1_20120629 driver. References: RTMPReadTxPwrPerRateExt in chips/rt3593.c RT3593_AsicGetTxPowerOffset in chips/rt3593.c Signed-off-by: Gabor Juhos Acked-by: Stanislaw Gruszka Acked-by: Gertjan van Wingerde Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h index 47aceae..7b7caeb 100644 --- a/drivers/net/wireless/rt2x00/rt2800.h +++ b/drivers/net/wireless/rt2x00/rt2800.h @@ -1083,6 +1083,15 @@ #define TX_PWR_CFG_0_9MBS FIELD32(0x00f00000) #define TX_PWR_CFG_0_12MBS FIELD32(0x0f000000) #define TX_PWR_CFG_0_18MBS FIELD32(0xf0000000) +/* bits for 3T devices */ +#define TX_PWR_CFG_0_CCK1_CH0 FIELD32(0x0000000f) +#define TX_PWR_CFG_0_CCK1_CH1 FIELD32(0x000000f0) +#define TX_PWR_CFG_0_CCK5_CH0 FIELD32(0x00000f00) +#define TX_PWR_CFG_0_CCK5_CH1 FIELD32(0x0000f000) +#define TX_PWR_CFG_0_OFDM6_CH0 FIELD32(0x000f0000) +#define TX_PWR_CFG_0_OFDM6_CH1 FIELD32(0x00f00000) +#define TX_PWR_CFG_0_OFDM12_CH0 FIELD32(0x0f000000) +#define TX_PWR_CFG_0_OFDM12_CH1 FIELD32(0xf0000000) /* * TX_PWR_CFG_1: @@ -1096,6 +1105,15 @@ #define TX_PWR_CFG_1_MCS1 FIELD32(0x00f00000) #define TX_PWR_CFG_1_MCS2 FIELD32(0x0f000000) #define TX_PWR_CFG_1_MCS3 FIELD32(0xf0000000) +/* bits for 3T devices */ +#define TX_PWR_CFG_1_OFDM24_CH0 FIELD32(0x0000000f) +#define TX_PWR_CFG_1_OFDM24_CH1 FIELD32(0x000000f0) +#define TX_PWR_CFG_1_OFDM48_CH0 FIELD32(0x00000f00) +#define TX_PWR_CFG_1_OFDM48_CH1 FIELD32(0x0000f000) +#define TX_PWR_CFG_1_MCS0_CH0 FIELD32(0x000f0000) +#define TX_PWR_CFG_1_MCS0_CH1 FIELD32(0x00f00000) +#define TX_PWR_CFG_1_MCS2_CH0 FIELD32(0x0f000000) +#define TX_PWR_CFG_1_MCS2_CH1 FIELD32(0xf0000000) /* * TX_PWR_CFG_2: @@ -1109,6 +1127,15 @@ #define TX_PWR_CFG_2_MCS9 FIELD32(0x00f00000) #define TX_PWR_CFG_2_MCS10 FIELD32(0x0f000000) #define TX_PWR_CFG_2_MCS11 FIELD32(0xf0000000) +/* bits for 3T devices */ +#define TX_PWR_CFG_2_MCS4_CH0 FIELD32(0x0000000f) +#define TX_PWR_CFG_2_MCS4_CH1 FIELD32(0x000000f0) +#define TX_PWR_CFG_2_MCS6_CH0 FIELD32(0x00000f00) +#define TX_PWR_CFG_2_MCS6_CH1 FIELD32(0x0000f000) +#define TX_PWR_CFG_2_MCS8_CH0 FIELD32(0x000f0000) +#define TX_PWR_CFG_2_MCS8_CH1 FIELD32(0x00f00000) +#define TX_PWR_CFG_2_MCS10_CH0 FIELD32(0x0f000000) +#define TX_PWR_CFG_2_MCS10_CH1 FIELD32(0xf0000000) /* * TX_PWR_CFG_3: @@ -1122,6 +1149,15 @@ #define TX_PWR_CFG_3_UKNOWN2 FIELD32(0x00f00000) #define TX_PWR_CFG_3_UKNOWN3 FIELD32(0x0f000000) #define TX_PWR_CFG_3_UKNOWN4 FIELD32(0xf0000000) +/* bits for 3T devices */ +#define TX_PWR_CFG_3_MCS12_CH0 FIELD32(0x0000000f) +#define TX_PWR_CFG_3_MCS12_CH1 FIELD32(0x000000f0) +#define TX_PWR_CFG_3_MCS14_CH0 FIELD32(0x00000f00) +#define TX_PWR_CFG_3_MCS14_CH1 FIELD32(0x0000f000) +#define TX_PWR_CFG_3_STBC0_CH0 FIELD32(0x000f0000) +#define TX_PWR_CFG_3_STBC0_CH1 FIELD32(0x00f00000) +#define TX_PWR_CFG_3_STBC2_CH0 FIELD32(0x0f000000) +#define TX_PWR_CFG_3_STBC2_CH1 FIELD32(0xf0000000) /* * TX_PWR_CFG_4: @@ -1131,6 +1167,11 @@ #define TX_PWR_CFG_4_UKNOWN6 FIELD32(0x000000f0) #define TX_PWR_CFG_4_UKNOWN7 FIELD32(0x00000f00) #define TX_PWR_CFG_4_UKNOWN8 FIELD32(0x0000f000) +/* bits for 3T devices */ +#define TX_PWR_CFG_3_STBC4_CH0 FIELD32(0x0000000f) +#define TX_PWR_CFG_3_STBC4_CH1 FIELD32(0x000000f0) +#define TX_PWR_CFG_3_STBC6_CH0 FIELD32(0x00000f00) +#define TX_PWR_CFG_3_STBC6_CH1 FIELD32(0x0000f000) /* * TX_PIN_CFG: @@ -1452,6 +1493,81 @@ */ #define EXP_ACK_TIME 0x1380 +/* TX_PWR_CFG_5 */ +#define TX_PWR_CFG_5 0x1384 +#define TX_PWR_CFG_5_MCS16_CH0 FIELD32(0x0000000f) +#define TX_PWR_CFG_5_MCS16_CH1 FIELD32(0x000000f0) +#define TX_PWR_CFG_5_MCS16_CH2 FIELD32(0x00000f00) +#define TX_PWR_CFG_5_MCS18_CH0 FIELD32(0x000f0000) +#define TX_PWR_CFG_5_MCS18_CH1 FIELD32(0x00f00000) +#define TX_PWR_CFG_5_MCS18_CH2 FIELD32(0x0f000000) + +/* 
TX_PWR_CFG_6 */ +#define TX_PWR_CFG_6 0x1388 +#define TX_PWR_CFG_6_MCS20_CH0 FIELD32(0x0000000f) +#define TX_PWR_CFG_6_MCS20_CH1 FIELD32(0x000000f0) +#define TX_PWR_CFG_6_MCS20_CH2 FIELD32(0x00000f00) +#define TX_PWR_CFG_6_MCS22_CH0 FIELD32(0x000f0000) +#define TX_PWR_CFG_6_MCS22_CH1 FIELD32(0x00f00000) +#define TX_PWR_CFG_6_MCS22_CH2 FIELD32(0x0f000000) + +/* TX_PWR_CFG_0_EXT */ +#define TX_PWR_CFG_0_EXT 0x1390 +#define TX_PWR_CFG_0_EXT_CCK1_CH2 FIELD32(0x0000000f) +#define TX_PWR_CFG_0_EXT_CCK5_CH2 FIELD32(0x00000f00) +#define TX_PWR_CFG_0_EXT_OFDM6_CH2 FIELD32(0x000f0000) +#define TX_PWR_CFG_0_EXT_OFDM12_CH2 FIELD32(0x0f000000) + +/* TX_PWR_CFG_1_EXT */ +#define TX_PWR_CFG_1_EXT 0x1394 +#define TX_PWR_CFG_1_EXT_OFDM24_CH2 FIELD32(0x0000000f) +#define TX_PWR_CFG_1_EXT_OFDM48_CH2 FIELD32(0x00000f00) +#define TX_PWR_CFG_1_EXT_MCS0_CH2 FIELD32(0x000f0000) +#define TX_PWR_CFG_1_EXT_MCS2_CH2 FIELD32(0x0f000000) + +/* TX_PWR_CFG_2_EXT */ +#define TX_PWR_CFG_2_EXT 0x1398 +#define TX_PWR_CFG_2_EXT_MCS4_CH2 FIELD32(0x0000000f) +#define TX_PWR_CFG_2_EXT_MCS6_CH2 FIELD32(0x00000f00) +#define TX_PWR_CFG_2_EXT_MCS8_CH2 FIELD32(0x000f0000) +#define TX_PWR_CFG_2_EXT_MCS10_CH2 FIELD32(0x0f000000) + +/* TX_PWR_CFG_3_EXT */ +#define TX_PWR_CFG_3_EXT 0x139c +#define TX_PWR_CFG_3_EXT_MCS12_CH2 FIELD32(0x0000000f) +#define TX_PWR_CFG_3_EXT_MCS14_CH2 FIELD32(0x00000f00) +#define TX_PWR_CFG_3_EXT_STBC0_CH2 FIELD32(0x000f0000) +#define TX_PWR_CFG_3_EXT_STBC2_CH2 FIELD32(0x0f000000) + +/* TX_PWR_CFG_4_EXT */ +#define TX_PWR_CFG_4_EXT 0x13a0 +#define TX_PWR_CFG_4_EXT_STBC4_CH2 FIELD32(0x0000000f) +#define TX_PWR_CFG_4_EXT_STBC6_CH2 FIELD32(0x00000f00) + +/* TX_PWR_CFG_7 */ +#define TX_PWR_CFG_7 0x13d4 +#define TX_PWR_CFG_7_OFDM54_CH0 FIELD32(0x0000000f) +#define TX_PWR_CFG_7_OFDM54_CH1 FIELD32(0x000000f0) +#define TX_PWR_CFG_7_OFDM54_CH2 FIELD32(0x00000f00) +#define TX_PWR_CFG_7_MCS7_CH0 FIELD32(0x000f0000) +#define TX_PWR_CFG_7_MCS7_CH1 FIELD32(0x00f00000) +#define TX_PWR_CFG_7_MCS7_CH2 FIELD32(0x0f000000) + +/* TX_PWR_CFG_8 */ +#define TX_PWR_CFG_8 0x13d8 +#define TX_PWR_CFG_8_MCS15_CH0 FIELD32(0x0000000f) +#define TX_PWR_CFG_8_MCS15_CH1 FIELD32(0x000000f0) +#define TX_PWR_CFG_8_MCS15_CH2 FIELD32(0x00000f00) +#define TX_PWR_CFG_8_MCS23_CH0 FIELD32(0x000f0000) +#define TX_PWR_CFG_8_MCS23_CH1 FIELD32(0x00f00000) +#define TX_PWR_CFG_8_MCS23_CH2 FIELD32(0x0f000000) + +/* TX_PWR_CFG_9 */ +#define TX_PWR_CFG_9 0x13dc +#define TX_PWR_CFG_9_STBC7_CH0 FIELD32(0x0000000f) +#define TX_PWR_CFG_9_STBC7_CH1 FIELD32(0x000000f0) +#define TX_PWR_CFG_9_STBC7_CH2 FIELD32(0x00000f00) + /* * RX_FILTER_CFG: RX configuration register. */ diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 7b5c593..e817278 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -3120,6 +3120,9 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b, u8 eirp_txpower_criterion; u8 reg_limit; + if (rt2x00_rt(rt2x00dev, RT3593)) + return min_t(u8, txpower, 0xc); + if (test_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags)) { /* * Check if eirp txpower exceed txpower_limit. 
@@ -3155,6 +3158,412 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b, return min_t(u8, txpower, 0xc); } + +enum { + TX_PWR_CFG_0_IDX, + TX_PWR_CFG_1_IDX, + TX_PWR_CFG_2_IDX, + TX_PWR_CFG_3_IDX, + TX_PWR_CFG_4_IDX, + TX_PWR_CFG_5_IDX, + TX_PWR_CFG_6_IDX, + TX_PWR_CFG_7_IDX, + TX_PWR_CFG_8_IDX, + TX_PWR_CFG_9_IDX, + TX_PWR_CFG_0_EXT_IDX, + TX_PWR_CFG_1_EXT_IDX, + TX_PWR_CFG_2_EXT_IDX, + TX_PWR_CFG_3_EXT_IDX, + TX_PWR_CFG_4_EXT_IDX, + TX_PWR_CFG_IDX_COUNT, +}; + +static void rt2800_config_txpower_rt3593(struct rt2x00_dev *rt2x00dev, + struct ieee80211_channel *chan, + int power_level) +{ + u8 txpower; + u16 eeprom; + u32 regs[TX_PWR_CFG_IDX_COUNT]; + unsigned int offset; + enum ieee80211_band band = chan->band; + int delta; + int i; + + memset(regs, '\0', sizeof(regs)); + + /* TODO: adapt TX power reduction from the rt28xx code */ + + /* calculate temperature compensation delta */ + delta = rt2800_get_gain_calibration_delta(rt2x00dev); + + if (band == IEEE80211_BAND_5GHZ) + offset = 16; + else + offset = 0; + + if (test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags)) + offset += 8; + + /* read the next four txpower values */ + rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE, + offset, &eeprom); + + /* CCK 1MBS,2MBS */ + txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0); + txpower = rt2800_compensate_txpower(rt2x00dev, 1, band, power_level, + txpower, delta); + rt2x00_set_field32(®s[TX_PWR_CFG_0_IDX], + TX_PWR_CFG_0_CCK1_CH0, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_0_IDX], + TX_PWR_CFG_0_CCK1_CH1, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_0_EXT_IDX], + TX_PWR_CFG_0_EXT_CCK1_CH2, txpower); + + /* CCK 5.5MBS,11MBS */ + txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1); + txpower = rt2800_compensate_txpower(rt2x00dev, 1, band, power_level, + txpower, delta); + rt2x00_set_field32(®s[TX_PWR_CFG_0_IDX], + TX_PWR_CFG_0_CCK5_CH0, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_0_IDX], + TX_PWR_CFG_0_CCK5_CH1, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_0_EXT_IDX], + TX_PWR_CFG_0_EXT_CCK5_CH2, txpower); + + /* OFDM 6MBS,9MBS */ + txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2); + txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, + txpower, delta); + rt2x00_set_field32(®s[TX_PWR_CFG_0_IDX], + TX_PWR_CFG_0_OFDM6_CH0, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_0_IDX], + TX_PWR_CFG_0_OFDM6_CH1, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_0_EXT_IDX], + TX_PWR_CFG_0_EXT_OFDM6_CH2, txpower); + + /* OFDM 12MBS,18MBS */ + txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3); + txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, + txpower, delta); + rt2x00_set_field32(®s[TX_PWR_CFG_0_IDX], + TX_PWR_CFG_0_OFDM12_CH0, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_0_IDX], + TX_PWR_CFG_0_OFDM12_CH1, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_0_EXT_IDX], + TX_PWR_CFG_0_EXT_OFDM12_CH2, txpower); + + /* read the next four txpower values */ + rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE, + offset + 1, &eeprom); + + /* OFDM 24MBS,36MBS */ + txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0); + txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, + txpower, delta); + rt2x00_set_field32(®s[TX_PWR_CFG_1_IDX], + TX_PWR_CFG_1_OFDM24_CH0, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_1_IDX], + TX_PWR_CFG_1_OFDM24_CH1, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_1_EXT_IDX], + TX_PWR_CFG_1_EXT_OFDM24_CH2, txpower); + + /* OFDM 
48MBS */ + txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1); + txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, + txpower, delta); + rt2x00_set_field32(®s[TX_PWR_CFG_1_IDX], + TX_PWR_CFG_1_OFDM48_CH0, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_1_IDX], + TX_PWR_CFG_1_OFDM48_CH1, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_1_EXT_IDX], + TX_PWR_CFG_1_EXT_OFDM48_CH2, txpower); + + /* OFDM 54MBS */ + txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2); + txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, + txpower, delta); + rt2x00_set_field32(®s[TX_PWR_CFG_7_IDX], + TX_PWR_CFG_7_OFDM54_CH0, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_7_IDX], + TX_PWR_CFG_7_OFDM54_CH1, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_7_IDX], + TX_PWR_CFG_7_OFDM54_CH2, txpower); + + /* read the next four txpower values */ + rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE, + offset + 2, &eeprom); + + /* MCS 0,1 */ + txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0); + txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, + txpower, delta); + rt2x00_set_field32(®s[TX_PWR_CFG_1_IDX], + TX_PWR_CFG_1_MCS0_CH0, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_1_IDX], + TX_PWR_CFG_1_MCS0_CH1, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_1_EXT_IDX], + TX_PWR_CFG_1_EXT_MCS0_CH2, txpower); + + /* MCS 2,3 */ + txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1); + txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, + txpower, delta); + rt2x00_set_field32(®s[TX_PWR_CFG_1_IDX], + TX_PWR_CFG_1_MCS2_CH0, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_1_IDX], + TX_PWR_CFG_1_MCS2_CH1, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_1_EXT_IDX], + TX_PWR_CFG_1_EXT_MCS2_CH2, txpower); + + /* MCS 4,5 */ + txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2); + txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, + txpower, delta); + rt2x00_set_field32(®s[TX_PWR_CFG_2_IDX], + TX_PWR_CFG_2_MCS4_CH0, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_2_IDX], + TX_PWR_CFG_2_MCS4_CH1, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_2_EXT_IDX], + TX_PWR_CFG_2_EXT_MCS4_CH2, txpower); + + /* MCS 6 */ + txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3); + txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, + txpower, delta); + rt2x00_set_field32(®s[TX_PWR_CFG_2_IDX], + TX_PWR_CFG_2_MCS6_CH0, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_2_IDX], + TX_PWR_CFG_2_MCS6_CH1, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_2_EXT_IDX], + TX_PWR_CFG_2_EXT_MCS6_CH2, txpower); + + /* read the next four txpower values */ + rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE, + offset + 3, &eeprom); + + /* MCS 7 */ + txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0); + txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, + txpower, delta); + rt2x00_set_field32(®s[TX_PWR_CFG_7_IDX], + TX_PWR_CFG_7_MCS7_CH0, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_7_IDX], + TX_PWR_CFG_7_MCS7_CH1, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_7_IDX], + TX_PWR_CFG_7_MCS7_CH2, txpower); + + /* MCS 8,9 */ + txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1); + txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, + txpower, delta); + rt2x00_set_field32(®s[TX_PWR_CFG_2_IDX], + TX_PWR_CFG_2_MCS8_CH0, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_2_IDX], + TX_PWR_CFG_2_MCS8_CH1, txpower); + 
rt2x00_set_field32(®s[TX_PWR_CFG_2_EXT_IDX], + TX_PWR_CFG_2_EXT_MCS8_CH2, txpower); + + /* MCS 10,11 */ + txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2); + txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, + txpower, delta); + rt2x00_set_field32(®s[TX_PWR_CFG_2_IDX], + TX_PWR_CFG_2_MCS10_CH0, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_2_IDX], + TX_PWR_CFG_2_MCS10_CH1, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_2_EXT_IDX], + TX_PWR_CFG_2_EXT_MCS10_CH2, txpower); + + /* MCS 12,13 */ + txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3); + txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, + txpower, delta); + rt2x00_set_field32(®s[TX_PWR_CFG_3_IDX], + TX_PWR_CFG_3_MCS12_CH0, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_3_IDX], + TX_PWR_CFG_3_MCS12_CH1, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_3_EXT_IDX], + TX_PWR_CFG_3_EXT_MCS12_CH2, txpower); + + /* read the next four txpower values */ + rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE, + offset + 4, &eeprom); + + /* MCS 14 */ + txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0); + txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, + txpower, delta); + rt2x00_set_field32(®s[TX_PWR_CFG_3_IDX], + TX_PWR_CFG_3_MCS14_CH0, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_3_IDX], + TX_PWR_CFG_3_MCS14_CH1, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_3_EXT_IDX], + TX_PWR_CFG_3_EXT_MCS14_CH2, txpower); + + /* MCS 15 */ + txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1); + txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, + txpower, delta); + rt2x00_set_field32(®s[TX_PWR_CFG_8_IDX], + TX_PWR_CFG_8_MCS15_CH0, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_8_IDX], + TX_PWR_CFG_8_MCS15_CH1, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_8_IDX], + TX_PWR_CFG_8_MCS15_CH2, txpower); + + /* MCS 16,17 */ + txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2); + txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, + txpower, delta); + rt2x00_set_field32(®s[TX_PWR_CFG_5_IDX], + TX_PWR_CFG_5_MCS16_CH0, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_5_IDX], + TX_PWR_CFG_5_MCS16_CH1, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_5_IDX], + TX_PWR_CFG_5_MCS16_CH2, txpower); + + /* MCS 18,19 */ + txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3); + txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, + txpower, delta); + rt2x00_set_field32(®s[TX_PWR_CFG_5_IDX], + TX_PWR_CFG_5_MCS18_CH0, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_5_IDX], + TX_PWR_CFG_5_MCS18_CH1, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_5_IDX], + TX_PWR_CFG_5_MCS18_CH2, txpower); + + /* read the next four txpower values */ + rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE, + offset + 5, &eeprom); + + /* MCS 20,21 */ + txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0); + txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, + txpower, delta); + rt2x00_set_field32(®s[TX_PWR_CFG_6_IDX], + TX_PWR_CFG_6_MCS20_CH0, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_6_IDX], + TX_PWR_CFG_6_MCS20_CH1, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_6_IDX], + TX_PWR_CFG_6_MCS20_CH2, txpower); + + /* MCS 22 */ + txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1); + txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, + txpower, delta); + rt2x00_set_field32(®s[TX_PWR_CFG_6_IDX], + TX_PWR_CFG_6_MCS22_CH0, txpower); + 
rt2x00_set_field32(®s[TX_PWR_CFG_6_IDX], + TX_PWR_CFG_6_MCS22_CH1, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_6_IDX], + TX_PWR_CFG_6_MCS22_CH2, txpower); + + /* MCS 23 */ + txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2); + txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, + txpower, delta); + rt2x00_set_field32(®s[TX_PWR_CFG_8_IDX], + TX_PWR_CFG_8_MCS23_CH0, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_8_IDX], + TX_PWR_CFG_8_MCS23_CH1, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_8_IDX], + TX_PWR_CFG_8_MCS23_CH2, txpower); + + /* read the next four txpower values */ + rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE, + offset + 6, &eeprom); + + /* STBC, MCS 0,1 */ + txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0); + txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, + txpower, delta); + rt2x00_set_field32(®s[TX_PWR_CFG_3_IDX], + TX_PWR_CFG_3_STBC0_CH0, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_3_IDX], + TX_PWR_CFG_3_STBC0_CH1, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_3_EXT_IDX], + TX_PWR_CFG_3_EXT_STBC0_CH2, txpower); + + /* STBC, MCS 2,3 */ + txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1); + txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, + txpower, delta); + rt2x00_set_field32(®s[TX_PWR_CFG_3_IDX], + TX_PWR_CFG_3_STBC2_CH0, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_3_IDX], + TX_PWR_CFG_3_STBC2_CH1, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_3_EXT_IDX], + TX_PWR_CFG_3_EXT_STBC2_CH2, txpower); + + /* STBC, MCS 4,5 */ + txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2); + txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, + txpower, delta); + rt2x00_set_field32(®s[TX_PWR_CFG_4_IDX], TX_PWR_CFG_RATE0, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_4_IDX], TX_PWR_CFG_RATE1, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_4_EXT_IDX], TX_PWR_CFG_RATE0, + txpower); + + /* STBC, MCS 6 */ + txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3); + txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, + txpower, delta); + rt2x00_set_field32(®s[TX_PWR_CFG_4_IDX], TX_PWR_CFG_RATE2, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_4_IDX], TX_PWR_CFG_RATE3, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_4_EXT_IDX], TX_PWR_CFG_RATE2, + txpower); + + /* read the next four txpower values */ + rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE, + offset + 7, &eeprom); + + /* STBC, MCS 7 */ + txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0); + txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, + txpower, delta); + rt2x00_set_field32(®s[TX_PWR_CFG_9_IDX], + TX_PWR_CFG_9_STBC7_CH0, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_9_IDX], + TX_PWR_CFG_9_STBC7_CH1, txpower); + rt2x00_set_field32(®s[TX_PWR_CFG_9_IDX], + TX_PWR_CFG_9_STBC7_CH2, txpower); + + rt2800_register_write(rt2x00dev, TX_PWR_CFG_0, regs[TX_PWR_CFG_0_IDX]); + rt2800_register_write(rt2x00dev, TX_PWR_CFG_1, regs[TX_PWR_CFG_1_IDX]); + rt2800_register_write(rt2x00dev, TX_PWR_CFG_2, regs[TX_PWR_CFG_2_IDX]); + rt2800_register_write(rt2x00dev, TX_PWR_CFG_3, regs[TX_PWR_CFG_3_IDX]); + rt2800_register_write(rt2x00dev, TX_PWR_CFG_4, regs[TX_PWR_CFG_4_IDX]); + rt2800_register_write(rt2x00dev, TX_PWR_CFG_5, regs[TX_PWR_CFG_5_IDX]); + rt2800_register_write(rt2x00dev, TX_PWR_CFG_6, regs[TX_PWR_CFG_6_IDX]); + rt2800_register_write(rt2x00dev, TX_PWR_CFG_7, regs[TX_PWR_CFG_7_IDX]); + rt2800_register_write(rt2x00dev, 
TX_PWR_CFG_8, regs[TX_PWR_CFG_8_IDX]); + rt2800_register_write(rt2x00dev, TX_PWR_CFG_9, regs[TX_PWR_CFG_9_IDX]); + + rt2800_register_write(rt2x00dev, TX_PWR_CFG_0_EXT, + regs[TX_PWR_CFG_0_EXT_IDX]); + rt2800_register_write(rt2x00dev, TX_PWR_CFG_1_EXT, + regs[TX_PWR_CFG_1_EXT_IDX]); + rt2800_register_write(rt2x00dev, TX_PWR_CFG_2_EXT, + regs[TX_PWR_CFG_2_EXT_IDX]); + rt2800_register_write(rt2x00dev, TX_PWR_CFG_3_EXT, + regs[TX_PWR_CFG_3_EXT_IDX]); + rt2800_register_write(rt2x00dev, TX_PWR_CFG_4_EXT, + regs[TX_PWR_CFG_4_EXT_IDX]); + + for (i = 0; i < TX_PWR_CFG_IDX_COUNT; i++) + rt2x00_dbg(rt2x00dev, + "band:%cGHz, BW:%c0MHz, TX_PWR_CFG_%d%s = %08lx\n", + (band == IEEE80211_BAND_5GHZ) ? '5' : '2', + (test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags)) ? + '4' : '2', + (i > TX_PWR_CFG_9_IDX) ? + (i - TX_PWR_CFG_9_IDX - 1) : i, + (i > TX_PWR_CFG_9_IDX) ? "_EXT" : "", + (unsigned long) regs[i]); +} + /* * We configure transmit power using MAC TX_PWR_CFG_{0,...,N} registers and * BBP R1 register. TX_PWR_CFG_X allow to configure per rate TX power values, @@ -3164,9 +3573,9 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b, * EEPROM_TXPOWER_BYRATE offset. We adjust them and BBP R1 settings according to * current conditions (i.e. band, bandwidth, temperature, user settings). */ -static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev, - struct ieee80211_channel *chan, - int power_level) +static void rt2800_config_txpower_rt28xx(struct rt2x00_dev *rt2x00dev, + struct ieee80211_channel *chan, + int power_level) { u8 txpower, r1; u16 eeprom; @@ -3338,6 +3747,16 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev, } } +static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev, + struct ieee80211_channel *chan, + int power_level) +{ + if (rt2x00_rt(rt2x00dev, RT3593)) + rt2800_config_txpower_rt3593(rt2x00dev, chan, power_level); + else + rt2800_config_txpower_rt28xx(rt2x00dev, chan, power_level); +} + void rt2800_gain_calibration(struct rt2x00_dev *rt2x00dev) { rt2800_config_txpower(rt2x00dev, rt2x00dev->hw->conf.chandef.chan, -- cgit v0.10.2 From 4788ac1e4842d8ef46ee620cfccf96c426043177 Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Mon, 8 Jul 2013 16:08:21 +0200 Subject: rt2x00: rt2800lib: fix BBP1_TX_ANTENNA field configuration for 3T devices The field must be set to 2 instead of 0 for devices with three TX chains. Signed-off-by: Gabor Juhos Acked-by: Stanislaw Gruszka Acked-by: Gertjan van Wingerde Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index e817278..1a6a4eb 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -1763,7 +1763,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant) rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2); break; case 3: - rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0); + rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2); break; } -- cgit v0.10.2 From 5cddb3c2d5102d9a6b1b809e1518da54c8be8296 Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Mon, 8 Jul 2013 16:08:22 +0200 Subject: rt2x00: rt2800lib: fix antenna configuration for RT3593 On the RT3593 chipset, BBP register 86 must be configured by different values based on the RX antenna numbers. Configure this register from the 'rt2800_config_ant' function. Based on the Ralink DPO_RT5572_LinuxSTA_2.6.0.1_20120629 driver. 
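[Editorial note] The rt2800_config_txpower_rt3593() hunk above builds every TX_PWR_CFG_* value by packing 4-bit per-rate, per-chain power levels into 32-bit registers through the FIELD32()/rt2x00_set_field32() helpers. The stand-alone C sketch below only models that mask-and-shift idiom; the names field32 and set_field32 and the example masks are hypothetical and this is not the kernel implementation.

#include <stdint.h>
#include <stdio.h>

struct field32 {
	uint32_t mask;		/* e.g. 0x0000000f, like TX_PWR_CFG_0_CCK1_CH0 */
	unsigned int shift;	/* position of the mask's lowest set bit */
};

static void set_field32(uint32_t *reg, struct field32 f, uint32_t value)
{
	*reg &= ~f.mask;			/* clear the old nibble */
	*reg |= (value << f.shift) & f.mask;	/* insert the new one */
}

int main(void)
{
	/* model of TX_PWR_CFG_0: CCK 1/2 Mbps power for chains 0 and 1 */
	struct field32 cck1_ch0 = { 0x0000000f, 0 };
	struct field32 cck1_ch1 = { 0x000000f0, 4 };
	uint32_t reg = 0;

	set_field32(&reg, cck1_ch0, 0x8);	/* 4-bit power code, chain 0 */
	set_field32(&reg, cck1_ch1, 0x8);	/* same code, chain 1 */
	printf("TX_PWR_CFG_0 = %08x\n", (unsigned int)reg); /* prints 00000088 */
	return 0;
}

Because the eight nibbles of TX_PWR_CFG_0..4 are already taken by the CH0/CH1 pairs, the patch adds the separate TX_PWR_CFG_*_EXT registers to hold the third chain's values.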
Reference: RT3593_CONFIG_SET_BY_ANTENNA in include/chip/rt3593.h Signed-off-by: Gabor Juhos Acked-by: Stanislaw Gruszka Acked-by: Gertjan van Wingerde Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 1a6a4eb..958215c 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -1803,6 +1803,13 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant) rt2800_bbp_write(rt2x00dev, 3, r3); rt2800_bbp_write(rt2x00dev, 1, r1); + + if (rt2x00_rt(rt2x00dev, RT3593)) { + if (ant->rx_chain_num == 1) + rt2800_bbp_write(rt2x00dev, 86, 0x00); + else + rt2800_bbp_write(rt2x00dev, 86, 0x46); + } } EXPORT_SYMBOL_GPL(rt2800_config_ant); -- cgit v0.10.2 From 97aa03f15e83174df74aa468eea127c5cee480f0 Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Mon, 8 Jul 2013 16:08:23 +0200 Subject: rt2x00: rt2800lib: add rt2800_txpower_to_dev helper Introduce a new helper function for converting the default TX power values from EEPROM into mac80211 values. The change improves the readability and it makes it easier to add support for other chipsets. Signed-off-by: Gabor Juhos Acked-by: Stanislaw Gruszka Acked-by: Gertjan van Wingerde Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h index 7b7caeb..d3e8f6d 100644 --- a/drivers/net/wireless/rt2x00/rt2800.h +++ b/drivers/net/wireless/rt2x00/rt2800.h @@ -2887,15 +2887,9 @@ enum rt2800_eeprom_word { #define TXPOWER_G_FROM_DEV(__txpower) \ ((__txpower) > MAX_G_TXPOWER) ? DEFAULT_TXPOWER : (__txpower) -#define TXPOWER_G_TO_DEV(__txpower) \ - clamp_t(char, __txpower, MIN_G_TXPOWER, MAX_G_TXPOWER) - #define TXPOWER_A_FROM_DEV(__txpower) \ ((__txpower) > MAX_A_TXPOWER) ? DEFAULT_TXPOWER : (__txpower) -#define TXPOWER_A_TO_DEV(__txpower) \ - clamp_t(char, __txpower, MIN_A_TXPOWER, MAX_A_TXPOWER) - /* * Board's maximun TX power limitation */ diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 958215c..1738a9d 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -2724,6 +2724,16 @@ static void rt2800_iq_calibrate(struct rt2x00_dev *rt2x00dev, int channel) rt2800_bbp_write(rt2x00dev, 159, cal != 0xff ? 
cal : 0); } +static char rt2800_txpower_to_dev(struct rt2x00_dev *rt2x00dev, + unsigned int channel, + char txpower) +{ + if (channel <= 14) + return clamp_t(char, txpower, MIN_G_TXPOWER, MAX_G_TXPOWER); + else + return clamp_t(char, txpower, MIN_A_TXPOWER, MAX_A_TXPOWER); +} + static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, struct ieee80211_conf *conf, struct rf_channel *rf, @@ -2733,13 +2743,10 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, unsigned int tx_pin; u8 bbp, rfcsr; - if (rf->channel <= 14) { - info->default_power1 = TXPOWER_G_TO_DEV(info->default_power1); - info->default_power2 = TXPOWER_G_TO_DEV(info->default_power2); - } else { - info->default_power1 = TXPOWER_A_TO_DEV(info->default_power1); - info->default_power2 = TXPOWER_A_TO_DEV(info->default_power2); - } + info->default_power1 = rt2800_txpower_to_dev(rt2x00dev, rf->channel, + info->default_power1); + info->default_power2 = rt2800_txpower_to_dev(rt2x00dev, rf->channel, + info->default_power2); switch (rt2x00dev->chip.rf) { case RF2020: -- cgit v0.10.2 From fc739cfe0f305647677edbf99a76d9ece96e3795 Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Mon, 8 Jul 2013 16:08:24 +0200 Subject: rt2x00: rt2800lib: fix default TX power values for RT3593 The TX power values in the EEPROM are using a different format for the RT3593 chip. The default TX power value uses bits 0..4 only. Bits 5..8 contains value for fine grained power control. Additionally, the lower and upper limits of the TX power values are the same for both bands. Improve the rt2800_txpower_to_dev function, in order to compute the correct default power values for the RT3593 chip as well. Signed-off-by: Gabor Juhos Acked-by: Stanislaw Gruszka Acked-by: Gertjan van Wingerde Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h index d3e8f6d..69749ba 100644 --- a/drivers/net/wireless/rt2x00/rt2800.h +++ b/drivers/net/wireless/rt2x00/rt2800.h @@ -2603,6 +2603,10 @@ enum rt2800_eeprom_word { #define EEPROM_TXPOWER_A_1 FIELD16(0x00ff) #define EEPROM_TXPOWER_A_2 FIELD16(0xff00) +/* EEPROM_TXPOWER_{A,G} fields for RT3593 */ +#define EEPROM_TXPOWER_ALC FIELD8(0x1f) +#define EEPROM_TXPOWER_FINE_CTRL FIELD8(0xe0) + /* * EEPROM temperature compensation boundaries 802.11A * MINUS4: If the actual TSSI is below this boundary, tx power needs to be @@ -2884,6 +2888,9 @@ enum rt2800_eeprom_word { #define MAX_A_TXPOWER 15 #define DEFAULT_TXPOWER 5 +#define MIN_A_TXPOWER_3593 0 +#define MAX_A_TXPOWER_3593 31 + #define TXPOWER_G_FROM_DEV(__txpower) \ ((__txpower) > MAX_G_TXPOWER) ? 
DEFAULT_TXPOWER : (__txpower) diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 1738a9d..b11f0b0 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -2728,8 +2728,15 @@ static char rt2800_txpower_to_dev(struct rt2x00_dev *rt2x00dev, unsigned int channel, char txpower) { + if (rt2x00_rt(rt2x00dev, RT3593)) + txpower = rt2x00_get_field8(txpower, EEPROM_TXPOWER_ALC); + if (channel <= 14) return clamp_t(char, txpower, MIN_G_TXPOWER, MAX_G_TXPOWER); + + if (rt2x00_rt(rt2x00dev, RT3593)) + return clamp_t(char, txpower, MIN_A_TXPOWER_3593, + MAX_A_TXPOWER_3593); else return clamp_t(char, txpower, MIN_A_TXPOWER, MAX_A_TXPOWER); } -- cgit v0.10.2 From a3f1625dae58f06c5df1ec0094b275e9a46fd8b3 Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Mon, 8 Jul 2013 16:08:25 +0200 Subject: rt2x00: rt2800lib: introduce rt2800_get_txmixer_gain_{24,5}g helpers Move the TX mixer gain reading code into separate helper functions in preparation for RT3593 support. Signed-off-by: Gabor Juhos Acked-by: Stanislaw Gruszka Acked-by: Gertjan van Wingerde Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index b11f0b0..1f32732 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -6361,6 +6361,28 @@ int rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev) } EXPORT_SYMBOL_GPL(rt2800_read_eeprom_efuse); +static u8 rt2800_get_txmixer_gain_24g(struct rt2x00_dev *rt2x00dev) +{ + u16 word; + + rt2800_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &word); + if ((word & 0x00ff) != 0x00ff) + return rt2x00_get_field16(word, EEPROM_TXMIXER_GAIN_BG_VAL); + + return 0; +} + +static u8 rt2800_get_txmixer_gain_5g(struct rt2x00_dev *rt2x00dev) +{ + u16 word; + + rt2800_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_A, &word); + if ((word & 0x00ff) != 0x00ff) + return rt2x00_get_field16(word, EEPROM_TXMIXER_GAIN_A_VAL); + + return 0; +} + static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev) { struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; @@ -6455,13 +6477,7 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev) rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET1, 0); rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_BG, word); - rt2800_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &word); - if ((word & 0x00ff) != 0x00ff) { - drv_data->txmixer_gain_24g = - rt2x00_get_field16(word, EEPROM_TXMIXER_GAIN_BG_VAL); - } else { - drv_data->txmixer_gain_24g = 0; - } + drv_data->txmixer_gain_24g = rt2800_get_txmixer_gain_24g(rt2x00dev); rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &word); if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG2_OFFSET2)) > 10) @@ -6472,13 +6488,7 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev) default_lna_gain); rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_BG2, word); - rt2800_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_A, &word); - if ((word & 0x00ff) != 0x00ff) { - drv_data->txmixer_gain_5g = - rt2x00_get_field16(word, EEPROM_TXMIXER_GAIN_A_VAL); - } else { - drv_data->txmixer_gain_5g = 0; - } + drv_data->txmixer_gain_5g = rt2800_get_txmixer_gain_5g(rt2x00dev); rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &word); if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET0)) > 10) -- cgit v0.10.2 From 6316c786cc8aff762530ea740233bf2da10fea33 Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Mon, 8 Jul 2013 16:08:26 +0200 Subject: rt2x00: rt2800lib: hardcode TX mixer 
gain values for RT3593 The reference code uses hardcoded zero TX mixer gain value for RT3593. Do the same in the rt2x00 driver. Based on the Ralink DPO_RT5572_LinuxSTA_2.6.0.1_20120629 driver. Reference: NICReadEEPROMParameters in common/rtmp_init.c Signed-off-by: Gabor Juhos Acked-by: Stanislaw Gruszka Acked-by: Gertjan van Wingerde Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 1f32732..65045e0 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -6365,6 +6365,9 @@ static u8 rt2800_get_txmixer_gain_24g(struct rt2x00_dev *rt2x00dev) { u16 word; + if (rt2x00_rt(rt2x00dev, RT3593)) + return 0; + rt2800_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &word); if ((word & 0x00ff) != 0x00ff) return rt2x00_get_field16(word, EEPROM_TXMIXER_GAIN_BG_VAL); @@ -6376,6 +6379,9 @@ static u8 rt2800_get_txmixer_gain_5g(struct rt2x00_dev *rt2x00dev) { u16 word; + if (rt2x00_rt(rt2x00dev, RT3593)) + return 0; + rt2800_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_A, &word); if ((word & 0x00ff) != 0x00ff) return rt2x00_get_field16(word, EEPROM_TXMIXER_GAIN_A_VAL); -- cgit v0.10.2 From f36bb0ca1be5bcb7148ad32263626f8609dfc0d7 Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Mon, 8 Jul 2013 16:08:27 +0200 Subject: rt2x00: rt2800lib: fix LNA_A[12] gain values for RT3593 The LNA_A[12] gain values are stored at a different offset in the EEPROM on RT3593 based devices. However the current code unconditionally reads those values from the location used by other chipsets. Fix the code to use the correct EEPROM offset. Based on the DPO_RT5572_LinuxSTA_2.6.0.1_20120629 driver. References: RT3593_EEPROM_RSSI2_OFFSET_ALNAGAIN1_24G_READ in include/chip/rt3593.h RT3593_EEPROM_RSSI2_OFFSET_ALNAGAIN2_5G_READ in include/chip/rt3593.h Signed-off-by: Gabor Juhos Acked-by: Stanislaw Gruszka Acked-by: Gertjan van Wingerde Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h index 69749ba..7688e15 100644 --- a/drivers/net/wireless/rt2x00/rt2800.h +++ b/drivers/net/wireless/rt2x00/rt2800.h @@ -2672,6 +2672,10 @@ enum rt2800_eeprom_word { #define EEPROM_BBP_VALUE FIELD16(0x00ff) #define EEPROM_BBP_REG_ID FIELD16(0xff00) +/* EEPROM_EXT_LNA2 */ +#define EEPROM_EXT_LNA2_A1 FIELD16(0x00ff) +#define EEPROM_EXT_LNA2_A2 FIELD16(0xff00) + /* * EEPROM IQ Calibration, unlike other entries those are byte addresses. 
*/ diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 65045e0..419ea05 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -1826,11 +1826,25 @@ static void rt2800_config_lna_gain(struct rt2x00_dev *rt2x00dev, rt2800_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom); lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_A0); } else if (libconf->rf.channel <= 128) { - rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom); - lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG2_LNA_A1); + if (rt2x00_rt(rt2x00dev, RT3593)) { + rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2, &eeprom); + lna_gain = rt2x00_get_field16(eeprom, + EEPROM_EXT_LNA2_A1); + } else { + rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom); + lna_gain = rt2x00_get_field16(eeprom, + EEPROM_RSSI_BG2_LNA_A1); + } } else { - rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom); - lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_A2_LNA_A2); + if (rt2x00_rt(rt2x00dev, RT3593)) { + rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2, &eeprom); + lna_gain = rt2x00_get_field16(eeprom, + EEPROM_EXT_LNA2_A2); + } else { + rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom); + lna_gain = rt2x00_get_field16(eeprom, + EEPROM_RSSI_A2_LNA_A2); + } } rt2x00dev->lna_gain = lna_gain; @@ -6488,10 +6502,12 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev) rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &word); if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG2_OFFSET2)) > 10) rt2x00_set_field16(&word, EEPROM_RSSI_BG2_OFFSET2, 0); - if (rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0x00 || - rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0xff) - rt2x00_set_field16(&word, EEPROM_RSSI_BG2_LNA_A1, - default_lna_gain); + if (!rt2x00_rt(rt2x00dev, RT3593)) { + if (rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0x00 || + rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0xff) + rt2x00_set_field16(&word, EEPROM_RSSI_BG2_LNA_A1, + default_lna_gain); + } rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_BG2, word); drv_data->txmixer_gain_5g = rt2800_get_txmixer_gain_5g(rt2x00dev); @@ -6506,12 +6522,27 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev) rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &word); if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A2_OFFSET2)) > 10) rt2x00_set_field16(&word, EEPROM_RSSI_A2_OFFSET2, 0); - if (rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0x00 || - rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0xff) - rt2x00_set_field16(&word, EEPROM_RSSI_A2_LNA_A2, - default_lna_gain); + if (!rt2x00_rt(rt2x00dev, RT3593)) { + if (rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0x00 || + rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0xff) + rt2x00_set_field16(&word, EEPROM_RSSI_A2_LNA_A2, + default_lna_gain); + } rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word); + if (rt2x00_rt(rt2x00dev, RT3593)) { + rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2, &word); + if (rt2x00_get_field16(word, EEPROM_EXT_LNA2_A1) == 0x00 || + rt2x00_get_field16(word, EEPROM_EXT_LNA2_A1) == 0xff) + rt2x00_set_field16(&word, EEPROM_EXT_LNA2_A1, + default_lna_gain); + if (rt2x00_get_field16(word, EEPROM_EXT_LNA2_A2) == 0x00 || + rt2x00_get_field16(word, EEPROM_EXT_LNA2_A2) == 0xff) + rt2x00_set_field16(&word, EEPROM_EXT_LNA2_A1, + default_lna_gain); + rt2800_eeprom_write(rt2x00dev, EEPROM_EXT_LNA2, word); + } + return 0; } -- cgit v0.10.2 From c0a14369ebd3e7940e70e397ecc3dd7eaf81e9ab Mon Sep 17 00:00:00 2001 
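[Editorial note] The rt2800_validate_eeprom() hunks above repeat one sanity-check pattern: an LNA gain byte read from the EEPROM counts as unprogrammed when it reads back as 0x00 or 0xff and is then replaced with a default. A minimal stand-alone model of that check follows; sanitize_lna_gain is a hypothetical name, not a kernel function.

#include <stdint.h>

static uint8_t sanitize_lna_gain(uint8_t eeprom_val, uint8_t default_gain)
{
	/* blank or erased EEPROM cells typically read back as 0x00 or 0xff */
	if (eeprom_val == 0x00 || eeprom_val == 0xff)
		return default_gain;
	return eeprom_val;
}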
From: Gabor Juhos Date: Mon, 8 Jul 2013 16:08:28 +0200 Subject: rt2x00: rt2800lib: add default_power3 field for three-chain devices The actual code uses two default TX power values. This is enough for 1T and for 2T devices however on 3T devices another value is needed for the third chain. Add a new field to struct channel_info and initialize it from the 'rt2800_probe_hw_mode' function. Also modify the 'rt2800_config_channel' to handle the new field as well. Signed-off-by: Gabor Juhos Acked-by: Stanislaw Gruszka Acked-by: Gertjan van Wingerde Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 419ea05..e3e652d 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -2768,6 +2768,10 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, info->default_power1); info->default_power2 = rt2800_txpower_to_dev(rt2x00dev, rf->channel, info->default_power2); + if (rt2x00dev->default_ant.tx_chain_num > 2) + info->default_power3 = + rt2800_txpower_to_dev(rt2x00dev, rf->channel, + info->default_power3); switch (rt2x00dev->chip.rf) { case RF2020: @@ -6963,6 +6967,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) struct channel_info *info; char *default_power1; char *default_power2; + char *default_power3; unsigned int i; u16 eeprom; u32 reg; @@ -7115,9 +7120,17 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) default_power1 = rt2800_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1); default_power2 = rt2800_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2); + if (rt2x00dev->default_ant.tx_chain_num > 2) + default_power3 = rt2800_eeprom_addr(rt2x00dev, + EEPROM_EXT_TXPOWER_BG3); + else + default_power3 = NULL; + for (i = 0; i < 14; i++) { info[i].default_power1 = default_power1[i]; info[i].default_power2 = default_power2[i]; + if (default_power3) + info[i].default_power3 = default_power3[i]; } if (spec->num_channels > 14) { @@ -7126,9 +7139,18 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) default_power2 = rt2800_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2); + if (rt2x00dev->default_ant.tx_chain_num > 2) + default_power3 = + rt2800_eeprom_addr(rt2x00dev, + EEPROM_EXT_TXPOWER_A3); + else + default_power3 = NULL; + for (i = 14; i < spec->num_channels; i++) { info[i].default_power1 = default_power1[i - 14]; info[i].default_power2 = default_power2[i - 14]; + if (default_power3) + info[i].default_power3 = default_power3[i - 14]; } } diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h index ee3fc57..fe4c572 100644 --- a/drivers/net/wireless/rt2x00/rt2x00.h +++ b/drivers/net/wireless/rt2x00/rt2x00.h @@ -211,6 +211,7 @@ struct channel_info { short max_power; short default_power1; short default_power2; + short default_power3; }; /* -- cgit v0.10.2 From c8b9d3dc83cab569de6054a10e355a143e2b52a0 Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Mon, 8 Jul 2013 16:08:29 +0200 Subject: rt2x00: rt2800lib: add rf_vals for RF3053 Based on the Ralink DPO_RT5572_LinuxSTA_2.6.0.1_20120629 driver. References: FreqItems3053 in chips/rt3593.c Signed-off-by: Gabor Juhos Acked-by: Stanislaw Gruszka Acked-by: Gertjan van Wingerde Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index e3e652d..5119bf0 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -6961,6 +6961,72 @@ static const struct rf_channel rf_vals_5592_xtal40[] = { {196, 83, 0, 12, 1}, }; +static const struct rf_channel rf_vals_3053[] = { + /* Channel, N, R, K */ + {1, 241, 2, 2}, + {2, 241, 2, 7}, + {3, 242, 2, 2}, + {4, 242, 2, 7}, + {5, 243, 2, 2}, + {6, 243, 2, 7}, + {7, 244, 2, 2}, + {8, 244, 2, 7}, + {9, 245, 2, 2}, + {10, 245, 2, 7}, + {11, 246, 2, 2}, + {12, 246, 2, 7}, + {13, 247, 2, 2}, + {14, 248, 2, 4}, + + {36, 0x56, 0, 4}, + {38, 0x56, 0, 6}, + {40, 0x56, 0, 8}, + {44, 0x57, 0, 0}, + {46, 0x57, 0, 2}, + {48, 0x57, 0, 4}, + {52, 0x57, 0, 8}, + {54, 0x57, 0, 10}, + {56, 0x58, 0, 0}, + {60, 0x58, 0, 4}, + {62, 0x58, 0, 6}, + {64, 0x58, 0, 8}, + + {100, 0x5B, 0, 8}, + {102, 0x5B, 0, 10}, + {104, 0x5C, 0, 0}, + {108, 0x5C, 0, 4}, + {110, 0x5C, 0, 6}, + {112, 0x5C, 0, 8}, + + /* NOTE: Channel 114 has been removed intentionally. + * The EEPROM contains no TX power values for that, + * and it is disabled in the vendor driver as well. + */ + + {116, 0x5D, 0, 0}, + {118, 0x5D, 0, 2}, + {120, 0x5D, 0, 4}, + {124, 0x5D, 0, 8}, + {126, 0x5D, 0, 10}, + {128, 0x5E, 0, 0}, + {132, 0x5E, 0, 4}, + {134, 0x5E, 0, 6}, + {136, 0x5E, 0, 8}, + {140, 0x5F, 0, 0}, + + {149, 0x5F, 0, 9}, + {151, 0x5F, 0, 11}, + {153, 0x60, 0, 1}, + {157, 0x60, 0, 5}, + {159, 0x60, 0, 7}, + {161, 0x60, 0, 9}, + {165, 0x61, 0, 1}, + {167, 0x61, 0, 3}, + {169, 0x61, 0, 5}, + {171, 0x61, 0, 7}, + {173, 0x61, 0, 9}, +}; + static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) { struct hw_mode_spec *spec = &rt2x00dev->spec; @@ -7052,6 +7118,10 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) spec->supported_bands |= SUPPORT_BAND_5GHZ; spec->num_channels = ARRAY_SIZE(rf_vals_3x); spec->channels = rf_vals_3x; + } else if (rt2x00_rf(rt2x00dev, RF3053)) { + spec->supported_bands |= SUPPORT_BAND_5GHZ; + spec->num_channels = ARRAY_SIZE(rf_vals_3053); + spec->channels = rf_vals_3053; } else if (rt2x00_rf(rt2x00dev, RF5592)) { spec->supported_bands |= SUPPORT_BAND_5GHZ; -- cgit v0.10.2 From f42b046578efb018064302fd9b66586f5da7d75b Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Mon, 8 Jul 2013 16:08:30 +0200 Subject: rt2x00: rt2800lib: add channel configuration for RF3053 Based on the Ralink DPO_RT5572_LinuxSTA_2.6.0.1_20120629 driver. Reference: RT3593_ChipSwitchChannel in chips/rt3593.c Signed-off-by: Gabor Juhos Acked-by: Stanislaw Gruszka Acked-by: Gertjan van Wingerde Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h index 7688e15..1ba797b 100644 --- a/drivers/net/wireless/rt2x00/rt2800.h +++ b/drivers/net/wireless/rt2x00/rt2800.h @@ -2092,6 +2092,10 @@ struct mac_iveiv_entry { #define BBP109_TX0_POWER FIELD8(0x0f) #define BBP109_TX1_POWER FIELD8(0xf0) +/* BBP 110 */ +#define BBP110_TX2_POWER FIELD8(0x0f) + + /* * BBP 138: Unknown */ @@ -2141,6 +2145,12 @@ struct mac_iveiv_entry { #define RFCSR3_PA2_CASCODE_BIAS_CCKK FIELD8(0x80) /* Bits for RF3290/RF5360/RF5370/RF5372/RF5390/RF5392 */ #define RFCSR3_VCOCAL_EN FIELD8(0x80) +/* Bits for RF3050 */ +#define RFCSR3_BIT1 FIELD8(0x02) +#define RFCSR3_BIT2 FIELD8(0x04) +#define RFCSR3_BIT3 FIELD8(0x08) +#define RFCSR3_BIT4 FIELD8(0x10) +#define RFCSR3_BIT5 FIELD8(0x20) /* * FRCSR 5: @@ -2153,6 +2163,8 @@ struct mac_iveiv_entry { #define RFCSR6_R1 FIELD8(0x03) #define RFCSR6_R2 FIELD8(0x40) #define RFCSR6_TXDIV FIELD8(0x0c) +/* bits for RF3053 */ +#define RFCSR6_VCO_IC FIELD8(0xc0) /* * RFCSR 7: @@ -2177,7 +2189,12 @@ struct mac_iveiv_entry { * RFCSR 11: */ #define RFCSR11_R FIELD8(0x03) +#define RFCSR11_PLL_MOD FIELD8(0x0c) #define RFCSR11_MOD FIELD8(0xc0) +/* bits for RF3053 */ +/* TODO: verify RFCSR11_MOD usage on other chips */ +#define RFCSR11_PLL_IDOH FIELD8(0x40) + /* * RFCSR 12: @@ -2273,6 +2290,12 @@ struct mac_iveiv_entry { #define RFCSR31_RX_H20M FIELD8(0x20) #define RFCSR31_RX_CALIB FIELD8(0x7f) +/* RFCSR 32 bits for RF3053 */ +#define RFCSR32_TX_AGC_FC FIELD8(0xf8) + +/* RFCSR 36 bits for RF3053 */ +#define RFCSR36_RF_BS FIELD8(0x80) + /* * RFCSR 38: */ @@ -2281,6 +2304,7 @@ struct mac_iveiv_entry { /* * RFCSR 39: */ +#define RFCSR39_RX_DIV FIELD8(0x40) #define RFCSR39_RX_LO2_EN FIELD8(0x80) /* @@ -2288,18 +2312,36 @@ struct mac_iveiv_entry { */ #define RFCSR49_TX FIELD8(0x3f) #define RFCSR49_EP FIELD8(0xc0) +/* bits for RT3593 */ +#define RFCSR49_TX_LO1_IC FIELD8(0x1c) +#define RFCSR49_TX_DIV FIELD8(0x20) /* * RFCSR 50: */ #define RFCSR50_TX FIELD8(0x3f) #define RFCSR50_EP FIELD8(0xc0) -/* bits for RT3593*/ +/* bits for RT3593 */ +#define RFCSR50_TX_LO1_EN FIELD8(0x20) #define RFCSR50_TX_LO2_EN FIELD8(0x10) /* RFCSR 51 */ -/* bits for RT3593*/ +/* bits for RT3593 */ +#define RFCSR51_BITS01 FIELD8(0x03) #define RFCSR51_BITS24 FIELD8(0x1c) +#define RFCSR51_BITS57 FIELD8(0xe0) + +#define RFCSR53_TX_POWER FIELD8(0x3f) +#define RFCSR53_UNKNOWN FIELD8(0xc0) + +#define RFCSR54_TX_POWER FIELD8(0x3f) +#define RFCSR54_UNKNOWN FIELD8(0xc0) + +#define RFCSR55_TX_POWER FIELD8(0x3f) +#define RFCSR55_UNKNOWN FIELD8(0xc0) + +#define RFCSR57_DRV_CC FIELD8(0xfc) + /* * RF registers diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 5119bf0..e6d6c75 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -2168,6 +2168,303 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev, rt2800_rfcsr_write(rt2x00dev, 7, rfcsr); } +static void rt2800_config_channel_rf3053(struct rt2x00_dev *rt2x00dev, + struct ieee80211_conf *conf, + struct rf_channel *rf, + struct channel_info *info) +{ + struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; + u8 txrx_agc_fc; + u8 txrx_h20m; + u8 rfcsr; + u8 bbp; + const bool txbf_enabled = false; /* TODO */ + + /* TODO: use TX{0,1,2}FinePowerControl values from EEPROM */ + rt2800_bbp_read(rt2x00dev, 109, &bbp); + rt2x00_set_field8(&bbp, BBP109_TX0_POWER, 0); + rt2x00_set_field8(&bbp, BBP109_TX1_POWER, 0); + 
rt2800_bbp_write(rt2x00dev, 109, bbp); + + rt2800_bbp_read(rt2x00dev, 110, &bbp); + rt2x00_set_field8(&bbp, BBP110_TX2_POWER, 0); + rt2800_bbp_write(rt2x00dev, 110, bbp); + + if (rf->channel <= 14) { + /* Restore BBP 25 & 26 for 2.4 GHz */ + rt2800_bbp_write(rt2x00dev, 25, drv_data->bbp25); + rt2800_bbp_write(rt2x00dev, 26, drv_data->bbp26); + } else { + /* Hard code BBP 25 & 26 for 5GHz */ + + /* Enable IQ Phase correction */ + rt2800_bbp_write(rt2x00dev, 25, 0x09); + /* Setup IQ Phase correction value */ + rt2800_bbp_write(rt2x00dev, 26, 0xff); + } + + rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1); + rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3 & 0xf); + + rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR11_R, (rf->rf2 & 0x3)); + rt2800_rfcsr_write(rt2x00dev, 11, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR11_PLL_IDOH, 1); + if (rf->channel <= 14) + rt2x00_set_field8(&rfcsr, RFCSR11_PLL_MOD, 1); + else + rt2x00_set_field8(&rfcsr, RFCSR11_PLL_MOD, 2); + rt2800_rfcsr_write(rt2x00dev, 11, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 53, &rfcsr); + if (rf->channel <= 14) { + rfcsr = 0; + rt2x00_set_field8(&rfcsr, RFCSR53_TX_POWER, + info->default_power1 & 0x1f); + } else { + if (rt2x00_is_usb(rt2x00dev)) + rfcsr = 0x40; + + rt2x00_set_field8(&rfcsr, RFCSR53_TX_POWER, + ((info->default_power1 & 0x18) << 1) | + (info->default_power1 & 7)); + } + rt2800_rfcsr_write(rt2x00dev, 53, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 55, &rfcsr); + if (rf->channel <= 14) { + rfcsr = 0; + rt2x00_set_field8(&rfcsr, RFCSR55_TX_POWER, + info->default_power2 & 0x1f); + } else { + if (rt2x00_is_usb(rt2x00dev)) + rfcsr = 0x40; + + rt2x00_set_field8(&rfcsr, RFCSR55_TX_POWER, + ((info->default_power2 & 0x18) << 1) | + (info->default_power2 & 7)); + } + rt2800_rfcsr_write(rt2x00dev, 55, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 54, &rfcsr); + if (rf->channel <= 14) { + rfcsr = 0; + rt2x00_set_field8(&rfcsr, RFCSR54_TX_POWER, + info->default_power3 & 0x1f); + } else { + if (rt2x00_is_usb(rt2x00dev)) + rfcsr = 0x40; + + rt2x00_set_field8(&rfcsr, RFCSR54_TX_POWER, + ((info->default_power3 & 0x18) << 1) | + (info->default_power3 & 7)); + } + rt2800_rfcsr_write(rt2x00dev, 54, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0); + rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0); + rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 0); + rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0); + rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0); + rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0); + rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1); + rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1); + + switch (rt2x00dev->default_ant.tx_chain_num) { + case 3: + rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1); + /* fallthrough */ + case 2: + rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1); + /* fallthrough */ + case 1: + rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1); + break; + } + + switch (rt2x00dev->default_ant.rx_chain_num) { + case 3: + rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1); + /* fallthrough */ + case 2: + rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1); + /* fallthrough */ + case 1: + rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1); + break; + } + rt2800_rfcsr_write(rt2x00dev, 1, rfcsr); + + /* TODO: frequency calibration? 
*/ + + if (conf_is_ht40(conf)) { + txrx_agc_fc = rt2x00_get_field8(drv_data->calibration_bw40, + RFCSR24_TX_AGC_FC); + txrx_h20m = rt2x00_get_field8(drv_data->calibration_bw40, + RFCSR24_TX_H20M); + } else { + txrx_agc_fc = rt2x00_get_field8(drv_data->calibration_bw20, + RFCSR24_TX_AGC_FC); + txrx_h20m = rt2x00_get_field8(drv_data->calibration_bw20, + RFCSR24_TX_H20M); + } + + /* NOTE: the reference driver does not writes the new value + * back to RFCSR 32 + */ + rt2800_rfcsr_read(rt2x00dev, 32, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR32_TX_AGC_FC, txrx_agc_fc); + + if (rf->channel <= 14) + rfcsr = 0xa0; + else + rfcsr = 0x80; + rt2800_rfcsr_write(rt2x00dev, 31, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, txrx_h20m); + rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, txrx_h20m); + rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); + + /* Band selection */ + rt2800_rfcsr_read(rt2x00dev, 36, &rfcsr); + if (rf->channel <= 14) + rt2x00_set_field8(&rfcsr, RFCSR36_RF_BS, 1); + else + rt2x00_set_field8(&rfcsr, RFCSR36_RF_BS, 0); + rt2800_rfcsr_write(rt2x00dev, 36, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 34, &rfcsr); + if (rf->channel <= 14) + rfcsr = 0x3c; + else + rfcsr = 0x20; + rt2800_rfcsr_write(rt2x00dev, 34, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 12, &rfcsr); + if (rf->channel <= 14) + rfcsr = 0x1a; + else + rfcsr = 0x12; + rt2800_rfcsr_write(rt2x00dev, 12, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr); + if (rf->channel >= 1 && rf->channel <= 14) + rt2x00_set_field8(&rfcsr, RFCSR6_VCO_IC, 1); + else if (rf->channel >= 36 && rf->channel <= 64) + rt2x00_set_field8(&rfcsr, RFCSR6_VCO_IC, 2); + else if (rf->channel >= 100 && rf->channel <= 128) + rt2x00_set_field8(&rfcsr, RFCSR6_VCO_IC, 2); + else + rt2x00_set_field8(&rfcsr, RFCSR6_VCO_IC, 1); + rt2800_rfcsr_write(rt2x00dev, 6, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR30_RX_VCM, 2); + rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); + + rt2800_rfcsr_write(rt2x00dev, 46, 0x60); + + if (rf->channel <= 14) { + rt2800_rfcsr_write(rt2x00dev, 10, 0xd3); + rt2800_rfcsr_write(rt2x00dev, 13, 0x12); + } else { + rt2800_rfcsr_write(rt2x00dev, 10, 0xd8); + rt2800_rfcsr_write(rt2x00dev, 13, 0x23); + } + + rt2800_rfcsr_read(rt2x00dev, 51, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR51_BITS01, 1); + rt2800_rfcsr_write(rt2x00dev, 51, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 51, &rfcsr); + if (rf->channel <= 14) { + rt2x00_set_field8(&rfcsr, RFCSR51_BITS24, 5); + rt2x00_set_field8(&rfcsr, RFCSR51_BITS57, 3); + } else { + rt2x00_set_field8(&rfcsr, RFCSR51_BITS24, 4); + rt2x00_set_field8(&rfcsr, RFCSR51_BITS57, 2); + } + rt2800_rfcsr_write(rt2x00dev, 51, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr); + if (rf->channel <= 14) + rt2x00_set_field8(&rfcsr, RFCSR49_TX_LO1_IC, 3); + else + rt2x00_set_field8(&rfcsr, RFCSR49_TX_LO1_IC, 2); + + if (txbf_enabled) + rt2x00_set_field8(&rfcsr, RFCSR49_TX_DIV, 1); + + rt2800_rfcsr_write(rt2x00dev, 49, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR50_TX_LO1_EN, 0); + rt2800_rfcsr_write(rt2x00dev, 50, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 57, &rfcsr); + if (rf->channel <= 14) + rt2x00_set_field8(&rfcsr, RFCSR57_DRV_CC, 0x1b); + else + rt2x00_set_field8(&rfcsr, RFCSR57_DRV_CC, 0x0f); + rt2800_rfcsr_write(rt2x00dev, 57, rfcsr); + + if (rf->channel <= 14) { + rt2800_rfcsr_write(rt2x00dev, 44, 0x93); + rt2800_rfcsr_write(rt2x00dev, 52, 0x45); + } else { + rt2800_rfcsr_write(rt2x00dev, 44, 0x9b); 
+ rt2800_rfcsr_write(rt2x00dev, 52, 0x05); + } + + /* Initiate VCO calibration */ + rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr); + if (rf->channel <= 14) { + rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1); + } else { + rt2x00_set_field8(&rfcsr, RFCSR3_BIT1, 1); + rt2x00_set_field8(&rfcsr, RFCSR3_BIT2, 1); + rt2x00_set_field8(&rfcsr, RFCSR3_BIT3, 1); + rt2x00_set_field8(&rfcsr, RFCSR3_BIT4, 1); + rt2x00_set_field8(&rfcsr, RFCSR3_BIT5, 1); + rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1); + } + rt2800_rfcsr_write(rt2x00dev, 3, rfcsr); + + if (rf->channel >= 1 && rf->channel <= 14) { + rfcsr = 0x23; + if (txbf_enabled) + rt2x00_set_field8(&rfcsr, RFCSR39_RX_DIV, 1); + rt2800_rfcsr_write(rt2x00dev, 39, rfcsr); + + rt2800_rfcsr_write(rt2x00dev, 45, 0xbb); + } else if (rf->channel >= 36 && rf->channel <= 64) { + rfcsr = 0x36; + if (txbf_enabled) + rt2x00_set_field8(&rfcsr, RFCSR39_RX_DIV, 1); + rt2800_rfcsr_write(rt2x00dev, 39, 0x36); + + rt2800_rfcsr_write(rt2x00dev, 45, 0xeb); + } else if (rf->channel >= 100 && rf->channel <= 128) { + rfcsr = 0x32; + if (txbf_enabled) + rt2x00_set_field8(&rfcsr, RFCSR39_RX_DIV, 1); + rt2800_rfcsr_write(rt2x00dev, 39, rfcsr); + + rt2800_rfcsr_write(rt2x00dev, 45, 0xb3); + } else { + rfcsr = 0x30; + if (txbf_enabled) + rt2x00_set_field8(&rfcsr, RFCSR39_RX_DIV, 1); + rt2800_rfcsr_write(rt2x00dev, 39, rfcsr); + + rt2800_rfcsr_write(rt2x00dev, 45, 0x9b); + } +} + #define POWER_BOUND 0x27 #define POWER_BOUND_5G 0x2b #define FREQ_OFFSET_BOUND 0x5f @@ -2784,6 +3081,9 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, case RF3052: rt2800_config_channel_rf3052(rt2x00dev, conf, rf, info); break; + case RF3053: + rt2800_config_channel_rf3053(rt2x00dev, conf, rf, info); + break; case RF3290: rt2800_config_channel_rf3290(rt2x00dev, conf, rf, info); break; @@ -2829,6 +3129,23 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, rt2800_bbp_write(rt2x00dev, 66, 0x26 + rt2x00dev->lna_gain); rt2800_bbp_write(rt2x00dev, 27, 0x20); rt2800_bbp_write(rt2x00dev, 66, 0x26 + rt2x00dev->lna_gain); + } else if (rt2x00_rt(rt2x00dev, RT3593)) { + if (rf->channel > 14) { + /* Disable CCK Packet detection on 5GHz */ + rt2800_bbp_write(rt2x00dev, 70, 0x00); + } else { + rt2800_bbp_write(rt2x00dev, 70, 0x0a); + } + + if (conf_is_ht40(conf)) + rt2800_bbp_write(rt2x00dev, 105, 0x04); + else + rt2800_bbp_write(rt2x00dev, 105, 0x34); + + rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain); + rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain); + rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain); + rt2800_bbp_write(rt2x00dev, 77, 0x98); } else { rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain); rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain); @@ -2844,16 +3161,27 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, rt2800_bbp_write(rt2x00dev, 82, 0x62); rt2800_bbp_write(rt2x00dev, 75, 0x46); } else { - rt2800_bbp_write(rt2x00dev, 82, 0x84); + if (rt2x00_rt(rt2x00dev, RT3593)) + rt2800_bbp_write(rt2x00dev, 82, 0x62); + else + rt2800_bbp_write(rt2x00dev, 82, 0x84); rt2800_bbp_write(rt2x00dev, 75, 0x50); } + if (rt2x00_rt(rt2x00dev, RT3593)) + rt2800_bbp_write(rt2x00dev, 83, 0x8a); } + } else { if (rt2x00_rt(rt2x00dev, RT3572)) rt2800_bbp_write(rt2x00dev, 82, 0x94); + else if (rt2x00_rt(rt2x00dev, RT3593)) + rt2800_bbp_write(rt2x00dev, 82, 0x82); else rt2800_bbp_write(rt2x00dev, 82, 0xf2); + if (rt2x00_rt(rt2x00dev, RT3593)) + rt2800_bbp_write(rt2x00dev, 83, 0x9a); + if (test_bit(CAPABILITY_EXTERNAL_LNA_A, 
&rt2x00dev->cap_flags)) rt2800_bbp_write(rt2x00dev, 75, 0x46); else @@ -2924,6 +3252,41 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, if (rt2x00_rt(rt2x00dev, RT3572)) rt2800_rfcsr_write(rt2x00dev, 8, 0x80); + if (rt2x00_rt(rt2x00dev, RT3593)) { + if (rt2x00_is_usb(rt2x00dev)) { + rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg); + + /* Band selection. GPIO #8 controls all paths */ + rt2x00_set_field32(&reg, GPIO_CTRL_DIR8, 0); + if (rf->channel <= 14) + rt2x00_set_field32(&reg, GPIO_CTRL_VAL8, 1); + else + rt2x00_set_field32(&reg, GPIO_CTRL_VAL8, 0); + + rt2x00_set_field32(&reg, GPIO_CTRL_DIR4, 0); + rt2x00_set_field32(&reg, GPIO_CTRL_DIR7, 0); + + /* LNA PE control. + * GPIO #4 controls PE0 and PE1, + * GPIO #7 controls PE2 + */ + rt2x00_set_field32(&reg, GPIO_CTRL_VAL4, 1); + rt2x00_set_field32(&reg, GPIO_CTRL_VAL7, 1); + + rt2800_register_write(rt2x00dev, GPIO_CTRL, reg); + } + + /* AGC init */ + if (rf->channel <= 14) + reg = 0x1c + 2 * rt2x00dev->lna_gain; + else + reg = 0x22 + ((rt2x00dev->lna_gain * 5) / 3); + + rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg); + + usleep_range(1000, 1500); + } + if (rt2x00_rt(rt2x00dev, RT5592)) { rt2800_bbp_write(rt2x00dev, 195, 141); rt2800_bbp_write(rt2x00dev, 196, conf_is_ht40(conf) ? 0x10 : 0x1a); @@ -5883,6 +6246,7 @@ static void rt3593_post_bbp_init(struct rt2x00_dev *rt2x00dev) /* FIXME: BBP 105 owerwrite? */ rt2800_bbp_write(rt2x00dev, 105, 0x04); + } static void rt2800_init_rfcsr_3593(struct rt2x00_dev *rt2x00dev) -- cgit v0.10.2 From 1095df07bfc5924e100f1748e6ebc9e5a5881565 Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Mon, 8 Jul 2013 16:08:31 +0200 Subject: rt2x00: rt2800lib: enable VCO recalibration for RF3053 Signed-off-by: Gabor Juhos Acked-by: Stanislaw Gruszka Acked-by: Gertjan van Wingerde Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index e6d6c75..46d5f42 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -4194,6 +4194,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev) rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1); rt2800_rfcsr_write(rt2x00dev, 7, rfcsr); break; + case RF3053: case RF3290: case RF5360: case RF5370: @@ -7595,6 +7596,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) case RF3022: case RF3320: case RF3052: + case RF3053: case RF3290: case RF5360: case RF5370: -- cgit v0.10.2 From 0f5af26a49c8d6a50ecec2f1b66174069c9f9581 Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Mon, 8 Jul 2013 16:08:32 +0200 Subject: rt2x00: rt2800lib: enable RF3053 support Support for the RF3053 has been implemented in the previous changes, so it is safe to mark it supported in the driver. Signed-off-by: Gabor Juhos Acked-by: Stanislaw Gruszka Acked-by: Gertjan van Wingerde Signed-off-by: John W.
Linville diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 46d5f42..9ab587e 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -6948,6 +6948,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) case RF3021: case RF3022: case RF3052: + case RF3053: case RF3290: case RF3320: case RF3322: -- cgit v0.10.2 From 2dc2bd2f8aa8eb79184fb3c7e5f530006500897f Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Mon, 8 Jul 2013 16:08:33 +0200 Subject: rt2x00: rt2800lib: enable RT3593 support Support for the RT3593 has been implemented in the previous changes, so it is safe to mark it supported in the driver. Signed-off-by: Gabor Juhos Acked-by: Stanislaw Gruszka Acked-by: Gertjan van Wingerde Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 9ab587e..dedc3d4 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -7636,6 +7636,7 @@ static int rt2800_probe_rt(struct rt2x00_dev *rt2x00dev) case RT3352: case RT3390: case RT3572: + case RT3593: case RT5390: case RT5392: case RT5592: -- cgit v0.10.2 From 65d3c0d5cffb9f1227927544e418a9ac231eae42 Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Mon, 8 Jul 2013 16:08:34 +0200 Subject: rt2x00: rt2800usb: use correct [RT]XWI size for RT3593 The RT3593 chipset requires different [RT]XWI size values. Modify the driver to use the correct values. Signed-off-by: Gabor Juhos Acked-by: Stanislaw Gruszka Acked-by: Gertjan van Wingerde Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h index 1ba797b..a313241 100644 --- a/drivers/net/wireless/rt2x00/rt2800.h +++ b/drivers/net/wireless/rt2x00/rt2800.h @@ -2814,6 +2814,7 @@ enum rt2800_eeprom_word { #define TXWI_DESC_SIZE_5WORDS (5 * sizeof(__le32)) #define RXWI_DESC_SIZE_4WORDS (4 * sizeof(__le32)) +#define RXWI_DESC_SIZE_5WORDS (5 * sizeof(__le32)) #define RXWI_DESC_SIZE_6WORDS (6 * sizeof(__le32)) /* diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index 840833b..c24c1fd 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -854,7 +854,10 @@ static void rt2800usb_queue_init(struct data_queue *queue) struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; unsigned short txwi_size, rxwi_size; - if (rt2x00_rt(rt2x00dev, RT5592)) { + if (rt2x00_rt(rt2x00dev, RT3593)) { + txwi_size = TXWI_DESC_SIZE_4WORDS; + rxwi_size = RXWI_DESC_SIZE_5WORDS; + } else if (rt2x00_rt(rt2x00dev, RT5592)) { txwi_size = TXWI_DESC_SIZE_5WORDS; rxwi_size = RXWI_DESC_SIZE_6WORDS; } else { -- cgit v0.10.2 From d02433d15566f542e42e3c469dfade0de332dc7b Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Mon, 8 Jul 2013 16:08:35 +0200 Subject: rt2x00: rt2800usb: add USB device ID for Linksys AE3000 The Linksys AE3000 device is based on the RT3573 chipset. The support for this chipset is available already, and the AE3000 device works with the driver. Only managed mode works correctly at the moment, for AP mode additional changes are needed in the driver. Also add a new RT2800USB_RT3573 Kconfig option and only enable support for RT3573 based devices if that is enabled. Signed-off-by: Gabor Juhos Acked-by: Stanislaw Gruszka Acked-by: Gertjan van Wingerde Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig index 9b915d3..c60d6e8 100644 --- a/drivers/net/wireless/rt2x00/Kconfig +++ b/drivers/net/wireless/rt2x00/Kconfig @@ -166,6 +166,12 @@ config RT2800USB_RT35XX rt2800usb driver. Supported chips: RT3572 +config RT2800USB_RT3573 + bool "rt2800usb - Include support for rt3573 devices (EXPERIMENTAL)" + ---help--- + This enables support for RT3573 chipset based wireless USB devices + in the rt2800usb driver. + config RT2800USB_RT53XX bool "rt2800usb - Include support for rt53xx devices (EXPERIMENTAL)" ---help--- diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index c24c1fd..ab609ae 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -1197,6 +1197,10 @@ static struct usb_device_id rt2800usb_device_table[] = { /* Zinwell */ { USB_DEVICE(0x5a57, 0x0284) }, #endif +#ifdef CONFIG_RT2800USB_RT3573 + /* Linksys */ + { USB_DEVICE(0x13b1, 0x003b) }, +#endif #ifdef CONFIG_RT2800USB_RT53XX /* Arcadyan */ { USB_DEVICE(0x043e, 0x7a12) }, -- cgit v0.10.2 From f988b23f7a5045ec5da627ced75de1c2eb080af8 Mon Sep 17 00:00:00 2001 From: Vladimir Kondratiev Date: Thu, 11 Jul 2013 18:03:37 +0300 Subject: wil6210: Align WMI header with latest FW FW guys changed header structure; align driver code Signed-off-by: Vladimir Kondratiev Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h index eff1239..e59239d 100644 --- a/drivers/net/wireless/ath/wil6210/trace.h +++ b/drivers/net/wireless/ath/wil6210/trace.h @@ -37,36 +37,40 @@ static inline void trace_ ## name(proto) {} #endif /* !CONFIG_WIL6210_TRACING || defined(__CHECKER__) */ DECLARE_EVENT_CLASS(wil6210_wmi, - TP_PROTO(u16 id, void *buf, u16 buf_len), + TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len), - TP_ARGS(id, buf, buf_len), + TP_ARGS(wmi, buf, buf_len), TP_STRUCT__entry( + __field(u8, mid) __field(u16, id) + __field(u32, timestamp) __field(u16, buf_len) __dynamic_array(u8, buf, buf_len) ), TP_fast_assign( - __entry->id = id; + __entry->mid = wmi->mid; + __entry->id = le16_to_cpu(wmi->id); + __entry->timestamp = le32_to_cpu(wmi->timestamp); __entry->buf_len = buf_len; memcpy(__get_dynamic_array(buf), buf, buf_len); ), TP_printk( - "id 0x%04x len %d", - __entry->id, __entry->buf_len + "MID %d id 0x%04x len %d timestamp %d", + __entry->mid, __entry->id, __entry->buf_len, __entry->timestamp ) ); DEFINE_EVENT(wil6210_wmi, wil6210_wmi_cmd, - TP_PROTO(u16 id, void *buf, u16 buf_len), - TP_ARGS(id, buf, buf_len) + TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len), + TP_ARGS(wmi, buf, buf_len) ); DEFINE_EVENT(wil6210_wmi, wil6210_wmi_event, - TP_PROTO(u16 id, void *buf, u16 buf_len), - TP_ARGS(id, buf, buf_len) + TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len), + TP_ARGS(wmi, buf, buf_len) ); #define WIL6210_MSG_MAX (200) diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 44fdab5..129c480 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -156,11 +156,22 @@ struct wil6210_mbox_hdr { /* max. 
value for wil6210_mbox_hdr.len */ #define MAX_MBOXITEM_SIZE (240) +/** + * struct wil6210_mbox_hdr_wmi - WMI header + * + * @mid: MAC ID + * 00 - default, created by FW + * 01..0f - WiFi ports, driver to create + * 10..fe - debug + * ff - broadcast + * @id: command/event ID + * @timestamp: FW fills for events, free-running msec timer + */ struct wil6210_mbox_hdr_wmi { - u8 reserved0[2]; + u8 mid; + u8 reserved; __le16 id; - __le16 info1; /* bits [0..3] - device_id, rest - unused */ - u8 reserved1[2]; + __le32 timestamp; } __packed; struct pending_wmi_event { diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index dc8059a..a62511a 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -172,8 +172,8 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) .len = cpu_to_le16(sizeof(cmd.wmi) + len), }, .wmi = { + .mid = 0, .id = cpu_to_le16(cmdid), - .info1 = 0, }, }; struct wil6210_mbox_ring *r = &wil->mbox_ctl.tx; @@ -248,7 +248,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) iowrite32(r->head = next_head, wil->csr + HOST_MBOX + offsetof(struct wil6210_mbox_ctl, tx.head)); - trace_wil6210_wmi_cmd(cmdid, buf, len); + trace_wil6210_wmi_cmd(&cmd.wmi, buf, len); /* interrupt to FW */ iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT); @@ -640,9 +640,13 @@ void wmi_recv_cmd(struct wil6210_priv *wil) hdr.flags); if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) && (len >= sizeof(struct wil6210_mbox_hdr_wmi))) { - u16 id = le16_to_cpu(evt->event.wmi.id); - wil_dbg_wmi(wil, "WMI event 0x%04x\n", id); - trace_wil6210_wmi_event(id, &evt->event.wmi, len); + struct wil6210_mbox_hdr_wmi *wmi = &evt->event.wmi; + u16 id = le16_to_cpu(wmi->id); + u32 tstamp = le32_to_cpu(wmi->timestamp); + wil_dbg_wmi(wil, "WMI event 0x%04x MID %d @%d msec\n", + id, wmi->mid, tstamp); + trace_wil6210_wmi_event(wmi, &wmi[1], + len - sizeof(*wmi)); } wil_hex_dump_wmi("evt ", DUMP_PREFIX_OFFSET, 16, 1, &evt->event.hdr, sizeof(hdr) + len, true); -- cgit v0.10.2 From 4d1ac0721aae84eec3c8aa54c7574b14f79863bc Mon Sep 17 00:00:00 2001 From: Vladimir Kondratiev Date: Thu, 11 Jul 2013 18:03:38 +0300 Subject: wil6210: fix wrong index in wil_vring_free When destroying Rx vring, branch for Rx used wrong Tx descriptor: while SW context was taken for "head", HW descriptor was, by mistake, taken from "tail" Signed-off-by: Vladimir Kondratiev Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index d240b24..8fde73a 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -133,7 +133,7 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring, } else { /* rx */ struct vring_rx_desc dd, *d = ⅆ volatile struct vring_rx_desc *_d = - &vring->va[vring->swtail].rx; + &vring->va[vring->swhead].rx; *d = *_d; pa = wil_desc_addr(&d->dma.addr); -- cgit v0.10.2 From 03269c658b7a2f6ccfa44d7270da8446881f9552 Mon Sep 17 00:00:00 2001 From: Vladimir Kondratiev Date: Thu, 11 Jul 2013 18:03:39 +0300 Subject: wil6210: Optimize Tx completion No need to modify HW descriptor, as it will be re-initialized on Tx. Signed-off-by: Vladimir Kondratiev Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 8fde73a..dd5f35e 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -855,10 +855,12 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid) } else { dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE); } - d->dma.addr.addr_low = 0; - d->dma.addr.addr_high = 0; - d->dma.length = 0; - d->dma.status = TX_DMA_STATUS_DU; + /* + * There is no need to touch HW descriptor: + * - ststus bit TX_DMA_STATUS_DU is set by design, + * so hardware will not try to process this desc., + * - rest of descriptor will be initialized on Tx. + */ vring->swtail = wil_vring_next_tail(vring); done++; } -- cgit v0.10.2 From f88f113a54f02df62608ec263e8a3ff7e81cfce2 Mon Sep 17 00:00:00 2001 From: Vladimir Kondratiev Date: Thu, 11 Jul 2013 18:03:40 +0300 Subject: wil6210: Introduce struct for sw context Enable adding more data to the SW context. For now, add flag "mapped_as_page", to separate decisions on free-ing skb and type of DMA mapping. This allows linking skb itself to any descriptor of fragmented skb. Signed-off-by: Vladimir Kondratiev Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index e8308ec..971ce46 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -51,7 +51,7 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil, if ((i % 64) == 0 && (i != 0)) seq_printf(s, "\n"); seq_printf(s, "%s", (d->dma.status & BIT(0)) ? - "S" : (vring->ctx[i] ? "H" : "h")); + "S" : (vring->ctx[i].skb ? "H" : "h")); } seq_printf(s, "\n"); } @@ -406,7 +406,7 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data) volatile struct vring_tx_desc *d = &(vring->va[dbg_txdesc_index].tx); volatile u32 *u = (volatile u32 *)d; - struct sk_buff *skb = vring->ctx[dbg_txdesc_index]; + struct sk_buff *skb = vring->ctx[dbg_txdesc_index].skb; seq_printf(s, "Tx[%3d] = {\n", dbg_txdesc_index); seq_printf(s, " MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n", diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index dd5f35e..44cdd2a 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -70,7 +70,7 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring) vring->swhead = 0; vring->swtail = 0; - vring->ctx = kzalloc(vring->size * sizeof(vring->ctx[0]), GFP_KERNEL); + vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL); if (!vring->ctx) { vring->va = NULL; return -ENOMEM; @@ -108,39 +108,39 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring, while (!wil_vring_is_empty(vring)) { dma_addr_t pa; - struct sk_buff *skb; u16 dmalen; + struct wil_ctx *ctx; if (tx) { struct vring_tx_desc dd, *d = ⅆ volatile struct vring_tx_desc *_d = &vring->va[vring->swtail].tx; + ctx = &vring->ctx[vring->swtail]; *d = *_d; pa = wil_desc_addr(&d->dma.addr); dmalen = le16_to_cpu(d->dma.length); - skb = vring->ctx[vring->swtail]; - if (skb) { - dma_unmap_single(dev, pa, dmalen, - DMA_TO_DEVICE); - dev_kfree_skb_any(skb); - vring->ctx[vring->swtail] = NULL; - } else { + if (vring->ctx[vring->swtail].mapped_as_page) { dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE); + } else { + dma_unmap_single(dev, pa, dmalen, + DMA_TO_DEVICE); } + if (ctx->skb) + dev_kfree_skb_any(ctx->skb); vring->swtail = 
wil_vring_next_tail(vring); } else { /* rx */ struct vring_rx_desc dd, *d = ⅆ volatile struct vring_rx_desc *_d = &vring->va[vring->swhead].rx; + ctx = &vring->ctx[vring->swhead]; *d = *_d; pa = wil_desc_addr(&d->dma.addr); dmalen = le16_to_cpu(d->dma.length); - skb = vring->ctx[vring->swhead]; dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE); - kfree_skb(skb); + kfree_skb(ctx->skb); wil_vring_advance_head(vring, 1); } } @@ -187,7 +187,7 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring, d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */ d->dma.length = cpu_to_le16(sz); *_d = *d; - vring->ctx[i] = skb; + vring->ctx[i].skb = skb; return 0; } @@ -352,11 +352,11 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, return NULL; } - skb = vring->ctx[vring->swhead]; + skb = vring->ctx[vring->swhead].skb; d = wil_skb_rxdesc(skb); *d = *_d; pa = wil_desc_addr(&d->dma.addr); - vring->ctx[vring->swhead] = NULL; + vring->ctx[vring->swhead].skb = NULL; wil_vring_advance_head(vring, 1); dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE); @@ -703,7 +703,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, if (unlikely(dma_mapping_error(dev, pa))) goto dma_error; wil_tx_desc_map(d, pa, len, vring_index); - vring->ctx[i] = NULL; + vring->ctx[i].mapped_as_page = 1; *_d = *d; } /* for the last seg only */ @@ -724,7 +724,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, * to prevent skb release before accounting * in case of immediate "tx done" */ - vring->ctx[i] = skb_get(skb); + vring->ctx[i].skb = skb_get(skb); return 0; dma_error: @@ -732,6 +732,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, /* Note: increment @f to operate with positive index */ for (f++; f > 0; f--) { u16 dmalen; + struct wil_ctx *ctx = &vring->ctx[i]; i = (swhead + f) % vring->size; _d = &(vring->va[i].tx); @@ -739,10 +740,15 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, _d->dma.status = TX_DMA_STATUS_DU; pa = wil_desc_addr(&d->dma.addr); dmalen = le16_to_cpu(d->dma.length); - if (vring->ctx[i]) - dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE); - else + if (ctx->mapped_as_page) dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE); + else + dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE); + + if (ctx->skb) + dev_kfree_skb_any(ctx->skb); + + memset(ctx, 0, sizeof(*ctx)); } return -EINVAL; @@ -821,8 +827,9 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid) &vring->va[vring->swtail].tx; struct vring_tx_desc dd, *d = ⅆ dma_addr_t pa; - struct sk_buff *skb; u16 dmalen; + struct wil_ctx *ctx = &vring->ctx[vring->swtail]; + struct sk_buff *skb = ctx->skb; *d = *_d; @@ -840,7 +847,11 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid) (const void *)d, sizeof(*d), false); pa = wil_desc_addr(&d->dma.addr); - skb = vring->ctx[vring->swtail]; + if (ctx->mapped_as_page) + dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE); + else + dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE); + if (skb) { if (d->dma.error == 0) { ndev->stats.tx_packets++; @@ -849,12 +860,9 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid) ndev->stats.tx_errors++; } - dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE); dev_kfree_skb_any(skb); - vring->ctx[vring->swtail] = NULL; - } else { - dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE); } + memset(ctx, 0, sizeof(*ctx)); /* * There is no need to touch HW descriptor: * - ststus bit TX_DMA_STATUS_DU is set by design, diff --git 
a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 129c480..c4a5163 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -183,6 +183,14 @@ struct pending_wmi_event { } __packed event; }; +/** + * struct wil_ctx - software context for Vring descriptor + */ +struct wil_ctx { + struct sk_buff *skb; + u8 mapped_as_page:1; +}; + union vring_desc; struct vring { @@ -192,7 +200,7 @@ struct vring { u32 swtail; u32 swhead; u32 hwtail; /* write here to inform hw */ - void **ctx; /* void *ctx[size] - software context */ + struct wil_ctx *ctx; /* ctx[size] - software context */ }; enum { /* for wil6210_priv.status */ -- cgit v0.10.2 From 6cdadd4dc7621e47739a84548e70713e157fa850 Mon Sep 17 00:00:00 2001 From: Vladimir Kondratiev Date: Thu, 11 Jul 2013 18:03:41 +0300 Subject: wil6210: fix subtle race in wil_tx_vring Finish all SW context modifications prior to notifying hardware It used to be race condition: if HW finish Tx and issue Tx completion IRQ very fast, prior to SW context update in wil_tx_vring, Tx completion will mis-handle descriptor, as SW part will have no skb pointer stored. Signed-off-by: Vladimir Kondratiev Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 44cdd2a..2a9d56a 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -712,6 +712,12 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS); *_d = *d; + /* hold reference to skb + * to prevent skb release before accounting + * in case of immediate "tx done" + */ + vring->ctx[i].skb = skb_get(skb); + wil_hex_dump_txrx("Tx ", DUMP_PREFIX_NONE, 32, 4, (const void *)d, sizeof(*d), false); @@ -720,11 +726,6 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, wil_dbg_txrx(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead); trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags); iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail)); - /* hold reference to skb - * to prevent skb release before accounting - * in case of immediate "tx done" - */ - vring->ctx[i].skb = skb_get(skb); return 0; dma_error: -- cgit v0.10.2 From bb4997a1afbff61084b243d62aaaf23ea38a290e Mon Sep 17 00:00:00 2001 From: Hauke Mehrtens Date: Mon, 15 Jul 2013 13:15:04 +0200 Subject: bcma: add some more core names These cores were found on a BCM4708 (chipid 53010), this is a ARM SoC with two Cortex A9 cores. 
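With names registered for these IDs, the bus scan below can report each core in readable form; roughly, the scan code resolves a core ID against the bcma_bcm_device_names[] table extended in this patch. A minimal sketch of such a lookup, with the struct field names (id/name) assumed rather than taken from the driver:

	static const char *core_name(u16 coreid)
	{
		const struct bcma_device_id_name *e;

		/* the table ends with the BCMA_CORE_INVALID entry */
		for (e = bcma_bcm_device_names; e->id != BCMA_CORE_INVALID; e++)
			if (e->id == coreid)
				return e->name;
		return "UNKNOWN";
	}
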
bcma: bus0: Found chip with id 0xCF12, rev 0x00 and package 0x02 bcma: bus0: Core 0 found: ChipCommon (manuf 0x4BF, id 0x800, rev 0x2A, class 0x0) bcma: bus0: Core 1 found: DMA (manuf 0x4BF, id 0x502, rev 0x01, class 0x0) bcma: bus0: Core 2 found: GBit MAC (manuf 0x4BF, id 0x82D, rev 0x04, class 0x0) bcma: bus0: Core 3 found: GBit MAC (manuf 0x4BF, id 0x82D, rev 0x04, class 0x0) bcma: bus0: Core 4 found: GBit MAC (manuf 0x4BF, id 0x82D, rev 0x04, class 0x0) bcma: bus0: Core 5 found: GBit MAC (manuf 0x4BF, id 0x82D, rev 0x04, class 0x0) bcma: bus0: Core 6 found: PCIe Gen 2 (manuf 0x4BF, id 0x501, rev 0x01, class 0x0) bcma: bus0: Core 7 found: PCIe Gen 2 (manuf 0x4BF, id 0x501, rev 0x01, class 0x0) bcma: bus0: Core 8 found: ARM Cortex A9 core (ihost) (manuf 0x4BF, id 0x510, rev 0x01, class 0x0) bcma: bus0: Core 9 found: USB 2.0 (manuf 0x4BF, id 0x504, rev 0x01, class 0x0) bcma: bus0: Core 10 found: USB 3.0 (manuf 0x4BF, id 0x505, rev 0x01, class 0x0) bcma: bus0: Core 11 found: SDIO3 (manuf 0x4BF, id 0x503, rev 0x01, class 0x0) bcma: bus0: Core 12 found: ARM Cortex A9 JTAG (manuf 0x4BF, id 0x506, rev 0x01, class 0x0) bcma: bus0: Core 13 found: Denali DDR2/DDR3 memory controller (manuf 0x4BF, id 0x507, rev 0x01, class 0x0) bcma: bus0: Core 14 found: ROM (manuf 0x4BF, id 0x508, rev 0x01, class 0x0) bcma: bus0: Core 15 found: NAND flash controller (manuf 0x4BF, id 0x509, rev 0x01, class 0x0) bcma: bus0: Core 16 found: SPI flash controller (manuf 0x4BF, id 0x50A, rev 0x01, class 0x0) Signed-off-by: Hauke Mehrtens Signed-off-by: John W. Linville diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c index 8bffa5c..f2f1415 100644 --- a/drivers/bcma/scan.c +++ b/drivers/bcma/scan.c @@ -32,6 +32,18 @@ static const struct bcma_device_id_name bcma_bcm_device_names[] = { { BCMA_CORE_4706_CHIPCOMMON, "BCM4706 ChipCommon" }, { BCMA_CORE_4706_SOC_RAM, "BCM4706 SOC RAM" }, { BCMA_CORE_4706_MAC_GBIT, "BCM4706 GBit MAC" }, + { BCMA_CORE_PCIEG2, "PCIe Gen 2" }, + { BCMA_CORE_DMA, "DMA" }, + { BCMA_CORE_SDIO3, "SDIO3" }, + { BCMA_CORE_USB20, "USB 2.0" }, + { BCMA_CORE_USB30, "USB 3.0" }, + { BCMA_CORE_A9JTAG, "ARM Cortex A9 JTAG" }, + { BCMA_CORE_DDR23, "Denali DDR2/DDR3 memory controller" }, + { BCMA_CORE_ROM, "ROM" }, + { BCMA_CORE_NAND, "NAND flash controller" }, + { BCMA_CORE_QSPI, "SPI flash controller" }, + { BCMA_CORE_CHIPCOMMON_B, "Chipcommon B" }, + { BCMA_CORE_ARMCA9, "ARM Cortex A9 core (ihost)" }, { BCMA_CORE_AMEMC, "AMEMC (DDR)" }, { BCMA_CORE_ALTA, "ALTA (I2S)" }, { BCMA_CORE_INVALID, "Invalid" }, diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index 622fc50..8fee028 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -72,7 +72,19 @@ struct bcma_host_ops { /* Core-ID values. 
*/ #define BCMA_CORE_OOB_ROUTER 0x367 /* Out of band */ #define BCMA_CORE_4706_CHIPCOMMON 0x500 +#define BCMA_CORE_PCIEG2 0x501 +#define BCMA_CORE_DMA 0x502 +#define BCMA_CORE_SDIO3 0x503 +#define BCMA_CORE_USB20 0x504 +#define BCMA_CORE_USB30 0x505 +#define BCMA_CORE_A9JTAG 0x506 +#define BCMA_CORE_DDR23 0x507 +#define BCMA_CORE_ROM 0x508 +#define BCMA_CORE_NAND 0x509 +#define BCMA_CORE_QSPI 0x50A +#define BCMA_CORE_CHIPCOMMON_B 0x50B #define BCMA_CORE_4706_SOC_RAM 0x50E +#define BCMA_CORE_ARMCA9 0x510 #define BCMA_CORE_4706_MAC_GBIT 0x52D #define BCMA_CORE_AMEMC 0x52E /* DDR1/2 memory controller core */ #define BCMA_CORE_ALTA 0x534 /* I2S core */ -- cgit v0.10.2 From 6ffdead8027cad2c9aac95d8d53152ca7d7421c8 Mon Sep 17 00:00:00 2001 From: Hauke Mehrtens Date: Mon, 15 Jul 2013 13:15:05 +0200 Subject: bcma: make it possible to select SoC support without mips To make it possible to use the SoC host interface with ARM SoCs do not depend on the MIPS driver any more. Signed-off-by: Hauke Mehrtens Signed-off-by: John W. Linville diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig index 380a200..7c081b3 100644 --- a/drivers/bcma/Kconfig +++ b/drivers/bcma/Kconfig @@ -35,8 +35,14 @@ config BCMA_DRIVER_PCI_HOSTMODE PCI core hostmode operation (external PCI bus). config BCMA_HOST_SOC - bool - depends on BCMA_DRIVER_MIPS + bool "Support for BCMA in a SoC" + depends on BCMA + help + Host interface for a Broadcom AIX bus directly mapped into + the memory. This only works with the Broadcom SoCs from the + BCM47XX line. + + If unsure, say N config BCMA_DRIVER_MIPS bool "BCMA Broadcom MIPS core driver" -- cgit v0.10.2 From 16041990d1c75efb4408d19413cf4fd27aa148dd Mon Sep 17 00:00:00 2001 From: Hauke Mehrtens Date: Mon, 15 Jul 2013 13:15:06 +0200 Subject: bcma: add constants for new ARM based SoCs These are the chipIDs of some ARM based SoCs from the BCM47xx line. Signed-off-by: Hauke Mehrtens Signed-off-by: John W. Linville diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index 8fee028..4d043c3 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -189,6 +189,11 @@ struct bcma_host_ops { #define BCMA_PKG_ID_BCM5357 11 #define BCMA_CHIP_ID_BCM53572 53572 #define BCMA_PKG_ID_BCM47188 9 +#define BCMA_CHIP_ID_BCM4707 53010 +#define BCMA_PKG_ID_BCM4707 1 +#define BCMA_PKG_ID_BCM4708 2 +#define BCMA_PKG_ID_BCM4709 0 +#define BCMA_CHIP_ID_BCM53018 53018 /* Board types (on PCI usually equals to the subsystem dev id) */ /* BCM4313 */ -- cgit v0.10.2 From 1cabf7dba57011339d7e5a44b2c9829305936372 Mon Sep 17 00:00:00 2001 From: Hauke Mehrtens Date: Mon, 15 Jul 2013 13:15:07 +0200 Subject: bcma: return correct error code when bus scan failed It is better to return the actual error code than just -1. Signed-off-by: Hauke Mehrtens Signed-off-by: John W. Linville diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index 0067422..90ee350 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -237,7 +237,7 @@ int bcma_bus_register(struct bcma_bus *bus) err = bcma_bus_scan(bus); if (err) { bcma_err(bus, "Failed to scan: %d\n", err); - return -1; + return err; } /* Early init CC core */ -- cgit v0.10.2 From fd4edf197544bae1c77d84bad354aa7ce1d08ce1 Mon Sep 17 00:00:00 2001 From: Hauke Mehrtens Date: Mon, 15 Jul 2013 13:15:08 +0200 Subject: bcma: fix handling of big addrl The return value of bcma_erom_get_addr_desc() is a unsigned value and it could wrap around in the two complement writing. This happens for one core in the BCM4708 SoC. 
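A minimal sketch of the failure mode (values illustrative, not taken from the affected board): any core whose register space starts at 0x80000000 or above comes back with bit 31 set, so the old signed check treats a valid address as an error.

	s32 tmp = (s32)0x80001000;	/* addrl of a high-mapped core */

	if (tmp <= 0)			/* true: negative when read as s32 */
		return -EILSEQ;		/* valid core wrongly rejected */

Keeping the value as u32 and testing it with IS_ERR_VALUE(), as done below, avoids the wrap-around.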
Signed-off-by: Hauke Mehrtens Signed-off-by: John W. Linville diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c index f2f1415..cd6b20f 100644 --- a/drivers/bcma/scan.c +++ b/drivers/bcma/scan.c @@ -213,7 +213,7 @@ static s32 bcma_erom_get_mst_port(struct bcma_bus *bus, u32 __iomem **eromptr) return ent; } -static s32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr, +static u32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr, u32 type, u8 port) { u32 addrl, addrh, sizel, sizeh = 0; @@ -225,7 +225,7 @@ static s32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr, ((ent & SCAN_ADDR_TYPE) != type) || (((ent & SCAN_ADDR_PORT) >> SCAN_ADDR_PORT_SHIFT) != port)) { bcma_erom_push_ent(eromptr); - return -EINVAL; + return (u32)-EINVAL; } addrl = ent & SCAN_ADDR_ADDR; @@ -273,7 +273,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr, struct bcma_device_id *match, int core_num, struct bcma_device *core) { - s32 tmp; + u32 tmp; u8 i, j; s32 cia, cib; u8 ports[2], wrappers[2]; @@ -351,11 +351,11 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr, * the main register space for the core */ tmp = bcma_erom_get_addr_desc(bus, eromptr, SCAN_ADDR_TYPE_SLAVE, 0); - if (tmp <= 0) { + if (tmp == 0 || IS_ERR_VALUE(tmp)) { /* Try again to see if it is a bridge */ tmp = bcma_erom_get_addr_desc(bus, eromptr, SCAN_ADDR_TYPE_BRIDGE, 0); - if (tmp <= 0) { + if (tmp == 0 || IS_ERR_VALUE(tmp)) { return -EILSEQ; } else { bcma_info(bus, "Bridge found\n"); @@ -369,7 +369,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr, for (j = 0; ; j++) { tmp = bcma_erom_get_addr_desc(bus, eromptr, SCAN_ADDR_TYPE_SLAVE, i); - if (tmp < 0) { + if (IS_ERR_VALUE(tmp)) { /* no more entries for port _i_ */ /* pr_debug("erom: slave port %d " * "has %d descriptors\n", i, j); */ @@ -386,7 +386,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr, for (j = 0; ; j++) { tmp = bcma_erom_get_addr_desc(bus, eromptr, SCAN_ADDR_TYPE_MWRAP, i); - if (tmp < 0) { + if (IS_ERR_VALUE(tmp)) { /* no more entries for port _i_ */ /* pr_debug("erom: master wrapper %d " * "has %d descriptors\n", i, j); */ @@ -404,7 +404,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr, for (j = 0; ; j++) { tmp = bcma_erom_get_addr_desc(bus, eromptr, SCAN_ADDR_TYPE_SWRAP, i + hack); - if (tmp < 0) { + if (IS_ERR_VALUE(tmp)) { /* no more entries for port _i_ */ /* pr_debug("erom: master wrapper %d " * has %d descriptors\n", i, j); */ -- cgit v0.10.2 From a76e9ff18b7c4a3f8fc812fb71f55cb2eec74ba8 Mon Sep 17 00:00:00 2001 From: John Greene Date: Mon, 15 Jul 2013 14:33:34 -0400 Subject: brcmsmac: Further reduce log spam from tx phy messages Relegate 2 phy messages to debug status as they create excessive log spam, noted in multiple bugzillas for brcmsmac v3.8 and up. This is a follow on to net-next 99e94940697adec4f84758adb2db71f4a82c7ba5: brcmsmac: Reduce log spam in heavy tx, make err print in debug brcmsmac bcma0:0: phyerr 0x10, rate 0x14 brcmsmac bcma0:0: brcms_c_ampdu_dotxstatus_complete: ampdu tx phy error (0x10) Signed-off-by: John Greene Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c index bd98285..fa391e4 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c @@ -928,9 +928,9 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb, } } else if (txs->phyerr) { update_rate = false; - brcms_err(wlc->hw->d11core, - "%s: ampdu tx phy error (0x%x)\n", - __func__, txs->phyerr); + brcms_dbg_ht(wlc->hw->d11core, + "%s: ampdu tx phy error (0x%x)\n", + __func__, txs->phyerr); } } diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c index 9fd6f2f..7ca10bf 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/main.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c @@ -882,8 +882,8 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs) mcl = le16_to_cpu(txh->MacTxControlLow); if (txs->phyerr) - brcms_err(wlc->hw->d11core, "phyerr 0x%x, rate 0x%x\n", - txs->phyerr, txh->MainRates); + brcms_dbg_tx(wlc->hw->d11core, "phyerr 0x%x, rate 0x%x\n", + txs->phyerr, txh->MainRates); if (txs->frameid != le16_to_cpu(txh->TxFrameID)) { brcms_err(wlc->hw->d11core, "frameid != txh->TxFrameID\n"); -- cgit v0.10.2 From 4b03f16eb56820a1427efeb5372d982a36655315 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Tue, 16 Jul 2013 12:03:17 +0530 Subject: ath9k: Move INI overrides to ar9003_hw_override_ini Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 1f694ab..57e345a 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -632,6 +632,22 @@ static void ar9003_hw_override_ini(struct ath_hw *ah) REG_SET_BIT(ah, AR_PHY_CCK_DETECT, AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV); + + if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) { + REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE, + AR_GLB_SWREG_DISCONT_EN_BT_WLAN); + + if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0, + AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL)) + ah->enabled_cals |= TX_IQ_CAL; + else + ah->enabled_cals &= ~TX_IQ_CAL; + + if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE) + ah->enabled_cals |= TX_CL_CAL; + else + ah->enabled_cals &= ~TX_CL_CAL; + } } static void ar9003_hw_prog_ini(struct ath_hw *ah, @@ -814,29 +830,12 @@ static int ar9003_hw_process_ini(struct ath_hw *ah, if (chan->channel == 2484) ar9003_hw_prog_ini(ah, &ah->iniCckfirJapan2484, 1); - if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) - REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE, - AR_GLB_SWREG_DISCONT_EN_BT_WLAN); - ah->modes_index = modesIndex; ar9003_hw_override_ini(ah); ar9003_hw_set_channel_regs(ah, chan); ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask); ath9k_hw_apply_txpower(ah, chan, false); - if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) { - if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0, - AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL)) - ah->enabled_cals |= TX_IQ_CAL; - else - ah->enabled_cals &= ~TX_IQ_CAL; - - if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE) - ah->enabled_cals |= TX_CL_CAL; - else - ah->enabled_cals &= ~TX_CL_CAL; - } - return 0; } -- cgit v0.10.2 From 81dc75b584fd0f889b56b49b363287a600eaf0d3 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Tue, 16 Jul 2013 12:03:18 +0530 Subject: ath9k: Add a HW flag for FCC Fast Channel Change across 2G/5G bands is supported only by AR9462 and AR9565. 
Add a HW capability field to indicate this. Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index e420c6b..f353bbd 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -2610,6 +2610,13 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) pCap->hw_caps |= ATH9K_HW_CAP_PAPRD; + /* + * Fast channel change across bands is available + * only for AR9462 and AR9565. + */ + if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) + pCap->hw_caps |= ATH9K_HW_CAP_FCC_BAND_SWITCH; + return 0; } diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index cd74b3a..fd009e5 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -247,6 +247,7 @@ enum ath9k_hw_caps { ATH9K_HW_CAP_DFS = BIT(16), ATH9K_HW_WOW_DEVICE_CAPABLE = BIT(17), ATH9K_HW_CAP_PAPRD = BIT(18), + ATH9K_HW_CAP_FCC_BAND_SWITCH = BIT(19), }; /* -- cgit v0.10.2 From b840cffedee3a19b8c90becc4b999e6bf4f28cb0 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Tue, 16 Jul 2013 12:03:19 +0530 Subject: ath9k: Fix FastChannelChange for AR9462/AR9565 Right now, even though these chips support cross-band FCC, the code is non-functional since we bail out early if the channelFlags differ. Fix this so that cross-band FCC works for cards that support this feature. Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index f353bbd..0208eee 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -1496,16 +1496,18 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah, struct ath9k_channel *chan) { struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_hw_capabilities *pCap = &ah->caps; + bool band_switch = false, mode_diff = false; + u8 ini_reloaded; u32 qnum; int r; - bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA); - bool band_switch, mode_diff; - u8 ini_reloaded; - band_switch = (chan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ)) != - (ah->curchan->channelFlags & (CHANNEL_2GHZ | - CHANNEL_5GHZ)); - mode_diff = (chan->chanmode != ah->curchan->chanmode); + if (pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH) { + u32 cur = ah->curchan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ); + u32 new = chan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ); + band_switch = (cur != new); + mode_diff = (chan->chanmode != ah->curchan->chanmode); + } for (qnum = 0; qnum < AR_NUM_QCU; qnum++) { if (ath9k_hw_numtxpending(ah, qnum)) { @@ -1520,7 +1522,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah, return false; } - if (edma && (band_switch || mode_diff)) { + if (band_switch || mode_diff) { ath9k_hw_mark_phy_inactive(ah); udelay(5); @@ -1548,7 +1550,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah, ath9k_hw_spur_mitigate_freq(ah, chan); - if (edma && (band_switch || mode_diff)) { + if (band_switch || mode_diff) { ah->ah_flags |= AH_FASTCC; if (band_switch || ini_reloaded) ah->eep_ops->set_board_values(ah, chan); @@ -1778,16 +1780,11 @@ static void ath9k_hw_init_desc(struct ath_hw *ah) /* * Fast channel change: * (Change synthesizer based on channel freq without resetting chip) - * - * Don't do FCC when - * - Flag is not set - * - Chip is just coming out of full sleep - * - Channel to be set is same as current channel - * - Channel flags are different, (eg.,moving from 2GHz 
to 5GHz channel) */ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan) { struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_hw_capabilities *pCap = &ah->caps; int ret; if (AR_SREV_9280(ah) && common->bus_ops->ath_bus_type == ATH_PCI) @@ -1806,9 +1803,21 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan) (CHANNEL_HALF | CHANNEL_QUARTER)) goto fail; - if ((chan->channelFlags & CHANNEL_ALL) != - (ah->curchan->channelFlags & CHANNEL_ALL)) - goto fail; + /* + * If cross-band fcc is not supoprted, bail out if + * either channelFlags or chanmode differ. + * + * chanmode will be different if the HT operating mode + * changes because of CSA. + */ + if (!(pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH)) { + if ((chan->channelFlags & CHANNEL_ALL) != + (ah->curchan->channelFlags & CHANNEL_ALL)) + goto fail; + + if (chan->chanmode != ah->curchan->chanmode) + goto fail; + } if (!ath9k_hw_check_alive(ah)) goto fail; -- cgit v0.10.2 From 5f35c0fae9162b867d6cd035490fe4831151301d Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Tue, 16 Jul 2013 12:03:20 +0530 Subject: ath9k: Use correct channel when switching bands Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 0208eee..04f3a3b 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -1526,7 +1526,8 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah, ath9k_hw_mark_phy_inactive(ah); udelay(5); - ath9k_hw_init_pll(ah, NULL); + if (band_switch) + ath9k_hw_init_pll(ah, chan); if (ath9k_hw_fast_chan_change(ah, chan, &ini_reloaded)) { ath_err(common, "Failed to do fast channel change\n"); -- cgit v0.10.2 From 07a9bd205575cee4907089c17e41da2c5cb1af2e Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Tue, 16 Jul 2013 12:03:21 +0530 Subject: ath9k: Program correct initvals for FCC * CUS217 specific initvals have to be programmed. * iniAdditional is not used for AR9462/AR9565, remove it. * Handle channel 2484 for regulatory compliance. Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 57e345a..2db4ddf 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -1517,6 +1517,18 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah, REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites); + if (AR_SREV_9462_20_OR_LATER(ah)) { + /* + * CUS217 mix LNA mode. + */ + if (ar9003_hw_get_rx_gain_idx(ah) == 2) { + REG_WRITE_ARRAY(&ah->ini_modes_rxgain_bb_core, + 1, regWrites); + REG_WRITE_ARRAY(&ah->ini_modes_rxgain_bb_postamble, + modesIndex, regWrites); + } + } + /* * For 5GHz channels requiring Fast Clock, apply * different modal values. @@ -1527,7 +1539,11 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah, if (AR_SREV_9565(ah)) REG_WRITE_ARRAY(&ah->iniModesFastClock, 1, regWrites); - REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites); + /* + * JAPAN regulatory. + */ + if (chan->channel == 2484) + ar9003_hw_prog_ini(ah, &ah->iniCckfirJapan2484, 1); ah->modes_index = modesIndex; *ini_reloaded = true; -- cgit v0.10.2 From 70e89a71c83b1937f1662429b255cf21e51aecea Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Tue, 16 Jul 2013 12:03:22 +0530 Subject: ath9k: Release the RF bus after setting board values Signed-off-by: Sujith Manoharan Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 04f3a3b..d55d97c 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -1498,7 +1498,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah, struct ath_common *common = ath9k_hw_common(ah); struct ath9k_hw_capabilities *pCap = &ah->caps; bool band_switch = false, mode_diff = false; - u8 ini_reloaded; + u8 ini_reloaded = 0; u32 qnum; int r; @@ -1544,22 +1544,21 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah, } ath9k_hw_set_clockrate(ah); ath9k_hw_apply_txpower(ah, chan, false); - ath9k_hw_rfbus_done(ah); if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan)) ath9k_hw_set_delta_slope(ah, chan); ath9k_hw_spur_mitigate_freq(ah, chan); - if (band_switch || mode_diff) { - ah->ah_flags |= AH_FASTCC; - if (band_switch || ini_reloaded) - ah->eep_ops->set_board_values(ah, chan); + if (band_switch || ini_reloaded) + ah->eep_ops->set_board_values(ah, chan); - ath9k_hw_init_bb(ah, chan); + ath9k_hw_init_bb(ah, chan); + ath9k_hw_rfbus_done(ah); - if (band_switch || ini_reloaded) - ath9k_hw_init_cal(ah, chan); + if (band_switch || ini_reloaded) { + ah->ah_flags |= AH_FASTCC; + ath9k_hw_init_cal(ah, chan); ah->ah_flags &= ~AH_FASTCC; } -- cgit v0.10.2 From f291f7deeeef84bd113ff3c309862a888e8f181b Mon Sep 17 00:00:00 2001 From: Solomon Peachy Date: Sat, 20 Jul 2013 00:02:25 -0400 Subject: cw1200: Fix incorrect endianness annotation in a header field Note that the driver doesn't directly use this field, but it should be correctly defined in any case. Signed-off-by: Solomon Peachy Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/cw1200/wsm.h b/drivers/net/wireless/cw1200/wsm.h index 7afc613..48086e8 100644 --- a/drivers/net/wireless/cw1200/wsm.h +++ b/drivers/net/wireless/cw1200/wsm.h @@ -832,7 +832,7 @@ struct wsm_tx { /* the MSDU shall be terminated. Overrides the global */ /* dot11MaxTransmitMsduLifeTime setting [optional] */ /* Device will set the default value if this is 0. */ - u32 expire_time; + __le32 expire_time; /* WSM_HT_TX_... */ __le32 ht_tx_parameters; -- cgit v0.10.2 From c2a146f61014543fc4b52acaddcaa31b9f17453d Mon Sep 17 00:00:00 2001 From: Vladimir Kondratiev Date: Sun, 21 Jul 2013 11:34:36 +0300 Subject: wil6210: fix error path in wil_tx_vring Release fragments in the order of allocation; including one for skb head Signed-off-by: Vladimir Kondratiev Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 2a9d56a..e563af1 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -730,12 +730,13 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, return 0; dma_error: /* unmap what we have mapped */ - /* Note: increment @f to operate with positive index */ - for (f++; f > 0; f--) { + nr_frags = f + 1; /* frags mapped + one for skb head */ + for (f = 0; f < nr_frags; f++) { u16 dmalen; - struct wil_ctx *ctx = &vring->ctx[i]; + struct wil_ctx *ctx; i = (swhead + f) % vring->size; + ctx = &vring->ctx[i]; _d = &(vring->va[i].tx); *d = *_d; _d->dma.status = TX_DMA_STATUS_DU; -- cgit v0.10.2 From 504937d4933fc8f5248b3af63b350ca52fc3b2f7 Mon Sep 17 00:00:00 2001 From: Kirshenbaum Erez Date: Sun, 21 Jul 2013 11:34:37 +0300 Subject: wil6210: Enable TCP/UDP checksum HW offload Add support for TCP and UDP HW checksum offloading. RX chain is allways configured for offload mode. 
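A condensed sketch of the RX-path handling described next (the actual code is in the txrx.c hunk further below):

	/* HW identified an L4 protocol and computed the checksum */
	if (d->dma.status & RX_DMA_STATUS_L4_IDENT) {
		if (!(d->dma.error & RX_DMA_ERROR_L4_ERR))
			skb->ip_summed = CHECKSUM_UNNECESSARY;	/* accepted as verified */
		else
			kfree_skb(skb);				/* checksum error: drop */
	}
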
In case of checksum error in RX path the DMA L4 error bit(5) will be set to 1 and driver will drop the packet. TX checksum offloading is configrable (ethtool -K). TX descriptors are configured for checksum offload according to the SKB protocol type (TCP/UDP, IPV4/6), Upon mismatch drop the TX packet (checksum required but not TCP/UDP IPV4/6 type). Signed-off-by: Kirshenbaum Erez Signed-off-by: Vladimir Kondratiev Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index 29dd1e5..717178f 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -127,6 +127,8 @@ void *wil_if_alloc(struct device *dev, void __iomem *csr) ndev->netdev_ops = &wil_netdev_ops; ndev->ieee80211_ptr = wdev; + ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM; + ndev->features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM; SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy)); wdev->netdev = ndev; diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index e563af1..ea1abeb 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -18,6 +18,9 @@ #include #include #include +#include +#include +#include #include "wil6210.h" #include "wmi.h" @@ -407,6 +410,21 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, return NULL; } + /* L4 IDENT is on when HW calculated checksum, check status + * and in case of error drop the packet + * higher stack layers will handle retransmission (if required) + */ + if (d->dma.status & RX_DMA_STATUS_L4_IDENT) { + /* L4 protocol identified, csum calculated */ + if ((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + wil_err(wil, "Incorrect checksum reported\n"); + kfree_skb(skb); + return NULL; + } + } + ds_bits = wil_rxdesc_ds_bits(d); if (ds_bits == 1) { /* @@ -646,6 +664,53 @@ static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len, return 0; } +static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil, + struct vring_tx_desc *d, + struct sk_buff *skb) +{ + int protocol; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + switch (skb->protocol) { + case cpu_to_be16(ETH_P_IP): + protocol = ip_hdr(skb)->protocol; + break; + case cpu_to_be16(ETH_P_IPV6): + protocol = ipv6_hdr(skb)->nexthdr; + break; + default: + return -EINVAL; + } + + switch (protocol) { + case IPPROTO_TCP: + d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS); + /* L4 header len: TCP header length */ + d->dma.d0 |= + (tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK); + break; + case IPPROTO_UDP: + /* L4 header len: UDP header length */ + d->dma.d0 |= + (sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK); + break; + default: + return -EINVAL; + } + + d->dma.ip_length = skb_network_header_len(skb); + d->dma.b11 = ETH_HLEN; /* MAC header length */ + d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS); + /* Enable TCP/UDP checksum */ + d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS); + /* Calculate pseudo-header */ + d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS); + + return 0; +} + static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, struct sk_buff *skb) { @@ -655,7 +720,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, u32 swhead = vring->swhead; int avail = wil_vring_avail_tx(vring); int nr_frags = skb_shinfo(skb)->nr_frags; - uint f; + uint f = 0; 
int vring_index = vring - wil->vring_tx; uint i = swhead; dma_addr_t pa; @@ -686,13 +751,20 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, return -EINVAL; /* 1-st segment */ wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index); + /* Process TCP/UDP checksum offloading */ + if (wil_tx_desc_offload_cksum_set(wil, d, skb)) { + wil_err(wil, "VRING #%d Failed to set cksum, drop packet\n", + vring_index); + goto dma_error; + } + d->mac.d[2] |= ((nr_frags + 1) << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS); if (nr_frags) *_d = *d; /* middle segments */ - for (f = 0; f < nr_frags; f++) { + for (; f < nr_frags; f++) { const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f]; int len = skb_frag_size(frag); diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h index 859aea6..b382827 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.h +++ b/drivers/net/wireless/ath/wil6210/txrx.h @@ -235,7 +235,16 @@ struct vring_tx_mac { #define DMA_CFG_DESC_TX_0_L4_TYPE_POS 30 #define DMA_CFG_DESC_TX_0_L4_TYPE_LEN 2 -#define DMA_CFG_DESC_TX_0_L4_TYPE_MSK 0xC0000000 +#define DMA_CFG_DESC_TX_0_L4_TYPE_MSK 0xC0000000 /* L4 type: 0-UDP, 2-TCP */ + + +#define DMA_CFG_DESC_TX_OFFLOAD_CFG_MAC_LEN_POS 0 +#define DMA_CFG_DESC_TX_OFFLOAD_CFG_MAC_LEN_LEN 7 +#define DMA_CFG_DESC_TX_OFFLOAD_CFG_MAC_LEN_MSK 0x7F /* MAC hdr len */ + +#define DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS 7 +#define DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_LEN 1 +#define DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_MSK 0x80 /* 1-IPv4, 0-IPv6 */ #define TX_DMA_STATUS_DU BIT(0) @@ -334,8 +343,17 @@ struct vring_rx_mac { #define RX_DMA_D0_CMD_DMA_IT BIT(10) +/* Error field, offload bits */ +#define RX_DMA_ERROR_L3_ERR BIT(4) +#define RX_DMA_ERROR_L4_ERR BIT(5) + + +/* Status field */ #define RX_DMA_STATUS_DU BIT(0) #define RX_DMA_STATUS_ERROR BIT(2) + +#define RX_DMA_STATUS_L3_IDENT BIT(4) +#define RX_DMA_STATUS_L4_IDENT BIT(5) #define RX_DMA_STATUS_PHY_INFO BIT(6) struct vring_rx_dma { diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index a62511a..5220f15 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -924,6 +924,12 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring) cmd.sniffer_cfg.phy_support = cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL) ? WMI_SNIFFER_CP : WMI_SNIFFER_DP); + } else { + /* Initialize offload (in non-sniffer mode). + * Linux IP stack always calculates IP checksum + * HW always calculate TCP/UDP checksum + */ + cmd.l3_l4_ctrl |= (1 << L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS); } /* typical time for secure PCP is 840ms */ rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd), -- cgit v0.10.2 From c8ac8aa8d43561fefac7646339b2613767c9fa10 Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Mon, 22 Jul 2013 12:32:20 +0300 Subject: MAINTAINERS: add ath10k I forgot to add an entry to MAINTAINERS when submitting the driver. Signed-off-by: Kalle Valo Signed-off-by: John W. 
Linville diff --git a/MAINTAINERS b/MAINTAINERS index bf61e04..44626b8 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6719,6 +6719,14 @@ T: git git://linuxtv.org/anttip/media_tree.git S: Maintained F: drivers/media/tuners/qt1010* +QUALCOMM ATHEROS ATH10K WIRELESS DRIVER +M: Kalle Valo +L: ath10k@lists.infradead.org +W: http://wireless.kernel.org/en/users/Drivers/ath10k +T: git git://github.com/kvalo/ath.git +S: Supported +F: drivers/net/wireless/ath/ath10k/ + QUALCOMM HEXAGON ARCHITECTURE M: Richard Kuo L: linux-hexagon@vger.kernel.org -- cgit v0.10.2 From 7309e5022bdd6fe13124d42a6b7cf054311030a0 Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Mon, 22 Jul 2013 12:32:27 +0300 Subject: MAINTAINERS: update ath6kl git location The git tree is in github.com nowadays. Signed-off-by: Kalle Valo Signed-off-by: John W. Linville diff --git a/MAINTAINERS b/MAINTAINERS index 44626b8..7a403ba 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1406,7 +1406,7 @@ ATHEROS ATH6KL WIRELESS DRIVER M: Kalle Valo L: linux-wireless@vger.kernel.org W: http://wireless.kernel.org/en/users/Drivers/ath6kl -T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath6kl.git +T: git git://github.com/kvalo/ath.git S: Supported F: drivers/net/wireless/ath/ath6kl/ -- cgit v0.10.2 From c6c7788fe26fdc91e729f60742815ccdb505fd81 Mon Sep 17 00:00:00 2001 From: Vladimir Kondratiev Date: Mon, 22 Jul 2013 12:53:48 +0300 Subject: wil6210: drop -Werror compiler flag In production code, don't use -Werror, as it causes random compilation failures due to compiler version and options used. With every new version of gcc, it becomes stricter and report more warnings. Signed-off-by: Vladimir Kondratiev Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile index f891d51..990dd42a 100644 --- a/drivers/net/wireless/ath/wil6210/Makefile +++ b/drivers/net/wireless/ath/wil6210/Makefile @@ -11,9 +11,6 @@ wil6210-y += txrx.o wil6210-y += debug.o wil6210-$(CONFIG_WIL6210_TRACING) += trace.o -ifeq (, $(findstring -W,$(EXTRA_CFLAGS))) - subdir-ccflags-y += -Werror -endif # for tracing framework to find trace.h CFLAGS_trace.o := -I$(src) -- cgit v0.10.2 From cb820f8e4b7f73d1a32175e6591735b25bb5398d Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Fri, 19 Jul 2013 19:40:09 +0200 Subject: net: Provide a generic socket error queue delivery method for Tx time stamps. This patch moves the private error queue delivery function from the af_packet code to the core socket method. In this way, network layers only needing the error queue for transmit time stamping can share common code. Signed-off-by: Richard Cochran Signed-off-by: David S. 
Miller diff --git a/include/net/sock.h b/include/net/sock.h index 95a5a2c..e0473f6 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -2249,6 +2249,8 @@ static inline struct sock *skb_steal_sock(struct sk_buff *skb) extern void sock_enable_timestamp(struct sock *sk, int flag); extern int sock_get_timestamp(struct sock *, struct timeval __user *); extern int sock_get_timestampns(struct sock *, struct timespec __user *); +extern int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, + int level, int type); /* * Enable debug/info messages diff --git a/net/core/sock.c b/net/core/sock.c index 548d716..85e8de1 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -93,6 +93,7 @@ #include #include +#include #include #include #include @@ -2425,6 +2426,52 @@ void sock_enable_timestamp(struct sock *sk, int flag) } } +int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, + int level, int type) +{ + struct sock_exterr_skb *serr; + struct sk_buff *skb, *skb2; + int copied, err; + + err = -EAGAIN; + skb = skb_dequeue(&sk->sk_error_queue); + if (skb == NULL) + goto out; + + copied = skb->len; + if (copied > len) { + msg->msg_flags |= MSG_TRUNC; + copied = len; + } + err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + if (err) + goto out_free_skb; + + sock_recv_timestamp(msg, sk, skb); + + serr = SKB_EXT_ERR(skb); + put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee); + + msg->msg_flags |= MSG_ERRQUEUE; + err = copied; + + /* Reset and regenerate socket error */ + spin_lock_bh(&sk->sk_error_queue.lock); + sk->sk_err = 0; + if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) { + sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno; + spin_unlock_bh(&sk->sk_error_queue.lock); + sk->sk_error_report(sk); + } else + spin_unlock_bh(&sk->sk_error_queue.lock); + +out_free_skb: + kfree_skb(skb); +out: + return err; +} +EXPORT_SYMBOL(sock_recv_errqueue); + /* * Get a socket option on an socket. * diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 4b66c75..4cb28a7 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2638,51 +2638,6 @@ out: return err; } -static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len) -{ - struct sock_exterr_skb *serr; - struct sk_buff *skb, *skb2; - int copied, err; - - err = -EAGAIN; - skb = skb_dequeue(&sk->sk_error_queue); - if (skb == NULL) - goto out; - - copied = skb->len; - if (copied > len) { - msg->msg_flags |= MSG_TRUNC; - copied = len; - } - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); - if (err) - goto out_free_skb; - - sock_recv_timestamp(msg, sk, skb); - - serr = SKB_EXT_ERR(skb); - put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP, - sizeof(serr->ee), &serr->ee); - - msg->msg_flags |= MSG_ERRQUEUE; - err = copied; - - /* Reset and regenerate socket error */ - spin_lock_bh(&sk->sk_error_queue.lock); - sk->sk_err = 0; - if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) { - sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno; - spin_unlock_bh(&sk->sk_error_queue.lock); - sk->sk_error_report(sk); - } else - spin_unlock_bh(&sk->sk_error_queue.lock); - -out_free_skb: - kfree_skb(skb); -out: - return err; -} - /* * Pull a packet from our receive queue and hand it to the user. * If necessary we block. 
@@ -2708,7 +2663,8 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, #endif if (flags & MSG_ERRQUEUE) { - err = packet_recv_error(sk, msg, len); + err = sock_recv_errqueue(sk, msg, len, + SOL_PACKET, PACKET_TX_TIMESTAMP); goto out; } -- cgit v0.10.2 From eda297729171fe16bf34fe5b0419dfb69060f623 Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Fri, 19 Jul 2013 19:40:10 +0200 Subject: tun: Support software transmit time stamping. This patch adds transmit time stamping to the tun/tap driver. Similar support already exists for UDP, can, and raw packets. Signed-off-by: Richard Cochran Signed-off-by: David S. Miller diff --git a/drivers/net/tun.c b/drivers/net/tun.c index db690a3..a72d141 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -739,6 +739,11 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) >= dev->tx_queue_len / tun->numqueues) goto drop; + if (skb->sk) { + sock_tx_timestamp(skb->sk, &skb_shinfo(skb)->tx_flags); + sw_tx_timestamp(skb); + } + /* Orphan the skb - required as we might hang on to it * for indefinite time. */ if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) @@ -1476,7 +1481,6 @@ static int tun_sendmsg(struct kiocb *iocb, struct socket *sock, return ret; } - static int tun_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t total_len, int flags) @@ -1488,10 +1492,15 @@ static int tun_recvmsg(struct kiocb *iocb, struct socket *sock, if (!tun) return -EBADFD; - if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) { + if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) { ret = -EINVAL; goto out; } + if (flags & MSG_ERRQUEUE) { + ret = sock_recv_errqueue(sock->sk, m, total_len, + SOL_PACKET, TUN_TX_TIMESTAMP); + goto out; + } ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len, flags & MSG_DONTWAIT); if (ret > total_len) { @@ -2274,6 +2283,7 @@ static const struct ethtool_ops tun_ethtool_ops = { .get_msglevel = tun_get_msglevel, .set_msglevel = tun_set_msglevel, .get_link = ethtool_op_get_link, + .get_ts_info = ethtool_op_get_ts_info, }; diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h index 82334f8..1870ee2 100644 --- a/include/uapi/linux/if_tun.h +++ b/include/uapi/linux/if_tun.h @@ -71,6 +71,9 @@ /* read-only flag */ #define IFF_PERSIST 0x0800 +/* Socket options */ +#define TUN_TX_TIMESTAMP 1 + /* Features for GSO (TUNSETOFFLOAD). */ #define TUN_F_CSUM 0x01 /* You can hand me unchecksummed packets. */ #define TUN_F_TSO4 0x02 /* I can handle TSO for IPv4 packets */ -- cgit v0.10.2 From 9514fe7a2352e35ff571467afba24ab904a1befe Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Sun, 21 Jul 2013 13:24:59 -0300 Subject: fec: Do not enable/disable optional clocks unconditionally clk_enet_out and clk_ptp are optional clocks, so we should not enable/disable them unconditionally. Signed-off-by: Fabio Estevam Signed-off-by: David S. 
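For context before the diff: on SoC variants that lack these clocks the driver simply leaves the clk pointers NULL, roughly along the lines of the condensed (not verbatim) probe fragment below, which is why every enable/disable now needs a guard. Checking the return value of clk_prepare_enable() itself is addressed by a later patch in this series.

        /* optional clock: absence is not a probe error, just leave it NULL */
        fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
        if (IS_ERR(fep->clk_ptp))
                fep->clk_ptp = NULL;

        if (fep->clk_ptp)
                clk_prepare_enable(fep->clk_ptp);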
Miller diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index d3ad5ea..beaa576 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2105,8 +2105,10 @@ fec_probe(struct platform_device *pdev) clk_prepare_enable(fep->clk_ahb); clk_prepare_enable(fep->clk_ipg); - clk_prepare_enable(fep->clk_enet_out); - clk_prepare_enable(fep->clk_ptp); + if (fep->clk_enet_out) + clk_prepare_enable(fep->clk_enet_out); + if (fep->clk_ptp) + clk_prepare_enable(fep->clk_ptp); fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); if (!IS_ERR(fep->reg_phy)) { @@ -2179,8 +2181,10 @@ failed_init: failed_regulator: clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_ipg); - clk_disable_unprepare(fep->clk_enet_out); - clk_disable_unprepare(fep->clk_ptp); + if (fep->clk_enet_out) + clk_disable_unprepare(fep->clk_enet_out); + if (fep->clk_ptp) + clk_disable_unprepare(fep->clk_ptp); failed_clk: failed_ioremap: free_netdev(ndev); @@ -2206,10 +2210,12 @@ fec_drv_remove(struct platform_device *pdev) } if (fep->reg_phy) regulator_disable(fep->reg_phy); - clk_disable_unprepare(fep->clk_ptp); + if (fep->clk_ptp) + clk_disable_unprepare(fep->clk_ptp); if (fep->ptp_clock) ptp_clock_unregister(fep->ptp_clock); - clk_disable_unprepare(fep->clk_enet_out); + if (fep->clk_enet_out) + clk_disable_unprepare(fep->clk_enet_out); clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_ipg); free_netdev(ndev); @@ -2228,7 +2234,8 @@ fec_suspend(struct device *dev) fec_stop(ndev); netif_device_detach(ndev); } - clk_disable_unprepare(fep->clk_enet_out); + if (fep->clk_enet_out) + clk_disable_unprepare(fep->clk_enet_out); clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_ipg); @@ -2251,7 +2258,8 @@ fec_resume(struct device *dev) return ret; } - clk_prepare_enable(fep->clk_enet_out); + if (fep->clk_enet_out) + clk_prepare_enable(fep->clk_enet_out); clk_prepare_enable(fep->clk_ahb); clk_prepare_enable(fep->clk_ipg); if (netif_running(ndev)) { -- cgit v0.10.2 From d265cf48ea0a5cf6d201abb0bfab6674019126d4 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Sun, 21 Jul 2013 13:25:00 -0300 Subject: fec: Fix the order for enabling/disabling the clocks On fec_probe the clocks are enabled in the following order: clk_ahb -> clk_ipg -> clk_enet_out -> clk_ptp , so in the error and remove paths we should disabled them in the opposite order. Also fix the order in the suspend/resume functions. Signed-off-by: Fabio Estevam Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index beaa576..5c34fd8 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2179,12 +2179,12 @@ failed_init: if (fep->reg_phy) regulator_disable(fep->reg_phy); failed_regulator: - clk_disable_unprepare(fep->clk_ahb); - clk_disable_unprepare(fep->clk_ipg); - if (fep->clk_enet_out) - clk_disable_unprepare(fep->clk_enet_out); if (fep->clk_ptp) clk_disable_unprepare(fep->clk_ptp); + if (fep->clk_enet_out) + clk_disable_unprepare(fep->clk_enet_out); + clk_disable_unprepare(fep->clk_ipg); + clk_disable_unprepare(fep->clk_ahb); failed_clk: failed_ioremap: free_netdev(ndev); @@ -2216,8 +2216,8 @@ fec_drv_remove(struct platform_device *pdev) ptp_clock_unregister(fep->ptp_clock); if (fep->clk_enet_out) clk_disable_unprepare(fep->clk_enet_out); - clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_ipg); + clk_disable_unprepare(fep->clk_ahb); free_netdev(ndev); return 0; @@ -2236,8 +2236,8 @@ fec_suspend(struct device *dev) } if (fep->clk_enet_out) clk_disable_unprepare(fep->clk_enet_out); - clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_ipg); + clk_disable_unprepare(fep->clk_ahb); if (fep->reg_phy) regulator_disable(fep->reg_phy); @@ -2258,10 +2258,10 @@ fec_resume(struct device *dev) return ret; } - if (fep->clk_enet_out) - clk_prepare_enable(fep->clk_enet_out); clk_prepare_enable(fep->clk_ahb); clk_prepare_enable(fep->clk_ipg); + if (fep->clk_enet_out) + clk_prepare_enable(fep->clk_enet_out); if (netif_running(ndev)) { fec_restart(ndev, fep->full_duplex); netif_device_attach(ndev); -- cgit v0.10.2 From 79820e725f3ac5db694c49c75088d79fde78b671 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Sun, 21 Jul 2013 13:25:01 -0300 Subject: fec: Enable/disable clk_ptp in suspend/resume clk_ptp should also be enabled in fec_resume() and disabled in fec_suspend(). Signed-off-by: Fabio Estevam Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 5c34fd8..0a4ace7 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2234,6 +2234,8 @@ fec_suspend(struct device *dev) fec_stop(ndev); netif_device_detach(ndev); } + if (fep->clk_ptp) + clk_disable_unprepare(fep->clk_ptp); if (fep->clk_enet_out) clk_disable_unprepare(fep->clk_enet_out); clk_disable_unprepare(fep->clk_ipg); @@ -2262,6 +2264,8 @@ fec_resume(struct device *dev) clk_prepare_enable(fep->clk_ipg); if (fep->clk_enet_out) clk_prepare_enable(fep->clk_enet_out); + if (fep->clk_ptp) + clk_prepare_enable(fep->clk_ptp); if (netif_running(ndev)) { fec_restart(ndev, fep->full_duplex); netif_device_attach(ndev); -- cgit v0.10.2 From 13a097bd35a15bbec00479adfb2ef5360e8eabef Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Sun, 21 Jul 2013 13:25:02 -0300 Subject: fec: Check the return value from clk_prepare_enable() clk_prepare_enable() may fail, so let's check its return value and propagate it in the case of error. Signed-off-by: Fabio Estevam Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 0a4ace7..fc2cccd 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2103,12 +2103,25 @@ fec_probe(struct platform_device *pdev) fep->bufdesc_ex = 0; } - clk_prepare_enable(fep->clk_ahb); - clk_prepare_enable(fep->clk_ipg); - if (fep->clk_enet_out) - clk_prepare_enable(fep->clk_enet_out); - if (fep->clk_ptp) - clk_prepare_enable(fep->clk_ptp); + ret = clk_prepare_enable(fep->clk_ahb); + if (ret) + goto failed_clk; + + ret = clk_prepare_enable(fep->clk_ipg); + if (ret) + goto failed_clk_ipg; + + if (fep->clk_enet_out) { + ret = clk_prepare_enable(fep->clk_enet_out); + if (ret) + goto failed_clk_enet_out; + } + + if (fep->clk_ptp) { + ret = clk_prepare_enable(fep->clk_ptp); + if (ret) + goto failed_clk_ptp; + } fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); if (!IS_ERR(fep->reg_phy)) { @@ -2181,9 +2194,12 @@ failed_init: failed_regulator: if (fep->clk_ptp) clk_disable_unprepare(fep->clk_ptp); +failed_clk_ptp: if (fep->clk_enet_out) clk_disable_unprepare(fep->clk_enet_out); +failed_clk_enet_out: clk_disable_unprepare(fep->clk_ipg); +failed_clk_ipg: clk_disable_unprepare(fep->clk_ahb); failed_clk: failed_ioremap: @@ -2260,18 +2276,44 @@ fec_resume(struct device *dev) return ret; } - clk_prepare_enable(fep->clk_ahb); - clk_prepare_enable(fep->clk_ipg); - if (fep->clk_enet_out) - clk_prepare_enable(fep->clk_enet_out); - if (fep->clk_ptp) - clk_prepare_enable(fep->clk_ptp); + ret = clk_prepare_enable(fep->clk_ahb); + if (ret) + goto failed_clk_ahb; + + ret = clk_prepare_enable(fep->clk_ipg); + if (ret) + goto failed_clk_ipg; + + if (fep->clk_enet_out) { + ret = clk_prepare_enable(fep->clk_enet_out); + if (ret) + goto failed_clk_enet_out; + } + + if (fep->clk_ptp) { + ret = clk_prepare_enable(fep->clk_ptp); + if (ret) + goto failed_clk_ptp; + } + if (netif_running(ndev)) { fec_restart(ndev, fep->full_duplex); netif_device_attach(ndev); } return 0; + +failed_clk_ptp: + if (fep->clk_enet_out) + clk_disable_unprepare(fep->clk_enet_out); +failed_clk_enet_out: + clk_disable_unprepare(fep->clk_ipg); +failed_clk_ipg: + clk_disable_unprepare(fep->clk_ahb); +failed_clk_ahb: + if (fep->reg_phy) + regulator_disable(fep->reg_phy); + return ret; } #endif /* CONFIG_PM_SLEEP */ -- cgit v0.10.2 From 399db75b2cc62aa67d465158b11fafca9ca9b408 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Sun, 21 Jul 2013 13:25:03 -0300 Subject: fec: Remove unneeded check in platform_get_resource() As devm_ioremap_resource() is used, there is no need to explicitely check the return value from platform_get_resource(), as this is something that devm_ioremap_resource() takes care by itself. Also, place platform_get_resource() prior to devm_ioremap_resource() for better code readability. Signed-off-by: Fabio Estevam Signed-off-by: David S. 
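For reference, the reason the explicit check can be dropped: devm_ioremap_resource() already validates its resource argument, roughly as follows (paraphrased from lib/devres.c, not quoted exactly):

        if (!res || resource_type(res) != IORESOURCE_MEM) {
                dev_err(dev, "invalid resource\n");
                return ERR_PTR(-EINVAL);
        }

so handing it a NULL resource simply yields an ERR_PTR that the existing IS_ERR()/PTR_ERR() handling in fec_probe() already reports.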
Miller diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index fc2cccd..e69760e 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2033,10 +2033,6 @@ fec_probe(struct platform_device *pdev) if (of_id) pdev->id_entry = of_id->data; - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!r) - return -ENXIO; - /* Init network device */ ndev = alloc_etherdev(sizeof(struct fec_enet_private)); if (!ndev) @@ -2054,6 +2050,7 @@ fec_probe(struct platform_device *pdev) fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; #endif + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); fep->hwp = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(fep->hwp)) { ret = PTR_ERR(fep->hwp); -- cgit v0.10.2 From 0d9b2ab1c3760a7ca28293c0ae48c2302d71ce96 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Sun, 21 Jul 2013 13:25:04 -0300 Subject: fec: Use devm_request_irq() Using devm_request_irq() can make the code smaller and cleaner. Signed-off-by: Fabio Estevam Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index e69760e..92d34d9 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2149,14 +2149,10 @@ fec_probe(struct platform_device *pdev) ret = irq; goto failed_irq; } - ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev); - if (ret) { - while (--i >= 0) { - irq = platform_get_irq(pdev, i); - free_irq(irq, ndev); - } + ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt, + IRQF_DISABLED, pdev->name, ndev); + if (ret) goto failed_irq; - } } ret = fec_enet_mii_init(pdev); @@ -2180,11 +2176,6 @@ failed_register: fec_enet_mii_remove(fep); failed_mii_init: failed_irq: - for (i = 0; i < FEC_IRQ_NUM; i++) { - irq = platform_get_irq(pdev, i); - if (irq > 0) - free_irq(irq, ndev); - } failed_init: if (fep->reg_phy) regulator_disable(fep->reg_phy); @@ -2210,17 +2201,11 @@ fec_drv_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); - int i; cancel_delayed_work_sync(&(fep->delay_work.delay_work)); unregister_netdev(ndev); fec_enet_mii_remove(fep); del_timer_sync(&fep->time_keep); - for (i = 0; i < FEC_IRQ_NUM; i++) { - int irq = platform_get_irq(pdev, i); - if (irq > 0) - free_irq(irq, ndev); - } if (fep->reg_phy) regulator_disable(fep->reg_phy); if (fep->clk_ptp) -- cgit v0.10.2 From 375fe02c91792917aa26d68a87ab110d1937f44e Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Mon, 22 Jul 2013 16:20:45 -0700 Subject: tcp: consolidate SYNACK RTT sampling The first patch consolidates SYNACK and other RTT measurement to use a central function tcp_ack_update_rtt(). A (small) bonus is now SYNACK RTT measurement happens after PAWS check, potentially reducing the impact of RTO seeding on bad TCP timestamps values. Signed-off-by: Yuchung Cheng Signed-off-by: David S. 
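The next few patches all change where RTT samples come from, so a compact refresher on what happens to a sample once it is accepted may be useful. The snippet below is an illustrative userspace model of the RFC 6298 arithmetic behind tcp_rtt_estimator() and tcp_set_rto(); it uses floating point and a hard-coded 200 ms floor (mirroring TCP_RTO_MIN) rather than the kernel's fixed-point state, so treat it as a sketch only.

#include <math.h>
#include <stdio.h>

struct rto_est {
        double srtt;            /* smoothed RTT, seconds */
        double rttvar;          /* RTT variation */
        double rto;             /* retransmission timeout */
        int have_sample;
};

/* Feed one RTT sample 'r' (seconds), per RFC 6298 with K = 4. */
static void rto_sample(struct rto_est *e, double r)
{
        const double alpha = 1.0 / 8, beta = 1.0 / 4, min_rto = 0.2;

        if (!e->have_sample) {
                e->srtt = r;
                e->rttvar = r / 2;
                e->have_sample = 1;
        } else {
                /* rttvar is updated against the old srtt, then srtt moves */
                e->rttvar = (1 - beta) * e->rttvar + beta * fabs(e->srtt - r);
                e->srtt = (1 - alpha) * e->srtt + alpha * r;
        }
        e->rto = e->srtt + 4 * e->rttvar;
        if (e->rto < min_rto)   /* Linux clamps at TCP_RTO_MIN (200 ms) */
                e->rto = min_rto;
}

int main(void)
{
        struct rto_est e = { 0 };
        double samples[] = { 0.100, 0.120, 0.300, 0.110 };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                rto_sample(&e, samples[i]);
                printf("sample %.3f -> srtt %.3f rto %.3f\n",
                       samples[i], e.srtt, e.rto);
        }
        return 0;
}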
Miller diff --git a/include/net/tcp.h b/include/net/tcp.h index d198005..f9777db 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1094,15 +1094,6 @@ static inline void tcp_openreq_init(struct request_sock *req, ireq->loc_port = tcp_hdr(skb)->dest; } -/* Compute time elapsed between SYNACK and the ACK completing 3WHS */ -static inline void tcp_synack_rtt_meas(struct sock *sk, - struct request_sock *req) -{ - if (tcp_rsk(req)->snt_synack) - tcp_valid_rtt_meas(sk, - tcp_time_stamp - tcp_rsk(req)->snt_synack); -} - extern void tcp_enter_memory_pressure(struct sock *sk); static inline int keepalive_intvl_when(const struct tcp_sock *tp) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 28af45a..b531710 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2853,6 +2853,17 @@ static inline void tcp_ack_update_rtt(struct sock *sk, const int flag, tcp_ack_no_tstamp(sk, seq_rtt, flag); } +/* Compute time elapsed between (last) SYNACK and the ACK completing 3WHS. */ +static void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req) +{ + struct tcp_sock *tp = tcp_sk(sk); + s32 seq_rtt = -1; + + if (tp->lsndtime && !tp->total_retrans) + seq_rtt = tcp_time_stamp - tp->lsndtime; + tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt); +} + static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) { const struct inet_connection_sock *icsk = inet_csk(sk); @@ -5624,9 +5635,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, * so release it. */ if (req) { - tcp_synack_rtt_meas(sk, req); tp->total_retrans = req->num_retrans; - reqsk_fastopen_remove(sk, req, false); } else { /* Make sure socket is routed, for correct metrics. */ @@ -5651,6 +5660,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, tp->snd_una = TCP_SKB_CB(skb)->ack_seq; tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale; tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); + tcp_synack_rtt_meas(sk, req); if (tp->rx_opt.tstamp_ok) tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index b299da5..2e3f129 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1671,8 +1671,6 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, newtp->advmss = tcp_sk(sk)->rx_opt.user_mss; tcp_initialize_rcv_mss(newsk); - tcp_synack_rtt_meas(newsk, req); - newtp->total_retrans = req->num_retrans; #ifdef CONFIG_TCP_MD5SIG /* Copy over the MD5 key from the original socket */ diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index ab1c086..58a3e69 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -411,6 +411,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH; tcp_enable_early_retrans(newtp); newtp->tlp_high_seq = 0; + newtp->lsndtime = treq->snt_synack; + newtp->total_retrans = req->num_retrans; /* So many TCP implementations out there (incorrectly) count the * initial SYN frame in their delayed-ACK and congestion control @@ -666,12 +668,6 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, if (!(flg & TCP_FLAG_ACK)) return NULL; - /* Got ACK for our SYNACK, so update baseline for SYNACK RTT sample. */ - if (tmp_opt.saw_tstamp && tmp_opt.rcv_tsecr) - tcp_rsk(req)->snt_synack = tmp_opt.rcv_tsecr; - else if (req->num_retrans) /* don't take RTT sample if retrans && ~TS */ - tcp_rsk(req)->snt_synack = 0; - /* For Fast Open no more processing is needed (sk is the * child socket). 
*/ diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 6e1649d..80fe69e 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1237,8 +1237,6 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, newtp->advmss = tcp_sk(sk)->rx_opt.user_mss; tcp_initialize_rcv_mss(newsk); - tcp_synack_rtt_meas(newsk, req); - newtp->total_retrans = req->num_retrans; newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6; newinet->inet_rcv_saddr = LOOPBACK4_IPV6; -- cgit v0.10.2 From 5b08e47caf1f2034a3a5b566bbccc8b0be3961ca Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Mon, 22 Jul 2013 16:20:46 -0700 Subject: tcp: prefer packet timing to TS-ECR for RTT Prefer packet timings to TS-ecr for RTT measurements when both sources are available. That's because broken middle-boxes and remote peer can return packets with corrupted TS ECR fields. Similarly most congestion controls that require RTT signals favor timing-based sources as well. Also check for bad TS ECR values to avoid RTT blow-ups. It has happened on production Web servers. Signed-off-by: Yuchung Cheng Acked-by: Neal Cardwell Signed-off-by: David S. Miller diff --git a/include/net/tcp.h b/include/net/tcp.h index f9777db..c586847 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -591,7 +591,6 @@ extern void tcp_initialize_rcv_mss(struct sock *sk); extern int tcp_mtu_to_mss(struct sock *sk, int pmtu); extern int tcp_mss_to_mtu(struct sock *sk, int mss); extern void tcp_mtup_init(struct sock *sk); -extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt); extern void tcp_init_buffer_space(struct sock *sk); static inline void tcp_bound_rto(const struct sock *sk) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index b531710..c7398f0 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2792,65 +2792,36 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked, tcp_xmit_retransmit_queue(sk); } -void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt) +static inline void tcp_ack_update_rtt(struct sock *sk, const int flag, + s32 seq_rtt) { - tcp_rtt_estimator(sk, seq_rtt); - tcp_set_rto(sk); - inet_csk(sk)->icsk_backoff = 0; -} -EXPORT_SYMBOL(tcp_valid_rtt_meas); + const struct tcp_sock *tp = tcp_sk(sk); + + /* Prefer RTT measured from ACK's timing to TS-ECR. This is because + * broken middle-boxes or peers may corrupt TS-ECR fields. But + * Karn's algorithm forbids taking RTT if some retransmitted data + * is acked (RFC6298). + */ + if (flag & FLAG_RETRANS_DATA_ACKED) + seq_rtt = -1; -/* Read draft-ietf-tcplw-high-performance before mucking - * with this code. (Supersedes RFC1323) - */ -static void tcp_ack_saw_tstamp(struct sock *sk, int flag) -{ /* RTTM Rule: A TSecr value received in a segment is used to * update the averaged RTT measurement only if the segment * acknowledges some new data, i.e., only if it advances the * left edge of the send window. - * * See draft-ietf-tcplw-high-performance-00, section 3.3. - * 1998/04/10 Andrey V. Savochkin - * - * Changed: reset backoff as soon as we see the first valid sample. - * If we do not, we get strongly overestimated rto. With timestamps - * samples are accepted even from very old segments: f.e., when rtt=1 - * increases to 8, we retransmit 5 times and after 8 seconds delayed - * answer arrives rto becomes 120 seconds! If at least one of segments - * in window is lost... Voila. 
--ANK (010210) */ - struct tcp_sock *tp = tcp_sk(sk); + if (seq_rtt < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) + seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr; - tcp_valid_rtt_meas(sk, tcp_time_stamp - tp->rx_opt.rcv_tsecr); -} - -static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, int flag) -{ - /* We don't have a timestamp. Can only use - * packets that are not retransmitted to determine - * rtt estimates. Also, we must not reset the - * backoff for rto until we get a non-retransmitted - * packet. This allows us to deal with a situation - * where the network delay has increased suddenly. - * I.e. Karn's algorithm. (SIGCOMM '87, p5.) - */ - - if (flag & FLAG_RETRANS_DATA_ACKED) + if (seq_rtt < 0) return; - tcp_valid_rtt_meas(sk, seq_rtt); -} + tcp_rtt_estimator(sk, seq_rtt); + tcp_set_rto(sk); -static inline void tcp_ack_update_rtt(struct sock *sk, const int flag, - const s32 seq_rtt) -{ - const struct tcp_sock *tp = tcp_sk(sk); - /* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */ - if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) - tcp_ack_saw_tstamp(sk, flag); - else if (seq_rtt >= 0) - tcp_ack_no_tstamp(sk, seq_rtt, flag); + /* RFC6298: only reset backoff on valid RTT measurement. */ + inet_csk(sk)->icsk_backoff = 0; } /* Compute time elapsed between (last) SYNACK and the ACK completing 3WHS. */ @@ -2989,8 +2960,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, if (sacked & TCPCB_SACKED_RETRANS) tp->retrans_out -= acked_pcount; flag |= FLAG_RETRANS_DATA_ACKED; - ca_seq_rtt = -1; - seq_rtt = -1; } else { ca_seq_rtt = now - scb->when; last_ackt = skb->tstamp; -- cgit v0.10.2 From 59c9af4234b0c21a1ed05cf65bf014d0c1a67bfd Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Mon, 22 Jul 2013 16:20:47 -0700 Subject: tcp: measure RTT from new SACK Take RTT sample if an ACK selectively acks some sequences that have never been retransmitted. The Karn's algorithm does not apply even if that ACK (s)acks other retransmitted sequences, because it must been generated by an original but perhaps out-of-order packet. There is no ambiguity. In case when multiple blocks are newly sacked because of ACK losses the earliest block is used to measure RTT, similar to cummulative ACKs. Such RTT samples allow the sender to estimate the RTO during loss recovery and packet reordering events. It is still useful even with TCP timestamps. That's because during these events the SND.UNA may not advance preventing RTT samples from TS ECR (thus the FLAG_ACKED check before calling tcp_ack_update_rtt()). Therefore this new RTT source is complementary to existing ACK and TS RTT mechanisms. This patch does not update the RTO. It is done in the next patch. Signed-off-by: Yuchung Cheng Signed-off-by: David S. Miller diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index c7398f0..b85bc7c 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1048,6 +1048,7 @@ struct tcp_sacktag_state { int reord; int fack_count; int flag; + s32 rtt; /* RTT measured by SACKing never-retransmitted data */ }; /* Check if skb is fully within the SACK block. 
In presence of GSO skbs, @@ -1108,7 +1109,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, static u8 tcp_sacktag_one(struct sock *sk, struct tcp_sacktag_state *state, u8 sacked, u32 start_seq, u32 end_seq, - bool dup_sack, int pcount) + int dup_sack, int pcount, u32 xmit_time) { struct tcp_sock *tp = tcp_sk(sk); int fack_count = state->fack_count; @@ -1148,6 +1149,9 @@ static u8 tcp_sacktag_one(struct sock *sk, state->reord); if (!after(end_seq, tp->high_seq)) state->flag |= FLAG_ORIG_SACK_ACKED; + /* Pick the earliest sequence sacked for RTT */ + if (state->rtt < 0) + state->rtt = tcp_time_stamp - xmit_time; } if (sacked & TCPCB_LOST) { @@ -1205,7 +1209,8 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, * tcp_highest_sack_seq() when skb is highest_sack. */ tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked, - start_seq, end_seq, dup_sack, pcount); + start_seq, end_seq, dup_sack, pcount, + TCP_SKB_CB(skb)->when); if (skb == tp->lost_skb_hint) tp->lost_cnt_hint += pcount; @@ -1479,7 +1484,8 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq, dup_sack, - tcp_skb_pcount(skb)); + tcp_skb_pcount(skb), + TCP_SKB_CB(skb)->when); if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) @@ -1536,7 +1542,7 @@ static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_bl static int tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, - u32 prior_snd_una) + u32 prior_snd_una, s32 *sack_rtt) { struct tcp_sock *tp = tcp_sk(sk); const unsigned char *ptr = (skb_transport_header(ack_skb) + @@ -1554,6 +1560,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, state.flag = 0; state.reord = tp->packets_out; + state.rtt = -1; if (!tp->sacked_out) { if (WARN_ON(tp->fackets_out)) @@ -1737,6 +1744,7 @@ out: WARN_ON((int)tp->retrans_out < 0); WARN_ON((int)tcp_packets_in_flight(tp) < 0); #endif + *sack_rtt = state.rtt; return state.flag; } @@ -3254,6 +3262,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) int prior_packets = tp->packets_out; const int prior_unsacked = tp->packets_out - tp->sacked_out; int acked = 0; /* Number of packets newly acked */ + s32 sack_rtt = -1; /* If the ack is older than previous acks * then we can probably ignore it. @@ -3310,7 +3319,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) flag |= tcp_ack_update_window(sk, skb, ack, ack_seq); if (TCP_SKB_CB(skb)->sacked) - flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); + flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, + &sack_rtt); if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb))) flag |= FLAG_ECE; @@ -3382,7 +3392,8 @@ old_ack: * If data was DSACKed, see if we can undo a cwnd reduction. */ if (TCP_SKB_CB(skb)->sacked) { - flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); + flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, + &sack_rtt); tcp_fastretrans_alert(sk, acked, prior_unsacked, is_dupack, flag); } -- cgit v0.10.2 From ed08495c31bb991de636d2488abaa50b39f2ff4a Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Mon, 22 Jul 2013 16:20:48 -0700 Subject: tcp: use RTT from SACK for RTO If RTT is not available because Karn's check has failed or no new packet is acked, use the RTT measured from SACK to estimate the RTO. The sender can continue to estimate the RTO during loss recovery or reordering event upon receiving non-partial ACKs. This also changes when the RTO is re-armed. 
Previously it is only re-armed when some data is cummulatively acknowledged (i.e., SND.UNA advances), but now it is re-armed whenever RTT estimator is updated. This feature is particularly useful to reduce spurious timeout for buffer bloat including cellular carriers [1], and RTT estimation on reordering events. [1] "An In-depth Study of LTE: Effect of Network Protocol and Application Behavior on Performance", In Proc. of SIGCOMM 2013 Signed-off-by: Yuchung Cheng Acked-by: Neal Cardwell Signed-off-by: David S. Miller diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index b85bc7c..b61274b 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2800,8 +2800,8 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked, tcp_xmit_retransmit_queue(sk); } -static inline void tcp_ack_update_rtt(struct sock *sk, const int flag, - s32 seq_rtt) +static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag, + s32 seq_rtt, s32 sack_rtt) { const struct tcp_sock *tp = tcp_sk(sk); @@ -2813,6 +2813,9 @@ static inline void tcp_ack_update_rtt(struct sock *sk, const int flag, if (flag & FLAG_RETRANS_DATA_ACKED) seq_rtt = -1; + if (seq_rtt < 0) + seq_rtt = sack_rtt; + /* RTTM Rule: A TSecr value received in a segment is used to * update the averaged RTT measurement only if the segment * acknowledges some new data, i.e., only if it advances the @@ -2823,13 +2826,14 @@ static inline void tcp_ack_update_rtt(struct sock *sk, const int flag, seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr; if (seq_rtt < 0) - return; + return false; tcp_rtt_estimator(sk, seq_rtt); tcp_set_rto(sk); /* RFC6298: only reset backoff on valid RTT measurement. */ inet_csk(sk)->icsk_backoff = 0; + return true; } /* Compute time elapsed between (last) SYNACK and the ACK completing 3WHS. */ @@ -2840,7 +2844,7 @@ static void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req) if (tp->lsndtime && !tp->total_retrans) seq_rtt = tcp_time_stamp - tp->lsndtime; - tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt); + tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt, -1); } static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) @@ -2929,7 +2933,7 @@ static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb) * arrived at the other end. */ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, - u32 prior_snd_una) + u32 prior_snd_una, s32 sack_rtt) { struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); @@ -3019,6 +3023,10 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) flag |= FLAG_SACK_RENEGING; + if (tcp_ack_update_rtt(sk, flag, seq_rtt, sack_rtt) || + (flag & FLAG_ACKED)) + tcp_rearm_rto(sk); + if (flag & FLAG_ACKED) { const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; @@ -3028,9 +3036,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, tcp_mtup_probe_success(sk); } - tcp_ack_update_rtt(sk, flag, seq_rtt); - tcp_rearm_rto(sk); - if (tcp_is_reno(tp)) { tcp_remove_reno_sacks(sk, pkts_acked); } else { @@ -3339,7 +3344,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) /* See if we can take anything off of the retransmit queue. 
*/ acked = tp->packets_out; - flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una); + flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, sack_rtt); acked -= tp->packets_out; if (tcp_ack_is_dubious(sk, flag)) { -- cgit v0.10.2 From 906dc186e0202bbffe8b3f69caddeda4b0a17420 Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Fri, 19 Jul 2013 17:20:07 +0200 Subject: vxlan fdb replace an existing entry Add support to replace an existing entry found in the vxlan fdb database. The entry in question is identified by its unicast mac address and the destination information is changed. If the entry is not found, it is added in the forwarding database. This is similar to changing an entry in the neighbour table. Multicast mac addresses can not be changed with the replace option. This is useful for virtual machine migration when the destination of a target virtual machine changes. The replace feature can be used instead of delete followed by add. Resubmitted because net-next was closed last week. Signed-off-by: Thomas Richter Signed-off-by: David S. Miller diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index a5ba8dd..f401c1a 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -407,6 +407,26 @@ static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f, return NULL; } +/* Replace destination of unicast mac */ +static int vxlan_fdb_replace(struct vxlan_fdb *f, + __be32 ip, __be16 port, __u32 vni, __u32 ifindex) +{ + struct vxlan_rdst *rd; + + rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex); + if (rd) + return 0; + + rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list); + if (!rd) + return 0; + rd->remote_ip = ip; + rd->remote_port = port; + rd->remote_vni = vni; + rd->remote_ifindex = ifindex; + return 1; +} + /* Add/update destinations for multicast */ static int vxlan_fdb_append(struct vxlan_fdb *f, __be32 ip, __be16 port, __u32 vni, __u32 ifindex) @@ -457,6 +477,19 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan, f->updated = jiffies; notify = 1; } + if ((flags & NLM_F_REPLACE)) { + /* Only change unicasts */ + if (!(is_multicast_ether_addr(f->eth_addr) || + is_zero_ether_addr(f->eth_addr))) { + int rc = vxlan_fdb_replace(f, ip, port, vni, + ifindex); + + if (rc < 0) + return rc; + notify |= rc; + } else + return -EOPNOTSUPP; + } if ((flags & NLM_F_APPEND) && (is_multicast_ether_addr(f->eth_addr) || is_zero_ether_addr(f->eth_addr))) { @@ -473,6 +506,11 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan, if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax) return -ENOSPC; + /* Disallow replace to add a multicast entry */ + if ((flags & NLM_F_REPLACE) && + (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac))) + return -EOPNOTSUPP; + netdev_dbg(vxlan->dev, "add %pM -> %pI4\n", mac, &ip); f = kmalloc(sizeof(*f), GFP_ATOMIC); if (!f) -- cgit v0.10.2 From ab2cfbb2bddb7c7bc4394e52e91044d5ff645cb4 Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Fri, 19 Jul 2013 17:20:08 +0200 Subject: macvlan fdb replace support Add support for iproute2 command 'bridge fdb replace ...'. The rtnletlink call back function ndo_fdb_add will be called with the NLM_F_REPLACE flag set. Simply return -EOPNOTSUP. Resubmitted because net-next was closed last week. Signed-off-by: Thomas Richter Signed-off-by: David S. 
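A usage illustration for these two fdb patches (MAC and IP below are made up): with an iproute2 new enough to know the replace verb, a command along the lines of "bridge fdb replace 52:54:00:12:34:56 dev vxlan0 dst 192.0.2.10" should issue an RTM_NEWNEIGH request carrying NLM_F_REPLACE, so the destination of an existing unicast vxlan entry is rewritten in place instead of being deleted and re-added, while the macvlan handler changed below simply refuses the flag with -EOPNOTSUPP.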
Miller diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 18373b6..74907f5 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -597,6 +597,9 @@ static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], if (!vlan->port->passthru) return -EOPNOTSUPP; + if (flags & NLM_F_REPLACE) + return -EOPNOTSUPP; + if (is_unicast_ether_addr(addr)) err = dev_uc_add_excl(dev, addr); else if (is_multicast_ether_addr(addr)) -- cgit v0.10.2 From fc423ff00df3a19554414eed80aef9de9b50313e Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sat, 20 Jul 2013 12:13:52 +0200 Subject: team: add peer notification When port is enabled or disabled, allow to notify peers by unsolicitated NAs or gratuitous ARPs. Disabled by default. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index bff7e0b..0433ee9 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -622,6 +622,46 @@ static int team_change_mode(struct team *team, const char *kind) } +/********************* + * Peers notification + *********************/ + +static void team_notify_peers_work(struct work_struct *work) +{ + struct team *team; + + team = container_of(work, struct team, notify_peers.dw.work); + + if (!rtnl_trylock()) { + schedule_delayed_work(&team->notify_peers.dw, 0); + return; + } + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev); + rtnl_unlock(); + if (!atomic_dec_and_test(&team->notify_peers.count_pending)) + schedule_delayed_work(&team->notify_peers.dw, + msecs_to_jiffies(team->notify_peers.interval)); +} + +static void team_notify_peers(struct team *team) +{ + if (!team->notify_peers.count || !netif_running(team->dev)) + return; + atomic_set(&team->notify_peers.count_pending, team->notify_peers.count); + schedule_delayed_work(&team->notify_peers.dw, 0); +} + +static void team_notify_peers_init(struct team *team) +{ + INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work); +} + +static void team_notify_peers_fini(struct team *team) +{ + cancel_delayed_work_sync(&team->notify_peers.dw); +} + + /************************ * Rx path frame handler ************************/ @@ -846,6 +886,7 @@ static void team_port_enable(struct team *team, team_queue_override_port_add(team, port); if (team->ops.port_enabled) team->ops.port_enabled(team, port); + team_notify_peers(team); } static void __reconstruct_port_hlist(struct team *team, int rm_index) @@ -875,6 +916,7 @@ static void team_port_disable(struct team *team, team->en_port_count--; team_queue_override_port_del(team, port); team_adjust_ops(team); + team_notify_peers(team); } #define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \ @@ -1205,6 +1247,34 @@ static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx) return team_change_mode(team, ctx->data.str_val); } +static int team_notify_peers_count_get(struct team *team, + struct team_gsetter_ctx *ctx) +{ + ctx->data.u32_val = team->notify_peers.count; + return 0; +} + +static int team_notify_peers_count_set(struct team *team, + struct team_gsetter_ctx *ctx) +{ + team->notify_peers.count = ctx->data.u32_val; + return 0; +} + +static int team_notify_peers_interval_get(struct team *team, + struct team_gsetter_ctx *ctx) +{ + ctx->data.u32_val = team->notify_peers.interval; + return 0; +} + +static int team_notify_peers_interval_set(struct team *team, + struct team_gsetter_ctx *ctx) +{ + team->notify_peers.interval = ctx->data.u32_val; + return 0; +} + static int team_port_en_option_get(struct 
team *team, struct team_gsetter_ctx *ctx) { @@ -1317,6 +1387,18 @@ static const struct team_option team_options[] = { .setter = team_mode_option_set, }, { + .name = "notify_peers_count", + .type = TEAM_OPTION_TYPE_U32, + .getter = team_notify_peers_count_get, + .setter = team_notify_peers_count_set, + }, + { + .name = "notify_peers_interval", + .type = TEAM_OPTION_TYPE_U32, + .getter = team_notify_peers_interval_get, + .setter = team_notify_peers_interval_set, + }, + { .name = "enabled", .type = TEAM_OPTION_TYPE_BOOL, .per_port = true, @@ -1396,6 +1478,9 @@ static int team_init(struct net_device *dev) INIT_LIST_HEAD(&team->option_list); INIT_LIST_HEAD(&team->option_inst_list); + + team_notify_peers_init(team); + err = team_options_register(team, team_options, ARRAY_SIZE(team_options)); if (err) goto err_options_register; @@ -1406,6 +1491,7 @@ static int team_init(struct net_device *dev) return 0; err_options_register: + team_notify_peers_fini(team); team_queue_override_fini(team); err_team_queue_override_init: free_percpu(team->pcpu_stats); @@ -1425,6 +1511,7 @@ static void team_uninit(struct net_device *dev) __team_change_mode(team, NULL); /* cleanup */ __team_options_unregister(team, team_options, ARRAY_SIZE(team_options)); + team_notify_peers_fini(team); team_queue_override_fini(team); mutex_unlock(&team->lock); } diff --git a/include/linux/if_team.h b/include/linux/if_team.h index f6156f9..b0b8368 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -10,9 +10,9 @@ #ifndef _LINUX_IF_TEAM_H_ #define _LINUX_IF_TEAM_H_ - #include #include +#include #include struct team_pcpu_stats { @@ -194,6 +194,12 @@ struct team { bool user_carrier_enabled; bool queue_override_enabled; struct list_head *qom_lists; /* array of queue override mapping lists */ + struct { + unsigned int count; + unsigned int interval; /* in ms */ + atomic_t count_pending; + struct delayed_work dw; + } notify_peers; long mode_priv[TEAM_MODE_PRIV_LONGS]; }; -- cgit v0.10.2 From 4aa5dee4d9997879adff858514844efab5a15a01 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sat, 20 Jul 2013 12:13:53 +0200 Subject: net: convert resend IGMP to notifier event Until now, bond_resend_igmp_join_requests() looks for vlans attached to bonding device, bridge where bonding act as port manually. It does not care of other scenarios, like stacked bonds or team device above. Make this more generic and use netdev notifier to propagate the event to upper devices and to actually call ip_mc_rejoin_groups(). Signed-off-by: Jiri Pirko Acked-by: Veaceslav Falico Signed-off-by: David S. 
Miller diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 07f257d4..ae9864c 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -715,15 +715,6 @@ static int bond_set_allmulti(struct bonding *bond, int inc) return err; } -static void __bond_resend_igmp_join_requests(struct net_device *dev) -{ - struct in_device *in_dev; - - in_dev = __in_dev_get_rcu(dev); - if (in_dev) - ip_mc_rejoin_groups(in_dev); -} - /* * Retrieve the list of registered multicast addresses for the bonding * device and retransmit an IGMP JOIN request to the current active @@ -731,33 +722,12 @@ static void __bond_resend_igmp_join_requests(struct net_device *dev) */ static void bond_resend_igmp_join_requests(struct bonding *bond) { - struct net_device *bond_dev, *vlan_dev, *upper_dev; - struct vlan_entry *vlan; - - read_lock(&bond->lock); - rcu_read_lock(); - - bond_dev = bond->dev; - - /* rejoin all groups on bond device */ - __bond_resend_igmp_join_requests(bond_dev); - - /* - * if bond is enslaved to a bridge, - * then rejoin all groups on its master - */ - upper_dev = netdev_master_upper_dev_get_rcu(bond_dev); - if (upper_dev && upper_dev->priv_flags & IFF_EBRIDGE) - __bond_resend_igmp_join_requests(upper_dev); - - /* rejoin all groups on vlan devices */ - list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { - vlan_dev = __vlan_find_dev_deep(bond_dev, htons(ETH_P_8021Q), - vlan->vlan_id); - if (vlan_dev) - __bond_resend_igmp_join_requests(vlan_dev); + if (!rtnl_trylock()) { + queue_delayed_work(bond->wq, &bond->mcast_work, 0); + return; } - rcu_read_unlock(); + call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev); + rtnl_unlock(); /* We use curr_slave_lock to protect against concurrent access to * igmp_retrans from multiple running instances of this function and @@ -3234,6 +3204,10 @@ static int bond_slave_netdev_event(unsigned long event, case NETDEV_FEAT_CHANGE: bond_compute_features(bond); break; + case NETDEV_RESEND_IGMP: + /* Propagate to master device */ + call_netdevice_notifiers(event, slave->bond->dev); + break; default: break; } diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 0433ee9..2587dc8 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -2785,6 +2785,10 @@ static int team_device_event(struct notifier_block *unused, case NETDEV_PRE_TYPE_CHANGE: /* Forbid to change type of underlaying device */ return NOTIFY_BAD; + case NETDEV_RESEND_IGMP: + /* Propagate to master device */ + call_netdevice_notifiers(event, port->team->dev); + break; } return NOTIFY_DONE; } diff --git a/include/linux/igmp.h b/include/linux/igmp.h index e3362b5..f47550d 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -129,6 +129,5 @@ extern void ip_mc_unmap(struct in_device *); extern void ip_mc_remap(struct in_device *); extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr); extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr); -extern void ip_mc_rejoin_groups(struct in_device *in_dev); #endif diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 0741a1e..2bb2357 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1633,6 +1633,7 @@ struct packet_offload { #define NETDEV_NOTIFY_PEERS 0x0013 #define NETDEV_JOIN 0x0014 #define NETDEV_CHANGEUPPER 0x0015 +#define NETDEV_RESEND_IGMP 0x0016 extern int register_netdevice_notifier(struct notifier_block *nb); extern int unregister_netdevice_notifier(struct notifier_block *nb); diff --git 
a/net/8021q/vlan.c b/net/8021q/vlan.c index 2fb2d88..03a92e1 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -459,6 +459,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, case NETDEV_NOTIFY_PEERS: case NETDEV_BONDING_FAILOVER: + case NETDEV_RESEND_IGMP: /* Propagate to vlan devices */ vlan_group_for_each_dev(grp, i, vlandev) call_netdevice_notifiers(event, vlandev); diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c index 3a3f371..2998dd1 100644 --- a/net/bridge/br_notify.c +++ b/net/bridge/br_notify.c @@ -102,6 +102,11 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v case NETDEV_PRE_TYPE_CHANGE: /* Forbid underlaying device to change its type. */ return NOTIFY_BAD; + + case NETDEV_RESEND_IGMP: + /* Propagate to master device */ + call_netdevice_notifiers(event, br->dev); + break; } /* Events that may cause spanning tree to refresh */ diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index cd71190..375aca3 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -1323,16 +1323,17 @@ out: EXPORT_SYMBOL(ip_mc_inc_group); /* - * Resend IGMP JOIN report; used for bonding. - * Called with rcu_read_lock() + * Resend IGMP JOIN report; used by netdev notifier. */ -void ip_mc_rejoin_groups(struct in_device *in_dev) +static void ip_mc_rejoin_groups(struct in_device *in_dev) { #ifdef CONFIG_IP_MULTICAST struct ip_mc_list *im; int type; - for_each_pmc_rcu(in_dev, im) { + ASSERT_RTNL(); + + for_each_pmc_rtnl(in_dev, im) { if (im->multiaddr == IGMP_ALL_HOSTS) continue; @@ -1349,7 +1350,6 @@ void ip_mc_rejoin_groups(struct in_device *in_dev) } #endif } -EXPORT_SYMBOL(ip_mc_rejoin_groups); /* * A socket has left a multicast group on device dev @@ -2735,8 +2735,42 @@ static struct pernet_operations igmp_net_ops = { .exit = igmp_net_exit, }; +static int igmp_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct in_device *in_dev; + + switch (event) { + case NETDEV_RESEND_IGMP: + in_dev = __in_dev_get_rtnl(dev); + if (in_dev) + ip_mc_rejoin_groups(in_dev); + break; + default: + break; + } + return NOTIFY_DONE; +} + +static struct notifier_block igmp_notifier = { + .notifier_call = igmp_netdev_event, +}; + int __init igmp_mc_proc_init(void) { - return register_pernet_subsys(&igmp_net_ops); + int err; + + err = register_pernet_subsys(&igmp_net_ops); + if (err) + return err; + err = register_netdevice_notifier(&igmp_notifier); + if (err) + goto reg_notif_fail; + return 0; + +reg_notif_fail: + unregister_pernet_subsys(&igmp_net_ops); + return err; } #endif -- cgit v0.10.2 From 492b200efdd20b8fcfdac873f3cd8d4902386581 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sat, 20 Jul 2013 12:13:54 +0200 Subject: team: add support for sending multicast rejoins Similar to what is implemented in bonding. User is able to ask team driver to send IGMP rejoins in case port is enabled or disabled. Using previously introduced netdev notifier. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
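A short usage note covering this patch and the earlier notify_peers one (the option names are the ones registered in team_options[]; the tool invocation assumes a libteam build that exposes generic option access): the new knobs can be set at runtime with e.g. "teamnl team0 setoption mcast_rejoin_count 2" and "teamnl team0 setoption mcast_rejoin_interval 100", the interval being in milliseconds. With the default count of 0 the delayed work is never scheduled, so behaviour is unchanged unless explicitly enabled.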
Miller diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 2587dc8..75159e4 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -662,6 +662,46 @@ static void team_notify_peers_fini(struct team *team) } +/******************************* + * Send multicast group rejoins + *******************************/ + +static void team_mcast_rejoin_work(struct work_struct *work) +{ + struct team *team; + + team = container_of(work, struct team, mcast_rejoin.dw.work); + + if (!rtnl_trylock()) { + schedule_delayed_work(&team->mcast_rejoin.dw, 0); + return; + } + call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev); + rtnl_unlock(); + if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending)) + schedule_delayed_work(&team->mcast_rejoin.dw, + msecs_to_jiffies(team->mcast_rejoin.interval)); +} + +static void team_mcast_rejoin(struct team *team) +{ + if (!team->mcast_rejoin.count || !netif_running(team->dev)) + return; + atomic_set(&team->mcast_rejoin.count_pending, team->mcast_rejoin.count); + schedule_delayed_work(&team->mcast_rejoin.dw, 0); +} + +static void team_mcast_rejoin_init(struct team *team) +{ + INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work); +} + +static void team_mcast_rejoin_fini(struct team *team) +{ + cancel_delayed_work_sync(&team->mcast_rejoin.dw); +} + + /************************ * Rx path frame handler ************************/ @@ -887,6 +927,7 @@ static void team_port_enable(struct team *team, if (team->ops.port_enabled) team->ops.port_enabled(team, port); team_notify_peers(team); + team_mcast_rejoin(team); } static void __reconstruct_port_hlist(struct team *team, int rm_index) @@ -917,6 +958,7 @@ static void team_port_disable(struct team *team, team_queue_override_port_del(team, port); team_adjust_ops(team); team_notify_peers(team); + team_mcast_rejoin(team); } #define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \ @@ -1275,6 +1317,34 @@ static int team_notify_peers_interval_set(struct team *team, return 0; } +static int team_mcast_rejoin_count_get(struct team *team, + struct team_gsetter_ctx *ctx) +{ + ctx->data.u32_val = team->mcast_rejoin.count; + return 0; +} + +static int team_mcast_rejoin_count_set(struct team *team, + struct team_gsetter_ctx *ctx) +{ + team->mcast_rejoin.count = ctx->data.u32_val; + return 0; +} + +static int team_mcast_rejoin_interval_get(struct team *team, + struct team_gsetter_ctx *ctx) +{ + ctx->data.u32_val = team->mcast_rejoin.interval; + return 0; +} + +static int team_mcast_rejoin_interval_set(struct team *team, + struct team_gsetter_ctx *ctx) +{ + team->mcast_rejoin.interval = ctx->data.u32_val; + return 0; +} + static int team_port_en_option_get(struct team *team, struct team_gsetter_ctx *ctx) { @@ -1399,6 +1469,18 @@ static const struct team_option team_options[] = { .setter = team_notify_peers_interval_set, }, { + .name = "mcast_rejoin_count", + .type = TEAM_OPTION_TYPE_U32, + .getter = team_mcast_rejoin_count_get, + .setter = team_mcast_rejoin_count_set, + }, + { + .name = "mcast_rejoin_interval", + .type = TEAM_OPTION_TYPE_U32, + .getter = team_mcast_rejoin_interval_get, + .setter = team_mcast_rejoin_interval_set, + }, + { .name = "enabled", .type = TEAM_OPTION_TYPE_BOOL, .per_port = true, @@ -1480,6 +1562,7 @@ static int team_init(struct net_device *dev) INIT_LIST_HEAD(&team->option_inst_list); team_notify_peers_init(team); + team_mcast_rejoin_init(team); err = team_options_register(team, team_options, ARRAY_SIZE(team_options)); if (err) @@ -1491,6 +1574,7 @@ static int 
team_init(struct net_device *dev) return 0; err_options_register: + team_mcast_rejoin_fini(team); team_notify_peers_fini(team); team_queue_override_fini(team); err_team_queue_override_init: @@ -1511,6 +1595,7 @@ static void team_uninit(struct net_device *dev) __team_change_mode(team, NULL); /* cleanup */ __team_options_unregister(team, team_options, ARRAY_SIZE(team_options)); + team_mcast_rejoin_fini(team); team_notify_peers_fini(team); team_queue_override_fini(team); mutex_unlock(&team->lock); diff --git a/include/linux/if_team.h b/include/linux/if_team.h index b0b8368..a899dc2 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -200,6 +200,12 @@ struct team { atomic_t count_pending; struct delayed_work dw; } notify_peers; + struct { + unsigned int count; + unsigned int interval; /* in ms */ + atomic_t count_pending; + struct delayed_work dw; + } mcast_rejoin; long mode_priv[TEAM_MODE_PRIV_LONGS]; }; -- cgit v0.10.2 From c4854ec8c483fb8deea7400c11cd7fd7c03e306c Mon Sep 17 00:00:00 2001 From: Rami Rosen Date: Sat, 20 Jul 2013 15:09:28 +0300 Subject: ipmr: change the prototype of ip_mr_forward(). This patch changes the prototpye of the ip_mr_forward() method to return void instead of int. The ip_mr_forward() method always returns 0; moreover, the return value of this method is not checked anywhere. Signed-off-by: Rami Rosen Acked-by: Nicolas Dichtel Signed-off-by: David S. Miller diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 132a096..bacc0bc 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -127,9 +127,9 @@ static struct kmem_cache *mrt_cachep __read_mostly; static struct mr_table *ipmr_new_table(struct net *net, u32 id); static void ipmr_free_table(struct mr_table *mrt); -static int ip_mr_forward(struct net *net, struct mr_table *mrt, - struct sk_buff *skb, struct mfc_cache *cache, - int local); +static void ip_mr_forward(struct net *net, struct mr_table *mrt, + struct sk_buff *skb, struct mfc_cache *cache, + int local); static int ipmr_cache_report(struct mr_table *mrt, struct sk_buff *pkt, vifi_t vifi, int assert); static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, @@ -1795,9 +1795,9 @@ static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev) /* "local" means that we should preserve one skb (for local delivery) */ -static int ip_mr_forward(struct net *net, struct mr_table *mrt, - struct sk_buff *skb, struct mfc_cache *cache, - int local) +static void ip_mr_forward(struct net *net, struct mr_table *mrt, + struct sk_buff *skb, struct mfc_cache *cache, + int local) { int psend = -1; int vif, ct; @@ -1903,14 +1903,13 @@ last_forward: ipmr_queue_xmit(net, mrt, skb2, cache, psend); } else { ipmr_queue_xmit(net, mrt, skb, cache, psend); - return 0; + return; } } dont_forward: if (!local) kfree_skb(skb); - return 0; } static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb) -- cgit v0.10.2 From 2b52c3ada8f4391d82d8088d9169ac2a8c4d0411 Mon Sep 17 00:00:00 2001 From: Rami Rosen Date: Sun, 21 Jul 2013 03:00:31 +0300 Subject: ip6mr: change the prototype of ip6_mr_forward(). This patch changes the prototpye of the ip6_mr_forward() method to return void instead of int. The ip6_mr_forward() method always returns 0; moreover, the return value of this method is not checked anywhere. Signed-off-by: Rami Rosen Acked-by: Nicolas Dichtel Signed-off-by: David S. 
Miller diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 583e8d4..6d78615 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -110,8 +110,8 @@ static struct kmem_cache *mrt_cachep __read_mostly; static struct mr6_table *ip6mr_new_table(struct net *net, u32 id); static void ip6mr_free_table(struct mr6_table *mrt); -static int ip6_mr_forward(struct net *net, struct mr6_table *mrt, - struct sk_buff *skb, struct mfc6_cache *cache); +static void ip6_mr_forward(struct net *net, struct mr6_table *mrt, + struct sk_buff *skb, struct mfc6_cache *cache); static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt, mifi_t mifi, int assert); static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, @@ -2069,8 +2069,8 @@ static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev) return ct; } -static int ip6_mr_forward(struct net *net, struct mr6_table *mrt, - struct sk_buff *skb, struct mfc6_cache *cache) +static void ip6_mr_forward(struct net *net, struct mr6_table *mrt, + struct sk_buff *skb, struct mfc6_cache *cache) { int psend = -1; int vif, ct; @@ -2151,12 +2151,11 @@ forward: last_forward: if (psend != -1) { ip6mr_forward2(net, mrt, skb, cache, psend); - return 0; + return; } dont_forward: kfree_skb(skb); - return 0; } -- cgit v0.10.2 From b9959fd3b0fa8ee5f45012ae5258d87ee6852baa Mon Sep 17 00:00:00 2001 From: Amerigo Wang Date: Sun, 21 Jul 2013 10:46:25 +0800 Subject: vti: switch to new ip tunnel code GRE tunnel and IPIP tunnel already switched to the new ip tunnel code, VTI tunnel can use it too. Cc: Pravin B Shelar Cc: Stephen Hemminger Cc: Saurabh Mohan Cc: "David S. Miller" Signed-off-by: Cong Wang Signed-off-by: David S. Miller diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 17cc0ff..79b263d 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -44,176 +44,10 @@ #include #include -#define HASH_SIZE 16 -#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&(HASH_SIZE-1)) - static struct rtnl_link_ops vti_link_ops __read_mostly; static int vti_net_id __read_mostly; -struct vti_net { - struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE]; - struct ip_tunnel __rcu *tunnels_r[HASH_SIZE]; - struct ip_tunnel __rcu *tunnels_l[HASH_SIZE]; - struct ip_tunnel __rcu *tunnels_wc[1]; - struct ip_tunnel __rcu **tunnels[4]; - - struct net_device *fb_tunnel_dev; -}; - -static int vti_fb_tunnel_init(struct net_device *dev); static int vti_tunnel_init(struct net_device *dev); -static void vti_tunnel_setup(struct net_device *dev); -static void vti_dev_free(struct net_device *dev); -static int vti_tunnel_bind_dev(struct net_device *dev); - -#define VTI_XMIT(stats1, stats2) do { \ - int err; \ - int pkt_len = skb->len; \ - err = dst_output(skb); \ - if (net_xmit_eval(err) == 0) { \ - u64_stats_update_begin(&(stats1)->syncp); \ - (stats1)->tx_bytes += pkt_len; \ - (stats1)->tx_packets++; \ - u64_stats_update_end(&(stats1)->syncp); \ - } else { \ - (stats2)->tx_errors++; \ - (stats2)->tx_aborted_errors++; \ - } \ -} while (0) - - -static struct ip_tunnel *vti_tunnel_lookup(struct net *net, - __be32 remote, __be32 local) -{ - unsigned h0 = HASH(remote); - unsigned h1 = HASH(local); - struct ip_tunnel *t; - struct vti_net *ipn = net_generic(net, vti_net_id); - - for_each_ip_tunnel_rcu(t, ipn->tunnels_r_l[h0 ^ h1]) - if (local == t->parms.iph.saddr && - remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP)) - return t; - for_each_ip_tunnel_rcu(t, ipn->tunnels_r[h0]) - if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP)) - return t; - - 
for_each_ip_tunnel_rcu(t, ipn->tunnels_l[h1]) - if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP)) - return t; - - for_each_ip_tunnel_rcu(t, ipn->tunnels_wc[0]) - if (t && (t->dev->flags&IFF_UP)) - return t; - return NULL; -} - -static struct ip_tunnel __rcu **__vti_bucket(struct vti_net *ipn, - struct ip_tunnel_parm *parms) -{ - __be32 remote = parms->iph.daddr; - __be32 local = parms->iph.saddr; - unsigned h = 0; - int prio = 0; - - if (remote) { - prio |= 2; - h ^= HASH(remote); - } - if (local) { - prio |= 1; - h ^= HASH(local); - } - return &ipn->tunnels[prio][h]; -} - -static inline struct ip_tunnel __rcu **vti_bucket(struct vti_net *ipn, - struct ip_tunnel *t) -{ - return __vti_bucket(ipn, &t->parms); -} - -static void vti_tunnel_unlink(struct vti_net *ipn, struct ip_tunnel *t) -{ - struct ip_tunnel __rcu **tp; - struct ip_tunnel *iter; - - for (tp = vti_bucket(ipn, t); - (iter = rtnl_dereference(*tp)) != NULL; - tp = &iter->next) { - if (t == iter) { - rcu_assign_pointer(*tp, t->next); - break; - } - } -} - -static void vti_tunnel_link(struct vti_net *ipn, struct ip_tunnel *t) -{ - struct ip_tunnel __rcu **tp = vti_bucket(ipn, t); - - rcu_assign_pointer(t->next, rtnl_dereference(*tp)); - rcu_assign_pointer(*tp, t); -} - -static struct ip_tunnel *vti_tunnel_locate(struct net *net, - struct ip_tunnel_parm *parms, - int create) -{ - __be32 remote = parms->iph.daddr; - __be32 local = parms->iph.saddr; - struct ip_tunnel *t, *nt; - struct ip_tunnel __rcu **tp; - struct net_device *dev; - char name[IFNAMSIZ]; - struct vti_net *ipn = net_generic(net, vti_net_id); - - for (tp = __vti_bucket(ipn, parms); - (t = rtnl_dereference(*tp)) != NULL; - tp = &t->next) { - if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr) - return t; - } - if (!create) - return NULL; - - if (parms->name[0]) - strlcpy(name, parms->name, IFNAMSIZ); - else - strcpy(name, "vti%d"); - - dev = alloc_netdev(sizeof(*t), name, vti_tunnel_setup); - if (dev == NULL) - return NULL; - - dev_net_set(dev, net); - - nt = netdev_priv(dev); - nt->parms = *parms; - dev->rtnl_link_ops = &vti_link_ops; - - vti_tunnel_bind_dev(dev); - - if (register_netdevice(dev) < 0) - goto failed_free; - - dev_hold(dev); - vti_tunnel_link(ipn, nt); - return nt; - -failed_free: - free_netdev(dev); - return NULL; -} - -static void vti_tunnel_uninit(struct net_device *dev) -{ - struct net *net = dev_net(dev); - struct vti_net *ipn = net_generic(net, vti_net_id); - - vti_tunnel_unlink(ipn, netdev_priv(dev)); - dev_put(dev); -} static int vti_err(struct sk_buff *skb, u32 info) { @@ -222,6 +56,8 @@ static int vti_err(struct sk_buff *skb, u32 info) * 8 bytes of packet payload. It means, that precise relaying of * ICMP in the real Internet is absolutely infeasible. 
*/ + struct net *net = dev_net(skb->dev); + struct ip_tunnel_net *itn = net_generic(net, vti_net_id); struct iphdr *iph = (struct iphdr *)skb->data; const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; @@ -252,7 +88,8 @@ static int vti_err(struct sk_buff *skb, u32 info) err = -ENOENT; - t = vti_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr); + t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, + iph->daddr, iph->saddr, 0); if (t == NULL) goto out; @@ -281,8 +118,11 @@ static int vti_rcv(struct sk_buff *skb) { struct ip_tunnel *tunnel; const struct iphdr *iph = ip_hdr(skb); + struct net *net = dev_net(skb->dev); + struct ip_tunnel_net *itn = net_generic(net, vti_net_id); - tunnel = vti_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr); + tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, + iph->saddr, iph->daddr, 0); if (tunnel != NULL) { struct pcpu_tstats *tstats; @@ -311,7 +151,6 @@ static int vti_rcv(struct sk_buff *skb) static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) { struct ip_tunnel *tunnel = netdev_priv(dev); - struct pcpu_tstats *tstats; struct iphdr *tiph = &tunnel->parms.iph; u8 tos; struct rtable *rt; /* Route to the other host */ @@ -319,6 +158,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) struct iphdr *old_iph = ip_hdr(skb); __be32 dst = tiph->daddr; struct flowi4 fl4; + int err; if (skb->protocol != htons(ETH_P_IP)) goto tx_error; @@ -367,8 +207,10 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) nf_reset(skb); skb->dev = skb_dst(skb)->dev; - tstats = this_cpu_ptr(dev->tstats); - VTI_XMIT(tstats, &dev->stats); + err = dst_output(skb); + if (net_xmit_eval(err) == 0) + err = skb->len; + iptunnel_xmit_stats(err, &dev->stats, dev->tstats); return NETDEV_TX_OK; tx_error_icmp: @@ -379,198 +221,57 @@ tx_error: return NETDEV_TX_OK; } -static int vti_tunnel_bind_dev(struct net_device *dev) -{ - struct net_device *tdev = NULL; - struct ip_tunnel *tunnel; - struct iphdr *iph; - - tunnel = netdev_priv(dev); - iph = &tunnel->parms.iph; - - if (iph->daddr) { - struct rtable *rt; - struct flowi4 fl4; - memset(&fl4, 0, sizeof(fl4)); - flowi4_init_output(&fl4, tunnel->parms.link, - be32_to_cpu(tunnel->parms.i_key), - RT_TOS(iph->tos), RT_SCOPE_UNIVERSE, - IPPROTO_IPIP, 0, - iph->daddr, iph->saddr, 0, 0); - rt = ip_route_output_key(dev_net(dev), &fl4); - if (!IS_ERR(rt)) { - tdev = rt->dst.dev; - ip_rt_put(rt); - } - dev->flags |= IFF_POINTOPOINT; - } - - if (!tdev && tunnel->parms.link) - tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link); - - if (tdev) { - dev->hard_header_len = tdev->hard_header_len + - sizeof(struct iphdr); - dev->mtu = tdev->mtu; - } - dev->iflink = tunnel->parms.link; - return dev->mtu; -} - static int vti_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { int err = 0; struct ip_tunnel_parm p; - struct ip_tunnel *t; - struct net *net = dev_net(dev); - struct vti_net *ipn = net_generic(net, vti_net_id); - - switch (cmd) { - case SIOCGETTUNNEL: - t = NULL; - if (dev == ipn->fb_tunnel_dev) { - if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, - sizeof(p))) { - err = -EFAULT; - break; - } - t = vti_tunnel_locate(net, &p, 0); - } - if (t == NULL) - t = netdev_priv(dev); - memcpy(&p, &t->parms, sizeof(p)); - p.i_flags |= GRE_KEY | VTI_ISVTI; - p.o_flags |= GRE_KEY; - if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) - err = -EFAULT; - break; - - case SIOCADDTUNNEL: - case 
SIOCCHGTUNNEL: - err = -EPERM; - if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) - goto done; - err = -EFAULT; - if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) - goto done; + if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) + return -EFAULT; - err = -EINVAL; + if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) { if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP || p.iph.ihl != 5) - goto done; - - t = vti_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL); - - if (dev != ipn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) { - if (t != NULL) { - if (t->dev != dev) { - err = -EEXIST; - break; - } - } else { - if (((dev->flags&IFF_POINTOPOINT) && - !p.iph.daddr) || - (!(dev->flags&IFF_POINTOPOINT) && - p.iph.daddr)) { - err = -EINVAL; - break; - } - t = netdev_priv(dev); - vti_tunnel_unlink(ipn, t); - synchronize_net(); - t->parms.iph.saddr = p.iph.saddr; - t->parms.iph.daddr = p.iph.daddr; - t->parms.i_key = p.i_key; - t->parms.o_key = p.o_key; - t->parms.iph.protocol = IPPROTO_IPIP; - memcpy(dev->dev_addr, &p.iph.saddr, 4); - memcpy(dev->broadcast, &p.iph.daddr, 4); - vti_tunnel_link(ipn, t); - netdev_state_change(dev); - } - } - - if (t) { - err = 0; - if (cmd == SIOCCHGTUNNEL) { - t->parms.i_key = p.i_key; - t->parms.o_key = p.o_key; - if (t->parms.link != p.link) { - t->parms.link = p.link; - vti_tunnel_bind_dev(dev); - netdev_state_change(dev); - } - } - p.i_flags |= GRE_KEY | VTI_ISVTI; - p.o_flags |= GRE_KEY; - if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, - sizeof(p))) - err = -EFAULT; - } else - err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT); - break; + return -EINVAL; + } - case SIOCDELTUNNEL: - err = -EPERM; - if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) - goto done; - - if (dev == ipn->fb_tunnel_dev) { - err = -EFAULT; - if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, - sizeof(p))) - goto done; - err = -ENOENT; - - t = vti_tunnel_locate(net, &p, 0); - if (t == NULL) - goto done; - err = -EPERM; - if (t->dev == ipn->fb_tunnel_dev) - goto done; - dev = t->dev; - } - unregister_netdevice(dev); - err = 0; - break; + err = ip_tunnel_ioctl(dev, &p, cmd); + if (err) + return err; - default: - err = -EINVAL; + if (cmd != SIOCDELTUNNEL) { + p.i_flags |= GRE_KEY | VTI_ISVTI; + p.o_flags |= GRE_KEY; } -done: - return err; -} - -static int vti_tunnel_change_mtu(struct net_device *dev, int new_mtu) -{ - if (new_mtu < 68 || new_mtu > 0xFFF8) - return -EINVAL; - dev->mtu = new_mtu; + if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) + return -EFAULT; return 0; } static const struct net_device_ops vti_netdev_ops = { .ndo_init = vti_tunnel_init, - .ndo_uninit = vti_tunnel_uninit, + .ndo_uninit = ip_tunnel_uninit, .ndo_start_xmit = vti_tunnel_xmit, .ndo_do_ioctl = vti_tunnel_ioctl, - .ndo_change_mtu = vti_tunnel_change_mtu, + .ndo_change_mtu = ip_tunnel_change_mtu, .ndo_get_stats64 = ip_tunnel_get_stats64, }; -static void vti_dev_free(struct net_device *dev) +static void vti_tunnel_setup(struct net_device *dev) { - free_percpu(dev->tstats); - free_netdev(dev); + dev->netdev_ops = &vti_netdev_ops; + ip_tunnel_setup(dev, vti_net_id); } -static void vti_tunnel_setup(struct net_device *dev) +static int vti_tunnel_init(struct net_device *dev) { - dev->netdev_ops = &vti_netdev_ops; - dev->destructor = vti_dev_free; + struct ip_tunnel *tunnel = netdev_priv(dev); + struct iphdr *iph = &tunnel->parms.iph; + + memcpy(dev->dev_addr, &iph->saddr, 4); + memcpy(dev->broadcast, &iph->daddr, 4); dev->type = ARPHRD_TUNNEL; dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr); 
@@ -581,38 +282,18 @@ static void vti_tunnel_setup(struct net_device *dev) dev->features |= NETIF_F_NETNS_LOCAL; dev->features |= NETIF_F_LLTX; dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; -} -static int vti_tunnel_init(struct net_device *dev) -{ - struct ip_tunnel *tunnel = netdev_priv(dev); - - tunnel->dev = dev; - strcpy(tunnel->parms.name, dev->name); - - memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4); - memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4); - - dev->tstats = alloc_percpu(struct pcpu_tstats); - if (!dev->tstats) - return -ENOMEM; - - return 0; + return ip_tunnel_init(dev); } -static int __net_init vti_fb_tunnel_init(struct net_device *dev) +static void __net_init vti_fb_tunnel_init(struct net_device *dev) { struct ip_tunnel *tunnel = netdev_priv(dev); struct iphdr *iph = &tunnel->parms.iph; - struct vti_net *ipn = net_generic(dev_net(dev), vti_net_id); iph->version = 4; iph->protocol = IPPROTO_IPIP; iph->ihl = 5; - - dev_hold(dev); - rcu_assign_pointer(ipn->tunnels_wc[0], tunnel); - return 0; } static struct xfrm_tunnel vti_handler __read_mostly = { @@ -621,76 +302,30 @@ static struct xfrm_tunnel vti_handler __read_mostly = { .priority = 1, }; -static void vti_destroy_tunnels(struct vti_net *ipn, struct list_head *head) -{ - int prio; - - for (prio = 1; prio < 4; prio++) { - int h; - for (h = 0; h < HASH_SIZE; h++) { - struct ip_tunnel *t; - - t = rtnl_dereference(ipn->tunnels[prio][h]); - while (t != NULL) { - unregister_netdevice_queue(t->dev, head); - t = rtnl_dereference(t->next); - } - } - } -} - static int __net_init vti_init_net(struct net *net) { int err; - struct vti_net *ipn = net_generic(net, vti_net_id); - - ipn->tunnels[0] = ipn->tunnels_wc; - ipn->tunnels[1] = ipn->tunnels_l; - ipn->tunnels[2] = ipn->tunnels_r; - ipn->tunnels[3] = ipn->tunnels_r_l; - - ipn->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), - "ip_vti0", - vti_tunnel_setup); - if (!ipn->fb_tunnel_dev) { - err = -ENOMEM; - goto err_alloc_dev; - } - dev_net_set(ipn->fb_tunnel_dev, net); - - err = vti_fb_tunnel_init(ipn->fb_tunnel_dev); - if (err) - goto err_reg_dev; - ipn->fb_tunnel_dev->rtnl_link_ops = &vti_link_ops; + struct ip_tunnel_net *itn; - err = register_netdev(ipn->fb_tunnel_dev); + err = ip_tunnel_init_net(net, vti_net_id, &vti_link_ops, "ip_vti0"); if (err) - goto err_reg_dev; + return err; + itn = net_generic(net, vti_net_id); + vti_fb_tunnel_init(itn->fb_tunnel_dev); return 0; - -err_reg_dev: - vti_dev_free(ipn->fb_tunnel_dev); -err_alloc_dev: - /* nothing */ - return err; } static void __net_exit vti_exit_net(struct net *net) { - struct vti_net *ipn = net_generic(net, vti_net_id); - LIST_HEAD(list); - - rtnl_lock(); - vti_destroy_tunnels(ipn, &list); - unregister_netdevice_many(&list); - rtnl_unlock(); + struct ip_tunnel_net *itn = net_generic(net, vti_net_id); + ip_tunnel_delete_net(itn); } static struct pernet_operations vti_net_ops = { .init = vti_init_net, .exit = vti_exit_net, .id = &vti_net_id, - .size = sizeof(struct vti_net), + .size = sizeof(struct ip_tunnel_net), }; static int vti_tunnel_validate(struct nlattr *tb[], struct nlattr *data[]) @@ -728,78 +363,19 @@ static void vti_netlink_parms(struct nlattr *data[], static int vti_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { - struct ip_tunnel *nt; - struct net *net = dev_net(dev); - struct vti_net *ipn = net_generic(net, vti_net_id); - int mtu; - int err; - - nt = netdev_priv(dev); - vti_netlink_parms(data, &nt->parms); - - if (vti_tunnel_locate(net, &nt->parms, 
0)) - return -EEXIST; + struct ip_tunnel_parm parms; - mtu = vti_tunnel_bind_dev(dev); - if (!tb[IFLA_MTU]) - dev->mtu = mtu; - - err = register_netdevice(dev); - if (err) - goto out; - - dev_hold(dev); - vti_tunnel_link(ipn, nt); - -out: - return err; + vti_netlink_parms(data, &parms); + return ip_tunnel_newlink(dev, tb, &parms); } static int vti_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { - struct ip_tunnel *t, *nt; - struct net *net = dev_net(dev); - struct vti_net *ipn = net_generic(net, vti_net_id); struct ip_tunnel_parm p; - int mtu; - - if (dev == ipn->fb_tunnel_dev) - return -EINVAL; - nt = netdev_priv(dev); vti_netlink_parms(data, &p); - - t = vti_tunnel_locate(net, &p, 0); - - if (t) { - if (t->dev != dev) - return -EEXIST; - } else { - t = nt; - - vti_tunnel_unlink(ipn, t); - t->parms.iph.saddr = p.iph.saddr; - t->parms.iph.daddr = p.iph.daddr; - t->parms.i_key = p.i_key; - t->parms.o_key = p.o_key; - if (dev->type != ARPHRD_ETHER) { - memcpy(dev->dev_addr, &p.iph.saddr, 4); - memcpy(dev->broadcast, &p.iph.daddr, 4); - } - vti_tunnel_link(ipn, t); - netdev_state_change(dev); - } - - if (t->parms.link != p.link) { - t->parms.link = p.link; - mtu = vti_tunnel_bind_dev(dev); - if (!tb[IFLA_MTU]) - dev->mtu = mtu; - netdev_state_change(dev); - } - - return 0; + return ip_tunnel_changelink(dev, tb, &p); } static size_t vti_get_size(const struct net_device *dev) @@ -865,7 +441,7 @@ static int __init vti_init(void) err = xfrm4_mode_tunnel_input_register(&vti_handler); if (err < 0) { unregister_pernet_device(&vti_net_ops); - pr_info(KERN_INFO "vti init: can't register tunnel\n"); + pr_info("vti init: can't register tunnel\n"); } err = rtnl_link_register(&vti_link_ops); -- cgit v0.10.2 From 45fe142cefa864b685615bcb930159f6749c3667 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 28 Jun 2013 08:05:06 -0700 Subject: iwl3945: better skb management in rx path Steinar reported reallocations of skb->head with IPv6, leading to a warning in skb_try_coalesce() It turns out iwl3945 has several problems : 1) skb->truesize is underestimated. We really consume PAGE_SIZE bytes for a fragment, not the frame length. 2) 128 bytes of initial headroom is a bit low and forces reallocations. 3) We can avoid consuming a full page for small enough frames. Reported-by: Steinar H. Gunderson Signed-off-by: Eric Dumazet Cc: Paul Stewart Acked-by: Stanislaw Gruszka Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c index c092033..f09e257 100644 --- a/drivers/net/wireless/iwlegacy/3945.c +++ b/drivers/net/wireless/iwlegacy/3945.c @@ -475,6 +475,8 @@ il3945_is_network_packet(struct il_priv *il, struct ieee80211_hdr *header) } } +#define SMALL_PACKET_SIZE 256 + static void il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb, struct ieee80211_rx_status *stats) @@ -483,14 +485,13 @@ il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb, struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IL_RX_DATA(pkt); struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt); struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt); - u16 len = le16_to_cpu(rx_hdr->len); + u32 len = le16_to_cpu(rx_hdr->len); struct sk_buff *skb; __le16 fc = hdr->frame_control; + u32 fraglen = PAGE_SIZE << il->hw_params.rx_page_order; /* We received data from the HW, so stop the watchdog */ - if (unlikely - (len + IL39_RX_FRAME_SIZE > - PAGE_SIZE << il->hw_params.rx_page_order)) { + if (unlikely(len + IL39_RX_FRAME_SIZE > fraglen)) { D_DROP("Corruption detected!\n"); return; } @@ -506,26 +507,32 @@ il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb, D_INFO("Woke queues - frame received on passive channel\n"); } - skb = dev_alloc_skb(128); + skb = dev_alloc_skb(SMALL_PACKET_SIZE); if (!skb) { IL_ERR("dev_alloc_skb failed\n"); return; } if (!il3945_mod_params.sw_crypto) - il_set_decrypted_flag(il, (struct ieee80211_hdr *)rxb_addr(rxb), + il_set_decrypted_flag(il, (struct ieee80211_hdr *)pkt, le32_to_cpu(rx_end->status), stats); - skb_add_rx_frag(skb, 0, rxb->page, - (void *)rx_hdr->payload - (void *)pkt, len, - len); - + /* If frame is small enough to fit into skb->head, copy it + * and do not consume a full page + */ + if (len <= SMALL_PACKET_SIZE) { + memcpy(skb_put(skb, len), rx_hdr->payload, len); + } else { + skb_add_rx_frag(skb, 0, rxb->page, + (void *)rx_hdr->payload - (void *)pkt, len, + fraglen); + il->alloc_rxb_page--; + rxb->page = NULL; + } il_update_stats(il, false, fc, len); memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); ieee80211_rx(il->hw, skb); - il->alloc_rxb_page--; - rxb->page = NULL; } #define IL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6) -- cgit v0.10.2 From 0e15370dbc7ab4d4ffd006620024ac3a52cc8c43 Mon Sep 17 00:00:00 2001 From: Huawei Yang Date: Mon, 22 Jul 2013 19:17:38 -0700 Subject: mwifiex: remove stop_net_dev_queue operation in AP forwarding Under uAP mode mwifiex may stop all net tx queues on forwarding packets. This may stop some tx queues and they never have chance to be waked up. There is also no need to check tx_pending and stop queues here. Because local host has such kind of check when transmitting packets and it's not proper to have forwarding affect local transmitting. Signed-off-by: Huawei Yang Reviewed-by: Avinash Patil Acked-by: Bing Zhao Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c index a018e42..11df2b2 100644 --- a/drivers/net/wireless/mwifiex/uap_txrx.c +++ b/drivers/net/wireless/mwifiex/uap_txrx.c @@ -95,10 +95,6 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv, atomic_inc(&adapter->tx_pending); atomic_inc(&adapter->pending_bridged_pkts); - if ((atomic_read(&adapter->tx_pending) >= MAX_TX_PENDING)) { - mwifiex_set_trans_start(priv->netdev); - mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter); - } return; } -- cgit v0.10.2 From f0cb84f8a1925ad1ff0a10c345b83db5f8043428 Mon Sep 17 00:00:00 2001 From: Avinash Patil Date: Mon, 22 Jul 2013 19:17:39 -0700 Subject: mwifiex: rename pkt_count to ba_pkt_count in mwifiex_ra_list_tbl struct pkt_count is used to determine if BA can be formed on this RA list by comparing it with randomly generated BA threshold. The pkt_count variable name here is ambiguous and does not reflect its usage correctly. Rename it to ba_pkt_count. Signed-off-by: Avinash Patil Signed-off-by: Bing Zhao Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index 3da73d3..dff95fe 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h @@ -207,7 +207,7 @@ struct mwifiex_ra_list_tbl { u32 total_pkts_size; u32 is_11n_enabled; u16 max_amsdu; - u16 pkt_count; + u16 ba_pkt_count; u8 ba_packet_thr; }; diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c index 944e884..9c1eeee 100644 --- a/drivers/net/wireless/mwifiex/wmm.c +++ b/drivers/net/wireless/mwifiex/wmm.c @@ -188,7 +188,7 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra) ra_list, ra_list->is_11n_enabled); if (ra_list->is_11n_enabled) { - ra_list->pkt_count = 0; + ra_list->ba_pkt_count = 0; ra_list->ba_packet_thr = mwifiex_get_random_ba_threshold(); } @@ -680,7 +680,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv, skb_queue_tail(&ra_list->skb_head, skb); ra_list->total_pkts_size += skb->len; - ra_list->pkt_count++; + ra_list->ba_pkt_count++; if (atomic_read(&priv->wmm.highest_queued_prio) < tos_to_tid_inv[tid_down]) @@ -1063,7 +1063,7 @@ mwifiex_send_single_packet(struct mwifiex_private *priv, skb_queue_tail(&ptr->skb_head, skb); ptr->total_pkts_size += skb->len; - ptr->pkt_count++; + ptr->ba_pkt_count++; tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT; spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); @@ -1224,7 +1224,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter) mwifiex_send_single_packet() */ } else { if (mwifiex_is_ampdu_allowed(priv, tid) && - ptr->pkt_count > ptr->ba_packet_thr) { + ptr->ba_pkt_count > ptr->ba_packet_thr) { if (mwifiex_space_avail_for_new_ba_stream(adapter)) { mwifiex_create_ba_tbl(priv, ptr->ra, tid, BA_SETUP_INPROGRESS); -- cgit v0.10.2 From c7d9ed9ec9b2947a9c5dc5e2c7afce1fd3ad82cc Mon Sep 17 00:00:00 2001 From: Avinash Patil Date: Mon, 22 Jul 2013 19:17:40 -0700 Subject: mwifiex: maintain outstanding packet count for RA list instead of packet size Maintain total outstanding packet count for RA list instead of total outstanding size as packet count metric seems more reasonable for checking threshold etc. Signed-off-by: Avinash Patil Signed-off-by: Bing Zhao Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c index a78e065..8f9f542 100644 --- a/drivers/net/wireless/mwifiex/11n_aggr.c +++ b/drivers/net/wireless/mwifiex/11n_aggr.c @@ -189,7 +189,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, skb_src = skb_dequeue(&pra_list->skb_head); - pra_list->total_pkts_size -= skb_src->len; + pra_list->total_pkt_count--; atomic_dec(&priv->wmm.tx_pkts_queued); @@ -268,7 +268,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, skb_queue_tail(&pra_list->skb_head, skb_aggr); - pra_list->total_pkts_size += skb_aggr->len; + pra_list->total_pkt_count++; atomic_inc(&priv->wmm.tx_pkts_queued); diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index dff95fe..761e592 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h @@ -204,11 +204,11 @@ struct mwifiex_ra_list_tbl { struct list_head list; struct sk_buff_head skb_head; u8 ra[ETH_ALEN]; - u32 total_pkts_size; u32 is_11n_enabled; u16 max_amsdu; u16 ba_pkt_count; u8 ba_packet_thr; + u16 total_pkt_count; }; struct mwifiex_tid_tbl { diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c index 9c1eeee..2e8f9cd 100644 --- a/drivers/net/wireless/mwifiex/wmm.c +++ b/drivers/net/wireless/mwifiex/wmm.c @@ -120,7 +120,7 @@ mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, u8 *ra) memcpy(ra_list->ra, ra, ETH_ALEN); - ra_list->total_pkts_size = 0; + ra_list->total_pkt_count = 0; dev_dbg(adapter->dev, "info: allocated ra_list %p\n", ra_list); @@ -679,8 +679,8 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv, skb_queue_tail(&ra_list->skb_head, skb); - ra_list->total_pkts_size += skb->len; ra_list->ba_pkt_count++; + ra_list->total_pkt_count++; if (atomic_read(&priv->wmm.highest_queued_prio) < tos_to_tid_inv[tid_down]) @@ -1037,7 +1037,7 @@ mwifiex_send_single_packet(struct mwifiex_private *priv, tx_info = MWIFIEX_SKB_TXCB(skb); dev_dbg(adapter->dev, "data: dequeuing the packet %p %p\n", ptr, skb); - ptr->total_pkts_size -= skb->len; + ptr->total_pkt_count--; if (!skb_queue_empty(&ptr->skb_head)) skb_next = skb_peek(&ptr->skb_head); @@ -1062,7 +1062,7 @@ mwifiex_send_single_packet(struct mwifiex_private *priv, skb_queue_tail(&ptr->skb_head, skb); - ptr->total_pkts_size += skb->len; + ptr->total_pkt_count++; ptr->ba_pkt_count++; tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT; spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, -- cgit v0.10.2 From 61c873045f74b16317c88e2931637329a6844564 Mon Sep 17 00:00:00 2001 From: Avinash Patil Date: Mon, 22 Jul 2013 19:17:41 -0700 Subject: mwifiex: delete AP TX queues when bridged packets reach threshold Delete packets from TX queues for this mwifiex_private structure when bridged packet count reaches maximum threshold. Bridged packets from each RA List are deleted till they fall to low threshold of 128. Signed-off-by: Avinash Patil Signed-off-by: Bing Zhao Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h index 94cc09d..a599347 100644 --- a/drivers/net/wireless/mwifiex/decl.h +++ b/drivers/net/wireless/mwifiex/decl.h @@ -75,7 +75,8 @@ #define MWIFIEX_BUF_FLAG_REQUEUED_PKT BIT(0) #define MWIFIEX_BUF_FLAG_BRIDGED_PKT BIT(1) -#define MWIFIEX_BRIDGED_PKTS_THRESHOLD 1024 +#define MWIFIEX_BRIDGED_PKTS_THR_HIGH 1024 +#define MWIFIEX_BRIDGED_PKTS_THR_LOW 128 enum mwifiex_bss_type { MWIFIEX_BSS_TYPE_STA = 0, diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c index caaf4bd..fb86cec1 100644 --- a/drivers/net/wireless/mwifiex/init.c +++ b/drivers/net/wireless/mwifiex/init.c @@ -135,6 +135,7 @@ int mwifiex_init_priv(struct mwifiex_private *priv) priv->csa_chan = 0; priv->csa_expire_time = 0; + priv->del_list_idx = 0; return mwifiex_add_bss_prio_tbl(priv); } diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index 761e592..5db0543 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h @@ -515,6 +515,7 @@ struct mwifiex_private { bool scan_aborting; u8 csa_chan; unsigned long csa_expire_time; + u8 del_list_idx; }; enum mwifiex_ba_status { diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c index 11df2b2..1cfe5a7 100644 --- a/drivers/net/wireless/mwifiex/uap_txrx.c +++ b/drivers/net/wireless/mwifiex/uap_txrx.c @@ -24,6 +24,69 @@ #include "11n_aggr.h" #include "11n_rxreorder.h" +/* This function checks if particular RA list has packets more than low bridge + * packet threshold and then deletes packet from this RA list. + * Function deletes packets from such RA list and returns true. If no such list + * is found, false is returned. + */ +static bool +mwifiex_uap_del_tx_pkts_in_ralist(struct mwifiex_private *priv, + struct list_head *ra_list_head) +{ + struct mwifiex_ra_list_tbl *ra_list; + struct sk_buff *skb, *tmp; + bool pkt_deleted = false; + struct mwifiex_txinfo *tx_info; + struct mwifiex_adapter *adapter = priv->adapter; + + list_for_each_entry(ra_list, ra_list_head, list) { + if (skb_queue_empty(&ra_list->skb_head)) + continue; + + skb_queue_walk_safe(&ra_list->skb_head, skb, tmp) { + tx_info = MWIFIEX_SKB_TXCB(skb); + if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT) { + __skb_unlink(skb, &ra_list->skb_head); + mwifiex_write_data_complete(adapter, skb, 0, + -1); + atomic_dec(&priv->wmm.tx_pkts_queued); + pkt_deleted = true; + } + if ((atomic_read(&adapter->pending_bridged_pkts) <= + MWIFIEX_BRIDGED_PKTS_THR_LOW)) + break; + } + } + + return pkt_deleted; +} + +/* This function deletes packets from particular RA List. RA list index + * from which packets are deleted is preserved so that packets from next RA + * list are deleted upon subsequent call thus maintaining fairness. 
+ */ +static void mwifiex_uap_cleanup_tx_queues(struct mwifiex_private *priv) +{ + unsigned long flags; + struct list_head *ra_list; + int i; + + spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags); + + for (i = 0; i < MAX_NUM_TID; i++, priv->del_list_idx++) { + if (priv->del_list_idx == MAX_NUM_TID) + priv->del_list_idx = 0; + ra_list = &priv->wmm.tid_tbl_ptr[priv->del_list_idx].ra_list; + if (mwifiex_uap_del_tx_pkts_in_ralist(priv, ra_list)) { + priv->del_list_idx++; + break; + } + } + + spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); +} + + static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv, struct sk_buff *skb) { @@ -40,10 +103,11 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv, rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset); if ((atomic_read(&adapter->pending_bridged_pkts) >= - MWIFIEX_BRIDGED_PKTS_THRESHOLD)) { + MWIFIEX_BRIDGED_PKTS_THR_HIGH)) { dev_err(priv->adapter->dev, "Tx: Bridge packet limit reached. Drop packet!\n"); kfree_skb(skb); + mwifiex_uap_cleanup_tx_queues(priv); return; } -- cgit v0.10.2 From 231d83e29e44e39d987ea4c33caf7869cf5ef4c7 Mon Sep 17 00:00:00 2001 From: Huawei Yang Date: Mon, 22 Jul 2013 19:17:42 -0700 Subject: mwifiex: add tx info to skb when forming mgmt frame In function 'mwifiex_write_data_complete' we need tx info to find the mwifiex_private structure in order to update statistics and wake up tx queues. Otherwise we may trigger tx queue timeouts when transmitting lots of mgmt frames. Signed-off-by: Huawei Yang Signed-off-by: Stone Piao Signed-off-by: Bing Zhao Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index ef5fa89..d824fee 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -189,6 +189,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, struct sk_buff *skb; u16 pkt_len; const struct ieee80211_mgmt *mgmt; + struct mwifiex_txinfo *tx_info; struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev); if (!buf || !len) { @@ -216,6 +217,10 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, return -ENOMEM; } + tx_info = MWIFIEX_SKB_TXCB(skb); + tx_info->bss_num = priv->bss_num; + tx_info->bss_type = priv->bss_type; + mwifiex_form_mgmt_frame(skb, buf, len); mwifiex_queue_tx_pkt(priv, skb); -- cgit v0.10.2 From eaf49dbc4b7ef7e9fe420713730e403c9ba0bef1 Mon Sep 17 00:00:00 2001 From: Stone Piao Date: Mon, 22 Jul 2013 19:17:43 -0700 Subject: mwifiex: discard deauth and disassoc event during WPS session Some GOs will send a deauth or disassoc packet at the end of WPS handshake, which causes P2P connection failure due to the race condition between event path and data path. Signed-off-by: Stone Piao Signed-off-by: Bing Zhao Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c index ea265ec..8b05752 100644 --- a/drivers/net/wireless/mwifiex/sta_event.c +++ b/drivers/net/wireless/mwifiex/sta_event.c @@ -201,6 +201,11 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) case EVENT_DEAUTHENTICATED: dev_dbg(adapter->dev, "event: Deauthenticated\n"); + if (priv->wps.session_enable) { + dev_dbg(adapter->dev, + "info: receive deauth event in wps session\n"); + break; + } adapter->dbg.num_event_deauth++; if (priv->media_connected) { reason_code = @@ -211,6 +216,11 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) case EVENT_DISASSOCIATED: dev_dbg(adapter->dev, "event: Disassociated\n"); + if (priv->wps.session_enable) { + dev_dbg(adapter->dev, + "info: receive disassoc event in wps session\n"); + break; + } adapter->dbg.num_event_disassoc++; if (priv->media_connected) { reason_code = -- cgit v0.10.2 From 4481b2dba0930015d7c15acd0b69a3a89cae3591 Mon Sep 17 00:00:00 2001 From: Stone Piao Date: Mon, 22 Jul 2013 19:17:44 -0700 Subject: mwifiex: skip registering mgmt frame that has already been registered Before sending command to firmware, we need to check the frame type. We skip registering the mgmt frame that has already been registered. Signed-off-by: Stone Piao Signed-off-by: Bing Zhao Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index d824fee..dbdd3b2 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -240,16 +240,20 @@ mwifiex_cfg80211_mgmt_frame_register(struct wiphy *wiphy, u16 frame_type, bool reg) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev); + u32 mask; if (reg) - priv->mgmt_frame_mask |= BIT(frame_type >> 4); + mask = priv->mgmt_frame_mask | BIT(frame_type >> 4); else - priv->mgmt_frame_mask &= ~BIT(frame_type >> 4); + mask = priv->mgmt_frame_mask & ~BIT(frame_type >> 4); - mwifiex_send_cmd_async(priv, HostCmd_CMD_MGMT_FRAME_REG, - HostCmd_ACT_GEN_SET, 0, &priv->mgmt_frame_mask); - - wiphy_dbg(wiphy, "info: mgmt frame registered\n"); + if (mask != priv->mgmt_frame_mask) { + priv->mgmt_frame_mask = mask; + mwifiex_send_cmd_async(priv, HostCmd_CMD_MGMT_FRAME_REG, + HostCmd_ACT_GEN_SET, 0, + &priv->mgmt_frame_mask); + wiphy_dbg(wiphy, "info: mgmt frame registered\n"); + } } /* -- cgit v0.10.2 From 5e4c07987f4db22cbc3e1dadb021baab0a34a57f Mon Sep 17 00:00:00 2001 From: Stone Piao Date: Mon, 22 Jul 2013 19:17:45 -0700 Subject: mwifiex: support to send deauth for P2P client During P2P handshake, P2P client needs to send deauth after EAPOL FAILURE to GO. We need to add bss mode for P2P client when handling deauth request. Without this change, deauth cannot be sent out from P2P client side. Signed-off-by: Stone Piao Signed-off-by: Avinash Patil Signed-off-by: Bing Zhao Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c index 1c8a771..ba043ca 100644 --- a/drivers/net/wireless/mwifiex/join.c +++ b/drivers/net/wireless/mwifiex/join.c @@ -1425,6 +1425,7 @@ int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac) switch (priv->bss_mode) { case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_P2P_CLIENT: return mwifiex_deauthenticate_infra(priv, mac); case NL80211_IFTYPE_ADHOC: return mwifiex_send_cmd_sync(priv, -- cgit v0.10.2 From 8795ca61e4ff3db70f2d072a28aaefc29f1a2301 Mon Sep 17 00:00:00 2001 From: Avinash Patil Date: Mon, 22 Jul 2013 19:17:46 -0700 Subject: mwifiex: correct max IE length check for WPS IE This patch is a bug fix for an invalid boundary check for WPS IE. We should check max IE length against the defined macro; instead we were checking it against the size of the pointer. Fix it. Also move the IE length check before memory allocation. Signed-off-by: Avinash Patil Signed-off-by: Bing Zhao Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c index 206c3e0..c071ce9 100644 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c @@ -797,15 +797,16 @@ static int mwifiex_set_wps_ie(struct mwifiex_private *priv, u8 *ie_data_ptr, u16 ie_len) { if (ie_len) { - priv->wps_ie = kzalloc(MWIFIEX_MAX_VSIE_LEN, GFP_KERNEL); - if (!priv->wps_ie) - return -ENOMEM; - if (ie_len > sizeof(priv->wps_ie)) { + if (ie_len > MWIFIEX_MAX_VSIE_LEN) { dev_dbg(priv->adapter->dev, "info: failed to copy WPS IE, too big\n"); - kfree(priv->wps_ie); return -1; } + + priv->wps_ie = kzalloc(MWIFIEX_MAX_VSIE_LEN, GFP_KERNEL); + if (!priv->wps_ie) + return -ENOMEM; + memcpy(priv->wps_ie, ie_data_ptr, ie_len); priv->wps_ie_len = ie_len; dev_dbg(priv->adapter->dev, "cmd: Set wps_ie_len=%d IE=%#x\n", -- cgit v0.10.2 From 43ba6b9f2f5e8090f16e8efe19d17cc8ce291d22 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Mon, 22 Jul 2013 19:17:47 -0700 Subject: mwifiex: add PCIe shutdown handler to avoid system hang on reboot If a reboot command is issued when the device is in connected state, the system hangs while booting. This issue is fixed by doing cleanup in the shutdown handler. Signed-off-by: Amitkumar Karwar Signed-off-by: Bing Zhao Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c index 4a57eb4..dce6486 100644 --- a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/mwifiex/pcie.c @@ -235,6 +235,14 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev) kfree(card); } +static void mwifiex_pcie_shutdown(struct pci_dev *pdev) +{ + user_rmmod = 1; + mwifiex_pcie_remove(pdev); + + return; +} + static DEFINE_PCI_DEVICE_TABLE(mwifiex_ids) = { { PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8766P, @@ -268,6 +276,7 @@ static struct pci_driver __refdata mwifiex_pcie = { .pm = &mwifiex_pcie_pm_ops, }, #endif + .shutdown = mwifiex_pcie_shutdown, }; /* -- cgit v0.10.2 From 003d87cbece50c2a558c954b7ce915dcc70acba7 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Mon, 22 Jul 2013 19:17:48 -0700 Subject: mwifiex: move del_timer_sync(scan_delay_timer) call to fix memleak Currently it is in mwifiex_adapter_cleanup(), which doesn't get called if driver initialization fails, causing a memory leak. scan_delay_timer is initialized in mwifiex_register(), so it should be deleted in mwifiex_unregister(). Hence it has been moved to the appropriate place. 
Signed-off-by: Amitkumar Karwar Signed-off-by: Bing Zhao Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c index fb86cec1..f23520f 100644 --- a/drivers/net/wireless/mwifiex/init.c +++ b/drivers/net/wireless/mwifiex/init.c @@ -378,18 +378,11 @@ static void mwifiex_free_lock_list(struct mwifiex_adapter *adapter) static void mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter) { - int i; - if (!adapter) { pr_err("%s: adapter is NULL\n", __func__); return; } - for (i = 0; i < adapter->priv_num; i++) { - if (adapter->priv[i]) - del_timer_sync(&adapter->priv[i]->scan_delay_timer); - } - mwifiex_cancel_all_pending_cmd(adapter); /* Free lock variables */ diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index e15ab72..ef151c5 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c @@ -197,6 +197,7 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter) for (i = 0; i < adapter->priv_num; i++) { if (adapter->priv[i]) { mwifiex_free_curr_bcn(adapter->priv[i]); + del_timer_sync(&adapter->priv[i]->scan_delay_timer); kfree(adapter->priv[i]); } } -- cgit v0.10.2 From f3340e6cfd0100d70653fe8a300049420e464deb Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Mon, 22 Jul 2013 19:17:49 -0700 Subject: mwifiex: remove unnecessary del_timer(cmd_timer) It is already there in mwifiex_unregister(). So unnecessary call in mwifiex_adapter_cleanup() is removed in this patch. Signed-off-by: Amitkumar Karwar Signed-off-by: Bing Zhao Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c index f23520f..2a430c9 100644 --- a/drivers/net/wireless/mwifiex/init.c +++ b/drivers/net/wireless/mwifiex/init.c @@ -392,8 +392,6 @@ mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter) dev_dbg(adapter->dev, "info: free cmd buffer\n"); mwifiex_free_cmd_buffer(adapter); - del_timer(&adapter->cmd_timer); - dev_dbg(adapter->dev, "info: free scan table\n"); if (adapter->if_ops.cleanup_if) -- cgit v0.10.2 From 4cfda67d8f37cc422ad9f181c173a241c913055b Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Mon, 22 Jul 2013 19:17:50 -0700 Subject: mwifiex: move if_ops.cleanup_if() call As if_ops.init_if() is called in mwifiex_register(), corresponding cleanup routine should be called in mwifiex_unregister(). Currently it's there in mwifiex_adapter_cleanup(), hence interface specific cleanup is not performed if driver initialization is failed. Signed-off-by: Amitkumar Karwar Signed-off-by: Bing Zhao Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c index 2a430c9..2ef450e 100644 --- a/drivers/net/wireless/mwifiex/init.c +++ b/drivers/net/wireless/mwifiex/init.c @@ -394,9 +394,6 @@ mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter) dev_dbg(adapter->dev, "info: free scan table\n"); - if (adapter->if_ops.cleanup_if) - adapter->if_ops.cleanup_if(adapter); - if (adapter->sleep_cfm) dev_kfree_skb_any(adapter->sleep_cfm); } diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index ef151c5..f26beb7 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c @@ -191,6 +191,9 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter) { s32 i; + if (adapter->if_ops.cleanup_if) + adapter->if_ops.cleanup_if(adapter); + del_timer(&adapter->cmd_timer); /* Free private structures */ -- cgit v0.10.2 From 2739a95d708c67ff541826f79ef0b2d19320306f Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Mon, 22 Jul 2013 19:17:51 -0700 Subject: mwifiex: add unregister_dev handler for usb interface Clear the data pointer stored in USB interface structure in this handler. This helps to return from mwifiex_usb_disconnect() if driver deinitialization is already performed while handling an error path for mwifiex_usb_probe(). USB8797 card gets enumerated twice. First enumeration is for firmware download and second enumeration expects firmware initialization. mwifiex_usb_probe() always takes care of deinitialization for first enumeration after firmware download. Also, this change matches our handling for SDIO and PCIe interfaces. Signed-off-by: Amitkumar Karwar Signed-off-by: Bing Zhao Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c index f90fe21..fca98b5 100644 --- a/drivers/net/wireless/mwifiex/usb.c +++ b/drivers/net/wireless/mwifiex/usb.c @@ -786,6 +786,13 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) return 0; } +static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter) +{ + struct usb_card_rec *card = (struct usb_card_rec *)adapter->card; + + usb_set_intfdata(card->intf, NULL); +} + static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, struct mwifiex_fw_image *fw) { @@ -978,6 +985,7 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter) static struct mwifiex_if_ops usb_ops = { .register_dev = mwifiex_register_dev, + .unregister_dev = mwifiex_unregister_dev, .wakeup = mwifiex_pm_wakeup_card, .wakeup_complete = mwifiex_pm_wakeup_card_complete, -- cgit v0.10.2 From d00062e318b1e4a4bec8a8e343efe8bc92d3b109 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Mon, 22 Jul 2013 19:17:52 -0700 Subject: mwifiex: reduce firmware poll retries After downloading the firmware, firmware status is checked by reading a register. Polling interval is 100 msecs. Therefore 100 retries means the status is checked for 10 secs which is more than sufficient for firmware to get ready. This patch removes 1000 retries macro usage, because 100secs time is not practical. Signed-off-by: Amitkumar Karwar Signed-off-by: Bing Zhao Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h index 1b45aa5..99c2729 100644 --- a/drivers/net/wireless/mwifiex/fw.h +++ b/drivers/net/wireless/mwifiex/fw.h @@ -85,9 +85,6 @@ enum KEY_TYPE_ID { #define WAPI_KEY_LEN 50 #define MAX_POLL_TRIES 100 - -#define MAX_MULTI_INTERFACE_POLL_TRIES 1000 - #define MAX_FIRMWARE_POLL_TRIES 100 #define FIRMWARE_READY_SDIO 0xfedc diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c index 2ef450e..e4c5ae3 100644 --- a/drivers/net/wireless/mwifiex/init.c +++ b/drivers/net/wireless/mwifiex/init.c @@ -691,7 +691,6 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter, if (!adapter->winner) { dev_notice(adapter->dev, "FW already running! Skip FW dnld\n"); - poll_num = MAX_MULTI_INTERFACE_POLL_TRIES; goto poll_fw; } } -- cgit v0.10.2 From a76b20e5ca8a9ec0b45a4150b4ad19e27dd19699 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Mon, 22 Jul 2013 19:17:53 -0700 Subject: mwifiex: replace mdelay with msleep It is observed that when wrong firmware is downloaded for PCIe card, system hangs for 10 seconds. The reason is mdelay() is used when firmware status is polled. Replace mdelay with msleep(non-blocking API) to fix the issue. Signed-off-by: Amitkumar Karwar Signed-off-by: Bing Zhao Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c index dce6486..098153f 100644 --- a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/mwifiex/pcie.c @@ -1942,7 +1942,7 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num) ret = 0; break; } else { - mdelay(100); + msleep(100); ret = -1; } } diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c index 5ee5ed0..14ac51f 100644 --- a/drivers/net/wireless/mwifiex/sdio.c +++ b/drivers/net/wireless/mwifiex/sdio.c @@ -927,7 +927,7 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter, ret = 0; break; } else { - mdelay(100); + msleep(100); ret = -1; } } -- cgit v0.10.2 From bedb361b5092ee9aa77ba9d0114bf75e8520825e Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Mon, 22 Jul 2013 19:17:54 -0700 Subject: mwifiex: correction in mwifiex_check_fw_status() return status For PCIe cards, when wrong firmware is downloaded, firmware is failed to be ready in 10 seconds. We should return an error at this point. But currently we are sending first command to firmware. As expected firmware doesn't respond to this command and command timeout occurs. This patch fixes the problem by removing unnecessary 'ret' variable modifications in "if (ret) {" block. The block is just supposed to update "adapter->winner" flag. Signed-off-by: Amitkumar Karwar Signed-off-by: Bing Zhao Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c index 098153f..52da8ee 100644 --- a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/mwifiex/pcie.c @@ -1954,12 +1954,10 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num) else if (!winner_status) { dev_err(adapter->dev, "PCI-E is the winner\n"); adapter->winner = 1; - ret = -1; } else { dev_err(adapter->dev, "PCI-E is not the winner <%#x,%d>, exit dnld\n", ret, adapter->winner); - ret = 0; } } -- cgit v0.10.2 From 6b21a69fbc5c60c44634963f3fe3e6f1d9140d3a Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Mon, 22 Jul 2013 19:17:57 -0700 Subject: mwifiex: remove duplicate structure host_cmd_tlv We already have 'struct mwifiex_ie_types_header' with same definition. Hence host_cmd_tlv is removed in this patch. Signed-off-by: Amitkumar Karwar Signed-off-by: Bing Zhao Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h index 99c2729..c0dfc4d 100644 --- a/drivers/net/wireless/mwifiex/fw.h +++ b/drivers/net/wireless/mwifiex/fw.h @@ -1366,11 +1366,6 @@ struct host_cmd_ds_802_11_eeprom_access { u8 value; } __packed; -struct host_cmd_tlv { - __le16 type; - __le16 len; -} __packed; - struct mwifiex_assoc_event { u8 sta_addr[ETH_ALEN]; __le16 type; @@ -1396,99 +1391,99 @@ struct host_cmd_11ac_vht_cfg { } __packed; struct host_cmd_tlv_akmp { - struct host_cmd_tlv tlv; + struct mwifiex_ie_types_header header; __le16 key_mgmt; __le16 key_mgmt_operation; } __packed; struct host_cmd_tlv_pwk_cipher { - struct host_cmd_tlv tlv; + struct mwifiex_ie_types_header header; __le16 proto; u8 cipher; u8 reserved; } __packed; struct host_cmd_tlv_gwk_cipher { - struct host_cmd_tlv tlv; + struct mwifiex_ie_types_header header; u8 cipher; u8 reserved; } __packed; struct host_cmd_tlv_passphrase { - struct host_cmd_tlv tlv; + struct mwifiex_ie_types_header header; u8 passphrase[0]; } __packed; struct host_cmd_tlv_wep_key { - struct host_cmd_tlv tlv; + struct mwifiex_ie_types_header header; u8 key_index; u8 is_default; u8 key[1]; }; struct host_cmd_tlv_auth_type { - struct host_cmd_tlv tlv; + struct mwifiex_ie_types_header header; u8 auth_type; } __packed; struct host_cmd_tlv_encrypt_protocol { - struct host_cmd_tlv tlv; + struct mwifiex_ie_types_header header; __le16 proto; } __packed; struct host_cmd_tlv_ssid { - struct host_cmd_tlv tlv; + struct mwifiex_ie_types_header header; u8 ssid[0]; } __packed; struct host_cmd_tlv_rates { - struct host_cmd_tlv tlv; + struct mwifiex_ie_types_header header; u8 rates[0]; } __packed; struct host_cmd_tlv_bcast_ssid { - struct host_cmd_tlv tlv; + struct mwifiex_ie_types_header header; u8 bcast_ctl; } __packed; struct host_cmd_tlv_beacon_period { - struct host_cmd_tlv tlv; + struct mwifiex_ie_types_header header; __le16 period; } __packed; struct host_cmd_tlv_dtim_period { - struct host_cmd_tlv tlv; + struct mwifiex_ie_types_header header; u8 period; } __packed; struct host_cmd_tlv_frag_threshold { - struct host_cmd_tlv tlv; + struct mwifiex_ie_types_header header; __le16 frag_thr; } __packed; struct host_cmd_tlv_rts_threshold { - struct host_cmd_tlv tlv; + struct mwifiex_ie_types_header header; __le16 rts_thr; } __packed; struct host_cmd_tlv_retry_limit { - struct host_cmd_tlv tlv; + struct mwifiex_ie_types_header header; u8 limit; } __packed; struct host_cmd_tlv_mac_addr { - struct host_cmd_tlv tlv; + struct mwifiex_ie_types_header header; u8 mac_addr[ETH_ALEN]; } __packed; struct 
host_cmd_tlv_channel_band { - struct host_cmd_tlv tlv; + struct mwifiex_ie_types_header header; u8 band_config; u8 channel; } __packed; struct host_cmd_tlv_ageout_timer { - struct host_cmd_tlv tlv; + struct mwifiex_ie_types_header header; __le32 sta_ao_timer; } __packed; diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/mwifiex/ie.c index e38342f..220af4f 100644 --- a/drivers/net/wireless/mwifiex/ie.c +++ b/drivers/net/wireless/mwifiex/ie.c @@ -87,7 +87,7 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv, u8 *tmp; input_len = le16_to_cpu(ie_list->len); - travel_len = sizeof(struct host_cmd_tlv); + travel_len = sizeof(struct mwifiex_ie_types_header); ie_list->len = 0; diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c index 8ece485..9b75ed8 100644 --- a/drivers/net/wireless/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/mwifiex/sta_cmd.c @@ -707,8 +707,9 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv, if (priv->bss_type == MWIFIEX_BSS_TYPE_UAP) { tlv_mac = (void *)((u8 *)&key_material->key_param_set + key_param_len); - tlv_mac->tlv.type = cpu_to_le16(TLV_TYPE_STA_MAC_ADDR); - tlv_mac->tlv.len = cpu_to_le16(ETH_ALEN); + tlv_mac->header.type = + cpu_to_le16(TLV_TYPE_STA_MAC_ADDR); + tlv_mac->header.len = cpu_to_le16(ETH_ALEN); memcpy(tlv_mac->mac_addr, enc_key->mac_addr, ETH_ALEN); cmd_size = key_param_len + S_DS_GEN + sizeof(key_material->action) + diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c index 2de882d..64424c8 100644 --- a/drivers/net/wireless/mwifiex/uap_cmd.c +++ b/drivers/net/wireless/mwifiex/uap_cmd.c @@ -293,9 +293,9 @@ mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size) u8 *tlv = *tlv_buf; tlv_akmp = (struct host_cmd_tlv_akmp *)tlv; - tlv_akmp->tlv.type = cpu_to_le16(TLV_TYPE_UAP_AKMP); - tlv_akmp->tlv.len = cpu_to_le16(sizeof(struct host_cmd_tlv_akmp) - - sizeof(struct host_cmd_tlv)); + tlv_akmp->header.type = cpu_to_le16(TLV_TYPE_UAP_AKMP); + tlv_akmp->header.len = cpu_to_le16(sizeof(struct host_cmd_tlv_akmp) - + sizeof(struct mwifiex_ie_types_header)); tlv_akmp->key_mgmt_operation = cpu_to_le16(bss_cfg->key_mgmt_operation); tlv_akmp->key_mgmt = cpu_to_le16(bss_cfg->key_mgmt); cmd_size += sizeof(struct host_cmd_tlv_akmp); @@ -303,10 +303,10 @@ mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size) if (bss_cfg->wpa_cfg.pairwise_cipher_wpa & VALID_CIPHER_BITMAP) { pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv; - pwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER); - pwk_cipher->tlv.len = + pwk_cipher->header.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER); + pwk_cipher->header.len = cpu_to_le16(sizeof(struct host_cmd_tlv_pwk_cipher) - - sizeof(struct host_cmd_tlv)); + sizeof(struct mwifiex_ie_types_header)); pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA); pwk_cipher->cipher = bss_cfg->wpa_cfg.pairwise_cipher_wpa; cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher); @@ -315,10 +315,10 @@ mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size) if (bss_cfg->wpa_cfg.pairwise_cipher_wpa2 & VALID_CIPHER_BITMAP) { pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv; - pwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER); - pwk_cipher->tlv.len = + pwk_cipher->header.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER); + pwk_cipher->header.len = cpu_to_le16(sizeof(struct host_cmd_tlv_pwk_cipher) - - sizeof(struct host_cmd_tlv)); + sizeof(struct mwifiex_ie_types_header)); pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA2); 
pwk_cipher->cipher = bss_cfg->wpa_cfg.pairwise_cipher_wpa2; cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher); @@ -327,10 +327,10 @@ mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size) if (bss_cfg->wpa_cfg.group_cipher & VALID_CIPHER_BITMAP) { gwk_cipher = (struct host_cmd_tlv_gwk_cipher *)tlv; - gwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_GWK_CIPHER); - gwk_cipher->tlv.len = + gwk_cipher->header.type = cpu_to_le16(TLV_TYPE_GWK_CIPHER); + gwk_cipher->header.len = cpu_to_le16(sizeof(struct host_cmd_tlv_gwk_cipher) - - sizeof(struct host_cmd_tlv)); + sizeof(struct mwifiex_ie_types_header)); gwk_cipher->cipher = bss_cfg->wpa_cfg.group_cipher; cmd_size += sizeof(struct host_cmd_tlv_gwk_cipher); tlv += sizeof(struct host_cmd_tlv_gwk_cipher); @@ -338,13 +338,15 @@ mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size) if (bss_cfg->wpa_cfg.length) { passphrase = (struct host_cmd_tlv_passphrase *)tlv; - passphrase->tlv.type = cpu_to_le16(TLV_TYPE_UAP_WPA_PASSPHRASE); - passphrase->tlv.len = cpu_to_le16(bss_cfg->wpa_cfg.length); + passphrase->header.type = + cpu_to_le16(TLV_TYPE_UAP_WPA_PASSPHRASE); + passphrase->header.len = cpu_to_le16(bss_cfg->wpa_cfg.length); memcpy(passphrase->passphrase, bss_cfg->wpa_cfg.passphrase, bss_cfg->wpa_cfg.length); - cmd_size += sizeof(struct host_cmd_tlv) + + cmd_size += sizeof(struct mwifiex_ie_types_header) + bss_cfg->wpa_cfg.length; - tlv += sizeof(struct host_cmd_tlv) + bss_cfg->wpa_cfg.length; + tlv += sizeof(struct mwifiex_ie_types_header) + + bss_cfg->wpa_cfg.length; } *param_size = cmd_size; @@ -403,16 +405,17 @@ mwifiex_uap_bss_wep(u8 **tlv_buf, void *cmd_buf, u16 *param_size) (bss_cfg->wep_cfg[i].length == WLAN_KEY_LEN_WEP40 || bss_cfg->wep_cfg[i].length == WLAN_KEY_LEN_WEP104)) { wep_key = (struct host_cmd_tlv_wep_key *)tlv; - wep_key->tlv.type = cpu_to_le16(TLV_TYPE_UAP_WEP_KEY); - wep_key->tlv.len = + wep_key->header.type = + cpu_to_le16(TLV_TYPE_UAP_WEP_KEY); + wep_key->header.len = cpu_to_le16(bss_cfg->wep_cfg[i].length + 2); wep_key->key_index = bss_cfg->wep_cfg[i].key_index; wep_key->is_default = bss_cfg->wep_cfg[i].is_default; memcpy(wep_key->key, bss_cfg->wep_cfg[i].key, bss_cfg->wep_cfg[i].length); - cmd_size += sizeof(struct host_cmd_tlv) + 2 + + cmd_size += sizeof(struct mwifiex_ie_types_header) + 2 + bss_cfg->wep_cfg[i].length; - tlv += sizeof(struct host_cmd_tlv) + 2 + + tlv += sizeof(struct mwifiex_ie_types_header) + 2 + bss_cfg->wep_cfg[i].length; } } @@ -449,16 +452,17 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size) if (bss_cfg->ssid.ssid_len) { ssid = (struct host_cmd_tlv_ssid *)tlv; - ssid->tlv.type = cpu_to_le16(TLV_TYPE_UAP_SSID); - ssid->tlv.len = cpu_to_le16((u16)bss_cfg->ssid.ssid_len); + ssid->header.type = cpu_to_le16(TLV_TYPE_UAP_SSID); + ssid->header.len = cpu_to_le16((u16)bss_cfg->ssid.ssid_len); memcpy(ssid->ssid, bss_cfg->ssid.ssid, bss_cfg->ssid.ssid_len); - cmd_size += sizeof(struct host_cmd_tlv) + + cmd_size += sizeof(struct mwifiex_ie_types_header) + bss_cfg->ssid.ssid_len; - tlv += sizeof(struct host_cmd_tlv) + bss_cfg->ssid.ssid_len; + tlv += sizeof(struct mwifiex_ie_types_header) + + bss_cfg->ssid.ssid_len; bcast_ssid = (struct host_cmd_tlv_bcast_ssid *)tlv; - bcast_ssid->tlv.type = cpu_to_le16(TLV_TYPE_UAP_BCAST_SSID); - bcast_ssid->tlv.len = + bcast_ssid->header.type = cpu_to_le16(TLV_TYPE_UAP_BCAST_SSID); + bcast_ssid->header.len = cpu_to_le16(sizeof(bcast_ssid->bcast_ctl)); bcast_ssid->bcast_ctl = bss_cfg->bcast_ssid_ctl; cmd_size += sizeof(struct 
host_cmd_tlv_bcast_ssid); @@ -466,13 +470,13 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size) } if (bss_cfg->rates[0]) { tlv_rates = (struct host_cmd_tlv_rates *)tlv; - tlv_rates->tlv.type = cpu_to_le16(TLV_TYPE_UAP_RATES); + tlv_rates->header.type = cpu_to_le16(TLV_TYPE_UAP_RATES); for (i = 0; i < MWIFIEX_SUPPORTED_RATES && bss_cfg->rates[i]; i++) tlv_rates->rates[i] = bss_cfg->rates[i]; - tlv_rates->tlv.len = cpu_to_le16(i); + tlv_rates->header.len = cpu_to_le16(i); cmd_size += sizeof(struct host_cmd_tlv_rates) + i; tlv += sizeof(struct host_cmd_tlv_rates) + i; } @@ -482,10 +486,10 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size) (bss_cfg->band_cfg == BAND_CONFIG_A && bss_cfg->channel <= MAX_CHANNEL_BAND_A))) { chan_band = (struct host_cmd_tlv_channel_band *)tlv; - chan_band->tlv.type = cpu_to_le16(TLV_TYPE_CHANNELBANDLIST); - chan_band->tlv.len = + chan_band->header.type = cpu_to_le16(TLV_TYPE_CHANNELBANDLIST); + chan_band->header.len = cpu_to_le16(sizeof(struct host_cmd_tlv_channel_band) - - sizeof(struct host_cmd_tlv)); + sizeof(struct mwifiex_ie_types_header)); chan_band->band_config = bss_cfg->band_cfg; chan_band->channel = bss_cfg->channel; cmd_size += sizeof(struct host_cmd_tlv_channel_band); @@ -494,11 +498,11 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size) if (bss_cfg->beacon_period >= MIN_BEACON_PERIOD && bss_cfg->beacon_period <= MAX_BEACON_PERIOD) { beacon_period = (struct host_cmd_tlv_beacon_period *)tlv; - beacon_period->tlv.type = + beacon_period->header.type = cpu_to_le16(TLV_TYPE_UAP_BEACON_PERIOD); - beacon_period->tlv.len = + beacon_period->header.len = cpu_to_le16(sizeof(struct host_cmd_tlv_beacon_period) - - sizeof(struct host_cmd_tlv)); + sizeof(struct mwifiex_ie_types_header)); beacon_period->period = cpu_to_le16(bss_cfg->beacon_period); cmd_size += sizeof(struct host_cmd_tlv_beacon_period); tlv += sizeof(struct host_cmd_tlv_beacon_period); @@ -506,21 +510,22 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size) if (bss_cfg->dtim_period >= MIN_DTIM_PERIOD && bss_cfg->dtim_period <= MAX_DTIM_PERIOD) { dtim_period = (struct host_cmd_tlv_dtim_period *)tlv; - dtim_period->tlv.type = cpu_to_le16(TLV_TYPE_UAP_DTIM_PERIOD); - dtim_period->tlv.len = + dtim_period->header.type = + cpu_to_le16(TLV_TYPE_UAP_DTIM_PERIOD); + dtim_period->header.len = cpu_to_le16(sizeof(struct host_cmd_tlv_dtim_period) - - sizeof(struct host_cmd_tlv)); + sizeof(struct mwifiex_ie_types_header)); dtim_period->period = bss_cfg->dtim_period; cmd_size += sizeof(struct host_cmd_tlv_dtim_period); tlv += sizeof(struct host_cmd_tlv_dtim_period); } if (bss_cfg->rts_threshold <= MWIFIEX_RTS_MAX_VALUE) { rts_threshold = (struct host_cmd_tlv_rts_threshold *)tlv; - rts_threshold->tlv.type = + rts_threshold->header.type = cpu_to_le16(TLV_TYPE_UAP_RTS_THRESHOLD); - rts_threshold->tlv.len = + rts_threshold->header.len = cpu_to_le16(sizeof(struct host_cmd_tlv_rts_threshold) - - sizeof(struct host_cmd_tlv)); + sizeof(struct mwifiex_ie_types_header)); rts_threshold->rts_thr = cpu_to_le16(bss_cfg->rts_threshold); cmd_size += sizeof(struct host_cmd_tlv_frag_threshold); tlv += sizeof(struct host_cmd_tlv_frag_threshold); @@ -528,21 +533,22 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size) if ((bss_cfg->frag_threshold >= MWIFIEX_FRAG_MIN_VALUE) && (bss_cfg->frag_threshold <= MWIFIEX_FRAG_MAX_VALUE)) { frag_threshold = (struct host_cmd_tlv_frag_threshold *)tlv; - frag_threshold->tlv.type = + 
frag_threshold->header.type = cpu_to_le16(TLV_TYPE_UAP_FRAG_THRESHOLD); - frag_threshold->tlv.len = + frag_threshold->header.len = cpu_to_le16(sizeof(struct host_cmd_tlv_frag_threshold) - - sizeof(struct host_cmd_tlv)); + sizeof(struct mwifiex_ie_types_header)); frag_threshold->frag_thr = cpu_to_le16(bss_cfg->frag_threshold); cmd_size += sizeof(struct host_cmd_tlv_frag_threshold); tlv += sizeof(struct host_cmd_tlv_frag_threshold); } if (bss_cfg->retry_limit <= MWIFIEX_RETRY_LIMIT) { retry_limit = (struct host_cmd_tlv_retry_limit *)tlv; - retry_limit->tlv.type = cpu_to_le16(TLV_TYPE_UAP_RETRY_LIMIT); - retry_limit->tlv.len = + retry_limit->header.type = + cpu_to_le16(TLV_TYPE_UAP_RETRY_LIMIT); + retry_limit->header.len = cpu_to_le16(sizeof(struct host_cmd_tlv_retry_limit) - - sizeof(struct host_cmd_tlv)); + sizeof(struct mwifiex_ie_types_header)); retry_limit->limit = (u8)bss_cfg->retry_limit; cmd_size += sizeof(struct host_cmd_tlv_retry_limit); tlv += sizeof(struct host_cmd_tlv_retry_limit); @@ -557,21 +563,21 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size) if ((bss_cfg->auth_mode <= WLAN_AUTH_SHARED_KEY) || (bss_cfg->auth_mode == MWIFIEX_AUTH_MODE_AUTO)) { auth_type = (struct host_cmd_tlv_auth_type *)tlv; - auth_type->tlv.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE); - auth_type->tlv.len = + auth_type->header.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE); + auth_type->header.len = cpu_to_le16(sizeof(struct host_cmd_tlv_auth_type) - - sizeof(struct host_cmd_tlv)); + sizeof(struct mwifiex_ie_types_header)); auth_type->auth_type = (u8)bss_cfg->auth_mode; cmd_size += sizeof(struct host_cmd_tlv_auth_type); tlv += sizeof(struct host_cmd_tlv_auth_type); } if (bss_cfg->protocol) { encrypt_protocol = (struct host_cmd_tlv_encrypt_protocol *)tlv; - encrypt_protocol->tlv.type = + encrypt_protocol->header.type = cpu_to_le16(TLV_TYPE_UAP_ENCRY_PROTOCOL); - encrypt_protocol->tlv.len = + encrypt_protocol->header.len = cpu_to_le16(sizeof(struct host_cmd_tlv_encrypt_protocol) - - sizeof(struct host_cmd_tlv)); + - sizeof(struct mwifiex_ie_types_header)); encrypt_protocol->proto = cpu_to_le16(bss_cfg->protocol); cmd_size += sizeof(struct host_cmd_tlv_encrypt_protocol); tlv += sizeof(struct host_cmd_tlv_encrypt_protocol); @@ -608,9 +614,9 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size) if (bss_cfg->sta_ao_timer) { ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv; - ao_timer->tlv.type = cpu_to_le16(TLV_TYPE_UAP_AO_TIMER); - ao_timer->tlv.len = cpu_to_le16(sizeof(*ao_timer) - - sizeof(struct host_cmd_tlv)); + ao_timer->header.type = cpu_to_le16(TLV_TYPE_UAP_AO_TIMER); + ao_timer->header.len = cpu_to_le16(sizeof(*ao_timer) - + sizeof(struct mwifiex_ie_types_header)); ao_timer->sta_ao_timer = cpu_to_le32(bss_cfg->sta_ao_timer); cmd_size += sizeof(*ao_timer); tlv += sizeof(*ao_timer); @@ -618,9 +624,10 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size) if (bss_cfg->ps_sta_ao_timer) { ps_ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv; - ps_ao_timer->tlv.type = cpu_to_le16(TLV_TYPE_UAP_PS_AO_TIMER); - ps_ao_timer->tlv.len = cpu_to_le16(sizeof(*ps_ao_timer) - - sizeof(struct host_cmd_tlv)); + ps_ao_timer->header.type = + cpu_to_le16(TLV_TYPE_UAP_PS_AO_TIMER); + ps_ao_timer->header.len = cpu_to_le16(sizeof(*ps_ao_timer) - + sizeof(struct mwifiex_ie_types_header)); ps_ao_timer->sta_ao_timer = cpu_to_le32(bss_cfg->ps_sta_ao_timer); cmd_size += sizeof(*ps_ao_timer); @@ -636,16 +643,17 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 
*param_size) static int mwifiex_uap_custom_ie_prepare(u8 *tlv, void *cmd_buf, u16 *ie_size) { struct mwifiex_ie_list *ap_ie = cmd_buf; - struct host_cmd_tlv *tlv_ie = (struct host_cmd_tlv *)tlv; + struct mwifiex_ie_types_header *tlv_ie = (void *)tlv; if (!ap_ie || !ap_ie->len || !ap_ie->ie_list) return -1; - *ie_size += le16_to_cpu(ap_ie->len) + sizeof(struct host_cmd_tlv); + *ie_size += le16_to_cpu(ap_ie->len) + + sizeof(struct mwifiex_ie_types_header); tlv_ie->type = cpu_to_le16(TLV_TYPE_MGMT_IE); tlv_ie->len = ap_ie->len; - tlv += sizeof(struct host_cmd_tlv); + tlv += sizeof(struct mwifiex_ie_types_header); memcpy(tlv, ap_ie->ie_list, le16_to_cpu(ap_ie->len)); -- cgit v0.10.2 From a4df8f56e92104c5e7be82cc2c471e600ea3f9ef Mon Sep 17 00:00:00 2001 From: Avinash Patil Date: Mon, 22 Jul 2013 19:17:58 -0700 Subject: mwifiex: modify mwifiex_ap_sta_limits to advertise support for P2P We support maximum simultaneous 2 non-AP station interfaces and they can assume role of Station/P2P client/P2P GO. Advertise this support to cfg80211 so that concurrent P2P/STA operation is possible. Signed-off-by: Avinash Patil Signed-off-by: Bing Zhao Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index dbdd3b2..cc334d5 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -25,7 +25,9 @@ module_param(reg_alpha2, charp, 0); static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = { { - .max = 2, .types = BIT(NL80211_IFTYPE_STATION), + .max = 2, .types = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_P2P_GO) | + BIT(NL80211_IFTYPE_P2P_CLIENT), }, { .max = 1, .types = BIT(NL80211_IFTYPE_AP), -- cgit v0.10.2 From c2b8359d7878f75584ed68b7a2b930bd9bea814d Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Tue, 23 Jul 2013 16:25:17 +0530 Subject: ath9k: Fix diversity combining for AR9285 When antenna diversity combining is enabled in the EEPROM, the initial values for the MAIN/ALT config have to be programmed correctly. This patch adds it for AR9285. Since the diversity combining macros are common to all chip families, remove the redundant AR9285 macros and move the definitions to phy.h. Signed-off-by: Sujith Manoharan Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.h b/drivers/net/wireless/ath/ath9k/ar9002_phy.h index f9eb2c3..d3f0928 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_phy.h +++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.h @@ -317,10 +317,6 @@ #define AR_PHY_9285_ANT_DIV_ALT_GAINTB_S 29 #define AR_PHY_9285_ANT_DIV_MAIN_GAINTB 0x40000000 #define AR_PHY_9285_ANT_DIV_MAIN_GAINTB_S 30 -#define AR_PHY_9285_ANT_DIV_LNA1 2 -#define AR_PHY_9285_ANT_DIV_LNA2 1 -#define AR_PHY_9285_ANT_DIV_LNA1_PLUS_LNA2 3 -#define AR_PHY_9285_ANT_DIV_LNA1_MINUS_LNA2 0 #define AR_PHY_9285_ANT_DIV_GAINTB_0 0 #define AR_PHY_9285_ANT_DIV_GAINTB_1 1 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index d105e43..a98e6a3 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -3673,9 +3673,9 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz) AR_PHY_ANT_DIV_ALT_GAINTB | AR_PHY_ANT_DIV_MAIN_GAINTB)); /* by default use LNA1 for the main antenna */ - regval |= (AR_PHY_ANT_DIV_LNA1 << + regval |= (ATH_ANT_DIV_COMB_LNA1 << AR_PHY_ANT_DIV_MAIN_LNACONF_S); - regval |= (AR_PHY_ANT_DIV_LNA2 << + regval |= (ATH_ANT_DIV_COMB_LNA2 << AR_PHY_ANT_DIV_ALT_LNACONF_S); REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval); } diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 2db4ddf..3ec33ce 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -1465,8 +1465,8 @@ static void ar9003_hw_antctrl_shared_chain_lnadiv(struct ath_hw *ah, AR_PHY_ANT_DIV_ALT_LNACONF | AR_PHY_ANT_DIV_MAIN_GAINTB | AR_PHY_ANT_DIV_ALT_GAINTB); - regval |= (AR_PHY_ANT_DIV_LNA1 << AR_PHY_ANT_DIV_MAIN_LNACONF_S); - regval |= (AR_PHY_ANT_DIV_LNA2 << AR_PHY_ANT_DIV_ALT_LNACONF_S); + regval |= (ATH_ANT_DIV_COMB_LNA1 << AR_PHY_ANT_DIV_MAIN_LNACONF_S); + regval |= (ATH_ANT_DIV_COMB_LNA2 << AR_PHY_ANT_DIV_ALT_LNACONF_S); REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval); } } diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h index d4d39f3..23c019d 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h @@ -296,11 +296,6 @@ #define AR_PHY_ANT_DIV_MAIN_GAINTB 0x40000000 #define AR_PHY_ANT_DIV_MAIN_GAINTB_S 30 -#define AR_PHY_ANT_DIV_LNA1_MINUS_LNA2 0x0 -#define AR_PHY_ANT_DIV_LNA2 0x1 -#define AR_PHY_ANT_DIV_LNA1 0x2 -#define AR_PHY_ANT_DIV_LNA1_PLUS_LNA2 0x3 - #define AR_PHY_EXTCHN_PWRTHR1 (AR_AGC_BASE + 0x2c) #define AR_PHY_EXT_CHN_WIN (AR_AGC_BASE + 0x30) #define AR_PHY_20_40_DET_THR (AR_AGC_BASE + 0x34) diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index c1224b5..76e38d3 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -591,13 +591,6 @@ static inline void ath_fill_led_pin(struct ath_softc *sc) #define ATH_ANT_DIV_COMB_LNA1_DELTA_MID -2 #define ATH_ANT_DIV_COMB_LNA1_DELTA_LOW 2 -enum ath9k_ant_div_comb_lna_conf { - ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2, - ATH_ANT_DIV_COMB_LNA2, - ATH_ANT_DIV_COMB_LNA1, - ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2, -}; - struct ath_ant_comb { u16 count; u16 total_pkt_count; diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c index c2bfd748..9ea8e4b 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c @@ -812,6 
+812,7 @@ static void ath9k_hw_4k_set_gain(struct ath_hw *ah, static void ath9k_hw_4k_set_board_values(struct ath_hw *ah, struct ath9k_channel *chan) { + struct ath9k_hw_capabilities *pCap = &ah->caps; struct modal_eep_4k_header *pModal; struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k; struct base_eep_header_4k *pBase = &eep->baseEepHeader; @@ -858,6 +859,24 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah, REG_WRITE(ah, AR_PHY_CCK_DETECT, regVal); regVal = REG_READ(ah, AR_PHY_CCK_DETECT); + + if (pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) { + /* + * If diversity combining is enabled, + * set MAIN to LNA1 and ALT to LNA2 initially. + */ + regVal = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL); + regVal &= (~(AR_PHY_9285_ANT_DIV_MAIN_LNACONF | + AR_PHY_9285_ANT_DIV_ALT_LNACONF)); + + regVal |= (ATH_ANT_DIV_COMB_LNA1 << + AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S); + regVal |= (ATH_ANT_DIV_COMB_LNA2 << + AR_PHY_9285_ANT_DIV_ALT_LNACONF_S); + regVal &= (~(AR_PHY_9285_FAST_DIV_BIAS)); + regVal |= (0 << AR_PHY_9285_FAST_DIV_BIAS_S); + REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regVal); + } } if (pModal->version >= 2) { diff --git a/drivers/net/wireless/ath/ath9k/phy.h b/drivers/net/wireless/ath/ath9k/phy.h index 8b38030..4a1b992 100644 --- a/drivers/net/wireless/ath/ath9k/phy.h +++ b/drivers/net/wireless/ath/ath9k/phy.h @@ -48,4 +48,11 @@ #define AR_PHY_PLL_CONTROL 0x16180 #define AR_PHY_PLL_MODE 0x16184 +enum ath9k_ant_div_comb_lna_conf { + ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2, + ATH_ANT_DIV_COMB_LNA2, + ATH_ANT_DIV_COMB_LNA1, + ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2, +}; + #endif -- cgit v0.10.2 From 637065267eab4817c0b06cbf3c7fc80842acab99 Mon Sep 17 00:00:00 2001 From: Xose Vazquez Perez Date: Tue, 23 Jul 2013 14:55:15 +0200 Subject: wireless: rt2x00: rt2800usb: add RT3573 devices taken from Ralink linux and windows drivers: 0x1b75, 0x7733 AirLive 450Mbps Wireless-N Dual Band USB Adapter 0x0b05, 0x17bc ASUS USB-N66 450Mbps Dual Band USB Adapter 0x0b05, 0x17ad ASUS USB-N66 Dual Band N Network Adapter 0x050d, 0x1103 Belkin Wireless Adapter 0x148f, 0xf301 Cameo Ralink3573 3x3 single band USB dongle 0x7392, 0x7733 Edimax 0x0e66, 0x0020 Hawking HD45U Dual Band USB Wireless-N Adapter 0x0e66, 0x0021 Hawking HD45U Dual Band Wls-450N Adapter 0x04bb, 0x094e I-O DATA WN-AG450U Wireless LAN Adapter 0x0789, 0x016b Logitec LAN-W450AN/U2 0x0846, 0x9012 NETGEAR WNDA4100 N900 Wireless Dual Band USB Adapter 0x0846, 0x9019 NETGEAR WNDA4200D Wireless Dual Band USB Adapter 0x2019, 0xed19 Planex GW-USDual450 0x148f, 0x3573 Ralink 802.11n USB Wireless LAN Card 0x0df6, 0x0067 Sitecom Wireless Dualband Network Adapter N750 X6 0x0df6, 0x006a Sitecom Wireless Dualband Network Adapter N900 X7 0x0586, 0x3421 ZyXEL Dual-Band Wireless N450 USB Adapter Cc: Ivo van Doorn Cc: Gertjan van Wingerde Cc: Helmut Schaa Cc: John W. Linville Cc: users@rt2x00.serialmonkey.com Cc: linux-wireless@vger.kernel.org Signed-off-by: Xose Vazquez Perez Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index ab609ae..fc9efdf 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -1198,8 +1198,38 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x5a57, 0x0284) }, #endif #ifdef CONFIG_RT2800USB_RT3573 + /* AirLive */ + { USB_DEVICE(0x1b75, 0x7733) }, + /* ASUS */ + { USB_DEVICE(0x0b05, 0x17bc) }, + { USB_DEVICE(0x0b05, 0x17ad) }, + /* Belkin */ + { USB_DEVICE(0x050d, 0x1103) }, + /* Cameo */ + { USB_DEVICE(0x148f, 0xf301) }, + /* Edimax */ + { USB_DEVICE(0x7392, 0x7733) }, + /* Hawking */ + { USB_DEVICE(0x0e66, 0x0020) }, + { USB_DEVICE(0x0e66, 0x0021) }, + /* I-O DATA */ + { USB_DEVICE(0x04bb, 0x094e) }, /* Linksys */ { USB_DEVICE(0x13b1, 0x003b) }, + /* Logitec */ + { USB_DEVICE(0x0789, 0x016b) }, + /* NETGEAR */ + { USB_DEVICE(0x0846, 0x9012) }, + { USB_DEVICE(0x0846, 0x9019) }, + /* Planex */ + { USB_DEVICE(0x2019, 0xed19) }, + /* Ralink */ + { USB_DEVICE(0x148f, 0x3573) }, + /* Sitecom */ + { USB_DEVICE(0x0df6, 0x0067) }, + { USB_DEVICE(0x0df6, 0x006a) }, + /* ZyXEL */ + { USB_DEVICE(0x0586, 0x3421) }, #endif #ifdef CONFIG_RT2800USB_RT53XX /* Arcadyan */ -- cgit v0.10.2 From 72bb2f2678878dd4a758e628957f29ce28000d88 Mon Sep 17 00:00:00 2001 From: Hauke Mehrtens Date: Tue, 23 Jul 2013 23:28:49 +0200 Subject: bgmac: make bgmac depend on bcm47xx bgmac uses bcm47xx_nvram.h which is only available when BCM47XX was selected. Earlier BCMA_HOST_SOC depended on BCM47XX so this was not build on any other archs, but that changed. We should modify this driver to get access to the nvram or the variables through platform data. This fixes a build problem in linux-next reported by Stephen Rothwell: drivers/net/ethernet/broadcom/bgmac.c:19:27: fatal error: bcm47xx_nvram.h: No such file or directory #include ^ Signed-off-by: Hauke Mehrtens Reported-by: Stephen Rothwell Signed-off-by: John W. Linville diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 1d680ba..7b839bf 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -130,7 +130,7 @@ config BNX2X_SRIOV config BGMAC tristate "BCMA bus GBit core support" - depends on BCMA_HOST_SOC && HAS_DMA + depends on BCMA_HOST_SOC && HAS_DMA && BCM47XX ---help--- This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus. They can be found on BCM47xx SoCs and provide gigabit ethernet. -- cgit v0.10.2 From 86a37def2bba8d22f8b512e7c8a0f6f6e197728c Mon Sep 17 00:00:00 2001 From: "fan.du" Date: Mon, 22 Jul 2013 14:20:12 +0800 Subject: net ipv6: Remove rebundant rt6i_nsiblings initialization Seting rt->rt6i_nsiblings to zero is rebundant, because above memset zeroed the rest of rt excluding the first dst memember. Signed-off-by: Fan Du Acked-by: Nicolas Dichtel Acked-by: Hannes Frederic Sowa Cc: Hideaki YOSHIFUJI Cc: James Morris Signed-off-by: David S. Miller diff --git a/net/ipv6/route.c b/net/ipv6/route.c index a8c891a..74ab1f7 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -285,7 +285,6 @@ static inline struct rt6_info *ip6_dst_alloc(struct net *net, rt6_init_peer(rt, table ? 
&table->tb6_peers : net->ipv6.peers); rt->rt6i_genid = rt_genid(net); INIT_LIST_HEAD(&rt->rt6i_siblings); - rt->rt6i_nsiblings = 0; } return rt; } -- cgit v0.10.2 From 9225b23057aeba13957e8033f69a554dc246a80e Mon Sep 17 00:00:00 2001 From: "fan.du" Date: Mon, 22 Jul 2013 14:21:09 +0800 Subject: net: ipv6 eliminate parameter "int addrlen" in function fib6_add_1 The "int addrlen" in fib6_add_1 is rebundant, as we can get it from parameter "struct in6_addr *addr" once we modified its type. And also fix some coding style issues in fib6_add_1 Signed-off-by: Fan Du Acked-by: Hannes Frederic Sowa Signed-off-by: David S. Miller diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 5fc9c7a..8b6c773 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -425,8 +425,8 @@ out: * node. */ -static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr, - int addrlen, int plen, +static struct fib6_node *fib6_add_1(struct fib6_node *root, + struct in6_addr *addr, int plen, int offset, int allow_create, int replace_required) { @@ -543,7 +543,7 @@ insert_above: but if it is >= plen, the value is ignored in any case. */ - bit = __ipv6_addr_diff(addr, &key->addr, addrlen); + bit = __ipv6_addr_diff(addr, &key->addr, sizeof(*addr)); /* * (intermediate)[in] @@ -822,9 +822,9 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info) if (!allow_create && !replace_required) pr_warn("RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n"); - fn = fib6_add_1(root, &rt->rt6i_dst.addr, sizeof(struct in6_addr), - rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst), - allow_create, replace_required); + fn = fib6_add_1(root, &rt->rt6i_dst.addr, rt->rt6i_dst.plen, + offsetof(struct rt6_info, rt6i_dst), allow_create, + replace_required); if (IS_ERR(fn)) { err = PTR_ERR(fn); @@ -863,7 +863,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info) /* Now add the first leaf node to new subtree */ sn = fib6_add_1(sfn, &rt->rt6i_src.addr, - sizeof(struct in6_addr), rt->rt6i_src.plen, + rt->rt6i_src.plen, offsetof(struct rt6_info, rt6i_src), allow_create, replace_required); @@ -882,7 +882,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info) fn->subtree = sfn; } else { sn = fib6_add_1(fn->subtree, &rt->rt6i_src.addr, - sizeof(struct in6_addr), rt->rt6i_src.plen, + rt->rt6i_src.plen, offsetof(struct rt6_info, rt6i_src), allow_create, replace_required); -- cgit v0.10.2 From 55754f19d7ee4fa3633f55a4a084af8590c35efa Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Tue, 23 Jul 2013 10:18:04 +0900 Subject: sh_eth: add support for RMIIMODE register This register is prsent on the r8a7790 SoC. Signed-off-by: Simon Horman Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index a753928..87af49f 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -189,6 +189,7 @@ static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = { [RMCR] = 0x0258, [TFUCR] = 0x0264, [RFOCR] = 0x0268, + [RMIIMODE] = 0x026c, [FCFTR] = 0x0270, [TRIMD] = 0x027c, }; @@ -1124,6 +1125,9 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) if (ret) goto out; + if (mdp->cd->rmiimode) + sh_eth_write(ndev, 0x1, RMIIMODE); + /* Descriptor format */ sh_eth_ring_format(ndev); if (mdp->cd->rpadir) diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 99995bf..da93f5c 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -60,6 +60,7 @@ enum { EDOCR, TFUCR, RFOCR, + RMIIMODE, FCFTR, RPADIR, TRIMD, @@ -482,6 +483,7 @@ struct sh_eth_cpu_data { unsigned hw_crc:1; /* E-DMAC have CSMR */ unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */ unsigned shift_rd0:1; /* shift Rx descriptor word 0 right by 16 */ + unsigned rmiimode:1; /* EtherC has RMIIMODE register */ }; struct sh_eth_private { -- cgit v0.10.2 From e18dbf7efcb7a727e5db773597ddfd6981a530e6 Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Tue, 23 Jul 2013 10:18:05 +0900 Subject: sh_eth: Add support for r8a7790 SoC This is a copy of support for r8a7778/9 with the .rmiimode mode bit of struct sh_eth_cpu_data set. Also update R8A7779 to R8A777x. Signed-off-by: Simon Horman Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig index 19a8a04..a30c439 100644 --- a/drivers/net/ethernet/renesas/Kconfig +++ b/drivers/net/ethernet/renesas/Kconfig @@ -13,4 +13,4 @@ config SH_ETH Renesas SuperH Ethernet device driver. This driver supporting CPUs are: - SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757, - R8A7740 and R8A7779. + R8A7740, R8A777x and R8A7790. 
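A minimal sketch of how a legacy (non-DT) board file might hook up to the new "r8a7790-ether" platform ID introduced by this patch; this is an illustration, not part of the patch, and the MMIO window, IRQ number, PHY address and sh_eth_plat_data values below are placeholders rather than values taken from this series:

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/sh_eth.h>

/* Placeholder register window and IRQ; real values come from the SoC manual. */
static struct resource ether_resources[] = {
	DEFINE_RES_MEM(0xee700000, 0x400),
	DEFINE_RES_IRQ(162),
};

static struct sh_eth_plat_data ether_pdata = {
	.phy			= 0x1,	/* placeholder PHY address */
	.edmac_endian		= EDMAC_LITTLE_ENDIAN,
	.phy_interface		= PHY_INTERFACE_MODE_RMII,
	.ether_link_active_low	= 1,
};

static void __init board_add_ether_device(void)
{
	/* "r8a7790-ether" must match the sh_eth_id_table entry added below */
	platform_device_register_resndata(&platform_bus, "r8a7790-ether", -1,
					  ether_resources,
					  ARRAY_SIZE(ether_resources),
					  &ether_pdata, sizeof(ether_pdata));
}

With such a device registered, the platform bus matches the name against the driver's id_table and sh_eth_drv_probe() then picks up r8a7790_data, whose .rmiimode flag causes the RMIIMODE register write added earlier in this series.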
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 87af49f..fedc0a0 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -393,6 +393,26 @@ static struct sh_eth_cpu_data r8a777x_data = { .hw_swap = 1, }; +/* R8A7790 */ +static struct sh_eth_cpu_data r8a7790_data = { + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_r8a777x, + + .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, + .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, + .eesipr_value = 0x01ff009f, + + .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | + EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .hw_swap = 1, + .rmiimode = 1, +}; + static void sh_eth_set_rate_sh7724(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@ -2753,6 +2773,7 @@ static struct platform_device_id sh_eth_id_table[] = { { "sh7763-gether", (kernel_ulong_t)&sh7763_data }, { "r8a7740-gether", (kernel_ulong_t)&r8a7740_data }, { "r8a777x-ether", (kernel_ulong_t)&r8a777x_data }, + { "r8a7790-ether", (kernel_ulong_t)&r8a7790_data }, { } }; MODULE_DEVICE_TABLE(platform, sh_eth_id_table); -- cgit v0.10.2 From 5a712c13d3641a3bed243753f115434a7125e440 Mon Sep 17 00:00:00 2001 From: Sathya Perla Date: Tue, 23 Jul 2013 15:24:59 +0530 Subject: be2net: fix MAC address modification for VF Currently, the VFs by default don't have the privilege to modify MAC address. This will change in a subsequent fix wherein VFs will have the ability to modify MAC/VLAN filters. Fix be_mac_addr_set() logic to support MAC address modification on a privileged VF too. Signed-off-by: Sathya Perla Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 6e6e0a1..632c0d3 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -2606,9 +2606,12 @@ err: return status; } -/* Uses synchronous MCCQ */ +/* pmac_id_valid: true => pmac_id is supplied and MAC address is requested. + * pmac_id_valid: false => pmac_id or MAC address is requested. + * If pmac_id is returned, pmac_id_valid is returned as true + */ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, - bool *pmac_id_active, u32 *pmac_id, u8 domain) + bool *pmac_id_valid, u32 *pmac_id, u8 domain) { struct be_mcc_wrb *wrb; struct be_cmd_req_get_mac_list *req; @@ -2644,12 +2647,25 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, get_mac_list_cmd.size, wrb, &get_mac_list_cmd); req->hdr.domain = domain; req->mac_type = MAC_ADDRESS_TYPE_NETWORK; - req->perm_override = 1; + if (*pmac_id_valid) { + req->mac_id = cpu_to_le32(*pmac_id); + req->iface_id = cpu_to_le16(adapter->if_handle); + req->perm_override = 0; + } else { + req->perm_override = 1; + } status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_get_mac_list *resp = get_mac_list_cmd.va; + + if (*pmac_id_valid) { + memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr, + ETH_ALEN); + goto out; + } + mac_count = resp->true_mac_count + resp->pseudo_mac_count; /* Mac list returned could contain one or more active mac_ids * or one or more true or pseudo permanant mac addresses. 
@@ -2667,14 +2683,14 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, * is 6 bytes */ if (mac_addr_size == sizeof(u32)) { - *pmac_id_active = true; + *pmac_id_valid = true; mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id; *pmac_id = le32_to_cpu(mac_id); goto out; } } /* If no active mac_id found, return first mac addr */ - *pmac_id_active = false; + *pmac_id_valid = false; memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr, ETH_ALEN); } @@ -2686,6 +2702,23 @@ out: return status; } +int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac) +{ + int status; + bool active = true; + + /* When SH FW is ready, SH should use Lancer path too */ + if (lancer_chip(adapter)) { + /* Fetch the MAC address using pmac_id */ + status = be_cmd_get_mac_from_list(adapter, mac, &active, + &curr_pmac_id, 0); + return status; + } else { + return be_cmd_mac_addr_query(adapter, mac, false, + adapter->if_handle, curr_pmac_id); + } +} + /* Uses synchronous MCCQ */ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, u8 mac_count, u32 domain) diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 5228d88..7ecca3b 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -1924,6 +1924,8 @@ extern int be_cmd_get_fn_privileges(struct be_adapter *adapter, extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, bool *pmac_id_active, u32 *pmac_id, u8 domain); +extern int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id, + u8 *mac); extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, u8 mac_count, u32 domain); extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 181edb5..29e7870 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -247,54 +247,54 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped) static int be_mac_addr_set(struct net_device *netdev, void *p) { struct be_adapter *adapter = netdev_priv(netdev); + struct device *dev = &adapter->pdev->dev; struct sockaddr *addr = p; - int status = 0; - u8 current_mac[ETH_ALEN]; - u32 pmac_id = adapter->pmac_id[0]; - bool active_mac = true; + int status; + u8 mac[ETH_ALEN]; + u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - /* For BE VF, MAC address is already activated by PF. - * Hence only operation left is updating netdev->devaddr. - * Update it if user is passing the same MAC which was used - * during configuring VF MAC from PF(Hypervisor). + /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT + * privilege or if PF did not provision the new MAC address. + * On BE3, this cmd will always fail if the VF doesn't have the + * FILTMGMT privilege. This failure is OK, only if the PF programmed + * the MAC for the VF. 
*/ - if (!lancer_chip(adapter) && !be_physfn(adapter)) { - status = be_cmd_mac_addr_query(adapter, current_mac, - false, adapter->if_handle, 0); - if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN)) - goto done; - else - goto err; - } + status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data, + adapter->if_handle, &adapter->pmac_id[0], 0); + if (!status) { + curr_pmac_id = adapter->pmac_id[0]; - if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN)) - goto done; + /* Delete the old programmed MAC. This call may fail if the + * old MAC was already deleted by the PF driver. + */ + if (adapter->pmac_id[0] != old_pmac_id) + be_cmd_pmac_del(adapter, adapter->if_handle, + old_pmac_id, 0); + } - /* For Lancer check if any MAC is active. - * If active, get its mac id. + /* Decide if the new MAC is successfully activated only after + * querying the FW */ - if (lancer_chip(adapter) && !be_physfn(adapter)) - be_cmd_get_mac_from_list(adapter, current_mac, &active_mac, - &pmac_id, 0); - - status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data, - adapter->if_handle, - &adapter->pmac_id[0], 0); - + status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac); if (status) goto err; - if (active_mac) - be_cmd_pmac_del(adapter, adapter->if_handle, - pmac_id, 0); -done: + /* The MAC change did not happen, either due to lack of privilege + * or PF didn't pre-provision. + */ + if (memcmp(addr->sa_data, mac, ETH_ALEN)) { + status = -EPERM; + goto err; + } + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + dev_info(dev, "MAC address changed to %pM\n", mac); return 0; err: - dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data); + dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data); return status; } -- cgit v0.10.2 From 04a060280a083860e4d4723b878d69af1ba53ec9 Mon Sep 17 00:00:00 2001 From: Sathya Perla Date: Tue, 23 Jul 2013 15:25:00 +0530 Subject: be2net: allow VFs to program MAC and VLAN filters In the current design VFs were not allowed to program MAC/VLAN filters. Only the PF driver was allowed to configure/provision MAC and transparent VLANs to a VF. Change this to support MAC/VLAN filtering on a VF by a VM. Signed-off-by: Sathya Perla Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 632c0d3..5d37601 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -2606,6 +2606,38 @@ err: return status; } +/* Set privilege(s) for a function */ +int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges, + u32 domain) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_set_fn_privileges *req; + int status; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + + req = embedded_payload(wrb); + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req), + wrb, NULL); + req->hdr.domain = domain; + if (lancer_chip(adapter)) + req->privileges_lancer = cpu_to_le32(privileges); + else + req->privileges = cpu_to_le32(privileges); + + status = be_mcc_notify_wait(adapter); +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + /* pmac_id_valid: true => pmac_id is supplied and MAC address is requested. * pmac_id_valid: false => pmac_id or MAC address is requested. 
* If pmac_id is returned, pmac_id_valid is returned as true diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 7ecca3b..671a89d 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -202,6 +202,7 @@ struct be_mcc_mailbox { #define OPCODE_COMMON_READ_TRANSRECV_DATA 73 #define OPCODE_COMMON_GET_PORT_NAME 77 #define OPCODE_COMMON_SET_INTERRUPT_ENABLE 89 +#define OPCODE_COMMON_SET_FN_PRIVILEGES 100 #define OPCODE_COMMON_GET_PHY_DETAILS 102 #define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103 #define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121 @@ -1474,6 +1475,11 @@ struct be_cmd_resp_get_fn_privileges { u32 privilege_mask; }; +struct be_cmd_req_set_fn_privileges { + struct be_cmd_req_hdr hdr; + u32 privileges; /* Used by BE3, SH-R */ + u32 privileges_lancer; /* Used by Lancer */ +}; /******************** GET/SET_MACLIST **************************/ #define BE_MAX_MAC 64 @@ -1921,6 +1927,8 @@ extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size); extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf); extern int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege, u32 domain); +extern int be_cmd_set_fn_privileges(struct be_adapter *adapter, + u32 privileges, u32 vf_num); extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, bool *pmac_id_active, u32 *pmac_id, u8 domain); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 29e7870..858bb47 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -2880,6 +2880,7 @@ static int be_vf_setup(struct be_adapter *adapter) u16 def_vlan, lnk_speed; int status, old_vfs, vf; struct device *dev = &adapter->pdev->dev; + u32 privileges; old_vfs = pci_num_vf(adapter->pdev); if (old_vfs) { @@ -2923,6 +2924,18 @@ static int be_vf_setup(struct be_adapter *adapter) } for_all_vfs(adapter, vf_cfg, vf) { + /* Allow VFs to programs MAC/VLAN filters */ + status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1); + if (!status && !(privileges & BE_PRIV_FILTMGMT)) { + status = be_cmd_set_fn_privileges(adapter, + privileges | + BE_PRIV_FILTMGMT, + vf + 1); + if (!status) + dev_info(dev, "VF%d has FILTMGMT privilege\n", + vf); + } + /* BE3 FW, by default, caps VF TX-rate to 100mbps. * Allow full available bandwidth */ -- cgit v0.10.2 From b5bb9776b143dcf7931c6e00330bf8d518376760 Mon Sep 17 00:00:00 2001 From: Sathya Perla Date: Tue, 23 Jul 2013 15:25:01 +0530 Subject: be2net: fix pmac_id for BE3 VFs For BE3 VFs, the permanent MAC is added by its PF. The VF can retrieve its pmac_id only via the IFACE_CREATE cmd. This is not true for Lancer and SH-R VFs which get the pmac_id by issuing a ADD_IFACE_MAC cmd. So, use this hack only for BE3 VFs. Signed-off-by: Sathya Perla Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 5d37601..b384ae7 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1339,6 +1339,10 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, if (!status) { struct be_cmd_resp_if_create *resp = embedded_payload(wrb); *if_handle = le32_to_cpu(resp->interface_id); + + /* Hack to retrieve VF's pmac-id on BE3 */ + if (BE3_chip(adapter) && !be_physfn(adapter)) + adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id); } err: -- cgit v0.10.2 From 95046b927a54f461766f83a212c6a93bc5fd2e67 Mon Sep 17 00:00:00 2001 From: Sathya Perla Date: Tue, 23 Jul 2013 15:25:02 +0530 Subject: be2net: refactor MAC-addr setup code The code to configure the permanent MAC in be_setup() has become quite complicated, with different FW cmds being used for BEx, SH-R and Lancer. Simplify the logic by moving some of this complexity to be_cmds.c. This makes the code in be_setup() a little more readable. Signed-off-by: Sathya Perla Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index b384ae7..f4ee94e 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -2755,6 +2755,24 @@ int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac) } } +int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac) +{ + int status; + bool pmac_valid = false; + + memset(mac, 0, ETH_ALEN); + + if (lancer_chip(adapter)) + status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid, + NULL, 0); + else if (be_physfn(adapter)) + status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0); + else + status = be_cmd_mac_addr_query(adapter, mac, false, + adapter->if_handle, 0); + return status; +} + /* Uses synchronous MCCQ */ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, u8 mac_count, u32 domain) diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 671a89d..5e9e2b0 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -1934,6 +1934,7 @@ extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, u8 domain); extern int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id, u8 *mac); +extern int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac); extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, u8 mac_count, u32 domain); extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 858bb47..73b0009 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -2759,7 +2759,7 @@ static int be_vfs_mac_query(struct be_adapter *adapter) int status, vf; u8 mac[ETH_ALEN]; struct be_vf_cfg *vf_cfg; - bool active; + bool active = false; for_all_vfs(adapter, vf_cfg, vf) { be_cmd_get_mac_from_list(adapter, mac, &active, @@ -2984,41 +2984,6 @@ static void be_setup_init(struct be_adapter *adapter) adapter->cmd_privileges = MIN_PRIVILEGES; } -static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle, - bool *active_mac, u32 *pmac_id) -{ - int status = 0; - - if (!is_zero_ether_addr(adapter->netdev->perm_addr)) { - memcpy(mac, adapter->netdev->dev_addr, 
ETH_ALEN); - if (!lancer_chip(adapter) && !be_physfn(adapter)) - *active_mac = true; - else - *active_mac = false; - - return status; - } - - if (lancer_chip(adapter)) { - status = be_cmd_get_mac_from_list(adapter, mac, - active_mac, pmac_id, 0); - if (*active_mac) { - status = be_cmd_mac_addr_query(adapter, mac, false, - if_handle, *pmac_id); - } - } else if (be_physfn(adapter)) { - /* For BE3, for PF get permanent MAC */ - status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0); - *active_mac = false; - } else { - /* For BE3, for VF get soft MAC assigned by PF*/ - status = be_cmd_mac_addr_query(adapter, mac, false, - if_handle, 0); - *active_mac = true; - } - return status; -} - static void be_get_resources(struct be_adapter *adapter) { u16 dev_num_vfs; @@ -3124,14 +3089,38 @@ err: return status; } +static int be_mac_setup(struct be_adapter *adapter) +{ + u8 mac[ETH_ALEN]; + int status; + + if (is_zero_ether_addr(adapter->netdev->dev_addr)) { + status = be_cmd_get_perm_mac(adapter, mac); + if (status) + return status; + + memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); + memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); + } else { + /* Maybe the HW was reset; dev_addr must be re-programmed */ + memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN); + } + + /* On BE3 VFs this cmd may fail due to lack of privilege. + * Ignore the failure as in this case pmac_id is fetched + * in the IFACE_CREATE cmd. + */ + be_cmd_pmac_add(adapter, mac, adapter->if_handle, + &adapter->pmac_id[0], 0); + return 0; +} + static int be_setup(struct be_adapter *adapter) { struct device *dev = &adapter->pdev->dev; u32 en_flags; u32 tx_fc, rx_fc; int status; - u8 mac[ETH_ALEN]; - bool active_mac; be_setup_init(adapter); @@ -3171,36 +3160,18 @@ static int be_setup(struct be_adapter *adapter) en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS; - if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) en_flags |= BE_IF_FLAGS_RSS; - en_flags = en_flags & adapter->if_cap_flags; - status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags, &adapter->if_handle, 0); if (status != 0) goto err; - memset(mac, 0, ETH_ALEN); - active_mac = false; - status = be_get_mac_addr(adapter, mac, adapter->if_handle, - &active_mac, &adapter->pmac_id[0]); - if (status != 0) + status = be_mac_setup(adapter); + if (status) goto err; - if (!active_mac) { - status = be_cmd_pmac_add(adapter, mac, adapter->if_handle, - &adapter->pmac_id[0], 0); - if (status != 0) - goto err; - } - - if (is_zero_ether_addr(adapter->netdev->dev_addr)) { - memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); - memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); - } - status = be_tx_qs_create(adapter); if (status) goto err; -- cgit v0.10.2 From 3175d8c2d05f4c87ba616d4c11037a4ad3f77378 Mon Sep 17 00:00:00 2001 From: Sathya Perla Date: Tue, 23 Jul 2013 15:25:03 +0530 Subject: be2net: use SET/GET_MAC_LIST for SH-R On SH-R and Lancer-R, GET_MAC_LIST cmd is better supported (instead of NTWK_MAC_QUERY cmd) to query provisioned MAC addresses. Similiarly, (on SH-R and Lancer-R) SET_MAC_LIST must be used by the PF to provision a permanent MAC addresses to the VF. Signed-off-by: Sathya Perla Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index f4ee94e..613d887 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -2740,19 +2740,15 @@ out: int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac) { - int status; bool active = true; - /* When SH FW is ready, SH should use Lancer path too */ - if (lancer_chip(adapter)) { - /* Fetch the MAC address using pmac_id */ - status = be_cmd_get_mac_from_list(adapter, mac, &active, - &curr_pmac_id, 0); - return status; - } else { + if (BEx_chip(adapter)) return be_cmd_mac_addr_query(adapter, mac, false, adapter->if_handle, curr_pmac_id); - } + else + /* Fetch the MAC address using pmac_id */ + return be_cmd_get_mac_from_list(adapter, mac, &active, + &curr_pmac_id, 0); } int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac) @@ -2762,14 +2758,18 @@ int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac) memset(mac, 0, ETH_ALEN); - if (lancer_chip(adapter)) + if (BEx_chip(adapter)) { + if (be_physfn(adapter)) + status = be_cmd_mac_addr_query(adapter, mac, true, 0, + 0); + else + status = be_cmd_mac_addr_query(adapter, mac, false, + adapter->if_handle, 0); + } else { status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid, NULL, 0); - else if (be_physfn(adapter)) - status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0); - else - status = be_cmd_mac_addr_query(adapter, mac, false, - adapter->if_handle, 0); + } + return status; } @@ -2816,6 +2816,25 @@ err: return status; } +/* Wrapper to delete any active MACs and provision the new mac. + * Changes to MAC_LIST are allowed iff none of the MAC addresses in the + * current list are active. + */ +int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom) +{ + bool active_mac = false; + u8 old_mac[ETH_ALEN]; + u32 pmac_id; + int status; + + status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac, + &pmac_id, dom); + if (!status && active_mac) + be_cmd_pmac_del(adapter, if_id, pmac_id, dom); + + return be_cmd_set_mac_list(adapter, mac, mac ? 
1 : 0, dom); +} + int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, u32 domain, u16 intf_id) { diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 5e9e2b0..eb541f0 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -1937,6 +1937,8 @@ extern int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id, extern int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac); extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, u8 mac_count, u32 domain); +extern int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, + u32 dom); extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, u32 domain, u16 intf_id); extern int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 73b0009..eec751d 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1146,9 +1146,6 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) struct be_adapter *adapter = netdev_priv(netdev); struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; int status; - bool active_mac = false; - u32 pmac_id; - u8 old_mac[ETH_ALEN]; if (!sriov_enabled(adapter)) return -EPERM; @@ -1156,20 +1153,15 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs) return -EINVAL; - if (lancer_chip(adapter)) { - status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac, - &pmac_id, vf + 1); - if (!status && active_mac) - be_cmd_pmac_del(adapter, vf_cfg->if_handle, - pmac_id, vf + 1); - - status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1); - } else { - status = be_cmd_pmac_del(adapter, vf_cfg->if_handle, - vf_cfg->pmac_id, vf + 1); + if (BEx_chip(adapter)) { + be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id, + vf + 1); status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle, &vf_cfg->pmac_id, vf + 1); + } else { + status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle, + vf + 1); } if (status) @@ -2735,13 +2727,13 @@ static int be_vf_eth_addr_config(struct be_adapter *adapter) be_vf_eth_addr_generate(adapter, mac); for_all_vfs(adapter, vf_cfg, vf) { - if (lancer_chip(adapter)) { - status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1); - } else { + if (BEx_chip(adapter)) status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle, &vf_cfg->pmac_id, vf + 1); - } + else + status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle, + vf + 1); if (status) dev_err(&adapter->pdev->dev, @@ -2788,11 +2780,12 @@ static void be_vf_clear(struct be_adapter *adapter) pci_disable_sriov(adapter->pdev); for_all_vfs(adapter, vf_cfg, vf) { - if (lancer_chip(adapter)) - be_cmd_set_mac_list(adapter, NULL, 0, vf + 1); - else + if (BEx_chip(adapter)) be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id, vf + 1); + else + be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle, + vf + 1); be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1); } -- cgit v0.10.2 From 2d17f4031475f2e836dd06d5b03593ee59f12fbd Mon Sep 17 00:00:00 2001 From: Sathya Perla Date: Tue, 23 Jul 2013 15:25:04 +0530 Subject: be2net: delete primary MAC address while unloading Currently the UC-list is being deleted from the HW MAC table, but the primary MAC is not. Signed-off-by: Sathya Perla Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index eec751d..67deb21 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -2796,7 +2796,7 @@ done: static int be_clear(struct be_adapter *adapter) { - int i = 1; + int i; if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) { cancel_delayed_work_sync(&adapter->work); @@ -2806,9 +2806,11 @@ static int be_clear(struct be_adapter *adapter) if (sriov_enabled(adapter)) be_vf_clear(adapter); - for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) + /* delete the primary mac along with the uc-mac list */ + for (i = 0; i < (adapter->uc_macs + 1); i++) be_cmd_pmac_del(adapter, adapter->if_handle, - adapter->pmac_id[i], 0); + adapter->pmac_id[i], 0); + adapter->uc_macs = 0; be_cmd_if_destroy(adapter, adapter->if_handle, 0); -- cgit v0.10.2 From 59ea52dc461ebb05b78545064604d92faf8bb16f Mon Sep 17 00:00:00 2001 From: Andi Shyti Date: Mon, 22 Jul 2013 14:59:16 +0200 Subject: net: trans_rdma: remove unused function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch gets rid of the following warning: net/9p/trans_rdma.c:594:12: warning: ‘rdma_cancelled’ defined but not used [-Wunused-function] static int rdma_cancelled(struct p9_client *client, struct p9_req_t *req) The rdma_cancelled function is not called anywhere in the kernel Signed-off-by: Andi Shyti Signed-off-by: David S. Miller diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c index 928f2bb..8f68df5 100644 --- a/net/9p/trans_rdma.c +++ b/net/9p/trans_rdma.c @@ -588,17 +588,6 @@ static int rdma_cancel(struct p9_client *client, struct p9_req_t *req) return 1; } -/* A request has been fully flushed without a reply. - * That means we have posted one buffer in excess. - */ -static int rdma_cancelled(struct p9_client *client, struct p9_req_t *req) -{ - struct p9_trans_rdma *rdma = client->trans; - - atomic_inc(&rdma->excess_rc); - return 0; -} - /** * trans_create_rdma - Transport method for creating atransport instance * @client: client instance -- cgit v0.10.2 From f13bbc2f9aba00c7a37b499d23060616b9a4ef9e Mon Sep 17 00:00:00 2001 From: Neel Patel Date: Mon, 22 Jul 2013 09:59:18 -0700 Subject: drivers/net: enic: Move ethtool code to a separate file This patch moves all enic ethtool hooks from enic_main.c to a new file enic_ethtool.c Signed-off-by: Neel Patel Signed-off-by: Christian Benvenuti Signed-off-by: Nishank Trivedi Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/cisco/enic/Makefile b/drivers/net/ethernet/cisco/enic/Makefile index 9d4974b..e52296d 100644 --- a/drivers/net/ethernet/cisco/enic/Makefile +++ b/drivers/net/ethernet/cisco/enic/Makefile @@ -1,5 +1,6 @@ obj-$(CONFIG_ENIC) := enic.o enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \ - enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o + enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o \ + enic_ethtool.o diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index afe9b16..2e37c63 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -127,9 +127,57 @@ static inline struct device *enic_get_dev(struct enic *enic) return &(enic->pdev->dev); } +static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq) +{ + return rq; +} + +static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq) +{ + return enic->rq_count + wq; +} + +static inline unsigned int enic_legacy_io_intr(void) +{ + return 0; +} + +static inline unsigned int enic_legacy_err_intr(void) +{ + return 1; +} + +static inline unsigned int enic_legacy_notify_intr(void) +{ + return 2; +} + +static inline unsigned int enic_msix_rq_intr(struct enic *enic, + unsigned int rq) +{ + return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset; +} + +static inline unsigned int enic_msix_wq_intr(struct enic *enic, + unsigned int wq) +{ + return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset; +} + +static inline unsigned int enic_msix_err_intr(struct enic *enic) +{ + return enic->rq_count + enic->wq_count; +} + +static inline unsigned int enic_msix_notify_intr(struct enic *enic) +{ + return enic->rq_count + enic->wq_count + 1; +} + void enic_reset_addr_lists(struct enic *enic); int enic_sriov_enabled(struct enic *enic); int enic_is_valid_vf(struct enic *enic, int vf); int enic_is_dynamic(struct enic *enic); +void enic_set_ethtool_ops(struct net_device *netdev); #endif /* _ENIC_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h index 08bded0..129b14a 100644 --- a/drivers/net/ethernet/cisco/enic/enic_dev.h +++ b/drivers/net/ethernet/cisco/enic/enic_dev.h @@ -20,6 +20,7 @@ #define _ENIC_DEV_H_ #include "vnic_dev.h" +#include "vnic_vic.h" /* * Calls the devcmd function given by argument vnicdevcmdfn. diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c new file mode 100644 index 0000000..47e3562 --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -0,0 +1,257 @@ +/** + * Copyright 2013 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#include +#include + +#include "enic_res.h" +#include "enic.h" +#include "enic_dev.h" + +struct enic_stat { + char name[ETH_GSTRING_LEN]; + unsigned int index; +}; + +#define ENIC_TX_STAT(stat) { \ + .name = #stat, \ + .index = offsetof(struct vnic_tx_stats, stat) / sizeof(u64) \ +} + +#define ENIC_RX_STAT(stat) { \ + .name = #stat, \ + .index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \ +} + +static const struct enic_stat enic_tx_stats[] = { + ENIC_TX_STAT(tx_frames_ok), + ENIC_TX_STAT(tx_unicast_frames_ok), + ENIC_TX_STAT(tx_multicast_frames_ok), + ENIC_TX_STAT(tx_broadcast_frames_ok), + ENIC_TX_STAT(tx_bytes_ok), + ENIC_TX_STAT(tx_unicast_bytes_ok), + ENIC_TX_STAT(tx_multicast_bytes_ok), + ENIC_TX_STAT(tx_broadcast_bytes_ok), + ENIC_TX_STAT(tx_drops), + ENIC_TX_STAT(tx_errors), + ENIC_TX_STAT(tx_tso), +}; + +static const struct enic_stat enic_rx_stats[] = { + ENIC_RX_STAT(rx_frames_ok), + ENIC_RX_STAT(rx_frames_total), + ENIC_RX_STAT(rx_unicast_frames_ok), + ENIC_RX_STAT(rx_multicast_frames_ok), + ENIC_RX_STAT(rx_broadcast_frames_ok), + ENIC_RX_STAT(rx_bytes_ok), + ENIC_RX_STAT(rx_unicast_bytes_ok), + ENIC_RX_STAT(rx_multicast_bytes_ok), + ENIC_RX_STAT(rx_broadcast_bytes_ok), + ENIC_RX_STAT(rx_drop), + ENIC_RX_STAT(rx_no_bufs), + ENIC_RX_STAT(rx_errors), + ENIC_RX_STAT(rx_rss), + ENIC_RX_STAT(rx_crc_errors), + ENIC_RX_STAT(rx_frames_64), + ENIC_RX_STAT(rx_frames_127), + ENIC_RX_STAT(rx_frames_255), + ENIC_RX_STAT(rx_frames_511), + ENIC_RX_STAT(rx_frames_1023), + ENIC_RX_STAT(rx_frames_1518), + ENIC_RX_STAT(rx_frames_to_max), +}; + +static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats); +static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats); + +static int enic_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct enic *enic = netdev_priv(netdev); + + ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); + ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); + ecmd->port = PORT_FIBRE; + ecmd->transceiver = XCVR_EXTERNAL; + + if (netif_carrier_ok(netdev)) { + ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev)); + ecmd->duplex = DUPLEX_FULL; + } else { + ethtool_cmd_speed_set(ecmd, -1); + ecmd->duplex = -1; + } + + ecmd->autoneg = AUTONEG_DISABLE; + + return 0; +} + +static void enic_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct enic *enic = netdev_priv(netdev); + struct vnic_devcmd_fw_info *fw_info; + + enic_dev_fw_info(enic, &fw_info); + + strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, fw_info->fw_version, + sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, pci_name(enic->pdev), + sizeof(drvinfo->bus_info)); +} + +static void enic_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + unsigned int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < enic_n_tx_stats; i++) { + memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + for (i = 0; i < enic_n_rx_stats; i++) { + memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + break; + } +} + +static int enic_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return enic_n_tx_stats + enic_n_rx_stats; + default: + return -EOPNOTSUPP; + } +} + +static void enic_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats 
*stats, u64 *data) +{ + struct enic *enic = netdev_priv(netdev); + struct vnic_stats *vstats; + unsigned int i; + + enic_dev_stats_dump(enic, &vstats); + + for (i = 0; i < enic_n_tx_stats; i++) + *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index]; + for (i = 0; i < enic_n_rx_stats; i++) + *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index]; +} + +static u32 enic_get_msglevel(struct net_device *netdev) +{ + struct enic *enic = netdev_priv(netdev); + return enic->msg_enable; +} + +static void enic_set_msglevel(struct net_device *netdev, u32 value) +{ + struct enic *enic = netdev_priv(netdev); + enic->msg_enable = value; +} + +static int enic_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ecmd) +{ + struct enic *enic = netdev_priv(netdev); + + ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs; + ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs; + + return 0; +} + +static int enic_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ecmd) +{ + struct enic *enic = netdev_priv(netdev); + u32 tx_coalesce_usecs; + u32 rx_coalesce_usecs; + unsigned int i, intr; + + tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs, + vnic_dev_get_intr_coal_timer_max(enic->vdev)); + rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs, + vnic_dev_get_intr_coal_timer_max(enic->vdev)); + + switch (vnic_dev_get_intr_mode(enic->vdev)) { + case VNIC_DEV_INTR_MODE_INTX: + if (tx_coalesce_usecs != rx_coalesce_usecs) + return -EINVAL; + + intr = enic_legacy_io_intr(); + vnic_intr_coalescing_timer_set(&enic->intr[intr], + tx_coalesce_usecs); + break; + case VNIC_DEV_INTR_MODE_MSI: + if (tx_coalesce_usecs != rx_coalesce_usecs) + return -EINVAL; + + vnic_intr_coalescing_timer_set(&enic->intr[0], + tx_coalesce_usecs); + break; + case VNIC_DEV_INTR_MODE_MSIX: + for (i = 0; i < enic->wq_count; i++) { + intr = enic_msix_wq_intr(enic, i); + vnic_intr_coalescing_timer_set(&enic->intr[intr], + tx_coalesce_usecs); + } + + for (i = 0; i < enic->rq_count; i++) { + intr = enic_msix_rq_intr(enic, i); + vnic_intr_coalescing_timer_set(&enic->intr[intr], + rx_coalesce_usecs); + } + + break; + default: + break; + } + + enic->tx_coalesce_usecs = tx_coalesce_usecs; + enic->rx_coalesce_usecs = rx_coalesce_usecs; + + return 0; +} + +static const struct ethtool_ops enic_ethtool_ops = { + .get_settings = enic_get_settings, + .get_drvinfo = enic_get_drvinfo, + .get_msglevel = enic_get_msglevel, + .set_msglevel = enic_set_msglevel, + .get_link = ethtool_op_get_link, + .get_strings = enic_get_strings, + .get_sset_count = enic_get_sset_count, + .get_ethtool_stats = enic_get_ethtool_stats, + .get_coalesce = enic_get_coalesce, + .set_coalesce = enic_set_coalesce, +}; + +void enic_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &enic_ethtool_ops); +} diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 992ec2e..b12b32b 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -31,7 +31,6 @@ #include #include #include -#include #include #include #include @@ -73,57 +72,6 @@ MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); MODULE_DEVICE_TABLE(pci, enic_id_table); -struct enic_stat { - char name[ETH_GSTRING_LEN]; - unsigned int offset; -}; - -#define ENIC_TX_STAT(stat) \ - { .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 } -#define ENIC_RX_STAT(stat) \ - { .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 } - -static const struct enic_stat 
enic_tx_stats[] = { - ENIC_TX_STAT(tx_frames_ok), - ENIC_TX_STAT(tx_unicast_frames_ok), - ENIC_TX_STAT(tx_multicast_frames_ok), - ENIC_TX_STAT(tx_broadcast_frames_ok), - ENIC_TX_STAT(tx_bytes_ok), - ENIC_TX_STAT(tx_unicast_bytes_ok), - ENIC_TX_STAT(tx_multicast_bytes_ok), - ENIC_TX_STAT(tx_broadcast_bytes_ok), - ENIC_TX_STAT(tx_drops), - ENIC_TX_STAT(tx_errors), - ENIC_TX_STAT(tx_tso), -}; - -static const struct enic_stat enic_rx_stats[] = { - ENIC_RX_STAT(rx_frames_ok), - ENIC_RX_STAT(rx_frames_total), - ENIC_RX_STAT(rx_unicast_frames_ok), - ENIC_RX_STAT(rx_multicast_frames_ok), - ENIC_RX_STAT(rx_broadcast_frames_ok), - ENIC_RX_STAT(rx_bytes_ok), - ENIC_RX_STAT(rx_unicast_bytes_ok), - ENIC_RX_STAT(rx_multicast_bytes_ok), - ENIC_RX_STAT(rx_broadcast_bytes_ok), - ENIC_RX_STAT(rx_drop), - ENIC_RX_STAT(rx_no_bufs), - ENIC_RX_STAT(rx_errors), - ENIC_RX_STAT(rx_rss), - ENIC_RX_STAT(rx_crc_errors), - ENIC_RX_STAT(rx_frames_64), - ENIC_RX_STAT(rx_frames_127), - ENIC_RX_STAT(rx_frames_255), - ENIC_RX_STAT(rx_frames_511), - ENIC_RX_STAT(rx_frames_1023), - ENIC_RX_STAT(rx_frames_1518), - ENIC_RX_STAT(rx_frames_to_max), -}; - -static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats); -static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats); - int enic_is_dynamic(struct enic *enic) { return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN; @@ -148,222 +96,6 @@ int enic_is_valid_vf(struct enic *enic, int vf) #endif } -static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq) -{ - return rq; -} - -static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq) -{ - return enic->rq_count + wq; -} - -static inline unsigned int enic_legacy_io_intr(void) -{ - return 0; -} - -static inline unsigned int enic_legacy_err_intr(void) -{ - return 1; -} - -static inline unsigned int enic_legacy_notify_intr(void) -{ - return 2; -} - -static inline unsigned int enic_msix_rq_intr(struct enic *enic, unsigned int rq) -{ - return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset; -} - -static inline unsigned int enic_msix_wq_intr(struct enic *enic, unsigned int wq) -{ - return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset; -} - -static inline unsigned int enic_msix_err_intr(struct enic *enic) -{ - return enic->rq_count + enic->wq_count; -} - -static inline unsigned int enic_msix_notify_intr(struct enic *enic) -{ - return enic->rq_count + enic->wq_count + 1; -} - -static int enic_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) -{ - struct enic *enic = netdev_priv(netdev); - - ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); - ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); - ecmd->port = PORT_FIBRE; - ecmd->transceiver = XCVR_EXTERNAL; - - if (netif_carrier_ok(netdev)) { - ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev)); - ecmd->duplex = DUPLEX_FULL; - } else { - ethtool_cmd_speed_set(ecmd, -1); - ecmd->duplex = -1; - } - - ecmd->autoneg = AUTONEG_DISABLE; - - return 0; -} - -static void enic_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *drvinfo) -{ - struct enic *enic = netdev_priv(netdev); - struct vnic_devcmd_fw_info *fw_info; - - enic_dev_fw_info(enic, &fw_info); - - strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); - strlcpy(drvinfo->fw_version, fw_info->fw_version, - sizeof(drvinfo->fw_version)); - strlcpy(drvinfo->bus_info, pci_name(enic->pdev), - sizeof(drvinfo->bus_info)); -} - -static void 
enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data) -{ - unsigned int i; - - switch (stringset) { - case ETH_SS_STATS: - for (i = 0; i < enic_n_tx_stats; i++) { - memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; - } - for (i = 0; i < enic_n_rx_stats; i++) { - memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; - } - break; - } -} - -static int enic_get_sset_count(struct net_device *netdev, int sset) -{ - switch (sset) { - case ETH_SS_STATS: - return enic_n_tx_stats + enic_n_rx_stats; - default: - return -EOPNOTSUPP; - } -} - -static void enic_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats *stats, u64 *data) -{ - struct enic *enic = netdev_priv(netdev); - struct vnic_stats *vstats; - unsigned int i; - - enic_dev_stats_dump(enic, &vstats); - - for (i = 0; i < enic_n_tx_stats; i++) - *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset]; - for (i = 0; i < enic_n_rx_stats; i++) - *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset]; -} - -static u32 enic_get_msglevel(struct net_device *netdev) -{ - struct enic *enic = netdev_priv(netdev); - return enic->msg_enable; -} - -static void enic_set_msglevel(struct net_device *netdev, u32 value) -{ - struct enic *enic = netdev_priv(netdev); - enic->msg_enable = value; -} - -static int enic_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ecmd) -{ - struct enic *enic = netdev_priv(netdev); - - ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs; - ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs; - - return 0; -} - -static int enic_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ecmd) -{ - struct enic *enic = netdev_priv(netdev); - u32 tx_coalesce_usecs; - u32 rx_coalesce_usecs; - unsigned int i, intr; - - tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs, - vnic_dev_get_intr_coal_timer_max(enic->vdev)); - rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs, - vnic_dev_get_intr_coal_timer_max(enic->vdev)); - - switch (vnic_dev_get_intr_mode(enic->vdev)) { - case VNIC_DEV_INTR_MODE_INTX: - if (tx_coalesce_usecs != rx_coalesce_usecs) - return -EINVAL; - - intr = enic_legacy_io_intr(); - vnic_intr_coalescing_timer_set(&enic->intr[intr], - tx_coalesce_usecs); - break; - case VNIC_DEV_INTR_MODE_MSI: - if (tx_coalesce_usecs != rx_coalesce_usecs) - return -EINVAL; - - vnic_intr_coalescing_timer_set(&enic->intr[0], - tx_coalesce_usecs); - break; - case VNIC_DEV_INTR_MODE_MSIX: - for (i = 0; i < enic->wq_count; i++) { - intr = enic_msix_wq_intr(enic, i); - vnic_intr_coalescing_timer_set(&enic->intr[intr], - tx_coalesce_usecs); - } - - for (i = 0; i < enic->rq_count; i++) { - intr = enic_msix_rq_intr(enic, i); - vnic_intr_coalescing_timer_set(&enic->intr[intr], - rx_coalesce_usecs); - } - - break; - default: - break; - } - - enic->tx_coalesce_usecs = tx_coalesce_usecs; - enic->rx_coalesce_usecs = rx_coalesce_usecs; - - return 0; -} - -static const struct ethtool_ops enic_ethtool_ops = { - .get_settings = enic_get_settings, - .get_drvinfo = enic_get_drvinfo, - .get_msglevel = enic_get_msglevel, - .set_msglevel = enic_set_msglevel, - .get_link = ethtool_op_get_link, - .get_strings = enic_get_strings, - .get_sset_count = enic_get_sset_count, - .get_ethtool_stats = enic_get_ethtool_stats, - .get_coalesce = enic_get_coalesce, - .set_coalesce = enic_set_coalesce, -}; - static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) { struct enic *enic = vnic_dev_priv(wq->vdev); @@ -2496,7 +2228,7 @@ static 
int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->netdev_ops = &enic_netdev_ops; netdev->watchdog_timeo = 2 * HZ; - netdev->ethtool_ops = &enic_ethtool_ops; + enic_set_ethtool_ops(netdev); netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; if (ENIC_SETTING(enic, LOOP)) { -- cgit v0.10.2 From c4cdef9b7183159c23c7302aaf270d64c549f557 Mon Sep 17 00:00:00 2001 From: dingtianhong Date: Tue, 23 Jul 2013 15:25:27 +0800 Subject: bonding: don't call slave_xxx_netpoll under spinlocks The slave_xxx_netpoll will call synchronize_rcu_bh(), so the function may schedule and sleep, it should't be called under spinlocks. bond_netpoll_setup() and bond_netpoll_cleanup() are always protected by rtnl lock, it is no need to take the read lock, as the slave list couldn't be changed outside rtnl lock. Signed-off-by: Ding Tianhong Cc: Jay Vosburgh Cc: Andy Gospodarek Signed-off-by: David S. Miller diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index ae9864c..5cd8e0a 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1219,8 +1219,9 @@ static void bond_poll_controller(struct net_device *bond_dev) { } -static void __bond_netpoll_cleanup(struct bonding *bond) +static void bond_netpoll_cleanup(struct net_device *bond_dev) { + struct bonding *bond = netdev_priv(bond_dev); struct slave *slave; int i; @@ -1228,14 +1229,6 @@ static void __bond_netpoll_cleanup(struct bonding *bond) if (IS_UP(slave->dev)) slave_disable_netpoll(slave); } -static void bond_netpoll_cleanup(struct net_device *bond_dev) -{ - struct bonding *bond = netdev_priv(bond_dev); - - read_lock(&bond->lock); - __bond_netpoll_cleanup(bond); - read_unlock(&bond->lock); -} static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, gfp_t gfp) { @@ -1243,15 +1236,13 @@ static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, g struct slave *slave; int i, err = 0; - read_lock(&bond->lock); bond_for_each_slave(bond, slave, i) { err = slave_enable_netpoll(slave); if (err) { - __bond_netpoll_cleanup(bond); + bond_netpoll_cleanup(dev); break; } } - read_unlock(&bond->lock); return err; } -- cgit v0.10.2 From 38c4916a7874aff9dc07a68bf8e7e4136b00f8dd Mon Sep 17 00:00:00 2001 From: dingtianhong Date: Tue, 23 Jul 2013 15:25:32 +0800 Subject: bonding: bond_sysfs.c checkpatch cleanup net/bonding/bond_sysfs.c:1302: ERROR: else should follow close brace '}' net/bonding/bond_sysfs.c:1314: ERROR: else should follow close brace '}' Signed-off-by: Ding Tianhong Cc: Jay Vosburgh Cc: Andy Gospodarek Signed-off-by: David S. 
Miller diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index dc36a3d..984ca02 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -1295,8 +1295,7 @@ static ssize_t bonding_store_active_slave(struct device *d, bond->dev->name, slave->dev->name); goto out; - } - else { + } else { if ((new_active) && (old_active) && (new_active->link == BOND_LINK_UP) && @@ -1307,8 +1306,7 @@ static ssize_t bonding_store_active_slave(struct device *d, slave->dev->name); bond_change_active_slave(bond, new_active); - } - else { + } else { pr_info("%s: Could not set %s as" " active slave; either %s is" " down or the link is down.\n", -- cgit v0.10.2 From 9402b746e7ba0f317ffa465a4c5128c7c220f27b Mon Sep 17 00:00:00 2001 From: dingtianhong Date: Tue, 23 Jul 2013 15:25:39 +0800 Subject: bonding: add rtnl protection for bonding_store_fail_over_mac We need rtnl protection while reading slave_cnt and updating the .fail_over_mac, and it also follows the logic "don't change anything slave-related without rtnl". :) Signed-off-by: Ding Tianhong Cc: Jay Vosburgh Cc: Andy Gospodarek Signed-off-by: David S. Miller diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 984ca02..ae02c19 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -501,20 +501,25 @@ static ssize_t bonding_store_fail_over_mac(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { - int new_value; + int new_value, ret = count; struct bonding *bond = to_bond(d); + if (!rtnl_trylock()) + return restart_syscall(); + if (bond->slave_cnt != 0) { pr_err("%s: Can't alter fail_over_mac with slaves in bond.\n", bond->dev->name); - return -EPERM; + ret = -EPERM; + goto out; } new_value = bond_parse_parm(buf, fail_over_mac_tbl); if (new_value < 0) { pr_err("%s: Ignoring invalid fail_over_mac value %s.\n", bond->dev->name, buf); - return -EINVAL; + ret = -EINVAL; + goto out; } bond->params.fail_over_mac = new_value; @@ -522,7 +527,9 @@ static ssize_t bonding_store_fail_over_mac(struct device *d, bond->dev->name, fail_over_mac_tbl[new_value].modename, new_value); - return count; +out: + rtnl_unlock(); + return ret; } static DEVICE_ATTR(fail_over_mac, S_IRUGO | S_IWUSR, -- cgit v0.10.2 From b07ea07bd0fa63bfb74dbd803ac3bb9e14dc630b Mon Sep 17 00:00:00 2001 From: dingtianhong Date: Tue, 23 Jul 2013 15:25:47 +0800 Subject: bonding: Fixed up a error "do not initialise statics to 0 or NULL" in bond_main.c The error is found by the checkpatch.pl tools. Signed-off-by: Ding Tianhong Cc: Jay Vosburgh Cc: Andy Gospodarek Signed-off-by: David S. 
Miller diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 5cd8e0a..414d07e 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -106,7 +106,7 @@ static char *arp_ip_target[BOND_MAX_ARP_TARGETS]; static char *arp_validate; static char *arp_all_targets; static char *fail_over_mac; -static int all_slaves_active = 0; +static int all_slaves_active; static struct bond_params bonding_defaults; static int resend_igmp = BOND_DEFAULT_RESEND_IGMP; -- cgit v0.10.2 From d97185466cc83902de49c7bea512651c7af12566 Mon Sep 17 00:00:00 2001 From: Mugunthan V N Date: Tue, 23 Jul 2013 15:38:17 +0530 Subject: drivers: net: cpsw: add support to show hw stats via ethtool Add support to show CPSW hardware statistics to user via ethtool so user can find if there were any error reported by hardware or the system is over loaded duing high data rate transfer. Signed-off-by: Mugunthan V N Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 05a1674..1cd1c00 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -91,6 +91,7 @@ do { \ #define CPSW1_SLAVE_SIZE 0x040 #define CPSW1_CPDMA_OFFSET 0x100 #define CPSW1_STATERAM_OFFSET 0x200 +#define CPSW1_HW_STATS 0x400 #define CPSW1_CPTS_OFFSET 0x500 #define CPSW1_ALE_OFFSET 0x600 #define CPSW1_SLIVER_OFFSET 0x700 @@ -99,6 +100,7 @@ do { \ #define CPSW2_SLAVE_OFFSET 0x200 #define CPSW2_SLAVE_SIZE 0x100 #define CPSW2_CPDMA_OFFSET 0x800 +#define CPSW2_HW_STATS 0x900 #define CPSW2_STATERAM_OFFSET 0xa00 #define CPSW2_CPTS_OFFSET 0xc00 #define CPSW2_ALE_OFFSET 0xd00 @@ -299,6 +301,44 @@ struct cpsw_sliver_regs { u32 rx_pri_map; }; +struct cpsw_hw_stats { + u32 rxgoodframes; + u32 rxbroadcastframes; + u32 rxmulticastframes; + u32 rxpauseframes; + u32 rxcrcerrors; + u32 rxaligncodeerrors; + u32 rxoversizedframes; + u32 rxjabberframes; + u32 rxundersizedframes; + u32 rxfragments; + u32 __pad_0[2]; + u32 rxoctets; + u32 txgoodframes; + u32 txbroadcastframes; + u32 txmulticastframes; + u32 txpauseframes; + u32 txdeferredframes; + u32 txcollisionframes; + u32 txsinglecollframes; + u32 txmultcollframes; + u32 txexcessivecollisions; + u32 txlatecollisions; + u32 txunderrun; + u32 txcarriersenseerrors; + u32 txoctets; + u32 octetframes64; + u32 octetframes65t127; + u32 octetframes128t255; + u32 octetframes256t511; + u32 octetframes512t1023; + u32 octetframes1024tup; + u32 netoctets; + u32 rxsofoverruns; + u32 rxmofoverruns; + u32 rxdmaoverruns; +}; + struct cpsw_slave { void __iomem *regs; struct cpsw_sliver_regs __iomem *sliver; @@ -332,6 +372,7 @@ struct cpsw_priv { struct cpsw_platform_data data; struct cpsw_ss_regs __iomem *regs; struct cpsw_wr_regs __iomem *wr_regs; + u8 __iomem *hw_stats; struct cpsw_host_regs __iomem *host_port_regs; u32 msg_enable; u32 version; @@ -354,6 +395,94 @@ struct cpsw_priv { u32 emac_port; }; +struct cpsw_stats { + char stat_string[ETH_GSTRING_LEN]; + int type; + int sizeof_stat; + int stat_offset; +}; + +enum { + CPSW_STATS, + CPDMA_RX_STATS, + CPDMA_TX_STATS, +}; + +#define CPSW_STAT(m) CPSW_STATS, \ + sizeof(((struct cpsw_hw_stats *)0)->m), \ + offsetof(struct cpsw_hw_stats, m) +#define CPDMA_RX_STAT(m) CPDMA_RX_STATS, \ + sizeof(((struct cpdma_chan_stats *)0)->m), \ + offsetof(struct cpdma_chan_stats, m) +#define CPDMA_TX_STAT(m) CPDMA_TX_STATS, \ + sizeof(((struct cpdma_chan_stats *)0)->m), \ + offsetof(struct cpdma_chan_stats, m) + +static const struct cpsw_stats cpsw_gstrings_stats[] = { + { "Good 
Rx Frames", CPSW_STAT(rxgoodframes) }, + { "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) }, + { "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) }, + { "Pause Rx Frames", CPSW_STAT(rxpauseframes) }, + { "Rx CRC Errors", CPSW_STAT(rxcrcerrors) }, + { "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) }, + { "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) }, + { "Rx Jabbers", CPSW_STAT(rxjabberframes) }, + { "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) }, + { "Rx Fragments", CPSW_STAT(rxfragments) }, + { "Rx Octets", CPSW_STAT(rxoctets) }, + { "Good Tx Frames", CPSW_STAT(txgoodframes) }, + { "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) }, + { "Multicast Tx Frames", CPSW_STAT(txmulticastframes) }, + { "Pause Tx Frames", CPSW_STAT(txpauseframes) }, + { "Deferred Tx Frames", CPSW_STAT(txdeferredframes) }, + { "Collisions", CPSW_STAT(txcollisionframes) }, + { "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) }, + { "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) }, + { "Excessive Collisions", CPSW_STAT(txexcessivecollisions) }, + { "Late Collisions", CPSW_STAT(txlatecollisions) }, + { "Tx Underrun", CPSW_STAT(txunderrun) }, + { "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) }, + { "Tx Octets", CPSW_STAT(txoctets) }, + { "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) }, + { "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) }, + { "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) }, + { "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) }, + { "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) }, + { "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) }, + { "Net Octets", CPSW_STAT(netoctets) }, + { "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) }, + { "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) }, + { "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) }, + { "Rx DMA chan: head_enqueue", CPDMA_RX_STAT(head_enqueue) }, + { "Rx DMA chan: tail_enqueue", CPDMA_RX_STAT(tail_enqueue) }, + { "Rx DMA chan: pad_enqueue", CPDMA_RX_STAT(pad_enqueue) }, + { "Rx DMA chan: misqueued", CPDMA_RX_STAT(misqueued) }, + { "Rx DMA chan: desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) }, + { "Rx DMA chan: pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) }, + { "Rx DMA chan: runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) }, + { "Rx DMA chan: runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) }, + { "Rx DMA chan: empty_dequeue", CPDMA_RX_STAT(empty_dequeue) }, + { "Rx DMA chan: busy_dequeue", CPDMA_RX_STAT(busy_dequeue) }, + { "Rx DMA chan: good_dequeue", CPDMA_RX_STAT(good_dequeue) }, + { "Rx DMA chan: requeue", CPDMA_RX_STAT(requeue) }, + { "Rx DMA chan: teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) }, + { "Tx DMA chan: head_enqueue", CPDMA_TX_STAT(head_enqueue) }, + { "Tx DMA chan: tail_enqueue", CPDMA_TX_STAT(tail_enqueue) }, + { "Tx DMA chan: pad_enqueue", CPDMA_TX_STAT(pad_enqueue) }, + { "Tx DMA chan: misqueued", CPDMA_TX_STAT(misqueued) }, + { "Tx DMA chan: desc_alloc_fail", CPDMA_TX_STAT(desc_alloc_fail) }, + { "Tx DMA chan: pad_alloc_fail", CPDMA_TX_STAT(pad_alloc_fail) }, + { "Tx DMA chan: runt_receive_buf", CPDMA_TX_STAT(runt_receive_buff) }, + { "Tx DMA chan: runt_transmit_buf", CPDMA_TX_STAT(runt_transmit_buff) }, + { "Tx DMA chan: empty_dequeue", CPDMA_TX_STAT(empty_dequeue) }, + { "Tx DMA chan: busy_dequeue", CPDMA_TX_STAT(busy_dequeue) }, + { "Tx DMA chan: good_dequeue", CPDMA_TX_STAT(good_dequeue) }, + { "Tx DMA chan: requeue", CPDMA_TX_STAT(requeue) 
}, + { "Tx DMA chan: teardown_dequeue", CPDMA_TX_STAT(teardown_dequeue) }, +}; + +#define CPSW_STATS_LEN ARRAY_SIZE(cpsw_gstrings_stats) + #define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi) #define for_each_slave(priv, func, arg...) \ do { \ @@ -723,6 +852,69 @@ static int cpsw_set_coalesce(struct net_device *ndev, return 0; } +static int cpsw_get_sset_count(struct net_device *ndev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return CPSW_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < CPSW_STATS_LEN; i++) { + memcpy(p, cpsw_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static void cpsw_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *stats, u64 *data) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpdma_chan_stats rx_stats; + struct cpdma_chan_stats tx_stats; + u32 val; + u8 *p; + int i; + + /* Collect Davinci CPDMA stats for Rx and Tx Channel */ + cpdma_chan_get_stats(priv->rxch, &rx_stats); + cpdma_chan_get_stats(priv->txch, &tx_stats); + + for (i = 0; i < CPSW_STATS_LEN; i++) { + switch (cpsw_gstrings_stats[i].type) { + case CPSW_STATS: + val = readl(priv->hw_stats + + cpsw_gstrings_stats[i].stat_offset); + data[i] = val; + break; + + case CPDMA_RX_STATS: + p = (u8 *)&rx_stats + + cpsw_gstrings_stats[i].stat_offset; + data[i] = *(u32 *)p; + break; + + case CPDMA_TX_STATS: + p = (u8 *)&tx_stats + + cpsw_gstrings_stats[i].stat_offset; + data[i] = *(u32 *)p; + break; + } + } +} + static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val) { static char *leader = "........................................"; @@ -1426,6 +1618,9 @@ static const struct ethtool_ops cpsw_ethtool_ops = { .set_settings = cpsw_set_settings, .get_coalesce = cpsw_get_coalesce, .set_coalesce = cpsw_set_coalesce, + .get_sset_count = cpsw_get_sset_count, + .get_strings = cpsw_get_strings, + .get_ethtool_stats = cpsw_get_ethtool_stats, }; static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv, @@ -1623,6 +1818,7 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev, priv_sl2->host_port = priv->host_port; priv_sl2->host_port_regs = priv->host_port_regs; priv_sl2->wr_regs = priv->wr_regs; + priv_sl2->hw_stats = priv->hw_stats; priv_sl2->dma = priv->dma; priv_sl2->txch = priv->txch; priv_sl2->rxch = priv->rxch; @@ -1780,7 +1976,8 @@ static int cpsw_probe(struct platform_device *pdev) switch (priv->version) { case CPSW_VERSION_1: priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET; - priv->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET; + priv->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET; + priv->hw_stats = ss_regs + CPSW1_HW_STATS; dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET; dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET; ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET; @@ -1791,7 +1988,8 @@ static int cpsw_probe(struct platform_device *pdev) break; case CPSW_VERSION_2: priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET; - priv->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET; + priv->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET; + priv->hw_stats = ss_regs + CPSW2_HW_STATS; dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET; dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET; ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET; -- cgit v0.10.2 From 91705c61b52029ab5da67a15a23eef08667bf40e Mon Sep 17 
00:00:00 2001 From: Daniel Borkmann Date: Tue, 23 Jul 2013 14:51:47 +0200 Subject: net: sctp: trivial: update mailing list address The SCTP mailing list address to send patches or questions to is linux-sctp@vger.kernel.org and not lksctp-developers@lists.sourceforge.net anymore. Therefore, update all occurences. Signed-off-by: Daniel Borkmann Acked-by: Neil Horman Acked-by: Vlad Yasevich Signed-off-by: David S. Miller diff --git a/Documentation/networking/sctp.txt b/Documentation/networking/sctp.txt index 0c790a7..97b810c 100644 --- a/Documentation/networking/sctp.txt +++ b/Documentation/networking/sctp.txt @@ -19,7 +19,6 @@ of SCTP that is RFC 2960 compliant and provides an programming interface referred to as the UDP-style API of the Sockets Extensions for SCTP, as proposed in IETF Internet-Drafts. - Caveats: -lksctp can be built as statically or as a module. However, be aware that @@ -33,6 +32,4 @@ For more information, please visit the lksctp project website: http://www.sf.net/projects/lksctp Or contact the lksctp developers through the mailing list: - - - + diff --git a/include/net/sctp/auth.h b/include/net/sctp/auth.h index 49bc957..754c511 100644 --- a/include/net/sctp/auth.h +++ b/include/net/sctp/auth.h @@ -22,7 +22,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h index 0cb08e6..78b88aa 100644 --- a/include/net/sctp/checksum.h +++ b/include/net/sctp/checksum.h @@ -25,7 +25,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index ca50e075..a1e7aa5 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -25,7 +25,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index d8e37ec..554cf88 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -27,7 +27,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 2a82d13..2aa66dd 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -27,7 +27,7 @@ * * Please send any bug reports or fixes you make to the * email addresses: - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index e745c92..75c4c16 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -25,7 +25,7 @@ * * Please send any bug reports or fixes you make to the * email addresses: - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/include/net/sctp/tsnmap.h b/include/net/sctp/tsnmap.h index 2c5d2b4..c361d6f 100644 --- 
a/include/net/sctp/tsnmap.h +++ b/include/net/sctp/tsnmap.h @@ -28,7 +28,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h index ca4693b..34f0d34 100644 --- a/include/net/sctp/ulpevent.h +++ b/include/net/sctp/ulpevent.h @@ -31,7 +31,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/include/net/sctp/ulpqueue.h b/include/net/sctp/ulpqueue.h index 00e50ba..add3237 100644 --- a/include/net/sctp/ulpqueue.h +++ b/include/net/sctp/ulpqueue.h @@ -30,7 +30,7 @@ * * Please send any bug reports or fixes you make to the * email addresses: - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h index 66b466e..ca451e9 100644 --- a/include/uapi/linux/sctp.h +++ b/include/uapi/linux/sctp.h @@ -28,7 +28,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/net/sctp/associola.c b/net/sctp/associola.c index bce5b79..e425ba0 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -28,7 +28,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/net/sctp/auth.c b/net/sctp/auth.c index ba1dfc3..3aab967 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c @@ -22,7 +22,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c index 64977ea..f34ce8b 100644 --- a/net/sctp/bind_addr.c +++ b/net/sctp/bind_addr.c @@ -27,7 +27,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c index 5780565..b50b90c 100644 --- a/net/sctp/chunk.c +++ b/net/sctp/chunk.c @@ -24,7 +24,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/net/sctp/command.c b/net/sctp/command.c index c004401..f4bebda 100644 --- a/net/sctp/command.c +++ b/net/sctp/command.c @@ -25,7 +25,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/net/sctp/debug.c b/net/sctp/debug.c index f499878..44aa4e7 100644 --- a/net/sctp/debug.c +++ b/net/sctp/debug.c @@ -28,7 +28,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * 
lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 9e3d257..825b754 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c @@ -29,7 +29,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/net/sctp/input.c b/net/sctp/input.c index 3fa4d85..7993495 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -29,7 +29,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c index cb25f04..e70f60a 100644 --- a/net/sctp/inqueue.c +++ b/net/sctp/inqueue.c @@ -30,7 +30,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 09ffcc9..85d688f 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -27,7 +27,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/net/sctp/objcnt.c b/net/sctp/objcnt.c index fe012c4..aec346c 100644 --- a/net/sctp/objcnt.c +++ b/net/sctp/objcnt.c @@ -26,7 +26,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/net/sctp/output.c b/net/sctp/output.c index a46d1eb..5a55c55d 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -26,7 +26,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index ef9e2bb..5131323 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -28,7 +28,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/net/sctp/primitive.c b/net/sctp/primitive.c index 794bb14..31fa437 100644 --- a/net/sctp/primitive.c +++ b/net/sctp/primitive.c @@ -29,7 +29,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/net/sctp/proc.c b/net/sctp/proc.c index 62526c4..aff0cac 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c @@ -22,7 +22,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 4a17494d..b52ec25 100644 --- a/net/sctp/protocol.c +++ 
b/net/sctp/protocol.c @@ -29,7 +29,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp @@ -1547,7 +1547,7 @@ module_exit(sctp_exit); */ MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-132"); MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-132"); -MODULE_AUTHOR("Linux Kernel SCTP developers "); +MODULE_AUTHOR("Linux Kernel SCTP developers "); MODULE_DESCRIPTION("Support for the SCTP protocol (RFC2960)"); module_param_named(no_checksums, sctp_checksum_disable, bool, 0644); MODULE_PARM_DESC(no_checksums, "Disable checksums computing and verification"); diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 362ae6e..780a2d4 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -29,7 +29,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 9da6885..f1f3aac 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -28,7 +28,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index f6b7109..93271f0 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -28,7 +28,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c index 84d98d8..64ea5fd 100644 --- a/net/sctp/sm_statetable.c +++ b/net/sctp/sm_statetable.c @@ -28,7 +28,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/net/sctp/socket.c b/net/sctp/socket.c index c6670d2..0245712 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -34,7 +34,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/net/sctp/ssnmap.c b/net/sctp/ssnmap.c index da86035..72b5939 100644 --- a/net/sctp/ssnmap.c +++ b/net/sctp/ssnmap.c @@ -24,7 +24,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index 9a5c4c9..1906747 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c @@ -25,7 +25,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/net/sctp/transport.c b/net/sctp/transport.c index bdbbc3f..9602c52 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -30,7 +30,7 @@ * * Please 
send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c index b460195..0eff866 100644 --- a/net/sctp/tsnmap.c +++ b/net/sctp/tsnmap.c @@ -27,7 +27,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index 44a45db..c07624f 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c @@ -28,7 +28,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index 04e3d47..2cdb301 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c @@ -27,7 +27,7 @@ * * Please send any bug reports or fixes you make to the * email address(es): - * lksctp developers + * lksctp developers * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp -- cgit v0.10.2 From 4d58c0252002646ea522f5c6c623e558af459b39 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 23 Jul 2013 14:51:48 +0200 Subject: net: sctp: trivial: add uapi/linux/sctp.h into maintainers After this file has moved to the uapi section, we also need to update this in the maintainers file. Signed-off-by: Daniel Borkmann Acked-by: Neil Horman Acked-by: Vlad Yasevich Signed-off-by: David S. Miller diff --git a/MAINTAINERS b/MAINTAINERS index bf61e04..339e798 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7216,6 +7216,7 @@ W: http://lksctp.sourceforge.net S: Maintained F: Documentation/networking/sctp.txt F: include/linux/sctp.h +F: include/uapi/linux/sctp.h F: include/net/sctp/ F: net/sctp/ -- cgit v0.10.2 From 64dc61306ce7da370833289739e2f52dfc6b37ba Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 22 Jul 2013 20:26:31 -0700 Subject: net: add sk_stream_is_writeable() helper Several call sites use the hardcoded following condition : sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) Lets use a helper because TCP_NOTSENT_LOWAT support will change this condition for TCP sockets. Signed-off-by: Eric Dumazet Cc: Neal Cardwell Cc: Yuchung Cheng Acked-by: Neal Cardwell Signed-off-by: David S. Miller diff --git a/include/net/sock.h b/include/net/sock.h index e0473f6..d0b5fde 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1088,6 +1088,10 @@ static inline struct cg_proto *parent_cg_proto(struct proto *proto, } #endif +static inline bool sk_stream_is_writeable(const struct sock *sk) +{ + return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk); +} static inline bool sk_has_memory_pressure(const struct sock *sk) { diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index eb0a46a..3be308e 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -409,7 +409,7 @@ static void ceph_sock_write_space(struct sock *sk) * and net/core/stream.c:sk_stream_write_space(). 
*/ if (con_flag_test(con, CON_FLAG_WRITE_PENDING)) { - if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) { + if (sk_stream_is_writeable(sk)) { dout("%s %p queueing write work\n", __func__, con); clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); queue_con(con); diff --git a/net/core/stream.c b/net/core/stream.c index f5df85d..512f0a2 100644 --- a/net/core/stream.c +++ b/net/core/stream.c @@ -30,7 +30,7 @@ void sk_stream_write_space(struct sock *sk) struct socket *sock = sk->sk_socket; struct socket_wq *wq; - if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) { + if (sk_stream_is_writeable(sk) && sock) { clear_bit(SOCK_NOSPACE, &sock->flags); rcu_read_lock(); diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 6c7c78b..ba64750 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -336,7 +336,7 @@ unsigned int dccp_poll(struct file *file, struct socket *sock, mask |= POLLIN | POLLRDNORM; if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { - if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) { + if (sk_stream_is_writeable(sk)) { mask |= POLLOUT | POLLWRNORM; } else { /* send SIGIO later */ set_bit(SOCK_ASYNC_NOSPACE, @@ -347,7 +347,7 @@ unsigned int dccp_poll(struct file *file, struct socket *sock, * wspace test but before the flags are set, * IO signal will be lost. */ - if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) + if (sk_stream_is_writeable(sk)) mask |= POLLOUT | POLLWRNORM; } } diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 5423223..5eca906 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -499,7 +499,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) mask |= POLLIN | POLLRDNORM; if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { - if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) { + if (sk_stream_is_writeable(sk)) { mask |= POLLOUT | POLLWRNORM; } else { /* send SIGIO later */ set_bit(SOCK_ASYNC_NOSPACE, @@ -510,7 +510,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) * wspace test but before the flags are set, * IO signal will be lost. */ - if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) + if (sk_stream_is_writeable(sk)) mask |= POLLOUT | POLLWRNORM; } } else diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 305374d..0da6785 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -442,7 +442,7 @@ static void svc_tcp_write_space(struct sock *sk) { struct socket *sock = sk->sk_socket; - if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) + if (sk_stream_is_writeable(sk) && sock) clear_bit(SOCK_NOSPACE, &sock->flags); svc_write_space(sk); } diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index ddf0602..d6656d7 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1602,7 +1602,7 @@ static void xs_tcp_write_space(struct sock *sk) read_lock_bh(&sk->sk_callback_lock); /* from net/core/stream.c:sk_stream_write_space */ - if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) + if (sk_stream_is_writeable(sk)) xs_write_space(sk); read_unlock_bh(&sk->sk_callback_lock); -- cgit v0.10.2 From c9bee3b7fdecb0c1d070c7b54113b3bdfb9a3d36 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 22 Jul 2013 20:27:07 -0700 Subject: tcp: TCP_NOTSENT_LOWAT socket option Idea of this patch is to add optional limitation of number of unsent bytes in TCP sockets, to reduce usage of kernel memory. 
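The changelog's detailed rationale and netperf measurements continue below. As a quick illustration of the option being added, here is a minimal userspace sketch, not part of the patch: it assumes an already-connected TCP socket fd, uses the 128 KB figure the changelog suggests as a reasonable limit, and falls back to defining TCP_NOTSENT_LOWAT itself (value 25, matching the uapi change in this patch) for toolchain headers that predate it.

/*
 * Illustration only (not part of the patch): cap the amount of unsent
 * data on one TCP socket to 128 KB, the example figure used in the
 * changelog.  System-wide, the sysctl /proc/sys/net/ipv4/tcp_notsent_lowat
 * covers sockets that do not set the per-socket option.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef TCP_NOTSENT_LOWAT
#define TCP_NOTSENT_LOWAT 25	/* matches the uapi value added by this patch */
#endif

static int cap_unsent_bytes(int fd)
{
	unsigned int lowat = 128 * 1024;

	if (setsockopt(fd, IPPROTO_TCP, TCP_NOTSENT_LOWAT,
		       &lowat, sizeof(lowat)) < 0) {
		perror("setsockopt(TCP_NOTSENT_LOWAT)");
		return -1;
	}
	/* poll()/select()/epoll() now report POLLOUT only while the
	 * unsent byte count stays below lowat. */
	return 0;
}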
TCP receiver might announce a big window, and TCP sender autotuning might allow a large amount of bytes in write queue, but this has little performance impact if a large part of this buffering is wasted : Write queue needs to be large only to deal with large BDP, not necessarily to cope with scheduling delays (incoming ACKS make room for the application to queue more bytes) For most workloads, using a value of 128 KB or less is OK to give applications enough time to react to POLLOUT events in time (or being awaken in a blocking sendmsg()) This patch adds two ways to set the limit : 1) Per socket option TCP_NOTSENT_LOWAT 2) A sysctl (/proc/sys/net/ipv4/tcp_notsent_lowat) for sockets not using TCP_NOTSENT_LOWAT socket option (or setting a zero value) Default value being UINT_MAX (0xFFFFFFFF), meaning this has no effect. This changes poll()/select()/epoll() to report POLLOUT only if number of unsent bytes is below tp->nosent_lowat Note this might increase number of sendmsg()/sendfile() calls when using non blocking sockets, and increase number of context switches for blocking sockets. Note this is not related to SO_SNDLOWAT (as SO_SNDLOWAT is defined as : Specify the minimum number of bytes in the buffer until the socket layer will pass the data to the protocol) Tested: netperf sessions, and watching /proc/net/protocols "memory" column for TCP With 200 concurrent netperf -t TCP_STREAM sessions, amount of kernel memory used by TCP buffers shrinks by ~55 % (20567 pages instead of 45458) lpq83:~# echo -1 >/proc/sys/net/ipv4/tcp_notsent_lowat lpq83:~# (super_netperf 200 -t TCP_STREAM -H remote -l 90 &); sleep 60 ; grep TCP /proc/net/protocols TCPv6 1880 2 45458 no 208 yes ipv6 y y y y y y y y y y y y y n y y y y y TCP 1696 508 45458 no 208 yes kernel y y y y y y y y y y y y y n y y y y y lpq83:~# echo 131072 >/proc/sys/net/ipv4/tcp_notsent_lowat lpq83:~# (super_netperf 200 -t TCP_STREAM -H remote -l 90 &); sleep 60 ; grep TCP /proc/net/protocols TCPv6 1880 2 20567 no 208 yes ipv6 y y y y y y y y y y y y y n y y y y y TCP 1696 508 20567 no 208 yes kernel y y y y y y y y y y y y y n y y y y y Using 128KB has no bad effect on the throughput or cpu usage of a single flow, although there is an increase of context switches. A bonus is that we hold socket lock for a shorter amount of time and should improve latencies of ACK processing. lpq83:~# echo -1 >/proc/sys/net/ipv4/tcp_notsent_lowat lpq83:~# perf stat -e context-switches ./netperf -H 7.7.7.84 -t omni -l 20 -c -i10,3 OMNI Send TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to 7.7.7.84 () port 0 AF_INET : +/-2.500% @ 99% conf. Local Remote Local Elapsed Throughput Throughput Local Local Remote Remote Local Remote Service Send Socket Recv Socket Send Time Units CPU CPU CPU CPU Service Service Demand Size Size Size (sec) Util Util Util Util Demand Demand Units Final Final % Method % Method 1651584 6291456 16384 20.00 17447.90 10^6bits/s 3.13 S -1.00 U 0.353 -1.000 usec/KB Performance counter stats for './netperf -H 7.7.7.84 -t omni -l 20 -c -i10,3': 412,514 context-switches 200.034645535 seconds time elapsed lpq83:~# echo 131072 >/proc/sys/net/ipv4/tcp_notsent_lowat lpq83:~# perf stat -e context-switches ./netperf -H 7.7.7.84 -t omni -l 20 -c -i10,3 OMNI Send TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to 7.7.7.84 () port 0 AF_INET : +/-2.500% @ 99% conf. 
Local Remote Local Elapsed Throughput Throughput Local Local Remote Remote Local Remote Service Send Socket Recv Socket Send Time Units CPU CPU CPU CPU Service Service Demand Size Size Size (sec) Util Util Util Util Demand Demand Units Final Final % Method % Method 1593240 6291456 16384 20.00 17321.16 10^6bits/s 3.35 S -1.00 U 0.381 -1.000 usec/KB Performance counter stats for './netperf -H 7.7.7.84 -t omni -l 20 -c -i10,3': 2,675,818 context-switches 200.029651391 seconds time elapsed Signed-off-by: Eric Dumazet Cc: Neal Cardwell Cc: Yuchung Cheng Acked-By: Yuchung Cheng Signed-off-by: David S. Miller diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 1074290..53cea9b 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -516,6 +516,19 @@ tcp_wmem - vector of 3 INTEGERs: min, default, max this value is ignored. Default: between 64K and 4MB, depending on RAM size. +tcp_notsent_lowat - UNSIGNED INTEGER + A TCP socket can control the amount of unsent bytes in its write queue, + thanks to TCP_NOTSENT_LOWAT socket option. poll()/select()/epoll() + reports POLLOUT events if the amount of unsent bytes is below a per + socket value, and if the write queue is not full. sendmsg() will + also not add new buffers if the limit is hit. + + This global variable controls the amount of unsent data for + sockets not using TCP_NOTSENT_LOWAT. For these sockets, a change + to the global variable has immediate effect. + + Default: UINT_MAX (0xFFFFFFFF) + tcp_workaround_signed_windows - BOOLEAN If set, assume no receipt of a window scaling option means the remote TCP is broken and treats the window as a signed quantity. diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 472120b..9640803 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -238,6 +238,7 @@ struct tcp_sock { u32 rcv_wnd; /* Current receiver window */ u32 write_seq; /* Tail(+1) of data held in tcp send buffer */ + u32 notsent_lowat; /* TCP_NOTSENT_LOWAT */ u32 pushed_seq; /* Last pushed seq, required to talk to windows */ u32 lost_out; /* Lost packets */ u32 sacked_out; /* SACK'd packets */ diff --git a/include/net/sock.h b/include/net/sock.h index d0b5fde..b9f2b09 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -746,11 +746,6 @@ static inline int sk_stream_wspace(const struct sock *sk) extern void sk_stream_write_space(struct sock *sk); -static inline bool sk_stream_memory_free(const struct sock *sk) -{ - return sk->sk_wmem_queued < sk->sk_sndbuf; -} - /* OOB backlog add */ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) { @@ -950,6 +945,7 @@ struct proto { unsigned int inuse_idx; #endif + bool (*stream_memory_free)(const struct sock *sk); /* Memory pressure */ void (*enter_memory_pressure)(struct sock *sk); atomic_long_t *memory_allocated; /* Current allocated memory. */ @@ -1088,11 +1084,22 @@ static inline struct cg_proto *parent_cg_proto(struct proto *proto, } #endif +static inline bool sk_stream_memory_free(const struct sock *sk) +{ + if (sk->sk_wmem_queued >= sk->sk_sndbuf) + return false; + + return sk->sk_prot->stream_memory_free ? 
+ sk->sk_prot->stream_memory_free(sk) : true; +} + static inline bool sk_stream_is_writeable(const struct sock *sk) { - return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk); + return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && + sk_stream_memory_free(sk); } + static inline bool sk_has_memory_pressure(const struct sock *sk) { return sk->sk_prot->memory_pressure != NULL; diff --git a/include/net/tcp.h b/include/net/tcp.h index c586847..18fc999 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -284,6 +284,7 @@ extern int sysctl_tcp_thin_dupack; extern int sysctl_tcp_early_retrans; extern int sysctl_tcp_limit_output_bytes; extern int sysctl_tcp_challenge_ack_limit; +extern unsigned int sysctl_tcp_notsent_lowat; extern atomic_long_t tcp_memory_allocated; extern struct percpu_counter tcp_sockets_allocated; @@ -1539,6 +1540,19 @@ extern int tcp_gro_complete(struct sk_buff *skb); extern void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr); +static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp) +{ + return tp->notsent_lowat ?: sysctl_tcp_notsent_lowat; +} + +static inline bool tcp_stream_memory_free(const struct sock *sk) +{ + const struct tcp_sock *tp = tcp_sk(sk); + u32 notsent_bytes = tp->write_seq - tp->snd_nxt; + + return notsent_bytes < tcp_notsent_lowat(tp); +} + #ifdef CONFIG_PROC_FS extern int tcp4_proc_init(void); extern void tcp4_proc_exit(void); diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h index 8d776eb..377f1e5 100644 --- a/include/uapi/linux/tcp.h +++ b/include/uapi/linux/tcp.h @@ -111,6 +111,7 @@ enum { #define TCP_REPAIR_OPTIONS 22 #define TCP_FASTOPEN 23 /* Enable FastOpen on listeners */ #define TCP_TIMESTAMP 24 +#define TCP_NOTSENT_LOWAT 25 /* limit number of unsent bytes in write queue */ struct tcp_repair_opt { __u32 opt_code; diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index b2c123c..69ed203 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -555,6 +555,13 @@ static struct ctl_table ipv4_table[] = { .extra1 = &one, }, { + .procname = "tcp_notsent_lowat", + .data = &sysctl_tcp_notsent_lowat, + .maxlen = sizeof(sysctl_tcp_notsent_lowat), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { .procname = "tcp_rmem", .data = &sysctl_tcp_rmem, .maxlen = sizeof(sysctl_tcp_rmem), diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 5eca906..c27e813 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2631,6 +2631,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level, else tp->tsoffset = val - tcp_time_stamp; break; + case TCP_NOTSENT_LOWAT: + tp->notsent_lowat = val; + sk->sk_write_space(sk); + break; default: err = -ENOPROTOOPT; break; @@ -2847,6 +2851,9 @@ static int do_tcp_getsockopt(struct sock *sk, int level, case TCP_TIMESTAMP: val = tcp_time_stamp + tp->tsoffset; break; + case TCP_NOTSENT_LOWAT: + val = tp->notsent_lowat; + break; default: return -ENOPROTOOPT; } diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 2e3f129..2a5d5c4 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2800,6 +2800,7 @@ struct proto tcp_prot = { .unhash = inet_unhash, .get_port = inet_csk_get_port, .enter_memory_pressure = tcp_enter_memory_pressure, + .stream_memory_free = tcp_stream_memory_free, .sockets_allocated = &tcp_sockets_allocated, .orphan_count = &tcp_orphan_count, .memory_allocated = &tcp_memory_allocated, diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 92fde8d..884efff 100644 --- a/net/ipv4/tcp_output.c +++ 
b/net/ipv4/tcp_output.c @@ -65,6 +65,9 @@ int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS; /* By default, RFC2861 behavior. */ int sysctl_tcp_slow_start_after_idle __read_mostly = 1; +unsigned int sysctl_tcp_notsent_lowat __read_mostly = UINT_MAX; +EXPORT_SYMBOL(sysctl_tcp_notsent_lowat); + static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, int push_one, gfp_t gfp); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 80fe69e..b792e87 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1924,6 +1924,7 @@ struct proto tcpv6_prot = { .unhash = inet_unhash, .get_port = inet_csk_get_port, .enter_memory_pressure = tcp_enter_memory_pressure, + .stream_memory_free = tcp_stream_memory_free, .sockets_allocated = &tcp_sockets_allocated, .memory_allocated = &tcp_memory_allocated, .memory_pressure = &tcp_memory_pressure, -- cgit v0.10.2 From 18afa4b028b46f8b45ca64f94aefe717c297b07d Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 23 Jul 2013 16:13:17 +0200 Subject: net: Make devnet_rename_seq static No users outside net/core/dev.c. Signed-off-by: Thomas Gleixner Acked-by: Eric Dumazet Signed-off-by: David S. Miller diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 2bb2357..3ca60b0 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1666,9 +1666,6 @@ extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev); extern rwlock_t dev_base_lock; /* Device list lock */ -extern seqcount_t devnet_rename_seq; /* Device rename seq */ - - #define for_each_netdev(net, d) \ list_for_each_entry(d, &(net)->dev_base_head, dev_list) #define for_each_netdev_reverse(net, d) \ diff --git a/net/core/dev.c b/net/core/dev.c index 26755dd..dfd9f5d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -174,7 +174,7 @@ static DEFINE_SPINLOCK(napi_hash_lock); static unsigned int napi_gen_id; static DEFINE_HASHTABLE(napi_hash, 8); -seqcount_t devnet_rename_seq; +static seqcount_t devnet_rename_seq; static inline void dev_base_seq_inc(struct net *net) { -- cgit v0.10.2 From b04d68ebb04aa2d4ab9392c5353a53c81be7b847 Mon Sep 17 00:00:00 2001 From: Darren Hart Date: Sat, 18 May 2013 14:45:55 -0700 Subject: pch_gbe: Use PCH_GBE_PHY_REGS_LEN instead of 32 Avoid using magic numbers when we have perfectly good defines just lying around. Signed-off-by: Darren Hart Cc: "David S. Miller" Cc: "H. Peter Anvin" Cc: Peter Waskiewicz Cc: Andy Shevchenko Cc: netdev@vger.kernel.org diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index ab1039a..749ddd9 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -682,7 +682,7 @@ static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter) } adapter->hw.phy.addr = adapter->mii.phy_id; netdev_dbg(netdev, "phy_addr = %d\n", adapter->mii.phy_id); - if (addr == 32) + if (addr == PCH_GBE_PHY_REGS_LEN) return -EAGAIN; /* Selected the phy and isolate the rest */ for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) { -- cgit v0.10.2 From 9025c8e253369d324111c041032018955b80dd55 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Tue, 23 Jul 2013 20:01:45 +0200 Subject: drivers/net/ethernet/stmicro/stmmac: don't check resource with devm_ioremap_resource devm_ioremap_resource does sanity checks on the given resource. No need to duplicate this in the driver. Signed-off-by: Wolfram Sang Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 03de76c..da8be6e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -109,9 +109,6 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) const char *mac = NULL; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - return -ENODEV; - addr = devm_ioremap_resource(dev, res); if (IS_ERR(addr)) return PTR_ERR(addr); -- cgit v0.10.2 From f1a26fdf5944ff950888ae0017e546690353f85f Mon Sep 17 00:00:00 2001 From: Darren Hart Date: Sat, 18 May 2013 14:46:00 -0700 Subject: pch_gbe: Add MinnowBoard support The MinnowBoard uses an AR803x PHY with the PCH GBE which requires special handling. Use the MinnowBoard PCI Subsystem ID to detect this and add a pci_device_id.driver_data structure and functions to handle platform setup. The AR803x does not implement the RGMII 2ns TX clock delay in the trace routing nor via strapping. Add a detection method for the board and the PHY and enable the TX clock delay via the registers. This PHY will hibernate without link for 10 seconds. Ensure the PHY is awake for probe and then disable hibernation. A future improvement would be to convert pch_gbe to using PHYLIB and making sure we can wake the PHY at the necessary times rather than permanently disabling it. Signed-off-by: Darren Hart Cc: "David S. Miller" Cc: "H. Peter Anvin" Cc: Peter Waskiewicz Cc: Andy Shevchenko Cc: Joe Perches Cc: netdev@vger.kernel.org diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h index 7779036..6797b10 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h @@ -582,6 +582,19 @@ struct pch_gbe_hw_stats { }; /** + * struct pch_gbe_privdata - PCI Device ID driver data + * @phy_tx_clk_delay: Bool, configure the PHY TX delay in software + * @phy_disable_hibernate: Bool, disable PHY hibernation + * @platform_init: Platform initialization callback, called from + * probe, prior to PHY initialization. 
+ */ +struct pch_gbe_privdata { + bool phy_tx_clk_delay; + bool phy_disable_hibernate; + int (*platform_init)(struct pci_dev *pdev); +}; + +/** * struct pch_gbe_adapter - board specific private data structure * @stats_lock: Spinlock structure for status * @ethtool_lock: Spinlock structure for ethtool @@ -604,6 +617,7 @@ struct pch_gbe_hw_stats { * @rx_buffer_len: Receive buffer length * @tx_queue_len: Transmit queue length * @have_msi: PCI MSI mode flag + * @pch_gbe_privdata: PCI Device ID driver_data */ struct pch_gbe_adapter { @@ -631,6 +645,7 @@ struct pch_gbe_adapter { int hwts_tx_en; int hwts_rx_en; struct pci_dev *ptp_pdev; + struct pch_gbe_privdata *pdata; }; #define pch_gbe_hw_to_adapter(hw) container_of(hw, struct pch_gbe_adapter, hw) diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 749ddd9..e19f1be 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -23,6 +23,7 @@ #include #include #include +#include #define DRV_VERSION "1.01" const char pch_driver_version[] = DRV_VERSION; @@ -111,6 +112,8 @@ const char pch_driver_version[] = DRV_VERSION; #define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81" #define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00" +#define MINNOW_PHY_RESET_GPIO 13 + static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg); @@ -2635,6 +2638,9 @@ static int pch_gbe_probe(struct pci_dev *pdev, adapter->pdev = pdev; adapter->hw.back = adapter; adapter->hw.reg = pcim_iomap_table(pdev)[PCH_GBE_PCI_BAR]; + adapter->pdata = (struct pch_gbe_privdata *)pci_id->driver_data; + if (adapter->pdata && adapter->pdata->platform_init) + adapter->pdata->platform_init(pdev); adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number, PCI_DEVFN(12, 4)); @@ -2710,6 +2716,10 @@ static int pch_gbe_probe(struct pci_dev *pdev, dev_dbg(&pdev->dev, "PCH Network Connection\n"); + /* Disable hibernation on certain platforms */ + if (adapter->pdata && adapter->pdata->phy_disable_hibernate) + pch_gbe_phy_disable_hibernate(&adapter->hw); + device_set_wakeup_enable(&pdev->dev, 1); return 0; @@ -2720,9 +2730,48 @@ err_free_netdev: return ret; } +/* The AR803X PHY on the MinnowBoard requires a physical pin to be toggled to + * ensure it is awake for probe and init. Request the line and reset the PHY. 
+ */ +static int pch_gbe_minnow_platform_init(struct pci_dev *pdev) +{ + unsigned long flags = GPIOF_DIR_OUT | GPIOF_INIT_HIGH | GPIOF_EXPORT; + unsigned gpio = MINNOW_PHY_RESET_GPIO; + int ret; + + ret = devm_gpio_request_one(&pdev->dev, gpio, flags, + "minnow_phy_reset"); + if (ret) { + dev_err(&pdev->dev, + "ERR: Can't request PHY reset GPIO line '%d'\n", gpio); + return ret; + } + + gpio_set_value(gpio, 0); + usleep_range(1250, 1500); + gpio_set_value(gpio, 1); + usleep_range(1250, 1500); + + return ret; +} + +static struct pch_gbe_privdata pch_gbe_minnow_privdata = { + .phy_tx_clk_delay = true, + .phy_disable_hibernate = true, + .platform_init = pch_gbe_minnow_platform_init, +}; + static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = { {.vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_IOH1_GBE, + .subvendor = PCI_VENDOR_ID_CIRCUITCO, + .subdevice = PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD, + .class = (PCI_CLASS_NETWORK_ETHERNET << 8), + .class_mask = (0xFFFF00), + .driver_data = (kernel_ulong_t)&pch_gbe_minnow_privdata + }, + {.vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_IOH1_GBE, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .class = (PCI_CLASS_NETWORK_ETHERNET << 8), diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c index da07907..8b7ff75 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c @@ -74,6 +74,15 @@ #define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ #define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ +/* AR8031 PHY Debug Registers */ +#define PHY_AR803X_ID 0x00001374 +#define PHY_AR8031_DBG_OFF 0x1D +#define PHY_AR8031_DBG_DAT 0x1E +#define PHY_AR8031_SERDES 0x05 +#define PHY_AR8031_HIBERNATE 0x0B +#define PHY_AR8031_SERDES_TX_CLK_DLY 0x0100 /* TX clock delay of 2.0ns */ +#define PHY_AR8031_PS_HIB_EN 0x8000 /* Hibernate enable */ + /* Phy Id Register (word 2) */ #define PHY_REVISION_MASK 0x000F @@ -249,6 +258,51 @@ void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw) } /** + * pch_gbe_phy_tx_clk_delay - Setup TX clock delay via the PHY + * @hw: Pointer to the HW structure + * Returns + * 0: Successful. + * -EINVAL: Invalid argument. + */ +static int pch_gbe_phy_tx_clk_delay(struct pch_gbe_hw *hw) +{ + /* The RGMII interface requires a ~2ns TX clock delay. This is typically + * done in layout with a longer trace or via PHY strapping, but can also + * be done via PHY configuration registers. 
+ */ + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + u16 mii_reg; + int ret = 0; + + switch (hw->phy.id) { + case PHY_AR803X_ID: + netdev_dbg(adapter->netdev, + "Configuring AR803X PHY for 2ns TX clock delay\n"); + pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_OFF, &mii_reg); + ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_OFF, + PHY_AR8031_SERDES); + if (ret) + break; + + pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_DAT, &mii_reg); + mii_reg |= PHY_AR8031_SERDES_TX_CLK_DLY; + ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_DAT, + mii_reg); + break; + default: + netdev_err(adapter->netdev, + "Unknown PHY (%x), could not set TX clock delay\n", + hw->phy.id); + return -EINVAL; + } + + if (ret) + netdev_err(adapter->netdev, + "Could not configure tx clock delay for PHY\n"); + return ret; +} + +/** * pch_gbe_phy_init_setting - PHY initial setting * @hw: Pointer to the HW structure */ @@ -277,4 +331,48 @@ void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw) pch_gbe_phy_read_reg_miic(hw, PHY_PHYSP_CONTROL, &mii_reg); mii_reg |= PHYSP_CTRL_ASSERT_CRS_TX; pch_gbe_phy_write_reg_miic(hw, PHY_PHYSP_CONTROL, mii_reg); + + /* Setup a TX clock delay on certain platforms */ + if (adapter->pdata && adapter->pdata->phy_tx_clk_delay) + pch_gbe_phy_tx_clk_delay(hw); +} + +/** + * pch_gbe_phy_disable_hibernate - Disable the PHY low power state + * @hw: Pointer to the HW structure + * Returns + * 0: Successful. + * -EINVAL: Invalid argument. + */ +int pch_gbe_phy_disable_hibernate(struct pch_gbe_hw *hw) +{ + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + u16 mii_reg; + int ret = 0; + + switch (hw->phy.id) { + case PHY_AR803X_ID: + netdev_dbg(adapter->netdev, + "Disabling hibernation for AR803X PHY\n"); + ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_OFF, + PHY_AR8031_HIBERNATE); + if (ret) + break; + + pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_DAT, &mii_reg); + mii_reg &= ~PHY_AR8031_PS_HIB_EN; + ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_DAT, + mii_reg); + break; + default: + netdev_err(adapter->netdev, + "Unknown PHY (%x), could not disable hibernation\n", + hw->phy.id); + return -EINVAL; + } + + if (ret) + netdev_err(adapter->netdev, + "Could not disable PHY hibernation\n"); + return ret; } diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h index 03264dc..0cbe692 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h @@ -33,5 +33,6 @@ void pch_gbe_phy_power_up(struct pch_gbe_hw *hw); void pch_gbe_phy_power_down(struct pch_gbe_hw *hw); void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw); void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw); +int pch_gbe_phy_disable_hibernate(struct pch_gbe_hw *hw); #endif /* _PCH_GBE_PHY_H_ */ -- cgit v0.10.2 From da4f87f088b3ddfc9153aa3559834a3922da4395 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 1 Jul 2013 13:48:56 +0200 Subject: iwlwifi: dvm: remove P2P support We're not planning to support P2P on older devices, so remove the Kconfig option and associated code for it. Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig index cbaa5c2..e5c133e 100644 --- a/drivers/net/wireless/iwlwifi/Kconfig +++ b/drivers/net/wireless/iwlwifi/Kconfig @@ -127,20 +127,3 @@ config IWLWIFI_DEVICE_TRACING If unsure, say Y so we can help you better when problems occur. 
endmenu - -config IWLWIFI_P2P - def_bool y - bool "iwlwifi experimental P2P support" - depends on IWLWIFI - help - This option enables experimental P2P support for some devices - based on microcode support. Since P2P support is still under - development, this option may even enable it for some devices - now that turn out to not support it in the future due to - microcode restrictions. - - To determine if your microcode supports the experimental P2P - offered by this option, check if the driver advertises AP - support when it is loaded. - - Say Y only if you want to experiment with P2P. diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h index 1835511..f2a86ff 100644 --- a/drivers/net/wireless/iwlwifi/dvm/agn.h +++ b/drivers/net/wireless/iwlwifi/dvm/agn.h @@ -106,7 +106,6 @@ extern const struct iwl_dvm_cfg iwl_dvm_6030_cfg; #define STATUS_CHANNEL_SWITCH_PENDING 11 #define STATUS_SCAN_COMPLETE 12 #define STATUS_POWER_PMI 13 -#define STATUS_SCAN_ROC_EXPIRED 14 struct iwl_ucode_capabilities; @@ -250,7 +249,6 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid); /* scan */ void iwlagn_post_scan(struct iwl_priv *priv); -void iwlagn_disable_roc(struct iwl_priv *priv); int iwl_force_rf_reset(struct iwl_priv *priv, bool external); void iwl_init_scan_params(struct iwl_priv *priv); int iwl_scan_cancel(struct iwl_priv *priv); @@ -265,10 +263,6 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv, enum iwl_scan_type scan_type, enum ieee80211_band band); -void iwl_scan_roc_expired(struct iwl_priv *priv); -void iwl_scan_offchannel_skb(struct iwl_priv *priv); -void iwl_scan_offchannel_skb_status(struct iwl_priv *priv); - /* For faster active scanning, scan will move to the next channel if fewer than * PLCP_QUIET_THRESH packets are heard on this channel within * ACTIVE_QUIET_TIME after sending probe request. 
This shortens the dwell diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h index 60a4e0d..a79fdd1 100644 --- a/drivers/net/wireless/iwlwifi/dvm/dev.h +++ b/drivers/net/wireless/iwlwifi/dvm/dev.h @@ -540,7 +540,6 @@ struct iwl_rxon_context { enum iwl_scan_type { IWL_SCAN_NORMAL, IWL_SCAN_RADIO_RESET, - IWL_SCAN_ROC, }; /** @@ -825,12 +824,6 @@ struct iwl_priv { struct reply_tx_error_statistics reply_tx_stats; struct reply_agg_tx_error_statistics reply_agg_tx_stats; - /* remain-on-channel offload support */ - struct ieee80211_channel *hw_roc_channel; - struct delayed_work hw_roc_disable_work; - int hw_roc_duration; - bool hw_roc_setup, hw_roc_start_notified; - /* bt coex */ u8 bt_enable_flag; u8 bt_status; diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c index 822f1a0..f0a2c95 100644 --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c @@ -76,29 +76,6 @@ static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = { }, }; -static const struct ieee80211_iface_limit iwlagn_p2p_sta_go_limits[] = { - { - .max = 1, - .types = BIT(NL80211_IFTYPE_STATION), - }, - { - .max = 1, - .types = BIT(NL80211_IFTYPE_P2P_GO) | - BIT(NL80211_IFTYPE_AP), - }, -}; - -static const struct ieee80211_iface_limit iwlagn_p2p_2sta_limits[] = { - { - .max = 2, - .types = BIT(NL80211_IFTYPE_STATION), - }, - { - .max = 1, - .types = BIT(NL80211_IFTYPE_P2P_CLIENT), - }, -}; - static const struct ieee80211_iface_combination iwlagn_iface_combinations_dualmode[] = { { .num_different_channels = 1, @@ -114,21 +91,6 @@ iwlagn_iface_combinations_dualmode[] = { }, }; -static const struct ieee80211_iface_combination -iwlagn_iface_combinations_p2p[] = { - { .num_different_channels = 1, - .max_interfaces = 2, - .beacon_int_infra_match = true, - .limits = iwlagn_p2p_sta_go_limits, - .n_limits = ARRAY_SIZE(iwlagn_p2p_sta_go_limits), - }, - { .num_different_channels = 1, - .max_interfaces = 2, - .limits = iwlagn_p2p_2sta_limits, - .n_limits = ARRAY_SIZE(iwlagn_p2p_2sta_limits), - }, -}; - /* * Not a mac80211 entry point function, but it fits in with all the * other mac80211 functions grouped here. 
@@ -186,19 +148,13 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); - if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)) { - hw->wiphy->iface_combinations = iwlagn_iface_combinations_p2p; - hw->wiphy->n_iface_combinations = - ARRAY_SIZE(iwlagn_iface_combinations_p2p); - } else if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) { + if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) { hw->wiphy->iface_combinations = iwlagn_iface_combinations_dualmode; hw->wiphy->n_iface_combinations = ARRAY_SIZE(iwlagn_iface_combinations_dualmode); } - hw->wiphy->max_remain_on_channel_duration = 500; - hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS | WIPHY_FLAG_IBSS_RSN; @@ -1156,126 +1112,6 @@ done: IWL_DEBUG_MAC80211(priv, "leave\n"); } -static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_channel *channel, - int duration, - enum ieee80211_roc_type type) -{ - struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN]; - int err = 0; - - if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN))) - return -EOPNOTSUPP; - - if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT))) - return -EOPNOTSUPP; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - mutex_lock(&priv->mutex); - - if (test_bit(STATUS_SCAN_HW, &priv->status)) { - /* mac80211 should not scan while ROC or ROC while scanning */ - if (WARN_ON_ONCE(priv->scan_type != IWL_SCAN_RADIO_RESET)) { - err = -EBUSY; - goto out; - } - - iwl_scan_cancel_timeout(priv, 100); - - if (test_bit(STATUS_SCAN_HW, &priv->status)) { - err = -EBUSY; - goto out; - } - } - - priv->hw_roc_channel = channel; - /* convert from ms to TU */ - priv->hw_roc_duration = DIV_ROUND_UP(1000 * duration, 1024); - priv->hw_roc_start_notified = false; - cancel_delayed_work(&priv->hw_roc_disable_work); - - if (!ctx->is_active) { - static const struct iwl_qos_info default_qos_data = { - .def_qos_parm = { - .ac[0] = { - .cw_min = cpu_to_le16(3), - .cw_max = cpu_to_le16(7), - .aifsn = 2, - .edca_txop = cpu_to_le16(1504), - }, - .ac[1] = { - .cw_min = cpu_to_le16(7), - .cw_max = cpu_to_le16(15), - .aifsn = 2, - .edca_txop = cpu_to_le16(3008), - }, - .ac[2] = { - .cw_min = cpu_to_le16(15), - .cw_max = cpu_to_le16(1023), - .aifsn = 3, - }, - .ac[3] = { - .cw_min = cpu_to_le16(15), - .cw_max = cpu_to_le16(1023), - .aifsn = 7, - }, - }, - }; - - ctx->is_active = true; - ctx->qos_data = default_qos_data; - ctx->staging.dev_type = RXON_DEV_TYPE_P2P; - memcpy(ctx->staging.node_addr, - priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr, - ETH_ALEN); - memcpy(ctx->staging.bssid_addr, - priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr, - ETH_ALEN); - err = iwlagn_commit_rxon(priv, ctx); - if (err) - goto out; - ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK | - RXON_FILTER_PROMISC_MSK | - RXON_FILTER_CTL2HOST_MSK; - - err = iwlagn_commit_rxon(priv, ctx); - if (err) { - iwlagn_disable_roc(priv); - goto out; - } - priv->hw_roc_setup = true; - } - - err = iwl_scan_initiate(priv, ctx->vif, IWL_SCAN_ROC, channel->band); - if (err) - iwlagn_disable_roc(priv); - - out: - mutex_unlock(&priv->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); - - return err; -} - -static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw) -{ - struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); - - if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN))) - return -EOPNOTSUPP; - - IWL_DEBUG_MAC80211(priv, 
"enter\n"); - mutex_lock(&priv->mutex); - iwl_scan_cancel_timeout(priv, priv->hw_roc_duration); - iwlagn_disable_roc(priv); - mutex_unlock(&priv->mutex); - IWL_DEBUG_MAC80211(priv, "leave\n"); - - return 0; -} - static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_rssi_event rssi_event) @@ -1431,12 +1267,8 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw, IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n", viftype, vif->addr); - cancel_delayed_work_sync(&priv->hw_roc_disable_work); - mutex_lock(&priv->mutex); - iwlagn_disable_roc(priv); - if (!iwl_is_ready_rf(priv)) { IWL_WARN(priv, "Try to add interface when device not ready\n"); err = -EINVAL; @@ -1763,8 +1595,6 @@ struct ieee80211_ops iwlagn_hw_ops = { .channel_switch = iwlagn_mac_channel_switch, .flush = iwlagn_mac_flush, .tx_last_beacon = iwlagn_mac_tx_last_beacon, - .remain_on_channel = iwlagn_mac_remain_on_channel, - .cancel_remain_on_channel = iwlagn_mac_cancel_remain_on_channel, .rssi_callback = iwlagn_mac_rssi_callback, .set_tim = iwlagn_mac_set_tim, }; diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c index 3952ddf..f78a3d3 100644 --- a/drivers/net/wireless/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/iwlwifi/dvm/main.c @@ -587,11 +587,6 @@ static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags) priv->contexts[IWL_RXON_CTX_PAN].interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP); - if (ucode_flags & IWL_UCODE_TLV_FLAGS_P2P) - priv->contexts[IWL_RXON_CTX_PAN].interface_modes |= - BIT(NL80211_IFTYPE_P2P_CLIENT) | - BIT(NL80211_IFTYPE_P2P_GO); - priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP; priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA; priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P; @@ -854,14 +849,6 @@ void iwl_down(struct iwl_priv *priv) iwl_scan_cancel_timeout(priv, 200); - /* - * If active, scanning won't cancel it, so say it expired. - * No race since we hold the mutex here and a new one - * can't come in at this time. 
- */ - if (priv->ucode_loaded && priv->cur_ucode != IWL_UCODE_INIT) - ieee80211_remain_on_channel_expired(priv->hw); - exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status); @@ -1002,41 +989,6 @@ static void iwl_bg_restart(struct work_struct *data) } } - - - -void iwlagn_disable_roc(struct iwl_priv *priv) -{ - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN]; - - lockdep_assert_held(&priv->mutex); - - if (!priv->hw_roc_setup) - return; - - ctx->staging.dev_type = RXON_DEV_TYPE_P2P; - ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; - - priv->hw_roc_channel = NULL; - - memset(ctx->staging.node_addr, 0, ETH_ALEN); - - iwlagn_commit_rxon(priv, ctx); - - ctx->is_active = false; - priv->hw_roc_setup = false; -} - -static void iwlagn_disable_roc_work(struct work_struct *work) -{ - struct iwl_priv *priv = container_of(work, struct iwl_priv, - hw_roc_disable_work.work); - - mutex_lock(&priv->mutex); - iwlagn_disable_roc(priv); - mutex_unlock(&priv->mutex); -} - /***************************************************************************** * * driver setup and teardown @@ -1053,8 +1005,6 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv) INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush); INIT_WORK(&priv->bt_full_concurrency, iwl_bg_bt_full_concurrency); INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config); - INIT_DELAYED_WORK(&priv->hw_roc_disable_work, - iwlagn_disable_roc_work); iwl_setup_scan_deferred_work(priv); @@ -1082,7 +1032,6 @@ void iwl_cancel_deferred_work(struct iwl_priv *priv) cancel_work_sync(&priv->bt_full_concurrency); cancel_work_sync(&priv->bt_runtime_config); - cancel_delayed_work_sync(&priv->hw_roc_disable_work); del_timer_sync(&priv->statistics_periodic); del_timer_sync(&priv->ucode_trace); @@ -1169,12 +1118,6 @@ static void iwl_option_config(struct iwl_priv *priv) #else IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TRACING disabled\n"); #endif - -#ifdef CONFIG_IWLWIFI_P2P - IWL_INFO(priv, "CONFIG_IWLWIFI_P2P enabled\n"); -#else - IWL_INFO(priv, "CONFIG_IWLWIFI_P2P disabled\n"); -#endif } static int iwl_eeprom_init_hw_params(struct iwl_priv *priv) @@ -1315,10 +1258,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, ucode_flags = fw->ucode_capa.flags; -#ifndef CONFIG_IWLWIFI_P2P - ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P; -#endif - if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) { priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN; trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM; @@ -1413,7 +1352,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, * if not PAN, then don't support P2P -- might be a uCode * packaging bug or due to the eeprom check above */ - ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P; priv->sta_key_max_num = STA_KEY_MAX_NUM; trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM; diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c index cd1ad001..d7ce2f1 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rxon.c +++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c @@ -564,11 +564,7 @@ int iwlagn_set_pan_params(struct iwl_priv *priv) cmd.slots[0].type = 0; /* BSS */ cmd.slots[1].type = 1; /* PAN */ - if (priv->hw_roc_setup) { - /* both contexts must be used for this to happen */ - slot1 = IWL_MIN_SLOT_TIME; - slot0 = 3000; - } else if (ctx_bss->vif && ctx_pan->vif) { + if (ctx_bss->vif && ctx_pan->vif) { int bcnint = ctx_pan->beacon_int; int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1; diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c 
b/drivers/net/wireless/iwlwifi/dvm/scan.c index 8c686a5..35e0ee8 100644 --- a/drivers/net/wireless/iwlwifi/dvm/scan.c +++ b/drivers/net/wireless/iwlwifi/dvm/scan.c @@ -100,9 +100,6 @@ static void iwl_complete_scan(struct iwl_priv *priv, bool aborted) ieee80211_scan_completed(priv->hw, aborted); } - if (priv->scan_type == IWL_SCAN_ROC) - iwl_scan_roc_expired(priv); - priv->scan_type = IWL_SCAN_NORMAL; priv->scan_vif = NULL; priv->scan_request = NULL; @@ -130,9 +127,6 @@ static void iwl_process_scan_complete(struct iwl_priv *priv) goto out_settings; } - if (priv->scan_type == IWL_SCAN_ROC) - iwl_scan_roc_expired(priv); - if (priv->scan_type != IWL_SCAN_NORMAL && !aborted) { int err; @@ -284,12 +278,6 @@ static int iwl_rx_scan_start_notif(struct iwl_priv *priv, le32_to_cpu(notif->tsf_low), notif->status, notif->beacon_timer); - if (priv->scan_type == IWL_SCAN_ROC && - !priv->hw_roc_start_notified) { - ieee80211_ready_on_channel(priv->hw); - priv->hw_roc_start_notified = true; - } - return 0; } @@ -697,8 +685,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; scan->quiet_time = IWL_ACTIVE_QUIET_TIME; - if (priv->scan_type != IWL_SCAN_ROC && - iwl_is_any_associated(priv)) { + if (iwl_is_any_associated(priv)) { u16 interval = 0; u32 extra; u32 suspend_time = 100; @@ -706,9 +693,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) IWL_DEBUG_INFO(priv, "Scanning while associated...\n"); switch (priv->scan_type) { - case IWL_SCAN_ROC: - WARN_ON(1); - break; case IWL_SCAN_RADIO_RESET: interval = 0; break; @@ -728,11 +712,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) scan->suspend_time = cpu_to_le32(scan_suspend_time); IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n", scan_suspend_time, interval); - } else if (priv->scan_type == IWL_SCAN_ROC) { - scan->suspend_time = 0; - scan->max_out_time = 0; - scan->quiet_time = 0; - scan->quiet_plcp_th = 0; } switch (priv->scan_type) { @@ -774,9 +753,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) } else IWL_DEBUG_SCAN(priv, "Start passive scan.\n"); break; - case IWL_SCAN_ROC: - IWL_DEBUG_SCAN(priv, "Start ROC scan.\n"); - break; } scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; @@ -898,7 +874,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) scan_cmd_size - sizeof(*scan)); break; case IWL_SCAN_RADIO_RESET: - case IWL_SCAN_ROC: /* use bcast addr, will not be transmitted but must be valid */ cmd_len = iwl_fill_probe_req( (struct ieee80211_mgmt *)scan->data, @@ -926,46 +901,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) is_active, n_probes, (void *)&scan->data[cmd_len]); break; - case IWL_SCAN_ROC: { - struct iwl_scan_channel *scan_ch; - int n_chan, i; - u16 dwell; - - dwell = iwl_limit_dwell(priv, priv->hw_roc_duration); - n_chan = DIV_ROUND_UP(priv->hw_roc_duration, dwell); - - scan->channel_count = n_chan; - - scan_ch = (void *)&scan->data[cmd_len]; - - for (i = 0; i < n_chan; i++) { - scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE; - scan_ch->channel = - cpu_to_le16(priv->hw_roc_channel->hw_value); - - if (i == n_chan - 1) - dwell = priv->hw_roc_duration - i * dwell; - - scan_ch->active_dwell = - scan_ch->passive_dwell = cpu_to_le16(dwell); - - /* Set txpower levels to defaults */ - scan_ch->dsp_atten = 110; - - /* NOTE: if we were doing 6Mb OFDM for scans we'd use - * power level: - * 
scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3; - */ - if (priv->hw_roc_channel->band == IEEE80211_BAND_5GHZ) - scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3; - else - scan_ch->tx_gain = ((1 << 5) | (5 << 3)); - - scan_ch++; - } - } - - break; } if (scan->channel_count == 0) { @@ -1035,7 +970,6 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv, IWL_DEBUG_SCAN(priv, "Starting %sscan...\n", scan_type == IWL_SCAN_NORMAL ? "" : - scan_type == IWL_SCAN_ROC ? "remain-on-channel " : "internal short "); set_bit(STATUS_SCANNING, &priv->status); @@ -1149,40 +1083,3 @@ void iwl_cancel_scan_deferred_work(struct iwl_priv *priv) mutex_unlock(&priv->mutex); } } - -void iwl_scan_roc_expired(struct iwl_priv *priv) -{ - /* - * The status bit should be set here, to prevent a race - * where the atomic_read returns 1, but before the execution continues - * iwl_scan_offchannel_skb_status() checks if the status bit is set - */ - set_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status); - - if (atomic_read(&priv->num_aux_in_flight) == 0) { - ieee80211_remain_on_channel_expired(priv->hw); - priv->hw_roc_channel = NULL; - schedule_delayed_work(&priv->hw_roc_disable_work, - 10 * HZ); - - clear_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status); - } else { - IWL_DEBUG_SCAN(priv, "ROC done with %d frames in aux\n", - atomic_read(&priv->num_aux_in_flight)); - } -} - -void iwl_scan_offchannel_skb(struct iwl_priv *priv) -{ - WARN_ON(!priv->hw_roc_start_notified); - atomic_inc(&priv->num_aux_in_flight); -} - -void iwl_scan_offchannel_skb_status(struct iwl_priv *priv) -{ - if (atomic_dec_return(&priv->num_aux_in_flight) == 0 && - test_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status)) { - IWL_DEBUG_SCAN(priv, "0 aux frames. Calling ROC expired\n"); - iwl_scan_roc_expired(priv); - } -} diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c index 5ee983f..3db0bbb 100644 --- a/drivers/net/wireless/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/iwlwifi/dvm/tx.c @@ -478,9 +478,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv, if (sta_priv && sta_priv->client && !is_agg) atomic_inc(&sta_priv->pending_frames); - if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) - iwl_scan_offchannel_skb(priv); - return 0; drop_unlock_sta: @@ -1158,7 +1155,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, struct sk_buff *skb; struct iwl_rxon_context *ctx; bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE); - bool is_offchannel_skb; tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >> IWLAGN_TX_RES_TID_POS; @@ -1178,8 +1174,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, __skb_queue_head_init(&skbs); - is_offchannel_skb = false; - if (tx_resp->frame_count == 1) { u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl); next_reclaimed = IEEE80211_SEQ_TO_SN(next_reclaimed + 0x10); @@ -1256,8 +1250,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, if (!is_agg) iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1); - is_offchannel_skb = - (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN); freed++; } @@ -1271,14 +1263,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, if (!is_agg && freed != 1) IWL_ERR(priv, "Q: %d, freed %d\n", txq_id, freed); - /* - * An offchannel frame can be send only on the AUX queue, where - * there is no aggregation (and reordering) so it only is single - * skb is expected to be processed. 
- */ - if (is_offchannel_skb && freed != 1) - IWL_ERR(priv, "OFFCHANNEL SKB freed %d\n", freed); - IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x)\n", txq_id, iwl_get_tx_fail_reason(status), status); @@ -1298,9 +1282,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, ieee80211_tx_status_ni(priv->hw, skb); } - if (is_offchannel_skb) - iwl_scan_offchannel_skb_status(priv); - return 0; } -- cgit v0.10.2 From 26e05cc32d4dab8ad939fd1318bc470d737a20ca Mon Sep 17 00:00:00 2001 From: David Spinadel Date: Mon, 8 Jul 2013 13:12:29 +0300 Subject: iwlwifi: mvm: enable pre-scan passive to active Enable passive to active scan feature, on channels that was active in the past hour. Signed-off-by: David Spinadel Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h index b60d141..c33ff8e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h @@ -138,6 +138,8 @@ struct iwl_ssid_ie { *@SCAN_FLAGS_DELAYED_SCAN_LOWBAND: *@SCAN_FLAGS_DELAYED_SCAN_HIGHBAND: *@SCAN_FLAGS_FRAGMENTED_SCAN: + *@SCAN_FLAGS_PASSIVE2ACTIVE: use active scan on channels that was active + * in the past hour, even if they are marked as passive. */ enum iwl_scan_flags { SCAN_FLAGS_PERIODIC_SCAN = BIT(0), @@ -145,6 +147,7 @@ enum iwl_scan_flags { SCAN_FLAGS_DELAYED_SCAN_LOWBAND = BIT(2), SCAN_FLAGS_DELAYED_SCAN_HIGHBAND = BIT(3), SCAN_FLAGS_FRAGMENTED_SCAN = BIT(4), + SCAN_FLAGS_PASSIVE2ACTIVE = BIT(5), }; /** @@ -179,7 +182,7 @@ enum iwl_scan_type { * @quiet_time: in msecs, dwell this time for active scan on quiet channels * @quiet_plcp_th: quiet PLCP threshold (channel is quiet if less than * this number of packets were received (typically 1) - * @passive2active: is auto switching from passive to active allowed (0 or 1) + * @passive2active: is auto switching from passive to active during scan allowed * @rxchain_sel_flags: RXON_RX_CHAIN_* * @max_out_time: in usecs, max out of serving channel time * @suspend_time: how long to pause scan when returning to service channel: diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 2157b0f..dc63579 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -306,10 +306,12 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm, */ if (req->n_ssids > 0) { cmd->passive2active = cpu_to_le16(1); + cmd->scan_flags |= SCAN_FLAGS_PASSIVE2ACTIVE; ssid = req->ssids[0].ssid; ssid_len = req->ssids[0].ssid_len; } else { cmd->passive2active = 0; + cmd->scan_flags &= ~SCAN_FLAGS_PASSIVE2ACTIVE; } iwl_mvm_scan_fill_ssids(cmd, req); -- cgit v0.10.2 From 637b4caeedde9b926de6e66d68d0951b0f0c83ef Mon Sep 17 00:00:00 2001 From: Marcel Holtmann Date: Mon, 1 Jul 2013 14:14:46 -0700 Subject: Bluetooth: Fix simple whitespace vs tab style issue Signed-off-by: Marcel Holtmann Signed-off-by: Gustavo Padovan diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index e3a3499..b821b19 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -605,7 +605,7 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt) * as supported send it. If not supported assume that the controller * does not have actual support for stored link keys which makes this * command redundant anyway. 
- */ + */ if (hdev->commands[6] & 0x80) { struct hci_cp_delete_stored_link_key cp; -- cgit v0.10.2 From a77b15a60cb1d54d87e8794bfbc94c9ccee679ed Mon Sep 17 00:00:00 2001 From: Mikel Astiz Date: Fri, 28 Jun 2013 10:56:27 +0200 Subject: Bluetooth: Add HCI authentication capabilities macros Add macros for the HCI capabilities as described in the Bluetooth Core Specification v4.0, Volume 2, part E, section 7.1.29. Signed-off-by: Mikel Astiz Signed-off-by: Gustavo Padovan diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 3c592cf..a01fbb4 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -296,6 +296,12 @@ enum { #define HCI_AT_GENERAL_BONDING 0x04 #define HCI_AT_GENERAL_BONDING_MITM 0x05 +/* I/O capabilities */ +#define HCI_IO_DISPLAY_ONLY 0x00 +#define HCI_IO_DISPLAY_YESNO 0x01 +#define HCI_IO_KEYBOARD_ONLY 0x02 +#define HCI_IO_NO_INPUT_OUTPUT 0x03 + /* Link Key types */ #define HCI_LK_COMBINATION 0x00 #define HCI_LK_LOCAL_UNIT 0x01 -- cgit v0.10.2 From acabae96df2dff253f831e94c33ef9f0f15600d0 Mon Sep 17 00:00:00 2001 From: Mikel Astiz Date: Fri, 28 Jun 2013 10:56:28 +0200 Subject: Bluetooth: Use defines in in hci_get_auth_req() Make the code in hci_get_auth_req() more readable by using the defined macros instead of inlining magic numbers. Signed-off-by: Mikel Astiz Signed-off-by: Timo Mueller Signed-off-by: Gustavo Padovan diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 0437200..ce8be0b 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -3024,17 +3024,20 @@ unlock: static u8 hci_get_auth_req(struct hci_conn *conn) { /* If remote requests dedicated bonding follow that lead */ - if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) { + if (conn->remote_auth == HCI_AT_DEDICATED_BONDING || + conn->remote_auth == HCI_AT_DEDICATED_BONDING_MITM) { /* If both remote and local IO capabilities allow MITM * protection then require it, otherwise don't */ - if (conn->remote_cap == 0x03 || conn->io_capability == 0x03) - return 0x02; + if (conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT || + conn->io_capability == HCI_IO_NO_INPUT_OUTPUT) + return HCI_AT_DEDICATED_BONDING; else - return 0x03; + return HCI_AT_DEDICATED_BONDING_MITM; } /* If remote requests no-bonding follow that lead */ - if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01) + if (conn->remote_auth == HCI_AT_NO_BONDING || + conn->remote_auth == HCI_AT_NO_BONDING_MITM) return conn->remote_auth | (conn->auth_type & 0x01); return conn->auth_type; -- cgit v0.10.2 From a767631ad1c2e785f4a8fcad26bcf50eb5786373 Mon Sep 17 00:00:00 2001 From: Mikel Astiz Date: Fri, 28 Jun 2013 10:56:29 +0200 Subject: Bluetooth: Use defines instead of integer literals Replace the occurrences of integer literals in hci_event.c with the newly introduced macros in hci.h. Signed-off-by: Mikel Astiz Signed-off-by: Gustavo Padovan diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index ce8be0b..50e39f4 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -3069,7 +3069,7 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) /* Change the IO capability from KeyboardDisplay * to DisplayYesNo as it is not supported by BT spec. */ cp.capability = (conn->io_capability == 0x04) ? 
- 0x01 : conn->io_capability; + HCI_IO_DISPLAY_YESNO : conn->io_capability; conn->auth_type = hci_get_auth_req(conn); cp.authentication = conn->auth_type; @@ -3143,7 +3143,8 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev, * request. The only exception is when we're dedicated bonding * initiators (connect_cfm_cb set) since then we always have the MITM * bit set. */ - if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) { + if (!conn->connect_cfm_cb && loc_mitm && + conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) { BT_DBG("Rejecting request: remote device can't provide MITM"); hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY, sizeof(ev->bdaddr), &ev->bdaddr); @@ -3151,8 +3152,8 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev, } /* If no side requires MITM protection; auto-accept */ - if ((!loc_mitm || conn->remote_cap == 0x03) && - (!rem_mitm || conn->io_capability == 0x03)) { + if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) && + (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) { /* If we're not the initiators request authorization to * proceed from user space (mgmt_user_confirm with -- cgit v0.10.2 From 1c244f79c0d6abf8634faedb9b042122481c3572 Mon Sep 17 00:00:00 2001 From: Gustavo Padovan Date: Fri, 7 Dec 2012 03:29:10 -0200 Subject: Bluetooth: Add missing braces to an "else if" Trivial change in the coding style. Signed-off-by: Gustavo Padovan diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 8c3499b..b3bb7bc 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -1415,8 +1415,9 @@ static void l2cap_conn_ready(struct l2cap_conn *conn) sk->sk_state_change(sk); release_sock(sk); - } else if (chan->state == BT_CONNECT) + } else if (chan->state == BT_CONNECT) { l2cap_do_start(chan); + } l2cap_chan_unlock(chan); } -- cgit v0.10.2 From 2583d706a13d0dc7fa591d5bb036454d0ddbf5b0 Mon Sep 17 00:00:00 2001 From: Benjamin Tissoires Date: Thu, 11 Jul 2013 15:41:29 +0200 Subject: Bluetooth: hidp: implement hidinput_input_event callback We can re-enable hidinput_input_event to allow the leds of bluetooth keyboards to be set. Now the callbacks uses hid core to retrieve the right HID report to send, so this version is safer. 
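A minimal userspace sketch of how the new path gets exercised (illustrative only, not part of the patch; the evdev node path is an assumption): writing an EV_LED event to the keyboard's event device now reaches hidp_hidinput_event(), which looks up the matching LED field and sends the report over the interrupt channel.

#include <fcntl.h>
#include <unistd.h>
#include <linux/input.h>

/* Toggle the caps-lock LED on a Bluetooth keyboard via its evdev node. */
static int set_capslock_led(const char *evdev_path, int on)
{
	struct input_event ev = {
		.type  = EV_LED,
		.code  = LED_CAPSL,
		.value = on ? 1 : 0,
	};
	int fd = open(evdev_path, O_RDWR);

	if (fd < 0)
		return -1;
	if (write(fd, &ev, sizeof(ev)) != (ssize_t)sizeof(ev)) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}
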
Signed-off-by: Benjamin Tissoires Reviewed-by: David Herrmann Acked-by: Jiri Kosina Signed-off-by: Gustavo Padovan diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 0c699cd..2977bf7 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -238,6 +238,31 @@ static int hidp_send_report(struct hidp_session *session, struct hid_report *rep return hidp_send_intr_message(session, hdr, buf, rsize); } +static int hidp_hidinput_event(struct input_dev *dev, unsigned int type, + unsigned int code, int value) +{ + struct hid_device *hid = input_get_drvdata(dev); + struct hidp_session *session = hid->driver_data; + struct hid_field *field; + int offset; + + BT_DBG("session %p type %d code %d value %d", + session, type, code, value); + + if (type != EV_LED) + return -1; + + offset = hidinput_find_field(hid, type, code, &field); + if (offset == -1) { + hid_warn(dev, "event field not found\n"); + return -1; + } + + hid_set_field(field, offset, value); + + return hidp_send_report(session, field->report); +} + static int hidp_get_raw_report(struct hid_device *hid, unsigned char report_number, unsigned char *data, size_t count, @@ -711,6 +736,7 @@ static struct hid_ll_driver hidp_hid_driver = { .stop = hidp_stop, .open = hidp_open, .close = hidp_close, + .hidinput_input_event = hidp_hidinput_event, }; /* This function sets up the hid device. It does not add it -- cgit v0.10.2 From 159d865f2078ffa4441abb0155f725368371f836 Mon Sep 17 00:00:00 2001 From: Benjamin Tissoires Date: Thu, 11 Jul 2013 15:41:30 +0200 Subject: Bluetooth: hidp: remove wrong send_report at init The USB hid implementation does retrieve the reports during the start. However, this implementation does not call the HID command GET_REPORT (which would fetch the current status of each report), but use the DATA command, which is an Output Report (so transmitting data from the host to the device). The Wiimote controller is already guarded against this problem in the protocol, but it is not conformant to the specification to set all the reports to 0 on start. Signed-off-by: Benjamin Tissoires Reviewed-by: David Herrmann Acked-by: Jiri Kosina Signed-off-by: Gustavo Padovan diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 2977bf7..13863de 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -703,20 +703,6 @@ static int hidp_parse(struct hid_device *hid) static int hidp_start(struct hid_device *hid) { - struct hidp_session *session = hid->driver_data; - struct hid_report *report; - - if (hid->quirks & HID_QUIRK_NO_INIT_REPORTS) - return 0; - - list_for_each_entry(report, &hid->report_enum[HID_INPUT_REPORT]. - report_list, list) - hidp_send_report(session, report); - - list_for_each_entry(report, &hid->report_enum[HID_FEATURE_REPORT]. - report_list, list) - hidp_send_report(session, report); - return 0; } -- cgit v0.10.2 From 473c13179c5828f7123a4d9fe449d8bd79fae254 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 19 Jul 2013 16:09:38 +0900 Subject: Bluetooth: replace strict_strtol() with kstrtol() The usage of strict_strtol() is not preferred, because strict_strtol() is obsolete. Thus, kstrtol() should be used. 
Signed-off-by: Jingoo Han Acked-by: Marcel Holtmann Signed-off-by: Gustavo Padovan diff --git a/drivers/bluetooth/btmrvl_debugfs.c b/drivers/bluetooth/btmrvl_debugfs.c index db2c3c3..023d35e 100644 --- a/drivers/bluetooth/btmrvl_debugfs.c +++ b/drivers/bluetooth/btmrvl_debugfs.c @@ -43,7 +43,7 @@ static ssize_t btmrvl_hscfgcmd_write(struct file *file, if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; - ret = strict_strtol(buf, 10, &result); + ret = kstrtol(buf, 10, &result); if (ret) return ret; @@ -89,7 +89,7 @@ static ssize_t btmrvl_pscmd_write(struct file *file, const char __user *ubuf, if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; - ret = strict_strtol(buf, 10, &result); + ret = kstrtol(buf, 10, &result); if (ret) return ret; @@ -135,7 +135,7 @@ static ssize_t btmrvl_hscmd_write(struct file *file, const char __user *ubuf, if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; - ret = strict_strtol(buf, 10, &result); + ret = kstrtol(buf, 10, &result); if (ret) return ret; -- cgit v0.10.2 From f52809483caceaf83bd2c7915a35ace3b6e7b0ef Mon Sep 17 00:00:00 2001 From: Wang Sheng-Hui Date: Wed, 24 Jul 2013 14:53:26 +0800 Subject: bonding: use pre-defined macro in bond_mode_name instead of magic number 0 We have BOND_MODE_ROUNDROBIN pre-defined as 0, and it's the lowest mode number. Use it to check the arg lower bound instead of magic number 0 in bond_mode_name. Signed-off-by: Wang Sheng-Hui Signed-off-by: David S. Miller diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 414d07e..fc10c12 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -273,7 +273,7 @@ const char *bond_mode_name(int mode) [BOND_MODE_ALB] = "adaptive load balancing", }; - if (mode < 0 || mode > BOND_MODE_ALB) + if (mode < BOND_MODE_ROUNDROBIN || mode > BOND_MODE_ALB) return "unknown"; return names[mode]; -- cgit v0.10.2 From 6b41f941d7cdfb88b0222f5532af5032e4008bb6 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Mon, 22 Jul 2013 19:17:55 -0700 Subject: mwifiex: handle driver initialization error paths mwifiex_fw_dpc() asynchronously takes care of firmware download and initialization. Currently the error paths in mwifiex_fw_dpc() are not handled. So if wrong firmware is downloaded, required cleanup work is not performed. memory is leaked and workqueue remains unterminated in this case. mwifiex_terminate_workqueue() is moved to avoid forward declaration. Signed-off-by: Amitkumar Karwar Signed-off-by: Bing Zhao Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index e64c369..5644c7f 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c @@ -390,6 +390,17 @@ static void mwifiex_free_adapter(struct mwifiex_adapter *adapter) } /* + * This function cancels all works in the queue and destroys + * the main workqueue. + */ +static void mwifiex_terminate_workqueue(struct mwifiex_adapter *adapter) +{ + flush_workqueue(adapter->workqueue); + destroy_workqueue(adapter->workqueue); + adapter->workqueue = NULL; +} + +/* * This function gets firmware and initializes it. 
* * The main initialization steps followed are - @@ -398,7 +409,7 @@ static void mwifiex_free_adapter(struct mwifiex_adapter *adapter) */ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) { - int ret; + int ret, i; char fmt[64]; struct mwifiex_private *priv; struct mwifiex_adapter *adapter = context; @@ -407,7 +418,7 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) if (!firmware) { dev_err(adapter->dev, "Failed to get firmware %s\n", adapter->fw_name); - goto done; + goto err_dnld_fw; } memset(&fw, 0, sizeof(struct mwifiex_fw_image)); @@ -420,7 +431,7 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) else ret = mwifiex_dnld_fw(adapter, &fw); if (ret == -1) - goto done; + goto err_dnld_fw; dev_notice(adapter->dev, "WLAN FW is active\n"); @@ -432,13 +443,15 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) } /* enable host interrupt after fw dnld is successful */ - if (adapter->if_ops.enable_int) - adapter->if_ops.enable_int(adapter); + if (adapter->if_ops.enable_int) { + if (adapter->if_ops.enable_int(adapter)) + goto err_dnld_fw; + } adapter->init_wait_q_woken = false; ret = mwifiex_init_fw(adapter); if (ret == -1) { - goto done; + goto err_init_fw; } else if (!ret) { adapter->hw_status = MWIFIEX_HW_STATUS_READY; goto done; @@ -447,12 +460,12 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) wait_event_interruptible(adapter->init_wait_q, adapter->init_wait_q_woken); if (adapter->hw_status != MWIFIEX_HW_STATUS_READY) - goto done; + goto err_init_fw; priv = adapter->priv[MWIFIEX_BSS_ROLE_STA]; if (mwifiex_register_cfg80211(adapter)) { dev_err(adapter->dev, "cannot register with cfg80211\n"); - goto err_init_fw; + goto err_register_cfg80211; } rtnl_lock(); @@ -483,13 +496,39 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) goto done; err_add_intf: - mwifiex_del_virtual_intf(adapter->wiphy, priv->wdev); + for (i = 0; i < adapter->priv_num; i++) { + priv = adapter->priv[i]; + + if (!priv) + continue; + + if (priv->wdev && priv->netdev) + mwifiex_del_virtual_intf(adapter->wiphy, priv->wdev); + } rtnl_unlock(); +err_register_cfg80211: + wiphy_unregister(adapter->wiphy); + wiphy_free(adapter->wiphy); err_init_fw: if (adapter->if_ops.disable_int) adapter->if_ops.disable_int(adapter); +err_dnld_fw: pr_debug("info: %s: unregister device\n", __func__); - adapter->if_ops.unregister_dev(adapter); + if (adapter->if_ops.unregister_dev) + adapter->if_ops.unregister_dev(adapter); + + if ((adapter->hw_status == MWIFIEX_HW_STATUS_FW_READY) || + (adapter->hw_status == MWIFIEX_HW_STATUS_READY)) { + pr_debug("info: %s: shutdown mwifiex\n", __func__); + adapter->init_wait_q_woken = false; + + if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS) + wait_event_interruptible(adapter->init_wait_q, + adapter->init_wait_q_woken); + } + adapter->surprise_removed = true; + mwifiex_terminate_workqueue(adapter); + mwifiex_free_adapter(adapter); done: if (adapter->cal_data) { release_firmware(adapter->cal_data); @@ -497,6 +536,7 @@ done: } release_firmware(adapter->firmware); complete(&adapter->fw_load); + up(adapter->card_sem); return; } @@ -807,18 +847,6 @@ static void mwifiex_main_work_queue(struct work_struct *work) } /* - * This function cancels all works in the queue and destroys - * the main workqueue. 
- */ -static void -mwifiex_terminate_workqueue(struct mwifiex_adapter *adapter) -{ - flush_workqueue(adapter->workqueue); - destroy_workqueue(adapter->workqueue); - adapter->workqueue = NULL; -} - -/* * This function adds the card. * * This function follows the following major steps to set up the device - @@ -846,6 +874,7 @@ mwifiex_add_card(void *card, struct semaphore *sem, } adapter->iface_type = iface_type; + adapter->card_sem = sem; adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING; adapter->surprise_removed = false; @@ -876,17 +905,12 @@ mwifiex_add_card(void *card, struct semaphore *sem, goto err_init_fw; } - up(sem); return 0; err_init_fw: pr_debug("info: %s: unregister device\n", __func__); if (adapter->if_ops.unregister_dev) adapter->if_ops.unregister_dev(adapter); -err_registerdev: - adapter->surprise_removed = true; - mwifiex_terminate_workqueue(adapter); -err_kmalloc: if ((adapter->hw_status == MWIFIEX_HW_STATUS_FW_READY) || (adapter->hw_status == MWIFIEX_HW_STATUS_READY)) { pr_debug("info: %s: shutdown mwifiex\n", __func__); @@ -896,7 +920,10 @@ err_kmalloc: wait_event_interruptible(adapter->init_wait_q, adapter->init_wait_q_woken); } - +err_registerdev: + adapter->surprise_removed = true; + mwifiex_terminate_workqueue(adapter); +err_kmalloc: mwifiex_free_adapter(adapter); err_init_sw: diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index bb28d3d..9ee3b1b 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h @@ -749,6 +749,7 @@ struct mwifiex_adapter { atomic_t is_tx_received; atomic_t pending_bridged_pkts; + struct semaphore *card_sem; }; int mwifiex_init_lock_list(struct mwifiex_adapter *adapter); -- cgit v0.10.2 From 2cdf359a521bb72286b6714478dfdbcd2691f3fe Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Mon, 22 Jul 2013 19:17:56 -0700 Subject: mwifiex: code rearrangement in sdio.c Some function definitions are moved to appropriate place to avoid forward declarations. Signed-off-by: Amitkumar Karwar Signed-off-by: Bing Zhao Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c index c32a735..1eb5efa1 100644 --- a/drivers/net/wireless/mwifiex/sdio.c +++ b/drivers/net/wireless/mwifiex/sdio.c @@ -50,9 +50,6 @@ static struct mwifiex_if_ops sdio_ops; static struct semaphore add_remove_card_sem; -static int mwifiex_sdio_resume(struct device *dev); -static void mwifiex_sdio_interrupt(struct sdio_func *func); - /* * SDIO probe. * @@ -113,6 +110,51 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) } /* + * SDIO resume. + * + * Kernel needs to suspend all functions separately. Therefore all + * registered functions must have drivers with suspend and resume + * methods. Failing that the kernel simply removes the whole card. + * + * If already not resumed, this function turns on the traffic and + * sends a host sleep cancel request to the firmware. 
+ */ +static int mwifiex_sdio_resume(struct device *dev) +{ + struct sdio_func *func = dev_to_sdio_func(dev); + struct sdio_mmc_card *card; + struct mwifiex_adapter *adapter; + mmc_pm_flag_t pm_flag = 0; + + if (func) { + pm_flag = sdio_get_host_pm_caps(func); + card = sdio_get_drvdata(func); + if (!card || !card->adapter) { + pr_err("resume: invalid card or adapter\n"); + return 0; + } + } else { + pr_err("resume: sdio_func is not specified\n"); + return 0; + } + + adapter = card->adapter; + + if (!adapter->is_suspended) { + dev_warn(adapter->dev, "device already resumed\n"); + return 0; + } + + adapter->is_suspended = false; + + /* Disable Host Sleep */ + mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA), + MWIFIEX_ASYNC_CMD); + + return 0; +} + +/* * SDIO remove. * * This function removes the interface and frees up the card structure. @@ -212,51 +254,6 @@ static int mwifiex_sdio_suspend(struct device *dev) return ret; } -/* - * SDIO resume. - * - * Kernel needs to suspend all functions separately. Therefore all - * registered functions must have drivers with suspend and resume - * methods. Failing that the kernel simply removes the whole card. - * - * If already not resumed, this function turns on the traffic and - * sends a host sleep cancel request to the firmware. - */ -static int mwifiex_sdio_resume(struct device *dev) -{ - struct sdio_func *func = dev_to_sdio_func(dev); - struct sdio_mmc_card *card; - struct mwifiex_adapter *adapter; - mmc_pm_flag_t pm_flag = 0; - - if (func) { - pm_flag = sdio_get_host_pm_caps(func); - card = sdio_get_drvdata(func); - if (!card || !card->adapter) { - pr_err("resume: invalid card or adapter\n"); - return 0; - } - } else { - pr_err("resume: sdio_func is not specified\n"); - return 0; - } - - adapter = card->adapter; - - if (!adapter->is_suspended) { - dev_warn(adapter->dev, "device already resumed\n"); - return 0; - } - - adapter->is_suspended = false; - - /* Disable Host Sleep */ - mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA), - MWIFIEX_ASYNC_CMD); - - return 0; -} - /* Device ID for SD8786 */ #define SDIO_DEVICE_ID_MARVELL_8786 (0x9116) /* Device ID for SD8787 */ @@ -707,6 +704,65 @@ static void mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter) } /* + * This function reads the interrupt status from card. + */ +static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter) +{ + struct sdio_mmc_card *card = adapter->card; + u8 sdio_ireg; + unsigned long flags; + + if (mwifiex_read_data_sync(adapter, card->mp_regs, + card->reg->max_mp_regs, + REG_PORT | MWIFIEX_SDIO_BYTE_MODE_MASK, 0)) { + dev_err(adapter->dev, "read mp_regs failed\n"); + return; + } + + sdio_ireg = card->mp_regs[HOST_INTSTATUS_REG]; + if (sdio_ireg) { + /* + * DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS + * For SDIO new mode CMD port interrupts + * DN_LD_CMD_PORT_HOST_INT_STATUS and/or + * UP_LD_CMD_PORT_HOST_INT_STATUS + * Clear the interrupt status register + */ + dev_dbg(adapter->dev, "int: sdio_ireg = %#x\n", sdio_ireg); + spin_lock_irqsave(&adapter->int_lock, flags); + adapter->int_status |= sdio_ireg; + spin_unlock_irqrestore(&adapter->int_lock, flags); + } +} + +/* + * SDIO interrupt handler. + * + * This function reads the interrupt status from firmware and handles + * the interrupt in current thread (ksdioirqd) right away. 
+ */ +static void +mwifiex_sdio_interrupt(struct sdio_func *func) +{ + struct mwifiex_adapter *adapter; + struct sdio_mmc_card *card; + + card = sdio_get_drvdata(func); + if (!card || !card->adapter) { + pr_debug("int: func=%p card=%p adapter=%p\n", + func, card, card ? card->adapter : NULL); + return; + } + adapter = card->adapter; + + if (!adapter->pps_uapsd_mode && adapter->ps_state == PS_STATE_SLEEP) + adapter->ps_state = PS_STATE_AWAKE; + + mwifiex_interrupt_status(adapter); + mwifiex_main_process(adapter); +} + +/* * This function enables the host interrupt. * * The host interrupt enable mask is written to the card @@ -963,65 +1019,6 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter, } /* - * This function reads the interrupt status from card. - */ -static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter) -{ - struct sdio_mmc_card *card = adapter->card; - u8 sdio_ireg; - unsigned long flags; - - if (mwifiex_read_data_sync(adapter, card->mp_regs, - card->reg->max_mp_regs, - REG_PORT | MWIFIEX_SDIO_BYTE_MODE_MASK, 0)) { - dev_err(adapter->dev, "read mp_regs failed\n"); - return; - } - - sdio_ireg = card->mp_regs[HOST_INTSTATUS_REG]; - if (sdio_ireg) { - /* - * DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS - * For SDIO new mode CMD port interrupts - * DN_LD_CMD_PORT_HOST_INT_STATUS and/or - * UP_LD_CMD_PORT_HOST_INT_STATUS - * Clear the interrupt status register - */ - dev_dbg(adapter->dev, "int: sdio_ireg = %#x\n", sdio_ireg); - spin_lock_irqsave(&adapter->int_lock, flags); - adapter->int_status |= sdio_ireg; - spin_unlock_irqrestore(&adapter->int_lock, flags); - } -} - -/* - * SDIO interrupt handler. - * - * This function reads the interrupt status from firmware and handles - * the interrupt in current thread (ksdioirqd) right away. - */ -static void -mwifiex_sdio_interrupt(struct sdio_func *func) -{ - struct mwifiex_adapter *adapter; - struct sdio_mmc_card *card; - - card = sdio_get_drvdata(func); - if (!card || !card->adapter) { - pr_debug("int: func=%p card=%p adapter=%p\n", - func, card, card ? card->adapter : NULL); - return; - } - adapter = card->adapter; - - if (!adapter->pps_uapsd_mode && adapter->ps_state == PS_STATE_SLEEP) - adapter->ps_state = PS_STATE_AWAKE; - - mwifiex_interrupt_status(adapter); - mwifiex_main_process(adapter); -} - -/* * This function decodes a received packet. * * Based on the type, the packet is treated as either a data, or -- cgit v0.10.2 From 93d8bf9fb8f39d6d3e461db60f883d9f81006159 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Wed, 24 Jul 2013 11:51:41 -0700 Subject: bridge: cleanup netpoll code This started out with fixing a sparse warning, then I realized that the wrapper function br_netpoll_info could just be collapsed away by rolling it into the enable code. Also, eliminate unnecessary goto's Signed-off-by: Stephen Hemminger Reviewed-by: Jiri Pirko Acked-by: Neil Horman Signed-off-by: David S. 
Miller diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 2ef6678..50d86b3 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -244,22 +244,22 @@ fail: int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp) { struct netpoll *np; - int err = 0; + int err; + + if (!p->br->dev->npinfo) + return 0; np = kzalloc(sizeof(*p->np), gfp); - err = -ENOMEM; if (!np) - goto out; + return -ENOMEM; err = __netpoll_setup(np, p->dev, gfp); if (err) { kfree(np); - goto out; + return err; } p->np = np; - -out: return err; } diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 5623be6..aa6c9a8 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -363,7 +363,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) if (err) goto err2; - if (br_netpoll_info(br) && ((err = br_netpoll_enable(p, GFP_KERNEL)))) + err = br_netpoll_enable(p, GFP_KERNEL); + if (err) goto err3; err = netdev_master_upper_dev_link(dev, br->dev); diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 3be89b3..43347f1 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -333,11 +333,6 @@ extern void br_dev_delete(struct net_device *dev, struct list_head *list); extern netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev); #ifdef CONFIG_NET_POLL_CONTROLLER -static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br) -{ - return br->dev->npinfo; -} - static inline void br_netpoll_send_skb(const struct net_bridge_port *p, struct sk_buff *skb) { @@ -350,11 +345,6 @@ static inline void br_netpoll_send_skb(const struct net_bridge_port *p, extern int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp); extern void br_netpoll_disable(struct net_bridge_port *p); #else -static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br) -{ - return NULL; -} - static inline void br_netpoll_send_skb(const struct net_bridge_port *p, struct sk_buff *skb) { -- cgit v0.10.2 From 0fb52a27a04ad88a68a89b1bed32892fb16fa100 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Wed, 24 Jul 2013 11:52:44 -0700 Subject: team: cleanup netpoll code This started out with fixing a sparse warning, then I realized that the wrapper function team_netpoll_info could just be collapsed away by rolling it into the enable code. Signed-off-by: Stephen Hemminger Acked-by: Jiri Pirko Signed-off-by: David S.
Miller diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 75159e4..9ccccd4 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1037,6 +1037,9 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port, struct netpoll *np; int err; + if (!team->dev->npinfo) + return 0; + np = kzalloc(sizeof(*np), gfp); if (!np) return -ENOMEM; @@ -1063,12 +1066,6 @@ static void team_port_disable_netpoll(struct team_port *port) __netpoll_cleanup(np); kfree(np); } - -static struct netpoll_info *team_netpoll_info(struct team *team) -{ - return team->dev->npinfo; -} - #else static int team_port_enable_netpoll(struct team *team, struct team_port *port, gfp_t gfp) @@ -1078,10 +1075,6 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port, static void team_port_disable_netpoll(struct team_port *port) { } -static struct netpoll_info *team_netpoll_info(struct team *team) -{ - return NULL; -} #endif static void __team_port_change_port_added(struct team_port *port, bool linkup); @@ -1163,13 +1156,11 @@ static int team_port_add(struct team *team, struct net_device *port_dev) goto err_vids_add; } - if (team_netpoll_info(team)) { - err = team_port_enable_netpoll(team, port, GFP_KERNEL); - if (err) { - netdev_err(dev, "Failed to enable netpoll on device %s\n", - portname); - goto err_enable_netpoll; - } + err = team_port_enable_netpoll(team, port, GFP_KERNEL); + if (err) { + netdev_err(dev, "Failed to enable netpoll on device %s\n", + portname); + goto err_enable_netpoll; } err = netdev_master_upper_dev_link(port_dev, dev); -- cgit v0.10.2 From 10eccb46b521359fa344f63459087df722f0776d Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Wed, 24 Jul 2013 11:53:57 -0700 Subject: bond: cleanup netpoll code This started out with fixing a sparse warning, then I realized that the wrapper function bond_netpoll_info could just be removed by rolling it into the enable code. Signed-off-by: Stephen Hemminger Reviewed-by: Jiri Pirko Acked-by: Neil Horman Signed-off-by: David S. Miller diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index fc10c12..f584352 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1245,12 +1245,6 @@ static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, g } return err; } - -static struct netpoll_info *bond_netpoll_info(struct bonding *bond) -{ - return bond->dev->npinfo; -} - #else static inline int slave_enable_netpoll(struct slave *slave) { @@ -1795,7 +1789,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) bond_set_carrier(bond); #ifdef CONFIG_NET_POLL_CONTROLLER - slave_dev->npinfo = bond_netpoll_info(bond); + slave_dev->npinfo = bond->dev->npinfo; if (slave_dev->npinfo) { if (slave_enable_netpoll(new_slave)) { read_unlock(&bond->lock); -- cgit v0.10.2 From e7428e95a06fb516fac1308bd0e176e27c0b9287 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Thu, 25 Jul 2013 10:20:23 +0930 Subject: virtio-net: put virtio net header inline with data For small packets we can simplify xmit processing by linearizing buffers with the header: most packets seem to have enough head room we can use for this purpose. Since existing hypervisors require that header is the first s/g element, we need a feature bit for this. Signed-off-by: Michael S. Tsirkin Signed-off-by: Rusty Russell Signed-off-by: David S. 
Miller diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 3d2a90a..f216002 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -106,6 +106,9 @@ struct virtnet_info { /* Has control virtqueue */ bool has_cvq; + /* Host can handle any s/g split between our header and packet data */ + bool any_header_sg; + /* enable config space updates */ bool config_enable; @@ -669,12 +672,28 @@ static void free_old_xmit_skbs(struct send_queue *sq) static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) { - struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb); + struct skb_vnet_hdr *hdr; const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; struct virtnet_info *vi = sq->vq->vdev->priv; unsigned num_sg; + unsigned hdr_len; + bool can_push; pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); + if (vi->mergeable_rx_bufs) + hdr_len = sizeof hdr->mhdr; + else + hdr_len = sizeof hdr->hdr; + + can_push = vi->any_header_sg && + !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && + !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len; + /* Even if we can, don't push here yet as this would skew + * csum_start offset below. */ + if (can_push) + hdr = (struct skb_vnet_hdr *)(skb->data - hdr_len); + else + hdr = skb_vnet_hdr(skb); if (skb->ip_summed == CHECKSUM_PARTIAL) { hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; @@ -703,15 +722,18 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) hdr->hdr.gso_size = hdr->hdr.hdr_len = 0; } - hdr->mhdr.num_buffers = 0; - - /* Encode metadata header at front. */ if (vi->mergeable_rx_bufs) - sg_set_buf(sq->sg, &hdr->mhdr, sizeof hdr->mhdr); - else - sg_set_buf(sq->sg, &hdr->hdr, sizeof hdr->hdr); + hdr->mhdr.num_buffers = 0; - num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1; + if (can_push) { + __skb_push(skb, hdr_len); + num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); + /* Pull header back to avoid skew in tx bytes calculations. */ + __skb_pull(skb, hdr_len); + } else { + sg_set_buf(sq->sg, hdr, hdr_len); + num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1; + } return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); } @@ -1552,6 +1574,9 @@ static int virtnet_probe(struct virtio_device *vdev) if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) vi->mergeable_rx_bufs = true; + if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) + vi->any_header_sg = true; + if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) vi->has_cvq = true; @@ -1727,6 +1752,7 @@ static unsigned int features[] = { VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, VIRTIO_NET_F_CTRL_MAC_ADDR, + VIRTIO_F_ANY_LAYOUT, }; static struct virtio_driver virtio_net_driver = { diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h index c520203..227d4ce 100644 --- a/include/uapi/linux/virtio_net.h +++ b/include/uapi/linux/virtio_net.h @@ -70,7 +70,9 @@ struct virtio_net_config { __u16 max_virtqueue_pairs; } __attribute__((packed)); -/* This is the first element of the scatter-gather list. If you don't +/* This header comes first in the scatter-gather list. + * If VIRTIO_F_ANY_LAYOUT is not negotiated, it must + * be the first element of the scatter-gather list. If you don't * specify GSO or CSUM features, you can simply ignore the header. 
*/ struct virtio_net_hdr { #define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 // Use csum_start, csum_offset -- cgit v0.10.2 From 024ec3deac33ddbd81f3c887506f132b24ea21a7 Mon Sep 17 00:00:00 2001 From: Joe Stringer Date: Thu, 25 Jul 2013 10:52:05 +0900 Subject: net/sctp: Refactor SCTP skb checksum computation This patch consolidates the SCTP checksum calculation code from various places to a single new function, sctp_compute_cksum(skb, offset). Signed-off-by: Joe Stringer Reviewed-by: Julian Anastasov Acked-by: Simon Horman Signed-off-by: David S. Miller diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h index 78b88aa..483e630 100644 --- a/include/net/sctp/checksum.h +++ b/include/net/sctp/checksum.h @@ -85,4 +85,19 @@ static inline __le32 sctp_end_cksum(__u32 crc32) return cpu_to_le32(~crc32); } +/* Calculate the CRC32C checksum of an SCTP packet. */ +static inline __le32 sctp_compute_cksum(const struct sk_buff *skb, + unsigned int offset) +{ + const struct sk_buff *iter; + + __u32 crc32 = sctp_start_cksum(skb->data + offset, + skb_headlen(skb) - offset); + skb_walk_frags(skb, iter) + crc32 = sctp_update_cksum((__u8 *) iter->data, + skb_headlen(iter), crc32); + + return sctp_end_cksum(crc32); +} + #endif /* __sctp_checksum_h__ */ diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c index 3c0da87..23e596e 100644 --- a/net/netfilter/ipvs/ip_vs_proto_sctp.c +++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c @@ -66,15 +66,7 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, static void sctp_nat_csum(struct sk_buff *skb, sctp_sctphdr_t *sctph, unsigned int sctphoff) { - __u32 crc32; - struct sk_buff *iter; - - crc32 = sctp_start_cksum((__u8 *)sctph, skb_headlen(skb) - sctphoff); - skb_walk_frags(skb, iter) - crc32 = sctp_update_cksum((u8 *) iter->data, - skb_headlen(iter), crc32); - sctph->checksum = sctp_end_cksum(crc32); - + sctph->checksum = sctp_compute_cksum(skb, sctphoff); skb->ip_summed = CHECKSUM_UNNECESSARY; } @@ -151,10 +143,7 @@ sctp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp) { unsigned int sctphoff; struct sctphdr *sh, _sctph; - struct sk_buff *iter; - __le32 cmp; - __le32 val; - __u32 tmp; + __le32 cmp, val; #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) @@ -168,13 +157,7 @@ sctp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp) return 0; cmp = sh->checksum; - - tmp = sctp_start_cksum((__u8 *) sh, skb_headlen(skb)); - skb_walk_frags(skb, iter) - tmp = sctp_update_cksum((__u8 *) iter->data, - skb_headlen(iter), tmp); - - val = sctp_end_cksum(tmp); + val = sctp_compute_cksum(skb, sctphoff); if (val != cmp) { /* CRC failure, dump it. 
*/ diff --git a/net/netfilter/nf_nat_proto_sctp.c b/net/netfilter/nf_nat_proto_sctp.c index 396e55d..754536f 100644 --- a/net/netfilter/nf_nat_proto_sctp.c +++ b/net/netfilter/nf_nat_proto_sctp.c @@ -34,9 +34,7 @@ sctp_manip_pkt(struct sk_buff *skb, const struct nf_conntrack_tuple *tuple, enum nf_nat_manip_type maniptype) { - struct sk_buff *frag; sctp_sctphdr_t *hdr; - __u32 crc32; if (!skb_make_writable(skb, hdroff + sizeof(*hdr))) return false; @@ -51,11 +49,7 @@ sctp_manip_pkt(struct sk_buff *skb, hdr->dest = tuple->dst.u.sctp.port; } - crc32 = sctp_start_cksum((u8 *)hdr, skb_headlen(skb) - hdroff); - skb_walk_frags(skb, frag) - crc32 = sctp_update_cksum((u8 *)frag->data, skb_headlen(frag), - crc32); - hdr->checksum = sctp_end_cksum(crc32); + hdr->checksum = sctp_compute_cksum(skb, hdroff); return true; } diff --git a/net/sctp/input.c b/net/sctp/input.c index 7993495..fa91aff 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -87,15 +87,7 @@ static inline int sctp_rcv_checksum(struct net *net, struct sk_buff *skb) { struct sctphdr *sh = sctp_hdr(skb); __le32 cmp = sh->checksum; - struct sk_buff *list; - __le32 val; - __u32 tmp = sctp_start_cksum((__u8 *)sh, skb_headlen(skb)); - - skb_walk_frags(skb, list) - tmp = sctp_update_cksum((__u8 *)list->data, skb_headlen(list), - tmp); - - val = sctp_end_cksum(tmp); + __le32 val = sctp_compute_cksum(skb, 0); if (val != cmp) { /* CRC failure, dump it. */ -- cgit v0.10.2 From 6680ec68eff47d36f67b4351bc9836fd6cba9532 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Thu, 25 Jul 2013 13:00:33 +0800 Subject: tuntap: hardware vlan tx support Inspired by commit f09e2249c4f5c7c13261ec73f5a7807076af0c8e (macvtap: restore vlan header on user read). This patch adds hardware vlan tx support for tuntap. This is done by copying vlan header directly into userspace in tun_put_user() instead of doing it through __vlan_put_tag() in dev_hard_start_xmit(). This eliminates one unnecessary memmove() in vlan_insert_tag() for 802.1ad and 802.1q traffic. pktgen test shows about 20% improvement for 802.1q traffic: Before: 662149pps 317Mb/sec (317831520bps) errors: 0 After: 801033pps 384Mb/sec (384495840bps) errors: 0 Cc: Basil Gor Cc: Michael S. Tsirkin Signed-off-by: Jason Wang Signed-off-by: David S. 
Miller diff --git a/drivers/net/tun.c b/drivers/net/tun.c index a72d141..5dce262 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -60,6 +60,7 @@ #include #include #include +#include #include #include #include @@ -1265,6 +1266,7 @@ static ssize_t tun_put_user(struct tun_struct *tun, { struct tun_pi pi = { 0, skb->protocol }; ssize_t total = 0; + int vlan_offset = 0; if (!(tun->flags & TUN_NO_PI)) { if ((len -= sizeof(pi)) < 0) @@ -1328,11 +1330,40 @@ static ssize_t tun_put_user(struct tun_struct *tun, total += tun->vnet_hdr_sz; } - len = min_t(int, skb->len, len); + if (!vlan_tx_tag_present(skb)) { + len = min_t(int, skb->len, len); + } else { + int copy, ret; + struct { + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + } veth; + + veth.h_vlan_proto = skb->vlan_proto; + veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb)); + + vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); + len = min_t(int, skb->len + VLAN_HLEN, len); + + copy = min_t(int, vlan_offset, len); + ret = skb_copy_datagram_const_iovec(skb, 0, iv, total, copy); + len -= copy; + total += copy; + if (ret || !len) + goto done; + + copy = min_t(int, sizeof(veth), len); + ret = memcpy_toiovecend(iv, (void *)&veth, total, copy); + len -= copy; + total += copy; + if (ret || !len) + goto done; + } - skb_copy_datagram_const_iovec(skb, 0, iv, total, len); - total += skb->len; + skb_copy_datagram_const_iovec(skb, vlan_offset, iv, total, len); + total += len; +done: tun->dev->stats.tx_packets++; tun->dev->stats.tx_bytes += len; @@ -1691,7 +1722,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) tun_flow_init(tun); dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | - TUN_USER_FEATURES; + TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX; dev->features = dev->hw_features; dev->vlan_features = dev->features; -- cgit v0.10.2 From a88c32ae15f25fcf0a3c9fadd92f840a1abf0e43 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Thu, 25 Jul 2013 13:47:53 +0800 Subject: USBNET: centralize computing of max rx/tx qlen This patch centralizes computing of max rx/tx qlen, because: - RX_QLEN()/TX_QLEN() is called in the hot path - computing depends on the device's USB speed, now we have ls/fs, hs, ss, so more checks need to be involved - in fact, max rx/tx qlen should not only depend on device USB speed, but also depend on ethernet link speed, so we need to consider that in future. - if SG support is done, max tx qlen may need to change too Generally, hard_mtu and rx_urb_size are changed in the bind(), reset() and link_reset() callbacks, and in the change-MTU network operation, so this patch introduces the API usbnet_update_max_qlen() and calls it in the above paths. Signed-off-by: Ming Lei Signed-off-by: David S.
Miller diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index ad5d1e4..b96ad4f 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -778,6 +778,9 @@ static int ax88178_change_mtu(struct net_device *net, int new_mtu) dev->hard_mtu = net->mtu + net->hard_header_len; ax88178_set_mfb(dev); + /* max qlen depend on hard_mtu and rx_urb_size */ + usbnet_update_max_qlen(dev); + return 0; } diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 1e3c302..6585404 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -688,6 +688,9 @@ static int ax88179_change_mtu(struct net_device *net, int new_mtu) 2, 2, &tmp16); } + /* max qlen depend on hard_mtu and rx_urb_size */ + usbnet_update_max_qlen(dev); + return 0; } diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 06ee82f..d74de07 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -59,15 +59,13 @@ * For high speed, each frame comfortably fits almost 36 max size * Ethernet packets (so queues should be bigger). * - * REVISIT qlens should be members of 'struct usbnet'; the goal is to - * let the USB host controller be busy for 5msec or more before an irq - * is required, under load. Jumbograms change the equation. + * The goal is to let the USB host controller be busy for 5msec or + * more before an irq is required, under load. Jumbograms change + * the equation. */ -#define RX_MAX_QUEUE_MEMORY (60 * 1518) -#define RX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \ - (RX_MAX_QUEUE_MEMORY/(dev)->rx_urb_size) : 4) -#define TX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \ - (RX_MAX_QUEUE_MEMORY/(dev)->hard_mtu) : 4) +#define MAX_QUEUE_MEMORY (60 * 1518) +#define RX_QLEN(dev) ((dev)->rx_qlen) +#define TX_QLEN(dev) ((dev)->tx_qlen) // reawaken network queue this soon after stopping; else watchdog barks #define TX_TIMEOUT_JIFFIES (5*HZ) @@ -347,6 +345,22 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb) } EXPORT_SYMBOL_GPL(usbnet_skb_return); +/* must be called if hard_mtu or rx_urb_size changed */ +void usbnet_update_max_qlen(struct usbnet *dev) +{ + enum usb_device_speed speed = dev->udev->speed; + + switch (speed) { + case USB_SPEED_HIGH: + dev->rx_qlen = MAX_QUEUE_MEMORY / dev->rx_urb_size; + dev->tx_qlen = MAX_QUEUE_MEMORY / dev->hard_mtu; + break; + default: + dev->rx_qlen = dev->tx_qlen = 4; + } +} +EXPORT_SYMBOL_GPL(usbnet_update_max_qlen); + /*------------------------------------------------------------------------- * @@ -375,6 +389,9 @@ int usbnet_change_mtu (struct net_device *net, int new_mtu) usbnet_unlink_rx_urbs(dev); } + /* max qlen depend on hard_mtu and rx_urb_size */ + usbnet_update_max_qlen(dev); + return 0; } EXPORT_SYMBOL_GPL(usbnet_change_mtu); @@ -843,6 +860,9 @@ int usbnet_open (struct net_device *net) goto done; } + /* hard_mtu or rx_urb_size may change in reset() */ + usbnet_update_max_qlen(dev); + // insist peer be connected if (info->check_connect && (retval = info->check_connect (dev)) < 0) { netif_dbg(dev, ifup, dev->net, "can't open; %d\n", retval); @@ -927,6 +947,9 @@ int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd) if (dev->driver_info->link_reset) dev->driver_info->link_reset(dev); + /* hard_mtu or rx_urb_size may change in link_reset() */ + usbnet_update_max_qlen(dev); + return retval; } @@ -1020,6 +1043,9 @@ static void __handle_link_change(struct usbnet *dev) tasklet_schedule(&dev->bh); } + /* hard_mtu or 
rx_urb_size may change during link change */ + usbnet_update_max_qlen(dev); + clear_bit(EVENT_LINK_CHANGE, &dev->flags); } @@ -1599,6 +1625,9 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) if ((dev->driver_info->flags & FLAG_WWAN) != 0) SET_NETDEV_DEVTYPE(net, &wwan_type); + /* initialize max rx_qlen and tx_qlen */ + usbnet_update_max_qlen(dev); + status = register_netdev (net); if (status) goto out4; diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index f18d641..8fbc008 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -34,6 +34,7 @@ struct usbnet { struct mutex phy_mutex; unsigned char suspend_count; unsigned char pkt_cnt, pkt_err; + unsigned short rx_qlen, tx_qlen; /* i/o info: pipes etc */ unsigned in, out; @@ -253,4 +254,6 @@ extern void usbnet_link_change(struct usbnet *, bool, bool); extern int usbnet_status_start(struct usbnet *dev, gfp_t mem_flags); extern void usbnet_status_stop(struct usbnet *dev); +extern void usbnet_update_max_qlen(struct usbnet *dev); + #endif /* __LINUX_USB_USBNET_H */ -- cgit v0.10.2 From 452c447a497dce3c9faeb9ac7f2e1ff39232876b Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Thu, 25 Jul 2013 13:47:54 +0800 Subject: USBNET: increase max rx/tx qlen for improving USB3 throughput The default RX_QLEN()/TX_QLEN() didn't consider super speed USB devices, so only max 4 URBs are scheduled at the same time for tx/rx, then a USB3 NIC can't perform very well. With this patch, both rx and tx throughput are increased more than 100Mbps when doing iperf test on ax88179_178a USB 3.0 NIC. Signed-off-by: Ming Lei Signed-off-by: David S. Miller diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index d74de07..e4811d7 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -355,6 +355,15 @@ void usbnet_update_max_qlen(struct usbnet *dev) dev->rx_qlen = MAX_QUEUE_MEMORY / dev->rx_urb_size; dev->tx_qlen = MAX_QUEUE_MEMORY / dev->hard_mtu; break; + case USB_SPEED_SUPER: + /* + * Not take default 5ms qlen for super speed HC to + * save memory, and iperf tests show 2.5ms qlen can + * work well + */ + dev->rx_qlen = 5 * MAX_QUEUE_MEMORY / dev->rx_urb_size; + dev->tx_qlen = 5 * MAX_QUEUE_MEMORY / dev->hard_mtu; + break; default: dev->rx_qlen = dev->tx_qlen = 4; } -- cgit v0.10.2 From 82a54d0ebbee03a8dcf4e1e4016a53fed4d6c933 Mon Sep 17 00:00:00 2001 From: Asias He Date: Thu, 25 Jul 2013 17:39:34 +0800 Subject: VSOCK: Move af_vsock.h and vsock_addr.h to include/net This is useful for other VSOCK transports implemented outside the net/vmw_vsock/ directory to use these headers. Signed-off-by: Asias He Acked-by: Andy King Signed-off-by: David S. Miller diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h new file mode 100644 index 0000000..7d64d36 --- /dev/null +++ b/include/net/af_vsock.h @@ -0,0 +1,175 @@ +/* + * VMware vSockets Driver + * + * Copyright (C) 2007-2013 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details.
+ */ + +#ifndef __AF_VSOCK_H__ +#define __AF_VSOCK_H__ + +#include +#include +#include + +#include "vsock_addr.h" + +#define LAST_RESERVED_PORT 1023 + +#define vsock_sk(__sk) ((struct vsock_sock *)__sk) +#define sk_vsock(__vsk) (&(__vsk)->sk) + +struct vsock_sock { + /* sk must be the first member. */ + struct sock sk; + struct sockaddr_vm local_addr; + struct sockaddr_vm remote_addr; + /* Links for the global tables of bound and connected sockets. */ + struct list_head bound_table; + struct list_head connected_table; + /* Accessed without the socket lock held. This means it can never be + * modified outsided of socket create or destruct. + */ + bool trusted; + bool cached_peer_allow_dgram; /* Dgram communication allowed to + * cached peer? + */ + u32 cached_peer; /* Context ID of last dgram destination check. */ + const struct cred *owner; + /* Rest are SOCK_STREAM only. */ + long connect_timeout; + /* Listening socket that this came from. */ + struct sock *listener; + /* Used for pending list and accept queue during connection handshake. + * The listening socket is the head for both lists. Sockets created + * for connection requests are placed in the pending list until they + * are connected, at which point they are put in the accept queue list + * so they can be accepted in accept(). If accept() cannot accept the + * connection, it is marked as rejected so the cleanup function knows + * to clean up the socket. + */ + struct list_head pending_links; + struct list_head accept_queue; + bool rejected; + struct delayed_work dwork; + u32 peer_shutdown; + bool sent_request; + bool ignore_connecting_rst; + + /* Private to transport. */ + void *trans; +}; + +s64 vsock_stream_has_data(struct vsock_sock *vsk); +s64 vsock_stream_has_space(struct vsock_sock *vsk); +void vsock_pending_work(struct work_struct *work); +struct sock *__vsock_create(struct net *net, + struct socket *sock, + struct sock *parent, + gfp_t priority, unsigned short type); + +/**** TRANSPORT ****/ + +struct vsock_transport_recv_notify_data { + u64 data1; /* Transport-defined. */ + u64 data2; /* Transport-defined. */ + bool notify_on_block; +}; + +struct vsock_transport_send_notify_data { + u64 data1; /* Transport-defined. */ + u64 data2; /* Transport-defined. */ +}; + +struct vsock_transport { + /* Initialize/tear-down socket. */ + int (*init)(struct vsock_sock *, struct vsock_sock *); + void (*destruct)(struct vsock_sock *); + void (*release)(struct vsock_sock *); + + /* Connections. */ + int (*connect)(struct vsock_sock *); + + /* DGRAM. */ + int (*dgram_bind)(struct vsock_sock *, struct sockaddr_vm *); + int (*dgram_dequeue)(struct kiocb *kiocb, struct vsock_sock *vsk, + struct msghdr *msg, size_t len, int flags); + int (*dgram_enqueue)(struct vsock_sock *, struct sockaddr_vm *, + struct iovec *, size_t len); + bool (*dgram_allow)(u32 cid, u32 port); + + /* STREAM. */ + /* TODO: stream_bind() */ + ssize_t (*stream_dequeue)(struct vsock_sock *, struct iovec *, + size_t len, int flags); + ssize_t (*stream_enqueue)(struct vsock_sock *, struct iovec *, + size_t len); + s64 (*stream_has_data)(struct vsock_sock *); + s64 (*stream_has_space)(struct vsock_sock *); + u64 (*stream_rcvhiwat)(struct vsock_sock *); + bool (*stream_is_active)(struct vsock_sock *); + bool (*stream_allow)(u32 cid, u32 port); + + /* Notification. 
*/ + int (*notify_poll_in)(struct vsock_sock *, size_t, bool *); + int (*notify_poll_out)(struct vsock_sock *, size_t, bool *); + int (*notify_recv_init)(struct vsock_sock *, size_t, + struct vsock_transport_recv_notify_data *); + int (*notify_recv_pre_block)(struct vsock_sock *, size_t, + struct vsock_transport_recv_notify_data *); + int (*notify_recv_pre_dequeue)(struct vsock_sock *, size_t, + struct vsock_transport_recv_notify_data *); + int (*notify_recv_post_dequeue)(struct vsock_sock *, size_t, + ssize_t, bool, struct vsock_transport_recv_notify_data *); + int (*notify_send_init)(struct vsock_sock *, + struct vsock_transport_send_notify_data *); + int (*notify_send_pre_block)(struct vsock_sock *, + struct vsock_transport_send_notify_data *); + int (*notify_send_pre_enqueue)(struct vsock_sock *, + struct vsock_transport_send_notify_data *); + int (*notify_send_post_enqueue)(struct vsock_sock *, ssize_t, + struct vsock_transport_send_notify_data *); + + /* Shutdown. */ + int (*shutdown)(struct vsock_sock *, int); + + /* Buffer sizes. */ + void (*set_buffer_size)(struct vsock_sock *, u64); + void (*set_min_buffer_size)(struct vsock_sock *, u64); + void (*set_max_buffer_size)(struct vsock_sock *, u64); + u64 (*get_buffer_size)(struct vsock_sock *); + u64 (*get_min_buffer_size)(struct vsock_sock *); + u64 (*get_max_buffer_size)(struct vsock_sock *); + + /* Addressing. */ + u32 (*get_local_cid)(void); +}; + +/**** CORE ****/ + +int vsock_core_init(const struct vsock_transport *t); +void vsock_core_exit(void); + +/**** UTILS ****/ + +void vsock_release_pending(struct sock *pending); +void vsock_add_pending(struct sock *listener, struct sock *pending); +void vsock_remove_pending(struct sock *listener, struct sock *pending); +void vsock_enqueue_accept(struct sock *listener, struct sock *connected); +void vsock_insert_connected(struct vsock_sock *vsk); +void vsock_remove_bound(struct vsock_sock *vsk); +void vsock_remove_connected(struct vsock_sock *vsk); +struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr); +struct sock *vsock_find_connected_socket(struct sockaddr_vm *src, + struct sockaddr_vm *dst); +void vsock_for_each_connected_socket(void (*fn)(struct sock *sk)); + +#endif /* __AF_VSOCK_H__ */ diff --git a/include/net/vsock_addr.h b/include/net/vsock_addr.h new file mode 100644 index 0000000..9ccd531 --- /dev/null +++ b/include/net/vsock_addr.h @@ -0,0 +1,30 @@ +/* + * VMware vSockets Driver + * + * Copyright (C) 2007-2013 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef _VSOCK_ADDR_H_ +#define _VSOCK_ADDR_H_ + +#include + +void vsock_addr_init(struct sockaddr_vm *addr, u32 cid, u32 port); +int vsock_addr_validate(const struct sockaddr_vm *addr); +bool vsock_addr_bound(const struct sockaddr_vm *addr); +void vsock_addr_unbind(struct sockaddr_vm *addr); +bool vsock_addr_equals_addr(const struct sockaddr_vm *addr, + const struct sockaddr_vm *other); +int vsock_addr_cast(const struct sockaddr *addr, size_t len, + struct sockaddr_vm **out_addr); + +#endif diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 593071d..57896ee 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -96,8 +96,7 @@ #include #include #include - -#include "af_vsock.h" +#include static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr); static void vsock_sk_destruct(struct sock *sk); diff --git a/net/vmw_vsock/af_vsock.h b/net/vmw_vsock/af_vsock.h deleted file mode 100644 index 7d64d36..0000000 --- a/net/vmw_vsock/af_vsock.h +++ /dev/null @@ -1,175 +0,0 @@ -/* - * VMware vSockets Driver - * - * Copyright (C) 2007-2013 VMware, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation version 2 and no later version. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __AF_VSOCK_H__ -#define __AF_VSOCK_H__ - -#include -#include -#include - -#include "vsock_addr.h" - -#define LAST_RESERVED_PORT 1023 - -#define vsock_sk(__sk) ((struct vsock_sock *)__sk) -#define sk_vsock(__vsk) (&(__vsk)->sk) - -struct vsock_sock { - /* sk must be the first member. */ - struct sock sk; - struct sockaddr_vm local_addr; - struct sockaddr_vm remote_addr; - /* Links for the global tables of bound and connected sockets. */ - struct list_head bound_table; - struct list_head connected_table; - /* Accessed without the socket lock held. This means it can never be - * modified outsided of socket create or destruct. - */ - bool trusted; - bool cached_peer_allow_dgram; /* Dgram communication allowed to - * cached peer? - */ - u32 cached_peer; /* Context ID of last dgram destination check. */ - const struct cred *owner; - /* Rest are SOCK_STREAM only. */ - long connect_timeout; - /* Listening socket that this came from. */ - struct sock *listener; - /* Used for pending list and accept queue during connection handshake. - * The listening socket is the head for both lists. Sockets created - * for connection requests are placed in the pending list until they - * are connected, at which point they are put in the accept queue list - * so they can be accepted in accept(). If accept() cannot accept the - * connection, it is marked as rejected so the cleanup function knows - * to clean up the socket. - */ - struct list_head pending_links; - struct list_head accept_queue; - bool rejected; - struct delayed_work dwork; - u32 peer_shutdown; - bool sent_request; - bool ignore_connecting_rst; - - /* Private to transport. 
*/ - void *trans; -}; - -s64 vsock_stream_has_data(struct vsock_sock *vsk); -s64 vsock_stream_has_space(struct vsock_sock *vsk); -void vsock_pending_work(struct work_struct *work); -struct sock *__vsock_create(struct net *net, - struct socket *sock, - struct sock *parent, - gfp_t priority, unsigned short type); - -/**** TRANSPORT ****/ - -struct vsock_transport_recv_notify_data { - u64 data1; /* Transport-defined. */ - u64 data2; /* Transport-defined. */ - bool notify_on_block; -}; - -struct vsock_transport_send_notify_data { - u64 data1; /* Transport-defined. */ - u64 data2; /* Transport-defined. */ -}; - -struct vsock_transport { - /* Initialize/tear-down socket. */ - int (*init)(struct vsock_sock *, struct vsock_sock *); - void (*destruct)(struct vsock_sock *); - void (*release)(struct vsock_sock *); - - /* Connections. */ - int (*connect)(struct vsock_sock *); - - /* DGRAM. */ - int (*dgram_bind)(struct vsock_sock *, struct sockaddr_vm *); - int (*dgram_dequeue)(struct kiocb *kiocb, struct vsock_sock *vsk, - struct msghdr *msg, size_t len, int flags); - int (*dgram_enqueue)(struct vsock_sock *, struct sockaddr_vm *, - struct iovec *, size_t len); - bool (*dgram_allow)(u32 cid, u32 port); - - /* STREAM. */ - /* TODO: stream_bind() */ - ssize_t (*stream_dequeue)(struct vsock_sock *, struct iovec *, - size_t len, int flags); - ssize_t (*stream_enqueue)(struct vsock_sock *, struct iovec *, - size_t len); - s64 (*stream_has_data)(struct vsock_sock *); - s64 (*stream_has_space)(struct vsock_sock *); - u64 (*stream_rcvhiwat)(struct vsock_sock *); - bool (*stream_is_active)(struct vsock_sock *); - bool (*stream_allow)(u32 cid, u32 port); - - /* Notification. */ - int (*notify_poll_in)(struct vsock_sock *, size_t, bool *); - int (*notify_poll_out)(struct vsock_sock *, size_t, bool *); - int (*notify_recv_init)(struct vsock_sock *, size_t, - struct vsock_transport_recv_notify_data *); - int (*notify_recv_pre_block)(struct vsock_sock *, size_t, - struct vsock_transport_recv_notify_data *); - int (*notify_recv_pre_dequeue)(struct vsock_sock *, size_t, - struct vsock_transport_recv_notify_data *); - int (*notify_recv_post_dequeue)(struct vsock_sock *, size_t, - ssize_t, bool, struct vsock_transport_recv_notify_data *); - int (*notify_send_init)(struct vsock_sock *, - struct vsock_transport_send_notify_data *); - int (*notify_send_pre_block)(struct vsock_sock *, - struct vsock_transport_send_notify_data *); - int (*notify_send_pre_enqueue)(struct vsock_sock *, - struct vsock_transport_send_notify_data *); - int (*notify_send_post_enqueue)(struct vsock_sock *, ssize_t, - struct vsock_transport_send_notify_data *); - - /* Shutdown. */ - int (*shutdown)(struct vsock_sock *, int); - - /* Buffer sizes. */ - void (*set_buffer_size)(struct vsock_sock *, u64); - void (*set_min_buffer_size)(struct vsock_sock *, u64); - void (*set_max_buffer_size)(struct vsock_sock *, u64); - u64 (*get_buffer_size)(struct vsock_sock *); - u64 (*get_min_buffer_size)(struct vsock_sock *); - u64 (*get_max_buffer_size)(struct vsock_sock *); - - /* Addressing. 
*/ - u32 (*get_local_cid)(void); -}; - -/**** CORE ****/ - -int vsock_core_init(const struct vsock_transport *t); -void vsock_core_exit(void); - -/**** UTILS ****/ - -void vsock_release_pending(struct sock *pending); -void vsock_add_pending(struct sock *listener, struct sock *pending); -void vsock_remove_pending(struct sock *listener, struct sock *pending); -void vsock_enqueue_accept(struct sock *listener, struct sock *connected); -void vsock_insert_connected(struct vsock_sock *vsk); -void vsock_remove_bound(struct vsock_sock *vsk); -void vsock_remove_connected(struct vsock_sock *vsk); -struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr); -struct sock *vsock_find_connected_socket(struct sockaddr_vm *src, - struct sockaddr_vm *dst); -void vsock_for_each_connected_socket(void (*fn)(struct sock *sk)); - -#endif /* __AF_VSOCK_H__ */ diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c index ffc11df..9d69866 100644 --- a/net/vmw_vsock/vmci_transport.c +++ b/net/vmw_vsock/vmci_transport.c @@ -34,8 +34,8 @@ #include #include #include +#include -#include "af_vsock.h" #include "vmci_transport_notify.h" static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg); diff --git a/net/vmw_vsock/vmci_transport.h b/net/vmw_vsock/vmci_transport.h index fd88ea8..ce6c962 100644 --- a/net/vmw_vsock/vmci_transport.h +++ b/net/vmw_vsock/vmci_transport.h @@ -19,8 +19,8 @@ #include #include -#include "vsock_addr.h" -#include "af_vsock.h" +#include +#include /* If the packet format changes in a release then this should change too. */ #define VMCI_TRANSPORT_PACKET_VERSION 1 diff --git a/net/vmw_vsock/vsock_addr.c b/net/vmw_vsock/vsock_addr.c index ec2611b..82486ee 100644 --- a/net/vmw_vsock/vsock_addr.c +++ b/net/vmw_vsock/vsock_addr.c @@ -17,8 +17,7 @@ #include #include #include - -#include "vsock_addr.h" +#include void vsock_addr_init(struct sockaddr_vm *addr, u32 cid, u32 port) { diff --git a/net/vmw_vsock/vsock_addr.h b/net/vmw_vsock/vsock_addr.h deleted file mode 100644 index 9ccd531..0000000 --- a/net/vmw_vsock/vsock_addr.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * VMware vSockets Driver - * - * Copyright (C) 2007-2013 VMware, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation version 2 and no later version. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _VSOCK_ADDR_H_ -#define _VSOCK_ADDR_H_ - -#include - -void vsock_addr_init(struct sockaddr_vm *addr, u32 cid, u32 port); -int vsock_addr_validate(const struct sockaddr_vm *addr); -bool vsock_addr_bound(const struct sockaddr_vm *addr); -void vsock_addr_unbind(struct sockaddr_vm *addr); -bool vsock_addr_equals_addr(const struct sockaddr_vm *addr, - const struct sockaddr_vm *other); -int vsock_addr_cast(const struct sockaddr *addr, size_t len, - struct sockaddr_vm **out_addr); - -#endif -- cgit v0.10.2 From c26bf4a51308c85a6f97628253b99767a84ff90a Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Thu, 25 Jul 2013 18:12:18 +0200 Subject: pktgen: Add UDPCSUM flag to support UDP checksums UDP checksums are optional, hence pktgen has been omitting them in favour of performance. The optional flag UDPCSUM enables UDP checksumming. 
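Before the rest of the commit message and its diff, a brief refresher on what "UDP checksumming" means here: per RFC 768 the checksum is a 16-bit one's-complement sum over an IPv4 pseudo-header (source address, destination address, a zero byte, protocol 17 and the UDP length) followed by the UDP header and payload, and a computed value of zero is transmitted as 0xFFFF, since an on-the-wire zero means "no checksum" for UDP over IPv4. The sketch below is an editor's illustration in plain userspace C, not part of the patch; csum_add() and udp4_checksum() are names invented for this example, whereas the kernel code in the diff performs the same arithmetic with udp_csum(), csum_tcpudp_magic(), udp4_hwcsum() and the CSUM_MANGLED_0 constant.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <arpa/inet.h>

/* Accumulate a buffer into a 16-bit one's-complement sum (RFC 1071),
 * treating the data as big-endian 16-bit words. */
static uint32_t csum_add(uint32_t sum, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len > 1) {
		sum += ((uint32_t)p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len)			/* odd trailing byte is padded with zero */
		sum += (uint32_t)p[0] << 8;
	return sum;
}

/* Checksum for a UDP/IPv4 datagram.  'dgram' is the UDP header (with its
 * checksum field zeroed) followed by the payload, 'udp_len' its total length,
 * 'saddr'/'daddr' the IPv4 addresses in host byte order.  The result is
 * returned in network byte order, ready to be stored in the UDP header. */
static uint16_t udp4_checksum(uint32_t saddr, uint32_t daddr,
			      const void *dgram, uint16_t udp_len)
{
	uint8_t pseudo[12];
	uint32_t sum;
	uint16_t folded;

	/* IPv4 pseudo-header: src, dst, zero, protocol (17 = UDP), UDP length */
	pseudo[0] = saddr >> 24; pseudo[1] = (saddr >> 16) & 0xff;
	pseudo[2] = (saddr >> 8) & 0xff;  pseudo[3] = saddr & 0xff;
	pseudo[4] = daddr >> 24; pseudo[5] = (daddr >> 16) & 0xff;
	pseudo[6] = (daddr >> 8) & 0xff;  pseudo[7] = daddr & 0xff;
	pseudo[8] = 0;           pseudo[9] = 17;
	pseudo[10] = udp_len >> 8; pseudo[11] = udp_len & 0xff;

	sum = csum_add(0, pseudo, sizeof(pseudo));
	sum = csum_add(sum, dgram, udp_len);

	while (sum >> 16)		/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	folded = ~sum & 0xffff;

	if (folded == 0)		/* 0 means "no checksum"; send 0xFFFF instead */
		folded = 0xffff;
	return htons(folded);
}

int main(void)
{
	/* 8-byte UDP header (ports 12345 -> 53, length 12, checksum 0) + "test" */
	uint8_t dgram[12] = { 0x30, 0x39, 0x00, 0x35, 0x00, 0x0c, 0x00, 0x00,
			      't', 'e', 's', 't' };

	printf("udp checksum: 0x%04x\n",
	       ntohs(udp4_checksum(0xc0a80001, 0xc0a80002, dgram, sizeof(dgram))));
	return 0;
}

As with the other pktgen flags visible in the pktgen_if_write() hunk below, the new behaviour is presumably toggled by writing "flag UDPCSUM" (or "flag !UDPCSUM") to the per-device pktgen /proc interface.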
If the output device supports hardware checksumming the skb is prepared and marked CHECKSUM_PARTIAL, otherwise the checksum is generated in software. Signed-off-by: Thomas Graf Cc: Eric Dumazet Cc: Ben Greear Signed-off-by: David S. Miller diff --git a/include/net/udp.h b/include/net/udp.h index 74c10ec..ef2e0b7 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -183,6 +183,7 @@ extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len); extern int udp_push_pending_frames(struct sock *sk); extern void udp_flush_pending_frames(struct sock *sk); +extern void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst); extern int udp_rcv(struct sk_buff *skb); extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg); extern int udp_disconnect(struct sock *sk, int flags); diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 9640972..48cebf2 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -160,6 +160,7 @@ #include #include #include +#include #include #ifdef CONFIG_XFRM #include @@ -198,6 +199,7 @@ #define F_QUEUE_MAP_RND (1<<13) /* queue map Random */ #define F_QUEUE_MAP_CPU (1<<14) /* queue map mirrors smp_processor_id() */ #define F_NODE (1<<15) /* Node memory alloc*/ +#define F_UDPCSUM (1<<16) /* Include UDP checksum */ /* Thread control flag bits */ #define T_STOP (1<<0) /* Stop run */ @@ -631,6 +633,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v) if (pkt_dev->flags & F_UDPDST_RND) seq_printf(seq, "UDPDST_RND "); + if (pkt_dev->flags & F_UDPCSUM) + seq_printf(seq, "UDPCSUM "); + if (pkt_dev->flags & F_MPLS_RND) seq_printf(seq, "MPLS_RND "); @@ -1228,6 +1233,12 @@ static ssize_t pktgen_if_write(struct file *file, else if (strcmp(f, "!NODE_ALLOC") == 0) pkt_dev->flags &= ~F_NODE; + else if (strcmp(f, "UDPCSUM") == 0) + pkt_dev->flags |= F_UDPCSUM; + + else if (strcmp(f, "!UDPCSUM") == 0) + pkt_dev->flags &= ~F_UDPCSUM; + else { sprintf(pg_result, "Flag -:%s:- unknown\nAvailable flags, (prepend ! 
to un-set flag):\n%s", @@ -2733,7 +2744,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, udph->source = htons(pkt_dev->cur_udp_src); udph->dest = htons(pkt_dev->cur_udp_dst); udph->len = htons(datalen + 8); /* DATA + udphdr */ - udph->check = 0; /* No checksum */ + udph->check = 0; iph->ihl = 5; iph->version = 4; @@ -2752,6 +2763,24 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, skb->protocol = protocol; skb->dev = odev; skb->pkt_type = PACKET_HOST; + + if (!(pkt_dev->flags & F_UDPCSUM)) { + skb->ip_summed = CHECKSUM_NONE; + } else if (odev->features & NETIF_F_V4_CSUM) { + skb->ip_summed = CHECKSUM_PARTIAL; + skb->csum = 0; + udp4_hwcsum(skb, udph->source, udph->dest); + } else { + __wsum csum = udp_csum(skb); + + /* add protocol-dependent pseudo-header */ + udph->check = csum_tcpudp_magic(udph->source, udph->dest, + datalen + 8, IPPROTO_UDP, csum); + + if (udph->check == 0) + udph->check = CSUM_MANGLED_0; + } + pktgen_finalize_skb(pkt_dev, skb, datalen); #ifdef CONFIG_XFRM @@ -2768,7 +2797,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, struct sk_buff *skb = NULL; __u8 *eth; struct udphdr *udph; - int datalen; + int datalen, udplen; struct ipv6hdr *iph; __be16 protocol = htons(ETH_P_IPV6); __be32 *mpls; @@ -2844,10 +2873,11 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, net_info_ratelimited("increased datalen to %d\n", datalen); } + udplen = datalen + sizeof(struct udphdr); udph->source = htons(pkt_dev->cur_udp_src); udph->dest = htons(pkt_dev->cur_udp_dst); - udph->len = htons(datalen + sizeof(struct udphdr)); - udph->check = 0; /* No checksum */ + udph->len = htons(udplen); + udph->check = 0; *(__be32 *) iph = htonl(0x60000000); /* Version + flow */ @@ -2858,7 +2888,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, iph->hop_limit = 32; - iph->payload_len = htons(sizeof(struct udphdr) + datalen); + iph->payload_len = htons(udplen); iph->nexthdr = IPPROTO_UDP; iph->daddr = pkt_dev->cur_in6_daddr; @@ -2868,6 +2898,23 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, skb->dev = odev; skb->pkt_type = PACKET_HOST; + if (!(pkt_dev->flags & F_UDPCSUM)) { + skb->ip_summed = CHECKSUM_NONE; + } else if (odev->features & NETIF_F_V6_CSUM) { + skb->ip_summed = CHECKSUM_PARTIAL; + skb->csum_start = skb_transport_header(skb) - skb->head; + skb->csum_offset = offsetof(struct udphdr, check); + udph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, 0); + } else { + __wsum csum = udp_csum(skb); + + /* add protocol-dependent pseudo-header */ + udph->check = csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, csum); + + if (udph->check == 0) + udph->check = CSUM_MANGLED_0; + } + pktgen_finalize_skb(pkt_dev, skb, datalen); return skb; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 766e6ba..9e88af0 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -704,7 +704,7 @@ EXPORT_SYMBOL(udp_flush_pending_frames); * @src: source IP address * @dst: destination IP address */ -static void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst) +void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst) { struct udphdr *uh = udp_hdr(skb); struct sk_buff *frags = skb_shinfo(skb)->frag_list; @@ -740,6 +740,7 @@ static void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst) uh->check = CSUM_MANGLED_0; } } +EXPORT_SYMBOL_GPL(udp4_hwcsum); static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4) { -- cgit v0.10.2 From 
03c633e73353d3c116cab6164933d69ee2180470 Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Thu, 25 Jul 2013 14:08:04 +0200 Subject: pktgen: Use ip_send_check() to compute checksum Signed-off-by: Thomas Graf Signed-off-by: David S. Miller diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 48cebf2..2dec6f1 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -2758,8 +2758,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, iph->frag_off = 0; iplen = 20 + 8 + datalen; iph->tot_len = htons(iplen); - iph->check = 0; - iph->check = ip_fast_csum((void *)iph, iph->ihl); + ip_send_check(iph); skb->protocol = protocol; skb->dev = odev; skb->pkt_type = PACKET_HOST; -- cgit v0.10.2 From dcfe8048de66c3468060c8a2ec2c04ae3725d002 Mon Sep 17 00:00:00 2001 From: "nikolay@redhat.com" Date: Sat, 27 Jul 2013 19:10:10 +0200 Subject: bonding: remove bond_resend_igmp_join_requests read_unlock leftover After commit 4aa5dee4d9 ("net: convert resend IGMP to notifier event") we have 1 read_unlock in bond_resend_igmp_join_requests which isn't paired with a read_lock because it's removed by that commit. Signed-off-by: Nikolay Aleksandrov Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index f584352..da3af63 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -739,7 +739,6 @@ static void bond_resend_igmp_join_requests(struct bonding *bond) queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5); } write_unlock_bh(&bond->curr_slave_lock); - read_unlock(&bond->lock); } static void bond_resend_igmp_join_requests_delayed(struct work_struct *work) -- cgit v0.10.2 From ab90695a1a1185e0c5aff04ac16d39c5712f28bd Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 29 May 2013 18:40:36 +0000 Subject: e100: dump small buffers via %*ph Signed-off-by: Andy Shevchenko Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 5115ae7..ada6e21 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -1175,15 +1175,12 @@ static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ } - netif_printk(nic, hw, KERN_DEBUG, nic->netdev, - "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", - c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]); - netif_printk(nic, hw, KERN_DEBUG, nic->netdev, - "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", - c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]); - netif_printk(nic, hw, KERN_DEBUG, nic->netdev, - "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", - c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n", + c + 0); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n", + c + 8); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n", + c + 16); return 0; } -- cgit v0.10.2 From 24b41c972ce7e0ff5891bfd0bb5b7d34439f297c Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Thu, 13 Jun 2013 03:55:44 +0000 Subject: e1000e: restore call to pci_clear_master() In attempting to resolve a minor merge conflict, commit e5f2ef7ab4690d2e8faa (Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net) accidentally dropped a call to pci_clear_master() that was intended to remain in place. 
Commit 4e0855dff094b0d56d6b (e1000e: fix pci-device enable-counter balance) replaced a call to pci_disable_device() by one to pci_clear_master(). And then commit 66148babe728f3e00e13 (e1000e: fix runtime power management transitions) deleted a number of lines starting two lines following that call. This patch restores the call to pci_clear_master() in __e1000_shutdown(). v2: added summary lines (enclosed in parens) following commit IDs Signed-off-by: Dean Nelson Tested-by: Aaron Brown Acked-by: Bruce Allan Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 77f81cb..5475cf4 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -5995,6 +5995,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) */ e1000e_release_hw_control(adapter); + pci_clear_master(pdev); + /* The pci-e switch on some quad port adapters will report a * correctable error when the MAC transitions from D0 to D3. To * prevent this we need to mask off the correctable errors on the -- cgit v0.10.2 From 2592881990198f2b63eefcbc914a7d5259b25580 Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Mon, 20 May 2013 17:31:09 +0000 Subject: e1000e: Remove duplicate assignment of default rx/tx ring size tx_ring/rx_ring size is assigned in function e1000_alloc_queues(), which is called by e1000_sw_init() in the early stage of e1000_probe(). This patch just removes the duplicate assignment of this default ring size value. Signed-off-by: Wei Yang Reviewed-by: Gavin Shan Reviewed-by: Da Yu Qiu Tested-by: Aaron Brown Acked-by: Bruce Allan Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 5475cf4..40e6232 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -6725,10 +6725,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->hw.fc.current_mode = e1000_fc_default; adapter->hw.phy.autoneg_advertised = 0x2f; - /* ring size defaults */ - adapter->rx_ring->count = E1000_DEFAULT_RXD; - adapter->tx_ring->count = E1000_DEFAULT_TXD; - /* Initial Wake on LAN setting - If APM wake is enabled in * the EEPROM, enable the ACPI Magic Packet filter */ -- cgit v0.10.2 From c96ddb0ba2b395760275766e97b57f8136112a35 Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Sat, 25 May 2013 06:23:45 +0000 Subject: e1000e: Use macro instead of digit for defining e1000_rx_desc_packet_split In structure e1000_rx_desc_packet_split, the size of wb.upper.length is defined by a digit. This may introduce problems when the length is changed. This patch uses the macro PS_PAGE_BUFFERS for the definition. And move the definition to hw.h. Signed-off-by: Wei Yang Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index ffbc08f..ad0edd1 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -90,9 +90,6 @@ struct e1000_info; #define E1000_MNG_VLAN_NONE (-1) -/* Number of packet split data buffers (not including the header buffer) */ -#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) - #define DEFAULT_JUMBO 9234 /* Time to wait before putting the device into D3 if there's no link (in ms).
*/ diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index a6f903a..cee565d 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -227,6 +227,9 @@ union e1000_rx_desc_extended { }; #define MAX_PS_BUFFERS 4 + +/* Number of packet split data buffers (not including the header buffer) */ +#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) /* Receive Descriptor - Packet Split */ union e1000_rx_desc_packet_split { struct { @@ -251,7 +254,8 @@ union e1000_rx_desc_packet_split { } middle; struct { __le16 header_status; - __le16 length[3]; /* length of buffers 1-3 */ + /* length of buffers 1-3 */ + __le16 length[PS_PAGE_BUFFERS]; } upper; __le64 reserved; } wb; /* writeback */ -- cgit v0.10.2 From b43e867a022c55c1bfebcf08ac96e6bd0367b67a Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Fri, 21 Jun 2013 09:07:18 +0000 Subject: e1000e: disable ASPM L1 on 82583 The 82583 can disappear off the PCIe bus. This device is a modified 82574 which had the same problem which was fixed by disabling ASPM L1; disabling it on 82583 fixes the issue on this device. Signed-off-by: Bruce Allan Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index 4c303e2..104fcec 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -2057,6 +2057,7 @@ const struct e1000_info e1000_82583_info = { | FLAG_HAS_JUMBO_FRAMES | FLAG_HAS_CTRLEXT_ON_LOAD, .flags2 = FLAG2_DISABLE_ASPM_L0S + | FLAG2_DISABLE_ASPM_L1 | FLAG2_NO_DISABLE_RX, .pba = 32, .max_hw_frame_size = DEFAULT_JUMBO, -- cgit v0.10.2 From da1e2046e5f5ab268e55d30d6b74099ade0aeb6f Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Fri, 21 Jun 2013 09:07:02 +0000 Subject: e1000e: iAMT connections drop on driver unload when jumbo frames enabled The jumbo frame configuration in the MAC/PHY should be reverted on 82579 and newer parts when the interface is brought down (not just when the MTU is changed back to standard frame size) otherwise iAMT connections (e.g. SoL, IDE-R) will be dropped and cannot be re-acquired until the MTU is changed again. 
Signed-off-by: Bruce Allan Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 40e6232..99596a6 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -2979,17 +2979,10 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) u32 pages = 0; /* Workaround Si errata on PCHx - configure jumbo frame flow */ - if (hw->mac.type >= e1000_pch2lan) { - s32 ret_val; - - if (adapter->netdev->mtu > ETH_DATA_LEN) - ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); - else - ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); - - if (ret_val) - e_dbg("failed to enable jumbo frame workaround mode\n"); - } + if ((hw->mac.type >= e1000_pch2lan) && + (adapter->netdev->mtu > ETH_DATA_LEN) && + e1000_lv_jumbo_workaround_ich8lan(hw, true)) + e_dbg("failed to enable jumbo frame workaround mode\n"); /* Program MC offset vector base */ rctl = er32(RCTL); @@ -4034,6 +4027,12 @@ void e1000e_down(struct e1000_adapter *adapter) adapter->link_speed = 0; adapter->link_duplex = 0; + /* Disable Si errata workaround on PCHx for jumbo frame flow */ + if ((hw->mac.type >= e1000_pch2lan) && + (adapter->netdev->mtu > ETH_DATA_LEN) && + e1000_lv_jumbo_workaround_ich8lan(hw, false)) + e_dbg("failed to disable jumbo frame workaround mode\n"); + if (!pci_channel_offline(adapter->pdev)) e1000e_reset(adapter); -- cgit v0.10.2 From ce345e082ee305fc7f2435630a7497ab85d30af6 Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Fri, 21 Jun 2013 09:07:07 +0000 Subject: e1000e: low throughput using 4K jumbos on I218 Alter the packet buffer allocation accordingly. Signed-off-by: Bruce Allan Tested-by: Jeff Pieper Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 99596a6..51b05fe 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3819,6 +3819,8 @@ void e1000e_reset(struct e1000_adapter *adapter) break; } + pba = 14; + ew32(PBA, pba); fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH; fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL; break; -- cgit v0.10.2 From e0236ad9cd29f3e08dc0baa60ea3a8b219ff2610 Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Fri, 21 Jun 2013 09:07:13 +0000 Subject: e1000e: Tx hang on I218 when linked at 100Half and slow response at 10Mbps Tx hang is an unintended consequence of another workaround that is in the EEPROM for an issue with the firmware at 10Mbps when K1 (a power mode of the MAC-PHY interconnect) is enabled. The issue is resolved by setting appropriate Tx re-transmission timeouts in the PHY and associated K1 entry times in the MAC to allow enough transmissions to occur without triggering a Tx hang. A similar change is needed when linked at 10Mbps to improve latency. Signed-off-by: Bruce Allan Tested-by: Jeff Pieper Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 9dde390..b56c61a 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -793,29 +793,31 @@ release: * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications * preventing further DMA write requests. Workaround the issue by disabling * the de-assertion of the clock request when in 1Gpbs mode. 
+ * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link + * speeds in order to avoid Tx hangs. **/ static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link) { u32 fextnvm6 = er32(FEXTNVM6); + u32 status = er32(STATUS); s32 ret_val = 0; + u16 reg; - if (link && (er32(STATUS) & E1000_STATUS_SPEED_1000)) { - u16 kmrn_reg; - + if (link && (status & E1000_STATUS_SPEED_1000)) { ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; ret_val = e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, - &kmrn_reg); + ®); if (ret_val) goto release; ret_val = e1000e_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, - kmrn_reg & + reg & ~E1000_KMRNCTRLSTA_K1_ENABLE); if (ret_val) goto release; @@ -827,12 +829,45 @@ static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link) ret_val = e1000e_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, - kmrn_reg); + reg); release: hw->phy.ops.release(hw); } else { /* clear FEXTNVM6 bit 8 on link down or 10/100 */ - ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK); + fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK; + + if (!link || ((status & E1000_STATUS_SPEED_100) && + (status & E1000_STATUS_FD))) + goto update_fextnvm6; + + ret_val = e1e_rphy(hw, I217_INBAND_CTRL, ®); + if (ret_val) + return ret_val; + + /* Clear link status transmit timeout */ + reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK; + + if (status & E1000_STATUS_SPEED_100) { + /* Set inband Tx timeout to 5x10us for 100Half */ + reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT; + + /* Do not extend the K1 entry latency for 100Half */ + fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION; + } else { + /* Set inband Tx timeout to 50x10us for 10Full/Half */ + reg |= 50 << + I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT; + + /* Extend the K1 entry latency for 10 Mbps */ + fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION; + } + + ret_val = e1e_wphy(hw, I217_INBAND_CTRL, reg); + if (ret_val) + return ret_val; + +update_fextnvm6: + ew32(FEXTNVM6, fextnvm6); } return ret_val; diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 80034a2..5986569 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -93,6 +93,7 @@ #define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 #define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100 +#define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200 #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL @@ -197,6 +198,11 @@ #define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */ +/* Inband Control */ +#define I217_INBAND_CTRL PHY_REG(770, 18) +#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK 0x3F00 +#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT 8 + /* PHY Low Power Idle Control */ #define I82579_LPI_CTRL PHY_REG(772, 20) #define I82579_LPI_CTRL_100_ENABLE 0x2000 -- cgit v0.10.2 From 3ef672ab1862bbd44cc364e72ebd356783ab0243 Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Thu, 27 Jun 2013 02:44:44 +0000 Subject: e1000e: ethtool unnecessarily takes device out of RPM suspend A previous patch (commit e60b22c5b7 e1000e: fix accessing to suspended device) added .begin and .complete ethtool driver callbacks so that the device was resumed from Runtime Power Management (RPM) suspend state for all ethtool operations. This is overkill for operations which do not need to access any registers in the device. 
This patch makes it so that the device is taken out of RPM suspend only for those ethtool operations that must access device registers. Signed-off-by: Bruce Allan Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 59c22bf..e4ebd7d 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -173,7 +173,7 @@ static int e1000_get_settings(struct net_device *netdev, speed = adapter->link_speed; ecmd->duplex = adapter->link_duplex - 1; } - } else { + } else if (!pm_runtime_suspended(netdev->dev.parent)) { u32 status = er32(STATUS); if (status & E1000_STATUS_LU) { if (status & E1000_STATUS_SPEED_1000) @@ -264,6 +264,9 @@ static int e1000_set_settings(struct net_device *netdev, { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + int ret_val = 0; + + pm_runtime_get_sync(netdev->dev.parent); /* When SoL/IDER sessions are active, autoneg/speed/duplex * cannot be changed @@ -271,7 +274,8 @@ static int e1000_set_settings(struct net_device *netdev, if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) { e_err("Cannot change link characteristics when SoL/IDER is active.\n"); - return -EINVAL; + ret_val = -EINVAL; + goto out; } /* MDI setting is only allowed when autoneg enabled because @@ -279,13 +283,16 @@ static int e1000_set_settings(struct net_device *netdev, * duplex is forced. */ if (ecmd->eth_tp_mdix_ctrl) { - if (hw->phy.media_type != e1000_media_type_copper) - return -EOPNOTSUPP; + if (hw->phy.media_type != e1000_media_type_copper) { + ret_val = -EOPNOTSUPP; + goto out; + } if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && (ecmd->autoneg != AUTONEG_ENABLE)) { e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); - return -EINVAL; + ret_val = -EINVAL; + goto out; } } @@ -307,8 +314,8 @@ static int e1000_set_settings(struct net_device *netdev, u32 speed = ethtool_cmd_speed(ecmd); /* calling this overrides forced MDI setting */ if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) { - clear_bit(__E1000_RESETTING, &adapter->state); - return -EINVAL; + ret_val = -EINVAL; + goto out; } } @@ -331,8 +338,10 @@ static int e1000_set_settings(struct net_device *netdev, e1000e_reset(adapter); } +out: + pm_runtime_put_sync(netdev->dev.parent); clear_bit(__E1000_RESETTING, &adapter->state); - return 0; + return ret_val; } static void e1000_get_pauseparam(struct net_device *netdev, @@ -366,6 +375,8 @@ static int e1000_set_pauseparam(struct net_device *netdev, while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) usleep_range(1000, 2000); + pm_runtime_get_sync(netdev->dev.parent); + if (adapter->fc_autoneg == AUTONEG_ENABLE) { hw->fc.requested_mode = e1000_fc_default; if (netif_running(adapter->netdev)) { @@ -398,6 +409,7 @@ static int e1000_set_pauseparam(struct net_device *netdev, } out: + pm_runtime_put_sync(netdev->dev.parent); clear_bit(__E1000_RESETTING, &adapter->state); return retval; } @@ -428,6 +440,8 @@ static void e1000_get_regs(struct net_device *netdev, u32 *regs_buff = p; u16 phy_data; + pm_runtime_get_sync(netdev->dev.parent); + memset(p, 0, E1000_REGS_LEN * sizeof(u32)); regs->version = (1 << 24) | (adapter->pdev->revision << 16) | @@ -472,6 +486,8 @@ static void e1000_get_regs(struct net_device *netdev, e1e_rphy(hw, MII_STAT1000, &phy_data); regs_buff[24] = (u32)phy_data; /* phy local receiver status */ regs_buff[25] = 
regs_buff[24]; /* phy remote receiver status */ + + pm_runtime_put_sync(netdev->dev.parent); } static int e1000_get_eeprom_len(struct net_device *netdev) @@ -504,6 +520,8 @@ static int e1000_get_eeprom(struct net_device *netdev, if (!eeprom_buff) return -ENOMEM; + pm_runtime_get_sync(netdev->dev.parent); + if (hw->nvm.type == e1000_nvm_eeprom_spi) { ret_val = e1000_read_nvm(hw, first_word, last_word - first_word + 1, @@ -517,6 +535,8 @@ static int e1000_get_eeprom(struct net_device *netdev, } } + pm_runtime_put_sync(netdev->dev.parent); + if (ret_val) { /* a read error occurred, throw away the result */ memset(eeprom_buff, 0xff, sizeof(u16) * @@ -566,6 +586,8 @@ static int e1000_set_eeprom(struct net_device *netdev, ptr = (void *)eeprom_buff; + pm_runtime_get_sync(netdev->dev.parent); + if (eeprom->offset & 1) { /* need read/modify/write of first changed EEPROM word */ /* only the second byte of the word is being modified */ @@ -606,6 +628,7 @@ static int e1000_set_eeprom(struct net_device *netdev, ret_val = e1000e_update_nvm_checksum(hw); out: + pm_runtime_put_sync(netdev->dev.parent); kfree(eeprom_buff); return ret_val; } @@ -701,6 +724,8 @@ static int e1000_set_ringparam(struct net_device *netdev, } } + pm_runtime_get_sync(netdev->dev.parent); + e1000e_down(adapter); /* We can't just free everything and then setup again, because the @@ -739,6 +764,7 @@ err_setup_rx: e1000e_free_tx_resources(temp_tx); err_setup: e1000e_up(adapter); + pm_runtime_put_sync(netdev->dev.parent); free_temp: vfree(temp_tx); vfree(temp_rx); @@ -1732,6 +1758,8 @@ static void e1000_diag_test(struct net_device *netdev, u8 autoneg; bool if_running = netif_running(netdev); + pm_runtime_get_sync(netdev->dev.parent); + set_bit(__E1000_TESTING, &adapter->state); if (!if_running) { @@ -1817,6 +1845,8 @@ static void e1000_diag_test(struct net_device *netdev, } msleep_interruptible(4 * 1000); + + pm_runtime_put_sync(netdev->dev.parent); } static void e1000_get_wol(struct net_device *netdev, @@ -1891,6 +1921,8 @@ static int e1000_set_phys_id(struct net_device *netdev, switch (state) { case ETHTOOL_ID_ACTIVE: + pm_runtime_get_sync(netdev->dev.parent); + if (!hw->mac.ops.blink_led) return 2; /* cycle on/off twice per second */ @@ -1902,6 +1934,7 @@ static int e1000_set_phys_id(struct net_device *netdev, e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0); hw->mac.ops.led_off(hw); hw->mac.ops.cleanup_led(hw); + pm_runtime_put_sync(netdev->dev.parent); break; case ETHTOOL_ID_ON: @@ -1912,6 +1945,7 @@ static int e1000_set_phys_id(struct net_device *netdev, hw->mac.ops.led_off(hw); break; } + return 0; } @@ -1950,11 +1984,15 @@ static int e1000_set_coalesce(struct net_device *netdev, adapter->itr_setting = adapter->itr & ~3; } + pm_runtime_get_sync(netdev->dev.parent); + if (adapter->itr_setting != 0) e1000e_write_itr(adapter, adapter->itr); else e1000e_write_itr(adapter, 0); + pm_runtime_put_sync(netdev->dev.parent); + return 0; } @@ -1968,7 +2006,9 @@ static int e1000_nway_reset(struct net_device *netdev) if (!adapter->hw.mac.autoneg) return -EINVAL; + pm_runtime_get_sync(netdev->dev.parent); e1000e_reinit_locked(adapter); + pm_runtime_put_sync(netdev->dev.parent); return 0; } @@ -1982,7 +2022,12 @@ static void e1000_get_ethtool_stats(struct net_device *netdev, int i; char *p = NULL; + pm_runtime_get_sync(netdev->dev.parent); + e1000e_get_stats64(netdev, &net_stats); + + pm_runtime_put_sync(netdev->dev.parent); + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { switch (e1000_gstrings_stats[i].type) { case NETDEV_STATS: @@ -2033,7 
+2078,11 @@ static int e1000_get_rxnfc(struct net_device *netdev, case ETHTOOL_GRXFH: { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - u32 mrqc = er32(MRQC); + u32 mrqc; + + pm_runtime_get_sync(netdev->dev.parent); + mrqc = er32(MRQC); + pm_runtime_put_sync(netdev->dev.parent); if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK)) return 0; @@ -2096,9 +2145,13 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata) return -EOPNOTSUPP; } + pm_runtime_get_sync(netdev->dev.parent); + ret_val = hw->phy.ops.acquire(hw); - if (ret_val) + if (ret_val) { + pm_runtime_put_sync(netdev->dev.parent); return -EBUSY; + } /* EEE Capability */ ret_val = e1000_read_emi_reg_locked(hw, cap_addr, &phy_data); @@ -2117,14 +2170,11 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata) /* EEE PCS Status */ ret_val = e1000_read_emi_reg_locked(hw, pcs_stat_addr, &phy_data); + if (ret_val) + goto release; if (hw->phy.type == e1000_phy_82579) phy_data <<= 8; -release: - hw->phy.ops.release(hw); - if (ret_val) - return -ENODATA; - /* Result of the EEE auto negotiation - there is no register that * has the status of the EEE negotiation so do a best-guess based * on whether Tx or Rx LPI indications have been received. @@ -2136,7 +2186,14 @@ release: edata->tx_lpi_enabled = true; edata->tx_lpi_timer = er32(LPIC) >> E1000_LPIC_LPIET_SHIFT; - return 0; +release: + hw->phy.ops.release(hw); + if (ret_val) + ret_val = -ENODATA; + + pm_runtime_put_sync(netdev->dev.parent); + + return ret_val; } static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata) @@ -2169,12 +2226,16 @@ static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata) hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled; + pm_runtime_get_sync(netdev->dev.parent); + /* reset the link */ if (netif_running(netdev)) e1000e_reinit_locked(adapter); else e1000e_reset(adapter); + pm_runtime_put_sync(netdev->dev.parent); + return 0; } @@ -2212,19 +2273,7 @@ static int e1000e_get_ts_info(struct net_device *netdev, return 0; } -static int e1000e_ethtool_begin(struct net_device *netdev) -{ - return pm_runtime_get_sync(netdev->dev.parent); -} - -static void e1000e_ethtool_complete(struct net_device *netdev) -{ - pm_runtime_put_sync(netdev->dev.parent); -} - static const struct ethtool_ops e1000_ethtool_ops = { - .begin = e1000e_ethtool_begin, - .complete = e1000e_ethtool_complete, .get_settings = e1000_get_settings, .set_settings = e1000_set_settings, .get_drvinfo = e1000_get_drvinfo, -- cgit v0.10.2 From 91a3d82f380abe24e95a6d3981c06f13894eb2ce Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Sat, 29 Jun 2013 01:15:16 +0000 Subject: e1000e: enable support for new device IDs The device IDs 0x15a0 and 0x15a1 are new SKUs that contain the same MAC as I217 and same PHY as I218. The device IDs 0x15a2 and 0x15a3 are the same as existing I218 SKUs. 
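For reference, the four new IDs added by the diff below all reuse the existing board_pch_lpt configuration, and only the two Wildcat Point parts (LM3/V3) are additionally added to the I218 link-hang and suspend workarounds:

    0x15A0  E1000_DEV_ID_PCH_I218_LM2
    0x15A1  E1000_DEV_ID_PCH_I218_V2
    0x15A2  E1000_DEV_ID_PCH_I218_LM3  (Wildcat Point PCH)
    0x15A3  E1000_DEV_ID_PCH_I218_V3   (Wildcat Point PCH)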
Signed-off-by: Bruce Allan Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index cee565d..b799fd9 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -90,6 +90,10 @@ struct e1000_hw; #define E1000_DEV_ID_PCH_LPT_I217_V 0x153B #define E1000_DEV_ID_PCH_LPTLP_I218_LM 0x155A #define E1000_DEV_ID_PCH_LPTLP_I218_V 0x1559 +#define E1000_DEV_ID_PCH_I218_LM2 0x15A0 +#define E1000_DEV_ID_PCH_I218_V2 0x15A1 +#define E1000_DEV_ID_PCH_I218_LM3 0x15A2 /* Wildcat Point PCH */ +#define E1000_DEV_ID_PCH_I218_V3 0x15A3 /* Wildcat Point PCH */ #define E1000_REVISION_4 4 diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index b56c61a..78d03d3 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1028,7 +1028,9 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) /* Work-around I218 hang issue */ if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) || - (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V)) { + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) { ret_val = e1000_k1_workaround_lpt_lp(hw, link); if (ret_val) return ret_val; @@ -4203,7 +4205,9 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) u16 phy_reg, device_id = hw->adapter->pdev->device; if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) || - (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) { + (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) || + (device_id == E1000_DEV_ID_PCH_I218_LM3) || + (device_id == E1000_DEV_ID_PCH_I218_V3)) { u32 fextnvm6 = er32(FEXTNVM6); ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK); diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 51b05fe..650e569 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -6975,6 +6975,10 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM2), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt }, { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ }; -- cgit v0.10.2 From 97390ab86b2bcc53c124a1541c86ea0eba1a051f Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Sat, 29 Jun 2013 07:42:25 +0000 Subject: e1000e: do not resume device from RPM suspend to read PHY status registers When the device is runtime suspended (e.g. when there is no link), do not wake it from D3 to read the PHY status; just set the values to typical power-on defaults as is done when runtime PM is not enabled and there is no link. 
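The key change, condensed from the hunk below, is the new guard in e1000_phy_read_status(): the runtime-PM state is checked up front instead of force-resuming the device around the PHY reads:

    if (!pm_runtime_suspended((&adapter->pdev->dev)->parent) &&
        (er32(STATUS) & E1000_STATUS_LU) &&
        (adapter->hw.phy.media_type == e1000_media_type_copper)) {
            /* device awake and link up: read BMCR/BMSR/etc. via e1e_rphy() */
    } else {
            /* suspended or no link: report typical power-on defaults instead */
    }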
Signed-off-by: Bruce Allan Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 650e569..e6d2c0f 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -4684,11 +4684,11 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter) struct e1000_hw *hw = &adapter->hw; struct e1000_phy_regs *phy = &adapter->phy_regs; - if ((er32(STATUS) & E1000_STATUS_LU) && + if (!pm_runtime_suspended((&adapter->pdev->dev)->parent) && + (er32(STATUS) & E1000_STATUS_LU) && (adapter->hw.phy.media_type == e1000_media_type_copper)) { int ret_val; - pm_runtime_get_sync(&adapter->pdev->dev); ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr); ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr); ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise); @@ -4699,7 +4699,6 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter) ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus); if (ret_val) e_warn("Error reading PHY register\n"); - pm_runtime_put_sync(&adapter->pdev->dev); } else { /* Do not read PHY registers if link is not up * Set values to typical power-on defaults -- cgit v0.10.2 From 16b095a413fc6567a56e6e41909a8757e74acbc3 Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Sat, 29 Jun 2013 07:42:39 +0000 Subject: e1000e: fix I217/I218 PHY initialization flow The initialization of the PHY on I217/I218, while similar to 82579, must also check to see if the MAC and PHY are in the same mode (PCIe vs. SMBus) otherwise the PHY will be inaccessible by the MAC. Signed-off-by: Bruce Allan Tested-by: Jeff Pieper Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 78d03d3..af08188 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -185,6 +185,7 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) u32 phy_id = 0; s32 ret_val; u16 retry_count; + u32 mac_reg = 0; for (retry_count = 0; retry_count < 2; retry_count++) { ret_val = e1e_rphy_locked(hw, MII_PHYSID1, &phy_reg); @@ -203,11 +204,11 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) if (hw->phy.id) { if (hw->phy.id == phy_id) - return true; + goto out; } else if (phy_id) { hw->phy.id = phy_id; hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK); - return true; + goto out; } /* In case the PHY needs to be in mdio slow mode, @@ -219,7 +220,22 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) ret_val = e1000e_get_phy_id(hw); hw->phy.ops.acquire(hw); - return !ret_val; + if (ret_val) + return false; +out: + if (hw->mac.type == e1000_pch_lpt) { + /* Unforce SMBus mode in PHY */ + e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg); + phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; + e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg); + + /* Unforce SMBus mode in MAC */ + mac_reg = er32(CTRL_EXT); + mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_reg); + } + + return true; } /** @@ -233,7 +249,6 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) { u32 mac_reg, fwsm = er32(FWSM); s32 ret_val; - u16 phy_reg; /* Gate automatic PHY configuration by hardware on managed and * non-managed 82579 and newer adapters. 
@@ -262,22 +277,16 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; ew32(CTRL_EXT, mac_reg); + /* Wait 50 milliseconds for MAC to finish any retries + * that it might be trying to perform from previous + * attempts to acknowledge any phy read requests. + */ + msleep(50); + /* fall-through */ case e1000_pch2lan: - if (e1000_phy_is_accessible_pchlan(hw)) { - if (hw->mac.type == e1000_pch_lpt) { - /* Unforce SMBus mode in PHY */ - e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg); - phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; - e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg); - - /* Unforce SMBus mode in MAC */ - mac_reg = er32(CTRL_EXT); - mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; - ew32(CTRL_EXT, mac_reg); - } + if (e1000_phy_is_accessible_pchlan(hw)) break; - } /* fall-through */ case e1000_pchlan: @@ -287,6 +296,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) if (hw->phy.ops.check_reset_block(hw)) { e_dbg("Required LANPHYPC toggle blocked by ME\n"); + ret_val = -E1000_ERR_PHY; break; } @@ -298,15 +308,6 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC; ew32(FEXTNVM3, mac_reg); - if (hw->mac.type == e1000_pch_lpt) { - /* Toggling LANPHYPC brings the PHY out of SMBus mode - * So ensure that the MAC is also out of SMBus mode - */ - mac_reg = er32(CTRL_EXT); - mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; - ew32(CTRL_EXT, mac_reg); - } - /* Toggle LANPHYPC Value bit */ mac_reg = er32(CTRL); mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE; @@ -325,6 +326,21 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) usleep_range(5000, 10000); } while (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LPCD) && count--); + usleep_range(30000, 60000); + if (e1000_phy_is_accessible_pchlan(hw)) + break; + + /* Toggling LANPHYPC brings the PHY out of SMBus mode + * so ensure that the MAC is also out of SMBus mode + */ + mac_reg = er32(CTRL_EXT); + mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_reg); + + if (e1000_phy_is_accessible_pchlan(hw)) + break; + + ret_val = -E1000_ERR_PHY; } break; default: @@ -332,13 +348,14 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) } hw->phy.ops.release(hw); - - /* Reset the PHY before any access to it. Doing so, ensures - * that the PHY is in a known good state before we read/write - * PHY registers. The generic reset is sufficient here, - * because we haven't determined the PHY type yet. - */ - ret_val = e1000e_phy_hw_reset_generic(hw); + if (!ret_val) { + /* Reset the PHY before any access to it. Doing so, ensures + * that the PHY is in a known good state before we read/write + * PHY registers. The generic reset is sufficient here, + * because we haven't determined the PHY type yet. + */ + ret_val = e1000e_phy_hw_reset_generic(hw); + } out: /* Ungate automatic PHY configuration on non-managed 82579 */ -- cgit v0.10.2 From 9d4a0314642918cbda9ed4012df51e8df608fce6 Mon Sep 17 00:00:00 2001 From: Hannes Frederic Sowa Date: Fri, 26 Jul 2013 17:05:16 +0200 Subject: ipv4, ipv6: send igmpv3/mld packets with TC_PRIO_CONTROL v2: a) Also send ipv4 igmp messages with TC_PRIO_CONTROL Cc: William Manley Cc: Lukas Tribus Acked-by: Benjamin LaHaise Signed-off-by: Hannes Frederic Sowa Signed-off-by: David S. 
Miller diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 375aca3..ef76186 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -88,6 +88,7 @@ #include #include #include +#include #include #include @@ -315,6 +316,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) if (size < 256) return NULL; } + skb->priority = TC_PRIO_CONTROL; igmp_skb_size(skb) = size; rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0, @@ -670,6 +672,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, ip_rt_put(rt); return -1; } + skb->priority = TC_PRIO_CONTROL; skb_dst_set(skb, &rt->dst); diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 99cd65c..db25b8e 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -44,6 +44,7 @@ #include #include #include +#include #include #include @@ -1376,6 +1377,7 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size) if (!skb) return NULL; + skb->priority = TC_PRIO_CONTROL; skb_reserve(skb, hlen); if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) { @@ -1769,7 +1771,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) rcu_read_unlock(); return; } - + skb->priority = TC_PRIO_CONTROL; skb_reserve(skb, hlen); if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) { -- cgit v0.10.2 From 73d94e9481a20817abe2f1b41ee441bb4f6461f7 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Mon, 29 Jul 2013 16:21:26 +1000 Subject: pktgen: add needed include file Fixes this on PowerPC (at least): net/core/pktgen.c: In function 'fill_packet_ipv6': net/core/pktgen.c:2906:3: error: implicit declaration of function 'csum_ipv6_magic' [-Werror=implicit-function-declaration] udph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, 0); ^ Signed-off-by: Stephen Rothwell Signed-off-by: David S. Miller diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 2dec6f1..261357a 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -161,6 +161,7 @@ #include #include #include +#include #include #ifdef CONFIG_XFRM #include -- cgit v0.10.2 From 2d4b646613d6b12175b017aca18113945af1faf3 Mon Sep 17 00:00:00 2001 From: Eugenia Emantayev Date: Thu, 25 Jul 2013 19:21:23 +0300 Subject: net/mlx4_en: Fix BlueFlame race Fix a race between BlueFlame flow and stamping in post send flow. Example: SW: Build WQE 0 on the TX buffer, except the ownership bit SW: Set ownership for WQE 0 on the TX buffer SW: Ring doorbell for WQE 0 SW: Build WQE 1 on the TX buffer, except the ownership bit SW: Set ownership for WQE 1 on the TX buffer HW: Read WQE 0 and then WQE 1, before doorbell was rung/BF was done for WQE 1 HW: Produce CQEs for WQE 0 and WQE 1 SW: Process the CQEs, and stamp WQE 0 and WQE 1 accordingly (on the TX buffer) SW: Copy WQE 1 from the TX buffer to the BF register - ALREADY STAMPED! HW: CQE error with index 0xFFFF - the BF WQE's control segment is STAMPED, so the BF index is 0xFFFF. Error: Invalid Opcode. As a result QP enters the error state and no traffic can be sent. Solution: When stamping - do not stamp last completed wqe. Signed-off-by: Eugenia Emantayev Signed-off-by: Amir Vadai Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 7c49238..6dcca98 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -191,6 +191,39 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp); } +static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, int index, + u8 owner) +{ + __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT)); + struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE; + struct mlx4_en_tx_info *tx_info = &ring->tx_info[index]; + void *end = ring->buf + ring->buf_size; + __be32 *ptr = (__be32 *)tx_desc; + int i; + + /* Optimize the common case when there are no wraparounds */ + if (likely((void *)tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) { + /* Stamp the freed descriptor */ + for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; + i += STAMP_STRIDE) { + *ptr = stamp; + ptr += STAMP_DWORDS; + } + } else { + /* Stamp the freed descriptor */ + for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; + i += STAMP_STRIDE) { + *ptr = stamp; + ptr += STAMP_DWORDS; + if ((void *)ptr >= end) { + ptr = ring->buf; + stamp ^= cpu_to_be32(0x80000000); + } + } + } +} + static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, @@ -205,8 +238,6 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, void *end = ring->buf + ring->buf_size; int frags = skb_shinfo(skb)->nr_frags; int i; - __be32 *ptr = (__be32 *)tx_desc; - __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT)); struct skb_shared_hwtstamps hwts; if (timestamp) { @@ -232,12 +263,6 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, skb_frag_size(frag), PCI_DMA_TODEVICE); } } - /* Stamp the freed descriptor */ - for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) { - *ptr = stamp; - ptr += STAMP_DWORDS; - } - } else { if (!tx_info->inl) { if ((void *) data >= end) { @@ -263,16 +288,6 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, ++data; } } - /* Stamp the freed descriptor */ - for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) { - *ptr = stamp; - ptr += STAMP_DWORDS; - if ((void *) ptr >= end) { - ptr = ring->buf; - stamp ^= cpu_to_be32(0x80000000); - } - } - } dev_kfree_skb_any(skb); return tx_info->nr_txbb; @@ -318,8 +333,9 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq) struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring]; struct mlx4_cqe *cqe; u16 index; - u16 new_index, ring_index; + u16 new_index, ring_index, stamp_index; u32 txbbs_skipped = 0; + u32 txbbs_stamp = 0; u32 cons_index = mcq->cons_index; int size = cq->size; u32 size_mask = ring->size_mask; @@ -335,6 +351,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq) index = cons_index & size_mask; cqe = &buf[(index << factor) + factor]; ring_index = ring->cons & size_mask; + stamp_index = ring_index; /* Process all completed CQEs */ while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, @@ -359,6 +376,12 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq) priv, ring, ring_index, !!((ring->cons + txbbs_skipped) & ring->size), timestamp); + + mlx4_en_stamp_wqe(priv, ring, stamp_index, + !!((ring->cons + txbbs_stamp) & + ring->size)); + stamp_index = ring_index; + txbbs_stamp = txbbs_skipped; packets++; bytes += ring->tx_info[ring_index].nr_bytes; } while 
(ring_index != new_index); -- cgit v0.10.2 From fe6f700d6cbb7e8a61711e325f53d9c9e0a42a4c Mon Sep 17 00:00:00 2001 From: Yevgeny Petrilin Date: Sun, 28 Jul 2013 18:54:21 +0300 Subject: net/mlx4_core: Respond to operation request by firmware This commit adds new firmware command and new firmware event. The firmware raises the MLX4_EVENT_TYPE_OP_REQUIRED event in order to signal the driver it needs to perform an administrative operation throughout the MLX4_CMD_GET_OP_REQ command. At the moment the supported operation is adding/removing multicast entries which are used by the firmware for handling NCSI traffic in B0 steering mode. Also, had to swap the order of mlx4_init_mcg_table() and mlx4_init_eq_table() to make sure that driver will get events only after resources are initialized to handle it. Signed-off-by: Yevgeny Petrilin Signed-off-by: Jack Morgenstein Signed-off-by: Eugenia Emantayev Signed-off-by: Amir Vadai Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 299d018..141322c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -809,6 +809,15 @@ int MLX4_CMD_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, return -EPERM; } +int MLX4_CMD_GET_OP_REQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + return -EPERM; +} + int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, @@ -1252,6 +1261,15 @@ static struct mlx4_cmd_info cmd_info[] = { .wrapper = MLX4_CMD_UPDATE_QP_wrapper }, { + .opcode = MLX4_CMD_GET_OP_REQ, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = MLX4_CMD_GET_OP_REQ_wrapper, + }, + { .opcode = MLX4_CMD_CONF_SPECIAL_QP, .has_inbox = false, .has_outbox = false, diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 7e04286..0416c5b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -79,6 +79,7 @@ enum { (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \ (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \ (1ull << MLX4_EVENT_TYPE_CMD) | \ + (1ull << MLX4_EVENT_TYPE_OP_REQUIRED) | \ (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL) | \ (1ull << MLX4_EVENT_TYPE_FLR_EVENT) | \ (1ull << MLX4_EVENT_TYPE_FATAL_WARNING)) @@ -629,6 +630,14 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn); break; + case MLX4_EVENT_TYPE_OP_REQUIRED: + atomic_inc(&priv->opreq_count); + /* FW commands can't be executed from interrupt context + * working in deferred task + */ + queue_work(mlx4_wq, &priv->opreq_task); + break; + case MLX4_EVENT_TYPE_COMM_CHANNEL: if (!mlx4_is_master(dev)) { mlx4_warn(dev, "Received comm channel event " diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 8873d68..aec6f58 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -1705,3 +1705,107 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port) MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); } EXPORT_SYMBOL_GPL(mlx4_wol_write); + +enum { + ADD_TO_MCG = 0x26, +}; + + +void mlx4_opreq_action(struct work_struct *work) +{ + struct mlx4_priv *priv = container_of(work, struct mlx4_priv, + opreq_task); + struct 
mlx4_dev *dev = &priv->dev; + int num_tasks = atomic_read(&priv->opreq_count); + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_mgm *mgm; + u32 *outbox; + u32 modifier; + u16 token; + u16 type_m; + u16 type; + int err; + u32 num_qps; + struct mlx4_qp qp; + int i; + u8 rem_mcg; + u8 prot; + +#define GET_OP_REQ_MODIFIER_OFFSET 0x08 +#define GET_OP_REQ_TOKEN_OFFSET 0x14 +#define GET_OP_REQ_TYPE_OFFSET 0x1a +#define GET_OP_REQ_DATA_OFFSET 0x20 + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n"); + return; + } + outbox = mailbox->buf; + + while (num_tasks) { + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, + MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) { + mlx4_err(dev, "Failed to retreive required operation: %d\n", + err); + return; + } + MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET); + MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET); + MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET); + type_m = type >> 12; + type &= 0xfff; + + switch (type) { + case ADD_TO_MCG: + if (dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED) { + mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n"); + err = EPERM; + break; + } + mgm = (struct mlx4_mgm *)((u8 *)(outbox) + + GET_OP_REQ_DATA_OFFSET); + num_qps = be32_to_cpu(mgm->members_count) & + MGM_QPN_MASK; + rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1; + prot = ((u8 *)(&mgm->members_count))[0] >> 6; + + for (i = 0; i < num_qps; i++) { + qp.qpn = be32_to_cpu(mgm->qp[i]); + if (rem_mcg) + err = mlx4_multicast_detach(dev, &qp, + mgm->gid, + prot, 0); + else + err = mlx4_multicast_attach(dev, &qp, + mgm->gid, + mgm->gid[5] + , 0, prot, + NULL); + if (err) + break; + } + break; + default: + mlx4_warn(dev, "Bad type for required operation\n"); + err = EINVAL; + break; + } + err = mlx4_cmd(dev, 0, ((u32) err | cpu_to_be32(token) << 16), + 1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) { + mlx4_err(dev, "Failed to acknowledge required request: %d\n", + err); + goto out; + } + memset(outbox, 0, 0xffc); + num_tasks = atomic_dec_return(&priv->opreq_count); + } + +out: + mlx4_free_cmd_mailbox(dev, mailbox); +} diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index fdf4166..a0a368b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -220,5 +220,6 @@ int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm); int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev); int mlx4_NOP(struct mlx4_dev *dev); int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg); +void mlx4_opreq_action(struct work_struct *work); #endif /* MLX4_FW_H */ diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index e85af92..f1d818f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -1692,11 +1692,19 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) goto err_xrcd_table_free; } + if (!mlx4_is_slave(dev)) { + err = mlx4_init_mcg_table(dev); + if (err) { + mlx4_err(dev, "Failed to initialize multicast group table, aborting.\n"); + goto err_mr_table_free; + } + } + err = mlx4_init_eq_table(dev); if (err) { mlx4_err(dev, "Failed to initialize " "event queue table, aborting.\n"); - goto err_mr_table_free; + goto err_mcg_table_free; } err = mlx4_cmd_use_events(dev); @@ -1746,19 +1754,10 @@ static int 
mlx4_setup_hca(struct mlx4_dev *dev) goto err_srq_table_free; } - if (!mlx4_is_slave(dev)) { - err = mlx4_init_mcg_table(dev); - if (err) { - mlx4_err(dev, "Failed to initialize " - "multicast group table, aborting.\n"); - goto err_qp_table_free; - } - } - err = mlx4_init_counters_table(dev); if (err && err != -ENOENT) { mlx4_err(dev, "Failed to initialize counters table, aborting.\n"); - goto err_mcg_table_free; + goto err_qp_table_free; } if (!mlx4_is_slave(dev)) { @@ -1803,9 +1802,6 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) err_counters_table_free: mlx4_cleanup_counters_table(dev); -err_mcg_table_free: - mlx4_cleanup_mcg_table(dev); - err_qp_table_free: mlx4_cleanup_qp_table(dev); @@ -1821,6 +1817,10 @@ err_cmd_poll: err_eq_table_free: mlx4_cleanup_eq_table(dev); +err_mcg_table_free: + if (!mlx4_is_slave(dev)) + mlx4_cleanup_mcg_table(dev); + err_mr_table_free: mlx4_cleanup_mr_table(dev); @@ -2197,6 +2197,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) } } + atomic_set(&priv->opreq_count, 0); + INIT_WORK(&priv->opreq_task, mlx4_opreq_action); + /* * Now reset the HCA before we touch the PCI capabilities or * attempt a firmware command, since a boot ROM may have left @@ -2315,12 +2318,12 @@ err_port: mlx4_cleanup_port_info(&priv->port[port]); mlx4_cleanup_counters_table(dev); - mlx4_cleanup_mcg_table(dev); mlx4_cleanup_qp_table(dev); mlx4_cleanup_srq_table(dev); mlx4_cleanup_cq_table(dev); mlx4_cmd_use_polling(dev); mlx4_cleanup_eq_table(dev); + mlx4_cleanup_mcg_table(dev); mlx4_cleanup_mr_table(dev); mlx4_cleanup_xrcd_table(dev); mlx4_cleanup_pd_table(dev); @@ -2403,12 +2406,12 @@ static void mlx4_remove_one(struct pci_dev *pdev) RES_TR_FREE_SLAVES_ONLY); mlx4_cleanup_counters_table(dev); - mlx4_cleanup_mcg_table(dev); mlx4_cleanup_qp_table(dev); mlx4_cleanup_srq_table(dev); mlx4_cleanup_cq_table(dev); mlx4_cmd_use_polling(dev); mlx4_cleanup_eq_table(dev); + mlx4_cleanup_mcg_table(dev); mlx4_cleanup_mr_table(dev); mlx4_cleanup_xrcd_table(dev); mlx4_cleanup_pd_table(dev); diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index f3e804f..55f6245 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -39,19 +39,8 @@ #include "mlx4.h" -#define MGM_QPN_MASK 0x00FFFFFF -#define MGM_BLCK_LB_BIT 30 - static const u8 zero_gid[16]; /* automatically initialized to 0 */ -struct mlx4_mgm { - __be32 next_gid_index; - __be32 members_count; - u32 reserved[2]; - u8 gid[16]; - __be32 qp[MLX4_MAX_QP_PER_MGM]; -}; - int mlx4_get_mgm_entry_size(struct mlx4_dev *dev) { return 1 << dev->oper_log_mgm_entry_size; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 17d9277..348bb8c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -554,6 +554,17 @@ struct mlx4_mfunc { struct mlx4_mfunc_master_ctx master; }; +#define MGM_QPN_MASK 0x00FFFFFF +#define MGM_BLCK_LB_BIT 30 + +struct mlx4_mgm { + __be32 next_gid_index; + __be32 members_count; + u32 reserved[2]; + u8 gid[16]; + __be32 qp[MLX4_MAX_QP_PER_MGM]; +}; + struct mlx4_cmd { struct pci_pool *pool; void __iomem *hcr; @@ -802,6 +813,8 @@ struct mlx4_priv { u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS]; __be64 slave_node_guids[MLX4_MFUNC_MAX]; + atomic_t opreq_count; + struct work_struct opreq_task; }; static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) diff --git a/include/linux/mlx4/cmd.h 
b/include/linux/mlx4/cmd.h index bb1c809..cd1fdf7 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h @@ -69,6 +69,7 @@ enum { MLX4_CMD_SET_ICM_SIZE = 0xffd, /*master notify fw on finish for slave's flr*/ MLX4_CMD_INFORM_FLR_DONE = 0x5b, + MLX4_CMD_GET_OP_REQ = 0x59, /* TPT commands */ MLX4_CMD_SW2HW_MPT = 0xd, diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 52c23a8..6aebdfe 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -207,6 +207,7 @@ enum mlx4_event { MLX4_EVENT_TYPE_CMD = 0x0a, MLX4_EVENT_TYPE_VEP_UPDATE = 0x19, MLX4_EVENT_TYPE_COMM_CHANNEL = 0x18, + MLX4_EVENT_TYPE_OP_REQUIRED = 0x1a, MLX4_EVENT_TYPE_FATAL_WARNING = 0x1b, MLX4_EVENT_TYPE_FLR_EVENT = 0x1c, MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d, -- cgit v0.10.2 From 415fb87da80fa45a9cbe820cb2a86e70668688eb Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 28 Jul 2013 19:03:55 -0700 Subject: cnic: Simplify netdev events handling. After this earlier commit to simplify probing: commit 4bd9b0fffb193d2e288f67f81821af32df8d4349 cnic, bnx2x, bnx2: Simplify cnic probing. we can now reliably receive netdev events and we can simplify the handling of these events. We now remove the logic that tries to handle missed NETDEV_REGISTER events. This change will allow cleanup to be simplified in the next patch. We can now rely on the play back of netdev events during unregister_netdevice_notifier() to cleanup the structures. Signed-off-by: Michael Chan Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index d78d4cf..764bfc1 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -5628,7 +5628,7 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event, dev = cnic_from_netdev(netdev); - if (!dev && (event == NETDEV_REGISTER || netif_running(netdev))) { + if (!dev && event == NETDEV_REGISTER) { /* Check for the hot-plug device */ dev = is_cnic_dev(netdev); if (dev) { @@ -5644,7 +5644,7 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event, else if (event == NETDEV_UNREGISTER) cnic_ulp_exit(dev); - if (event == NETDEV_UP || (new_dev && netif_running(netdev))) { + if (event == NETDEV_UP) { if (cnic_register_netdev(dev) != 0) { cnic_put(dev); goto done; -- cgit v0.10.2 From b54345ea32ccd830066a793388a55473e05a4679 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 28 Jul 2013 19:03:56 -0700 Subject: cnic: Simplify cnic_release(). Since unregister_netdevice_notifier() will replay the NETDEV_DOWN and NETDEV_UNREGISTER_EVENTS, the cnic_dev_list will be cleaned up automatically. The loop to cleanup the cnic_dev_list can be removed in cnic_release(). Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 764bfc1..c6822e1 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -5693,21 +5693,8 @@ static struct notifier_block cnic_netdev_notifier = { static void cnic_release(void) { - struct cnic_dev *dev; struct cnic_uio_dev *udev; - while (!list_empty(&cnic_dev_list)) { - dev = list_entry(cnic_dev_list.next, struct cnic_dev, list); - if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { - cnic_ulp_stop(dev); - cnic_stop_hw(dev); - } - - cnic_ulp_exit(dev); - cnic_unregister_netdev(dev); - list_del_init(&dev->list); - cnic_free_dev(dev); - } while (!list_empty(&cnic_udev_list)) { udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev, list); -- cgit v0.10.2 From 6cdcdbba600c0537f5c61a9ffa45e708950c1aa6 Mon Sep 17 00:00:00 2001 From: Eddie Wai Date: Sun, 28 Jul 2013 19:03:57 -0700 Subject: cnic: Reset tcp_flags during cnic_cm_create(). Without resetting it, the bnx2i driver cannot use different options for different iSCSI connections. Signed-off-by: Eddie Wai Signed-off-by: Michael Chan Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index c6822e1..1a2e5ff 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -3603,6 +3603,7 @@ static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid, csk1->rcv_buf = DEF_RCV_BUF; csk1->snd_buf = DEF_SND_BUF; csk1->seed = DEF_SEED; + csk1->tcp_flags = 0; *csk = csk1; return 0; -- cgit v0.10.2 From b3bd2d65e6faf4f6fe6f87d6a5163859af391109 Mon Sep 17 00:00:00 2001 From: Eddie Wai Date: Sun, 28 Jul 2013 19:03:58 -0700 Subject: cnic: Update TCP options setup for iSCSI. Update TCP delayed ACK and timestamp options setup to match latest bnx2x firmware. Signed-off-by: Eddie Wai Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 1a2e5ff..24f2a09 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -1427,6 +1427,28 @@ static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type, rcu_read_unlock(); } +static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps, + int en_tcp_dack) +{ + struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); + u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN; + u16 tstorm_flags = 0; + + if (time_stamps) { + xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED; + tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED; + } + if (en_tcp_dack) + tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN; + + CNIC_WR8(dev, BAR_XSTRORM_INTMEM + + XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags); + + CNIC_WR16(dev, BAR_TSTRORM_INTMEM + + TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags); +} + static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) { struct cnic_local *cp = dev->cnic_priv; @@ -1506,6 +1528,10 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid), hq_bds); + cnic_bnx2x_set_tcp_options(dev, + req1->flags & ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE, + req1->flags & ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE); + return 0; } @@ -2035,9 +2061,6 @@ static void cnic_init_storm_conn_bufs(struct cnic_dev *dev, xstorm_buf->pseudo_header_checksum = swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0)); - if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK)) - tstorm_buf->params |= - L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE; if (kwqe3->ka_timeout) { tstorm_buf->ka_enable = 1; tstorm_buf->ka_timeout = kwqe3->ka_timeout; @@ -2084,25 +2107,6 @@ static void cnic_init_bnx2x_mac(struct cnic_dev *dev) mac[0]); } -static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts) -{ - struct cnic_local *cp = dev->cnic_priv; - struct bnx2x *bp = netdev_priv(dev->netdev); - u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN; - u16 tstorm_flags = 0; - - if (tcp_ts) { - xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED; - tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED; - } - - CNIC_WR8(dev, BAR_XSTRORM_INTMEM + - XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags); - - CNIC_WR16(dev, BAR_TSTRORM_INTMEM + - TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags); -} - static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[], u32 num, int *work) { @@ -2178,9 +2182,6 @@ static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[], CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id); - cnic_bnx2x_set_tcp_timestamp(dev, - kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP); - ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT, kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data); if (!ret) @@ -4220,7 +4221,7 @@ static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev) u32 port = CNIC_PORT(cp); cnic_init_bnx2x_mac(dev); - cnic_bnx2x_set_tcp_timestamp(dev, 1); + cnic_bnx2x_set_tcp_options(dev, 0, 1); CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0); diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h index ede3db3..e53f09d 100644 --- a/drivers/net/ethernet/broadcom/cnic_defs.h +++ b/drivers/net/ethernet/broadcom/cnic_defs.h @@ 
-5400,8 +5400,8 @@ struct tstorm_l5cm_tcp_flags { u16 flags; #define TSTORM_L5CM_TCP_FLAGS_VLAN_ID (0xFFF<<0) #define TSTORM_L5CM_TCP_FLAGS_VLAN_ID_SHIFT 0 -#define TSTORM_L5CM_TCP_FLAGS_RSRV0 (0x1<<12) -#define TSTORM_L5CM_TCP_FLAGS_RSRV0_SHIFT 12 +#define TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN (0x1<<12) +#define TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_SHIFT 12 #define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED (0x1<<13) #define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED_SHIFT 13 #define TSTORM_L5CM_TCP_FLAGS_RSRV1 (0x3<<14) diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h index f2db5fe..62f00da 100644 --- a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h +++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h @@ -581,8 +581,10 @@ struct iscsi_kwqe_init1 { #define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4 #define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5) #define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5 -#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6) -#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6 +#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE (0x1<<6) +#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE_SHIFT 6 +#define ISCSI_KWQE_INIT1_RESERVED1 (0x1<<7) +#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 7 u16 cq_num_wqes; #elif defined(__LITTLE_ENDIAN) u16 cq_num_wqes; @@ -593,8 +595,10 @@ struct iscsi_kwqe_init1 { #define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4 #define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5) #define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5 -#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6) -#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6 +#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE (0x1<<6) +#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE_SHIFT 6 +#define ISCSI_KWQE_INIT1_RESERVED1 (0x1<<7) +#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 7 u8 cq_log_wqes_per_page; #endif #if defined(__BIG_ENDIAN) -- cgit v0.10.2 From 28e3a8f38b503579cc788748af698f9347228cc4 Mon Sep 17 00:00:00 2001 From: Eddie Wai Date: Sun, 28 Jul 2013 19:03:59 -0700 Subject: cnic: Add missing error checking for RAMROD_CMD_ID_CLOSE Completion status field should also be checked for non-zero error condition. Signed-off-by: Eddie Wai Signed-off-by: Michael Chan Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 24f2a09..07ec4ce 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -4022,15 +4022,18 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe) cnic_cm_upcall(cp, csk, opcode); break; - case L5CM_RAMROD_CMD_ID_CLOSE: - if (l4kcqe->status != 0) { - netdev_warn(dev->netdev, "RAMROD CLOSE compl with " - "status 0x%x\n", l4kcqe->status); + case L5CM_RAMROD_CMD_ID_CLOSE: { + struct iscsi_kcqe *l5kcqe = (struct iscsi_kcqe *) kcqe; + + if (l4kcqe->status != 0 || l5kcqe->completion_status != 0) { + netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n", + l4kcqe->status, l5kcqe->completion_status); opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; /* Fall through */ } else { break; } + } case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: case L4_KCQE_OPCODE_VALUE_RESET_COMP: -- cgit v0.10.2 From ca67a3cb239d17586403b456fd92989a95e7ce54 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 28 Jul 2013 19:04:00 -0700 Subject: cnic: Update version to 2.5.17 and copyright year. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 07ec4ce..6bd6d14 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -1,6 +1,6 @@ /* cnic.c: Broadcom CNIC core network driver. * - * Copyright (c) 2006-2012 Broadcom Corporation + * Copyright (c) 2006-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h index 62c6706..e7a2474 100644 --- a/drivers/net/ethernet/broadcom/cnic.h +++ b/drivers/net/ethernet/broadcom/cnic.h @@ -1,6 +1,6 @@ /* cnic.h: Broadcom CNIC core network driver. * - * Copyright (c) 2006-2011 Broadcom Corporation + * Copyright (c) 2006-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h index e53f09d..95a8e4b 100644 --- a/drivers/net/ethernet/broadcom/cnic_defs.h +++ b/drivers/net/ethernet/broadcom/cnic_defs.h @@ -1,7 +1,7 @@ /* cnic.c: Broadcom CNIC core network driver. * - * Copyright (c) 2006-2012 Broadcom Corporation + * Copyright (c) 2006-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h index ec9bb9a..82a0312 100644 --- a/drivers/net/ethernet/broadcom/cnic_if.h +++ b/drivers/net/ethernet/broadcom/cnic_if.h @@ -1,6 +1,6 @@ /* cnic_if.h: Broadcom CNIC core network driver. * - * Copyright (c) 2006-2012 Broadcom Corporation + * Copyright (c) 2006-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -14,8 +14,8 @@ #include "bnx2x/bnx2x_mfw_req.h" -#define CNIC_MODULE_VERSION "2.5.16" -#define CNIC_MODULE_RELDATE "Dec 05, 2012" +#define CNIC_MODULE_VERSION "2.5.17" +#define CNIC_MODULE_RELDATE "July 28, 2013" #define CNIC_ULP_RDMA 0 #define CNIC_ULP_ISCSI 1 -- cgit v0.10.2 From c1459356d9814cc7579487e18b158627d2aad22e Mon Sep 17 00:00:00 2001 From: Nithin Sujir Date: Mon, 29 Jul 2013 13:58:35 -0700 Subject: tg3: Remove incorrect switch to aux power During probe, the driver is incorrectly switching the power to Vaux on the 5717 and later devices. At this point, we are in D0 state and drawing maximum power. We also definitely have Vmain available. It doesn't make sense to switch to Vaux since it has a lesser maximum power draw and we might go over the limit. On a new system, we observe that not all ports are recognized in some of the slots with this call in place. Signed-off-by: Nithin Nayak Sujir Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index d964f30..2e4bf5f 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -17547,11 +17547,6 @@ static int tg3_init_one(struct pci_dev *pdev, tg3_asic_rev(tp) == ASIC_REV_5762) tg3_flag_set(tp, PTP_CAPABLE); - if (tg3_flag(tp, 5717_PLUS)) { - /* Resume a low-power mode */ - tg3_frob_aux_power(tp, false); - } - tg3_timer_init(tp); tg3_carrier_off(tp); -- cgit v0.10.2 From 5137a2ee2007d9cbbbeebd14abe08357a079b607 Mon Sep 17 00:00:00 2001 From: Nithin Sujir Date: Mon, 29 Jul 2013 13:58:36 -0700 Subject: tg3: Allow NVRAM programming when interface is down Previously, when the interface was brought down, the driver would set the power state to D3hot. In D3hot, we don't have access to the NVRAM. This patch removes the call to set the power state to PCI_D3hot in close. A following patch will implement the shutdown handler to properly set the D3hot state when the system is going down. Doing the above means that the TG3_PHYFLG_IS_LOW_POWER should not be checked to validate access to the NVRAM. Signed-off-by: Nithin Nayak Sujir Signed-off-by: Michael Chan Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 2e4bf5f..ec244c9 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -11502,7 +11502,7 @@ static int tg3_close(struct net_device *dev) memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev)); memset(&tp->estats_prev, 0, sizeof(tp->estats_prev)); - tg3_power_down(tp); + tg3_power_down_prepare(tp); tg3_carrier_off(tp); @@ -11724,9 +11724,6 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, if (tg3_flag(tp, NO_NVRAM)) return -EINVAL; - if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) - return -EAGAIN; - offset = eeprom->offset; len = eeprom->len; eeprom->len = 0; @@ -11784,9 +11781,6 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *buf; __be32 start, end; - if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) - return -EAGAIN; - if (tg3_flag(tp, NO_NVRAM) || eeprom->magic != TG3_EEPROM_MAGIC) return -EINVAL; @@ -13515,7 +13509,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, tg3_phy_start(tp); } if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) - tg3_power_down(tp); + tg3_power_down_prepare(tp); } -- cgit v0.10.2 From 4c305fa2cbe2a85c34899763fcefb843c87b591d Mon Sep 17 00:00:00 2001 From: Nithin Sujir Date: Mon, 29 Jul 2013 13:58:37 -0700 Subject: tg3: Implement the shutdown handler Also remove the call to tg3_power_down_prepare() in tg3_power_down() since tg3_close() calls it. Signed-off-by: Nithin Nayak Sujir Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index ec244c9..9351707 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -4226,8 +4226,6 @@ static int tg3_power_down_prepare(struct tg3 *tp) static void tg3_power_down(struct tg3 *tp) { - tg3_power_down_prepare(tp); - pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE)); pci_set_power_state(tp->pdev, PCI_D3hot); } @@ -17743,6 +17741,23 @@ out: static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume); +static void tg3_shutdown(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(dev); + + rtnl_lock(); + netif_device_detach(dev); + + if (netif_running(dev)) + dev_close(dev); + + if (system_state == SYSTEM_POWER_OFF) + tg3_power_down(tp); + + rtnl_unlock(); +} + /** * tg3_io_error_detected - called when PCI error is detected * @pdev: Pointer to PCI device @@ -17898,6 +17913,7 @@ static struct pci_driver tg3_driver = { .remove = tg3_remove_one, .err_handler = &tg3_err_handler, .driver.pm = &tg3_pm_ops, + .shutdown = tg3_shutdown, }; module_pci_driver(tg3_driver); -- cgit v0.10.2 From 92e6457d4cf68ef69bc4f98330c93e198df06a43 Mon Sep 17 00:00:00 2001 From: Nithin Sujir Date: Mon, 29 Jul 2013 13:58:38 -0700 Subject: tg3: Enable support for timesync gpio output The PTP_CAPABLE tg3 devices have a gpio output that is toggled when the free running counter matches a watchdog value. This patch adds support to set the watchdog and enable this feature. Since the output is controlled via bits in the EAV_REF_CLCK_CTL register, we have to read-modify-write it when we stop/resume. Cc: Richard Cochran Signed-off-by: Nithin Nayak Sujir Signed-off-by: Michael Chan Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 9351707..f077c8f 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -6093,10 +6093,12 @@ static u64 tg3_refclk_read(struct tg3 *tp) /* tp->lock must be held */ static void tg3_refclk_write(struct tg3 *tp, u64 newval) { - tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP); + u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL); + + tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP); tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff); tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32); - tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME); + tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME); } static inline void tg3_full_lock(struct tg3 *tp, int irq_sync); @@ -6212,6 +6214,59 @@ static int tg3_ptp_settime(struct ptp_clock_info *ptp, static int tg3_ptp_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on) { + struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); + u32 clock_ctl; + int rval = 0; + + switch (rq->type) { + case PTP_CLK_REQ_PEROUT: + if (rq->perout.index != 0) + return -EINVAL; + + tg3_full_lock(tp, 0); + clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL); + clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK; + + if (on) { + u64 nsec; + + nsec = rq->perout.start.sec * 1000000000ULL + + rq->perout.start.nsec; + + if (rq->perout.period.sec || rq->perout.period.nsec) { + netdev_warn(tp->dev, + "Device supports only a one-shot timesync output, period must be 0\n"); + rval = -EINVAL; + goto err_out; + } + + if (nsec & (1ULL << 63)) { + netdev_warn(tp->dev, + "Start value (nsec) is over limit. 
Maximum size of start is only 63 bits\n"); + rval = -EINVAL; + goto err_out; + } + + tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff)); + tw32(TG3_EAV_WATCHDOG0_MSB, + TG3_EAV_WATCHDOG0_EN | + ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK)); + + tw32(TG3_EAV_REF_CLCK_CTL, + clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0); + } else { + tw32(TG3_EAV_WATCHDOG0_MSB, 0); + tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl); + } + +err_out: + tg3_full_unlock(tp); + return rval; + + default: + break; + } + return -EOPNOTSUPP; } @@ -6221,7 +6276,7 @@ static const struct ptp_clock_info tg3_ptp_caps = { .max_adj = 250000000, .n_alarm = 0, .n_ext_ts = 0, - .n_per_out = 0, + .n_per_out = 1, .pps = 0, .adjfreq = tg3_ptp_adjfreq, .adjtime = tg3_ptp_adjtime, diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index cd63d11..2e0f3d3 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -1818,12 +1818,21 @@ #define TG3_EAV_REF_CLCK_CTL 0x00006908 #define TG3_EAV_REF_CLCK_CTL_STOP 0x00000002 #define TG3_EAV_REF_CLCK_CTL_RESUME 0x00000004 +#define TG3_EAV_CTL_TSYNC_GPIO_MASK (0x3 << 16) +#define TG3_EAV_CTL_TSYNC_WDOG0 (1 << 17) + +#define TG3_EAV_WATCHDOG0_LSB 0x00006918 +#define TG3_EAV_WATCHDOG0_MSB 0x0000691c +#define TG3_EAV_WATCHDOG0_EN (1 << 31) +#define TG3_EAV_WATCHDOG_MSB_MASK 0x7fffffff + #define TG3_EAV_REF_CLK_CORRECT_CTL 0x00006928 #define TG3_EAV_REF_CLK_CORRECT_EN (1 << 31) #define TG3_EAV_REF_CLK_CORRECT_NEG (1 << 30) #define TG3_EAV_REF_CLK_CORRECT_MASK 0xffffff -/* 0x690c --> 0x7000 unused */ + +/* 0x692c --> 0x7000 unused */ /* NVRAM Control registers */ #define NVRAM_CMD 0x00007000 -- cgit v0.10.2 From 378b72c873b4828cde5b4bc85c7029d98f8f1df4 Mon Sep 17 00:00:00 2001 From: Nithin Sujir Date: Mon, 29 Jul 2013 13:58:39 -0700 Subject: tg3: Fix UDP fragments treated as RMCP The 5762 devices sometimes incorrectly treat udp fragments as RMCP packets and route to the APE. This patch sets the RX_MODE_IPV4_FRAG_FIX bit for these devices which enables the proper behaviour. Signed-off-by: Nithin Nayak Sujir Signed-off-by: Michael Chan Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index f077c8f..6e59ef1 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -10420,6 +10420,9 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) if (tg3_flag(tp, 5755_PLUS)) tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; + if (tg3_asic_rev(tp) == ASIC_REV_5762) + tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX; + if (tg3_flag(tp, ENABLE_RSS)) tp->rx_mode |= RX_MODE_RSS_ENABLE | RX_MODE_RSS_ITBL_HASH_BITS_7 | diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 2e0f3d3..ddb8be1 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -532,6 +532,7 @@ #define RX_MODE_RSS_ITBL_HASH_BITS_7 0x00700000 #define RX_MODE_RSS_ENABLE 0x00800000 #define RX_MODE_IPV6_CSUM_ENABLE 0x01000000 +#define RX_MODE_IPV4_FRAG_FIX 0x02000000 #define MAC_RX_STATUS 0x0000046c #define RX_STATUS_REMOTE_TX_XOFFED 0x00000001 #define RX_STATUS_XOFF_RCVD 0x00000002 -- cgit v0.10.2 From cd77b2ebf97dbe54ab250e4358a3f50d96053f26 Mon Sep 17 00:00:00 2001 From: Nithin Sujir Date: Mon, 29 Jul 2013 13:58:40 -0700 Subject: tg3: Update version to 3.133 Signed-off-by: Nithin Nayak Sujir Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 6e59ef1..2aa8624 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits) #define DRV_MODULE_NAME "tg3" #define TG3_MAJ_NUM 3 -#define TG3_MIN_NUM 132 +#define TG3_MIN_NUM 133 #define DRV_MODULE_VERSION \ __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) -#define DRV_MODULE_RELDATE "May 21, 2013" +#define DRV_MODULE_RELDATE "Jul 29, 2013" #define RESET_KIND_SHUTDOWN 0 #define RESET_KIND_INIT 1 -- cgit v0.10.2 From 4ce1fd6110e1fb1747194d830a3f85952ad06172 Mon Sep 17 00:00:00 2001 From: Ivan Vecera Date: Thu, 25 Jul 2013 16:10:55 +0200 Subject: be2net: don't use dev_err when AER enabling fails The driver uses dev_err when enabling of AER fails (e.g. PCIe AER is not supported). The dev_info is more appropriate to avoid console pollution. Cc: sathya.perla@emulex.com Cc: subbu.seetharaman@emulex.com Cc: ajit.khaparde@emulex.com Signed-off-by: Ivan Vecera Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 67deb21..3df1503 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4232,7 +4232,7 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) status = pci_enable_pcie_error_reporting(pdev); if (status) - dev_err(&pdev->dev, "Could not use PCIe error reporting\n"); + dev_info(&pdev->dev, "Could not use PCIe error reporting\n"); status = be_ctrl_init(adapter); if (status) -- cgit v0.10.2 From a783c77076d9d06558a6d4bb07f0c1e0819c8c89 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Fri, 5 Jul 2013 16:15:02 +0300 Subject: ath10k: fix teardown ordering This should fix memory corruption if HIF is tried to be restarted. Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 33af467..92f35f2 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -1263,7 +1263,6 @@ static void ath10k_pci_hif_stop(struct ath10k *ar) ath10k_pci_process_ce(ar); ath10k_pci_cleanup_ce(ar); ath10k_pci_buffer_cleanup(ar); - ath10k_pci_ce_deinit(ar); } static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, @@ -2333,6 +2332,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev) tasklet_kill(&ar_pci->msi_fw_err); ath10k_core_unregister(ar); + ath10k_pci_ce_deinit(ar); ath10k_pci_stop_intr(ar); pci_set_drvdata(pdev, NULL); -- cgit v0.10.2 From 80c78c67e6660db46f0a6e60941386ce22863e24 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Fri, 5 Jul 2013 16:15:03 +0300 Subject: ath10k: fix possible deadlock It was possible to have a deadlock due to inverted locking of local->iflist_mtx and ath10k->conf_mutex. 
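A rough sketch of the safe pattern adopted here, assuming that in this mac80211 version ieee80211_iterate_active_interfaces() acquires local->iflist_mtx internally while the _atomic variant walks the list under RCU; holding ar->conf_mutex around the blocking iterator nests the two mutexes one way while other mac80211 paths can nest them the other way, the classic AB-BA inversion:

    mutex_lock(&ar->conf_mutex);
    /* the _atomic iterator uses rcu_read_lock() rather than
     * local->iflist_mtx, so no second sleeping lock is nested
     * under conf_mutex here */
    ieee80211_iterate_active_interfaces_atomic(hw,
                                               IEEE80211_IFACE_ITER_RESUME_ALL,
                                               ath10k_set_rts_iter, &ar_iter);
    mutex_unlock(&ar->conf_mutex);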
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index da5c333..608e240 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -2581,8 +2581,9 @@ static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value) ar_iter.ar = ar; mutex_lock(&ar->conf_mutex); - ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL, - ath10k_set_rts_iter, &ar_iter); + ieee80211_iterate_active_interfaces_atomic( + hw, IEEE80211_IFACE_ITER_RESUME_ALL, + ath10k_set_rts_iter, &ar_iter); mutex_unlock(&ar->conf_mutex); return ar_iter.ret; @@ -2622,8 +2623,9 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value) ar_iter.ar = ar; mutex_lock(&ar->conf_mutex); - ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL, - ath10k_set_frag_iter, &ar_iter); + ieee80211_iterate_active_interfaces_atomic( + hw, IEEE80211_IFACE_ITER_RESUME_ALL, + ath10k_set_frag_iter, &ar_iter); mutex_unlock(&ar->conf_mutex); return ar_iter.ret; -- cgit v0.10.2 From 679c54a67141fb12fd579c6097ebfab4cecf0043 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Fri, 5 Jul 2013 16:15:04 +0300 Subject: ath10k: setup rts/frag thresholds upon vdev creation mac80211 configures rts/frag thresholds per-hw not per-vif. ath10k FW expects those values to be set per-vdev (i.e. per-vif). ath10k should now respect rts/frag thresholds set before a given interface was brought up. Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 608e240..2bfb8fd 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -1756,7 +1756,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); enum wmi_sta_powersave_param param; int ret = 0; - u32 value; + u32 value, rts, frag; int bit; mutex_lock(&ar->conf_mutex); @@ -1859,6 +1859,24 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, ath10k_warn("Failed to set PSPOLL count: %d\n", ret); } + rts = min_t(u32, ar->hw->wiphy->rts_threshold, ATH10K_RTS_MAX); + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, + WMI_VDEV_PARAM_RTS_THRESHOLD, + rts); + if (ret) + ath10k_warn("failed to set rts threshold for vdev %d (%d)\n", + arvif->vdev_id, ret); + + frag = clamp_t(u32, ar->hw->wiphy->frag_threshold, + ATH10K_FRAGMT_THRESHOLD_MIN, + ATH10K_FRAGMT_THRESHOLD_MAX); + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, + WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD, + frag); + if (ret) + ath10k_warn("failed to set frag threshold for vdev %d (%d)\n", + arvif->vdev_id, ret); + if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) ar->monitor_present = true; -- cgit v0.10.2 From 671b96db55c3ef372a5b01dfeb6b445b5c7cc3d1 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Fri, 5 Jul 2013 16:15:05 +0300 Subject: ath10k: do not setup rts/frag thresholds for suspended interfaces mac80211 calls for rts/frag threshold hooks before any interface is brought back up again when resuming. We would set vdev parameters before given vdev is created lading to a FW crash. rts/frag thresholds will be re-set accordingly in add_interface() hook anyway. 
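Put differently (a condensed reading of the change, not extra patch text): during resume the hw-wide rts/frag values are replayed before add_interface() has re-created any vdev, so a WMI vdev parameter write at that point targets a vdev id the firmware does not know about yet. Restricting the iteration to interfaces already re-added to the driver closes that window, roughly:

    /* IEEE80211_IFACE_ITER_NORMAL skips interfaces that have not been
     * re-added to the driver yet, unlike IEEE80211_IFACE_ITER_RESUME_ALL,
     * so the per-vdev WMI write only runs for vdevs that exist in FW */
    ieee80211_iterate_active_interfaces_atomic(hw,
                                               IEEE80211_IFACE_ITER_NORMAL,
                                               ath10k_set_rts_iter, &ar_iter);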
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 2bfb8fd..87fa3c2 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -2600,7 +2600,7 @@ static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value) mutex_lock(&ar->conf_mutex); ieee80211_iterate_active_interfaces_atomic( - hw, IEEE80211_IFACE_ITER_RESUME_ALL, + hw, IEEE80211_IFACE_ITER_NORMAL, ath10k_set_rts_iter, &ar_iter); mutex_unlock(&ar->conf_mutex); @@ -2642,7 +2642,7 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value) mutex_lock(&ar->conf_mutex); ieee80211_iterate_active_interfaces_atomic( - hw, IEEE80211_IFACE_ITER_RESUME_ALL, + hw, IEEE80211_IFACE_ITER_NORMAL, ath10k_set_frag_iter, &ar_iter); mutex_unlock(&ar->conf_mutex); -- cgit v0.10.2 From 3a0861fffd2238438101b830dc06b13233d1cf92 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Fri, 5 Jul 2013 16:15:06 +0300 Subject: ath10k: remove ath10k_bus It serves no purpose. Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 2b3426b..aabe166 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -441,7 +441,6 @@ static int ath10k_init_hw_params(struct ath10k *ar) } struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev, - enum ath10k_bus bus, const struct ath10k_hif_ops *hif_ops) { struct ath10k *ar; @@ -458,7 +457,6 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev, ar->hif.priv = hif_priv; ar->hif.ops = hif_ops; - ar->hif.bus = bus; ar->free_vdev_map = 0xFF; /* 8 vdevs */ diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 539336d..7489770 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -43,10 +43,6 @@ struct ath10k; -enum ath10k_bus { - ATH10K_BUS_PCI, -}; - struct ath10k_skb_cb { dma_addr_t paddr; bool is_mapped; @@ -274,7 +270,6 @@ struct ath10k { struct { void *priv; - enum ath10k_bus bus; const struct ath10k_hif_ops *ops; } hif; @@ -356,7 +351,6 @@ struct ath10k { }; struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev, - enum ath10k_bus bus, const struct ath10k_hif_ops *hif_ops); void ath10k_core_destroy(struct ath10k *ar); diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 92f35f2..926a7eb 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -2157,8 +2157,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, ath10k_pci_dump_features(ar_pci); - ar = ath10k_core_create(ar_pci, ar_pci->dev, ATH10K_BUS_PCI, - &ath10k_pci_hif_ops); + ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops); if (!ar) { ath10k_err("ath10k_core_create failed!\n"); ret = -EINVAL; -- cgit v0.10.2 From cba4ca7553deb8273d393645a1be8b01d3761543 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Fri, 5 Jul 2013 16:15:07 +0300 Subject: ath10k: fix typo in define name Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index 61a8ac7..b407929 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -79,7 +79,7 @@ static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar, struct ath10k_pci *ar_pci 
= ath10k_pci_priv(ar); void __iomem *indicator_addr; - if (!test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features)) { + if (!test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) { ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n); return; } diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 926a7eb..fe43b41 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -2117,7 +2117,7 @@ static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci) case ATH10K_PCI_FEATURE_MSI_X: ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n"); break; - case ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND: + case ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND: ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n"); break; } @@ -2144,7 +2144,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, switch (pci_dev->device) { case QCA988X_1_0_DEVICE_ID: - set_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features); + set_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features); break; case QCA988X_2_0_DEVICE_ID: set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features); @@ -2165,7 +2165,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, } /* Enable QCA988X_1.0 HW workarounds */ - if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features)) + if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) spin_lock_init(&ar_pci->hw_v1_workaround_lock); ar_pci->ar = ar; diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index d2a055a..d3a2e6c 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -152,7 +152,7 @@ struct service_to_pipe { enum ath10k_pci_features { ATH10K_PCI_FEATURE_MSI_X = 0, - ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND = 1, + ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND = 1, /* keep last */ ATH10K_PCI_FEATURE_COUNT @@ -311,7 +311,7 @@ static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); void __iomem *addr = ar_pci->mem; - if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features)) { + if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) { unsigned long irq_flags; spin_lock_irqsave(&ar_pci->hw_v1_workaround_lock, irq_flags); -- cgit v0.10.2 From 09af8f85c6a1fbe9c1ffc6253a4c0e8a0d8a9033 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Fri, 5 Jul 2013 16:15:08 +0300 Subject: ath10k: silent warning in IBSS mode There is no TIM IE generated in IBSS beacons by mac80211. 
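For context, a gloss on the hunk below assuming standard TIM semantics: the TIM element advertises buffered frames for power-saving stations and only appears in AP-style beacons, so an IBSS beacon template from mac80211 legitimately lacks it and cfg80211_find_ie() returning NULL is the expected case there. The warning is kept only for vdev types that should carry a TIM, roughly:

    ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
                                (u8 *)skb_tail_pointer(bcn) - ies);
    if (!ie) {
        /* expected for IBSS beacons; still worth a warning elsewhere */
        if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
            ath10k_warn("no tim ie found;\n");
        return;
    }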
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 7d4b798..1b5312d 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -501,8 +501,8 @@ static void ath10k_wmi_update_tim(struct ath10k *ar, ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies, (u8 *)skb_tail_pointer(bcn) - ies); if (!ie) { - /* highly unlikely for mac80211 */ - ath10k_warn("no tim ie found;\n"); + if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS) + ath10k_warn("no tim ie found;\n"); return; } -- cgit v0.10.2 From 342004bea753434fb8e0d486afa8fc72b2be4bd7 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Fri, 5 Jul 2013 16:15:09 +0300 Subject: ath10k: lower print level for a message Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c index 74363c9..7b5c334 100644 --- a/drivers/net/wireless/ath/ath10k/htc.c +++ b/drivers/net/wireless/ath/ath10k/htc.c @@ -751,8 +751,9 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc, tx_alloc = ath10k_htc_get_credit_allocation(htc, conn_req->service_id); if (!tx_alloc) - ath10k_warn("HTC Service %s does not allocate target credits\n", - htc_service_name(conn_req->service_id)); + ath10k_dbg(ATH10K_DBG_HTC, + "HTC Service %s does not allocate target credits\n", + htc_service_name(conn_req->service_id)); skb = ath10k_htc_build_tx_ctrl_skb(htc->ar); if (!skb) { -- cgit v0.10.2 From ed48b35644a08da5ee3c54570241a023abf3ea40 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Fri, 5 Jul 2013 16:15:10 +0300 Subject: ath10k: provide errno if bmi read/write fails Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c index 1a2ef51..aeae029 100644 --- a/drivers/net/wireless/ath/ath10k/bmi.c +++ b/drivers/net/wireless/ath/ath10k/bmi.c @@ -105,7 +105,8 @@ int ath10k_bmi_read_memory(struct ath10k *ar, ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &rxlen); if (ret) { - ath10k_warn("unable to read from the device\n"); + ath10k_warn("unable to read from the device (%d)\n", + ret); return ret; } @@ -149,7 +150,8 @@ int ath10k_bmi_write_memory(struct ath10k *ar, ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen, NULL, NULL); if (ret) { - ath10k_warn("unable to write to the device\n"); + ath10k_warn("unable to write to the device (%d)\n", + ret); return ret; } -- cgit v0.10.2 From 7a5fe3f87c39565e4c5eb7e39638fc979c8fd98b Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Fri, 5 Jul 2013 16:15:11 +0300 Subject: ath10k: change function to take struct ath10k as arg This aligns it to the argument list of other similar functions. Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index fe43b41..445ec78 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -2058,9 +2058,9 @@ static int ath10k_pci_reset_target(struct ath10k *ar) return 0; } -static void ath10k_pci_device_reset(struct ath10k_pci *ar_pci) +static void ath10k_pci_device_reset(struct ath10k *ar) { - struct ath10k *ar = ar_pci->ar; + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); void __iomem *mem = ar_pci->mem; int i; u32 val; @@ -2255,7 +2255,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, * is in an unexpected state. 
We try to catch that here in order to * reset the Target and retry the probe. */ - ath10k_pci_device_reset(ar_pci); + ath10k_pci_device_reset(ar); ret = ath10k_pci_reset_target(ar); if (ret) -- cgit v0.10.2 From e799bbffdd6e67305b057e3c13c0eed23523bdad Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Fri, 5 Jul 2013 16:15:12 +0300 Subject: ath10k: rename hif callback The `set_callbacks` is a more appopriate name for the function. Let's leave `init` for something else. Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h index 73a24d4..010e265 100644 --- a/drivers/net/wireless/ath/ath10k/hif.h +++ b/drivers/net/wireless/ath/ath10k/hif.h @@ -66,8 +66,8 @@ struct ath10k_hif_ops { */ void (*send_complete_check)(struct ath10k *ar, u8 pipe_id, int force); - void (*init)(struct ath10k *ar, - struct ath10k_hif_cb *callbacks); + void (*set_callbacks)(struct ath10k *ar, + struct ath10k_hif_cb *callbacks); u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id); }; @@ -122,10 +122,10 @@ static inline void ath10k_hif_send_complete_check(struct ath10k *ar, ar->hif.ops->send_complete_check(ar, pipe_id, force); } -static inline void ath10k_hif_init(struct ath10k *ar, - struct ath10k_hif_cb *callbacks) +static inline void ath10k_hif_set_callbacks(struct ath10k *ar, + struct ath10k_hif_cb *callbacks) { - ar->hif.ops->init(ar, callbacks); + ar->hif.ops->set_callbacks(ar, callbacks); } static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar, diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c index 7b5c334..f37a6e1 100644 --- a/drivers/net/wireless/ath/ath10k/htc.c +++ b/drivers/net/wireless/ath/ath10k/htc.c @@ -987,7 +987,7 @@ struct ath10k_htc *ath10k_htc_create(struct ath10k *ar, /* Get HIF default pipe for HTC message exchange */ ep = &htc->endpoint[ATH10K_HTC_EP_0]; - ath10k_hif_init(ar, &htc_callbacks); + ath10k_hif_set_callbacks(ar, &htc_callbacks); ath10k_hif_get_default_pipe(ar, &ep->ul_pipe_id, &ep->dl_pipe_id); init_completion(&htc->ctl_resp); diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 445ec78..7da0983 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -744,8 +744,8 @@ static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, ath10k_ce_per_engine_service(ar, pipe); } -static void ath10k_pci_hif_post_init(struct ath10k *ar, - struct ath10k_hif_cb *callbacks) +static void ath10k_pci_hif_set_callbacks(struct ath10k *ar, + struct ath10k_hif_cb *callbacks) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); @@ -1742,7 +1742,7 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = { .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe, .get_default_pipe = ath10k_pci_hif_get_default_pipe, .send_complete_check = ath10k_pci_hif_send_complete_check, - .init = ath10k_pci_hif_post_init, + .set_callbacks = ath10k_pci_hif_set_callbacks, .get_free_queue_number = ath10k_pci_hif_get_free_queue_number, }; -- cgit v0.10.2 From cd003fad17d9258efdc5dd658666731377cfebd1 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Fri, 5 Jul 2013 16:15:13 +0300 Subject: ath10k: embed HTC struct inside ath10k This reduces number of allocations and simplifies memory managemnt. 
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index aabe166..4199560 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -100,7 +100,7 @@ static int ath10k_init_connect_htc(struct ath10k *ar) goto conn_fail; /* Start HTC */ - status = ath10k_htc_start(ar->htc); + status = ath10k_htc_start(&ar->htc); if (status) goto conn_fail; @@ -116,7 +116,7 @@ static int ath10k_init_connect_htc(struct ath10k *ar) return 0; timeout: - ath10k_htc_stop(ar->htc); + ath10k_htc_stop(&ar->htc); conn_fail: return status; } @@ -505,7 +505,6 @@ EXPORT_SYMBOL(ath10k_core_destroy); int ath10k_core_register(struct ath10k *ar) { - struct ath10k_htc_ops htc_ops; struct bmi_target_info target_info; int status; @@ -534,26 +533,26 @@ int ath10k_core_register(struct ath10k *ar) if (status) goto err; - htc_ops.target_send_suspend_complete = ath10k_send_suspend_complete; + ar->htc.htc_ops.target_send_suspend_complete = + ath10k_send_suspend_complete; - ar->htc = ath10k_htc_create(ar, &htc_ops); - if (IS_ERR(ar->htc)) { - status = PTR_ERR(ar->htc); - ath10k_err("could not create HTC (%d)\n", status); + status = ath10k_htc_init(ar); + if (status) { + ath10k_err("could not init HTC (%d)\n", status); goto err; } status = ath10k_bmi_done(ar); if (status) - goto err_htc_destroy; + goto err; status = ath10k_wmi_attach(ar); if (status) { ath10k_err("WMI attach failed: %d\n", status); - goto err_htc_destroy; + goto err; } - status = ath10k_htc_wait_target(ar->htc); + status = ath10k_htc_wait_target(&ar->htc); if (status) goto err_wmi_detach; @@ -605,13 +604,11 @@ int ath10k_core_register(struct ath10k *ar) err_unregister_mac: ath10k_mac_unregister(ar); err_disconnect_htc: - ath10k_htc_stop(ar->htc); + ath10k_htc_stop(&ar->htc); err_htt_detach: ath10k_htt_detach(ar->htt); err_wmi_detach: ath10k_wmi_detach(ar); -err_htc_destroy: - ath10k_htc_destroy(ar->htc); err: return status; } @@ -623,10 +620,9 @@ void ath10k_core_unregister(struct ath10k *ar) * Otherwise we will fail to submit commands to FW and mac80211 will be * unhappy about callback failures. 
*/ ath10k_mac_unregister(ar); - ath10k_htc_stop(ar->htc); + ath10k_htc_stop(&ar->htc); ath10k_htt_detach(ar->htt); ath10k_wmi_detach(ar); - ath10k_htc_destroy(ar->htc); } EXPORT_SYMBOL(ath10k_core_unregister); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 7489770..dfd4fe1 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -279,8 +279,8 @@ struct ath10k { bool is_target_paused; struct ath10k_bmi bmi; + struct ath10k_htc htc; - struct ath10k_htc *htc; struct ath10k_htt *htt; struct ath10k_hw_params { diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c index f37a6e1..7d5a366 100644 --- a/drivers/net/wireless/ath/ath10k/htc.c +++ b/drivers/net/wireless/ath/ath10k/htc.c @@ -265,7 +265,7 @@ static int ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb, unsigned int eid) { - struct ath10k_htc *htc = ar->htc; + struct ath10k_htc *htc = &ar->htc; struct ath10k_htc_ep *ep = &htc->endpoint[eid]; bool stopping; @@ -414,7 +414,7 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar, u8 pipe_id) { int status = 0; - struct ath10k_htc *htc = ar->htc; + struct ath10k_htc *htc = &ar->htc; struct ath10k_htc_hdr *hdr; struct ath10k_htc_ep *ep; u16 payload_len; @@ -961,22 +961,14 @@ void ath10k_htc_stop(struct ath10k_htc *htc) } /* registered target arrival callback from the HIF layer */ -struct ath10k_htc *ath10k_htc_create(struct ath10k *ar, - struct ath10k_htc_ops *htc_ops) +int ath10k_htc_init(struct ath10k *ar) { struct ath10k_hif_cb htc_callbacks; struct ath10k_htc_ep *ep = NULL; - struct ath10k_htc *htc = NULL; - - /* FIXME: use struct ath10k instead */ - htc = kzalloc(sizeof(struct ath10k_htc), GFP_KERNEL); - if (!htc) - return ERR_PTR(-ENOMEM); + struct ath10k_htc *htc = &ar->htc; spin_lock_init(&htc->tx_lock); - memcpy(&htc->htc_ops, htc_ops, sizeof(struct ath10k_htc_ops)); - ath10k_htc_reset_endpoint_states(htc); /* setup HIF layer callbacks */ @@ -992,10 +984,5 @@ struct ath10k_htc *ath10k_htc_create(struct ath10k *ar, init_completion(&htc->ctl_resp); - return htc; -} - -void ath10k_htc_destroy(struct ath10k_htc *htc) -{ - kfree(htc); + return 0; } diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h index fa45844..1606c9f 100644 --- a/drivers/net/wireless/ath/ath10k/htc.h +++ b/drivers/net/wireless/ath/ath10k/htc.h @@ -352,8 +352,7 @@ struct ath10k_htc { bool stopping; }; -struct ath10k_htc *ath10k_htc_create(struct ath10k *ar, - struct ath10k_htc_ops *htc_ops); +int ath10k_htc_init(struct ath10k *ar); int ath10k_htc_wait_target(struct ath10k_htc *htc); int ath10k_htc_start(struct ath10k_htc *htc); int ath10k_htc_connect_service(struct ath10k_htc *htc, @@ -362,7 +361,6 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc, int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid, struct sk_buff *packet); void ath10k_htc_stop(struct ath10k_htc *htc); -void ath10k_htc_destroy(struct ath10k_htc *htc); struct sk_buff *ath10k_htc_alloc_skb(int size); #endif diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c index 185a546..2bfb9b4 100644 --- a/drivers/net/wireless/ath/ath10k/htt.c +++ b/drivers/net/wireless/ath/ath10k/htt.c @@ -36,7 +36,7 @@ static int ath10k_htt_htc_attach(struct ath10k_htt *htt) /* connect to control service */ conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG; - status = ath10k_htc_connect_service(htt->ar->htc, 
&conn_req, + status = ath10k_htc_connect_service(&htt->ar->htc, &conn_req, &conn_resp); if (status) diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index ef79106..fa568f1 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -92,7 +92,7 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt) /* At the beginning free queue number should hint us the maximum * queue length */ - pipe = htt->ar->htc->endpoint[htt->eid].ul_pipe_id; + pipe = htt->ar->htc.endpoint[htt->eid].ul_pipe_id; htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar, pipe); @@ -194,7 +194,7 @@ int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt) ATH10K_SKB_CB(skb)->htt.is_conf = true; - ret = ath10k_htc_send(htt->ar->htc, htt->eid, skb); + ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); if (ret) { dev_kfree_skb_any(skb); return ret; @@ -281,7 +281,7 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt) ATH10K_SKB_CB(skb)->htt.is_conf = true; - ret = ath10k_htc_send(htt->ar->htc, htt->eid, skb); + ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); if (ret) { dev_kfree_skb_any(skb); return ret; @@ -346,7 +346,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) skb_cb->htt.refcount = 2; skb_cb->htt.msdu = msdu; - res = ath10k_htc_send(htt->ar->htc, htt->eid, txdesc); + res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc); if (res) goto err; @@ -486,7 +486,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) skb_cb->htt.txfrag = txfrag; skb_cb->htt.msdu = msdu; - res = ath10k_htc_send(htt->ar->htc, htt->eid, txdesc); + res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc); if (res) goto err; diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 1b5312d..681e941 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -111,7 +111,7 @@ static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len); - status = ath10k_htc_send(ar->htc, ar->wmi.eid, skb); + status = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb); if (status) { dev_kfree_skb_any(skb); atomic_dec(&ar->wmi.pending_tx_count); @@ -1114,7 +1114,7 @@ int ath10k_wmi_connect_htc_service(struct ath10k *ar) /* connect to control service */ conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL; - status = ath10k_htc_connect_service(ar->htc, &conn_req, &conn_resp); + status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp); if (status) { ath10k_warn("failed to connect to WMI CONTROL service status: %d\n", status); -- cgit v0.10.2 From edb8236df4d0429d973e093062a4806471f5efa2 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Fri, 5 Jul 2013 16:15:14 +0300 Subject: ath10k: embed HTT struct inside ath10k This reduces number of allocations and simplifies memory managemnt. 
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 4199560..f1312fa 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -556,9 +556,9 @@ int ath10k_core_register(struct ath10k *ar) if (status) goto err_wmi_detach; - ar->htt = ath10k_htt_attach(ar); - if (!ar->htt) { - status = -ENOMEM; + status = ath10k_htt_attach(ar); + if (status) { + ath10k_err("could not attach htt (%d)\n", status); goto err_wmi_detach; } @@ -585,7 +585,7 @@ int ath10k_core_register(struct ath10k *ar) goto err_disconnect_htc; } - status = ath10k_htt_attach_target(ar->htt); + status = ath10k_htt_attach_target(&ar->htt); if (status) goto err_disconnect_htc; @@ -606,7 +606,7 @@ err_unregister_mac: err_disconnect_htc: ath10k_htc_stop(&ar->htc); err_htt_detach: - ath10k_htt_detach(ar->htt); + ath10k_htt_detach(&ar->htt); err_wmi_detach: ath10k_wmi_detach(ar); err: @@ -621,7 +621,7 @@ void ath10k_core_unregister(struct ath10k *ar) * unhappy about callback failures. */ ath10k_mac_unregister(ar); ath10k_htc_stop(&ar->htc); - ath10k_htt_detach(ar->htt); + ath10k_htt_detach(&ar->htt); ath10k_wmi_detach(ar); } EXPORT_SYMBOL(ath10k_core_unregister); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index dfd4fe1..2f7065c 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -23,6 +23,7 @@ #include #include +#include "htt.h" #include "htc.h" #include "hw.h" #include "targaddrs.h" @@ -273,15 +274,13 @@ struct ath10k { const struct ath10k_hif_ops *ops; } hif; - struct ath10k_wmi wmi; - wait_queue_head_t event_queue; bool is_target_paused; struct ath10k_bmi bmi; + struct ath10k_wmi wmi; struct ath10k_htc htc; - - struct ath10k_htt *htt; + struct ath10k_htt htt; struct ath10k_hw_params { u32 id; diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c index 2bfb9b4..39342c5 100644 --- a/drivers/net/wireless/ath/ath10k/htt.c +++ b/drivers/net/wireless/ath/ath10k/htt.c @@ -16,6 +16,7 @@ */ #include +#include #include "htt.h" #include "core.h" @@ -47,15 +48,11 @@ static int ath10k_htt_htc_attach(struct ath10k_htt *htt) return 0; } -struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar) +int ath10k_htt_attach(struct ath10k *ar) { - struct ath10k_htt *htt; + struct ath10k_htt *htt = &ar->htt; int ret; - htt = kzalloc(sizeof(*htt), GFP_KERNEL); - if (!htt) - return NULL; - htt->ar = ar; htt->max_throughput_mbps = 800; @@ -65,8 +62,11 @@ struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar) * since ath10k_htt_rx_attach involves sending a rx ring configure * message to the target. 
*/ - if (ath10k_htt_htc_attach(htt)) + ret = ath10k_htt_htc_attach(htt); + if (ret) { + ath10k_err("could not attach htt htc (%d)\n", ret); goto err_htc_attach; + } ret = ath10k_htt_tx_attach(htt); if (ret) { @@ -74,8 +74,11 @@ struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar) goto err_htc_attach; } - if (ath10k_htt_rx_attach(htt)) + ret = ath10k_htt_rx_attach(htt); + if (ret) { + ath10k_err("could not attach htt rx (%d)\n", ret); goto err_rx_attach; + } /* * Prefetch enough data to satisfy target @@ -89,13 +92,12 @@ struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar) 8 + /* llc snap */ 2; /* ip4 dscp or ip6 priority */ - return htt; + return 0; err_rx_attach: ath10k_htt_tx_detach(htt); err_htc_attach: - kfree(htt); - return NULL; + return ret; } #define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ) @@ -148,5 +150,4 @@ void ath10k_htt_detach(struct ath10k_htt *htt) { ath10k_htt_rx_detach(htt); ath10k_htt_tx_detach(htt); - kfree(htt); } diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index a7a7aa0..318be46 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -20,7 +20,6 @@ #include -#include "core.h" #include "htc.h" #include "rx_desc.h" @@ -1317,7 +1316,7 @@ struct htt_rx_desc { #define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */ #define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1) -struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar); +int ath10k_htt_attach(struct ath10k *ar); int ath10k_htt_attach_target(struct ath10k_htt *htt); void ath10k_htt_detach(struct ath10k_htt *htt); diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index de058d7..04f08d9 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -15,6 +15,7 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +#include "core.h" #include "htc.h" #include "htt.h" #include "txrx.h" @@ -1036,7 +1037,7 @@ end: void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) { - struct ath10k_htt *htt = ar->htt; + struct ath10k_htt *htt = &ar->htt; struct htt_resp *resp = (struct htt_resp *)skb->data; /* confirm alignment */ diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index fa568f1..dc3f3e8 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -153,7 +153,7 @@ void ath10k_htt_tx_detach(struct ath10k_htt *htt) void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb) { struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); - struct ath10k_htt *htt = ar->htt; + struct ath10k_htt *htt = &ar->htt; if (skb_cb->htt.is_conf) { dev_kfree_skb_any(skb); diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 87fa3c2..f91701c 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -1397,15 +1397,15 @@ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb) int ret; if (ieee80211_is_mgmt(hdr->frame_control)) - ret = ath10k_htt_mgmt_tx(ar->htt, skb); + ret = ath10k_htt_mgmt_tx(&ar->htt, skb); else if (ieee80211_is_nullfunc(hdr->frame_control)) /* FW does not report tx status properly for NullFunc frames * unless they are sent through mgmt tx path. mac80211 sends * those frames when it detects link/beacon loss and depends on * the tx status to be correct. 
*/ - ret = ath10k_htt_mgmt_tx(ar->htt, skb); + ret = ath10k_htt_mgmt_tx(&ar->htt, skb); else - ret = ath10k_htt_tx(ar->htt, skb); + ret = ath10k_htt_tx(&ar->htt, skb); if (ret) { ath10k_warn("tx failed (%d). dropping packet.\n", ret); @@ -2659,12 +2659,12 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop) if (drop) return; - ret = wait_event_timeout(ar->htt->empty_tx_wq, ({ + ret = wait_event_timeout(ar->htt.empty_tx_wq, ({ bool empty; - spin_lock_bh(&ar->htt->tx_lock); - empty = bitmap_empty(ar->htt->used_msdu_ids, - ar->htt->max_num_pending_tx); - spin_unlock_bh(&ar->htt->tx_lock); + spin_lock_bh(&ar->htt.tx_lock); + empty = bitmap_empty(ar->htt.used_msdu_ids, + ar->htt.max_num_pending_tx); + spin_unlock_bh(&ar->htt.tx_lock); (empty); }), ATH10K_FLUSH_TIMEOUT_HZ); if (ret <= 0) -- cgit v0.10.2 From 548db54cc1890b161e87d4ca1028cf77f51fd16c Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Fri, 5 Jul 2013 16:15:15 +0300 Subject: ath10k: improve locking Add more lockdep asserts and a few conf_mutex locks. It's better to be on the safe side. Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index f91701c..278c968 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -43,6 +43,8 @@ static int ath10k_send_key(struct ath10k_vif *arvif, .macaddr = macaddr, }; + lockdep_assert_held(&arvif->ar->conf_mutex); + if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) arg.key_flags = WMI_KEY_PAIRWISE; else @@ -87,6 +89,8 @@ static int ath10k_install_key(struct ath10k_vif *arvif, struct ath10k *ar = arvif->ar; int ret; + lockdep_assert_held(&ar->conf_mutex); + INIT_COMPLETION(ar->install_key_done); ret = ath10k_send_key(arvif, key, cmd, macaddr); @@ -372,6 +376,8 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar) { int ret; + lockdep_assert_held(&ar->conf_mutex); + ret = wait_for_completion_timeout(&ar->vdev_setup_done, ATH10K_VDEV_SETUP_TIMEOUT_HZ); if (ret == 0) @@ -605,6 +611,8 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif, { int ret = 0; + lockdep_assert_held(&arvif->ar->conf_mutex); + if (!info->enable_beacon) { ath10k_vdev_stop(arvif); return; @@ -631,6 +639,8 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif, { int ret = 0; + lockdep_assert_held(&arvif->ar->conf_mutex); + if (!info->ibss_joined) { ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer); if (ret) @@ -680,6 +690,8 @@ static void ath10k_ps_iter(void *data, u8 *mac, struct ieee80211_vif *vif) enum wmi_sta_ps_mode psmode; int ret; + lockdep_assert_held(&arvif->ar->conf_mutex); + if (vif->type != NL80211_IFTYPE_STATION) return; @@ -722,6 +734,8 @@ static void ath10k_peer_assoc_h_basic(struct ath10k *ar, struct ieee80211_bss_conf *bss_conf, struct wmi_peer_assoc_complete_arg *arg) { + lockdep_assert_held(&ar->conf_mutex); + memcpy(arg->addr, sta->addr, ETH_ALEN); arg->vdev_id = arvif->vdev_id; arg->peer_aid = sta->aid; @@ -764,6 +778,8 @@ static void ath10k_peer_assoc_h_crypto(struct ath10k *ar, const u8 *rsnie = NULL; const u8 *wpaie = NULL; + lockdep_assert_held(&ar->conf_mutex); + bss = cfg80211_get_bss(ar->hw->wiphy, ar->hw->conf.chandef.chan, info->bssid, NULL, 0, 0, 0); if (bss) { @@ -804,6 +820,8 @@ static void ath10k_peer_assoc_h_rates(struct ath10k *ar, u32 ratemask; int i; + lockdep_assert_held(&ar->conf_mutex); + sband = ar->hw->wiphy->bands[ar->hw->conf.chandef.chan->band]; ratemask = 
sta->supp_rates[ar->hw->conf.chandef.chan->band]; rates = sband->bitrates; @@ -827,6 +845,8 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar, int smps; int i, n; + lockdep_assert_held(&ar->conf_mutex); + if (!ht_cap->ht_supported) return; @@ -905,6 +925,8 @@ static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar, u32 uapsd = 0; u32 max_sp = 0; + lockdep_assert_held(&ar->conf_mutex); + if (sta->wme) arg->peer_flags |= WMI_PEER_QOS; @@ -1056,6 +1078,8 @@ static int ath10k_peer_assoc(struct ath10k *ar, { struct wmi_peer_assoc_complete_arg arg; + lockdep_assert_held(&ar->conf_mutex); + memset(&arg, 0, sizeof(struct wmi_peer_assoc_complete_arg)); ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, &arg); @@ -1079,6 +1103,8 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw, struct ieee80211_sta *ap_sta; int ret; + lockdep_assert_held(&ar->conf_mutex); + rcu_read_lock(); ap_sta = ieee80211_find_sta(vif, bss_conf->bssid); @@ -1119,6 +1145,8 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw, struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); int ret; + lockdep_assert_held(&ar->conf_mutex); + /* * For some reason, calling VDEV-DOWN before VDEV-STOP * makes the FW to send frames via HTT after disassociation. @@ -1152,6 +1180,8 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif, { int ret = 0; + lockdep_assert_held(&ar->conf_mutex); + ret = ath10k_peer_assoc(ar, arvif, sta, NULL); if (ret) { ath10k_warn("WMI peer assoc failed for %pM\n", sta->addr); @@ -1172,6 +1202,8 @@ static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif, { int ret = 0; + lockdep_assert_held(&ar->conf_mutex); + ret = ath10k_clear_peer_keys(arvif, sta->addr); if (ret) { ath10k_warn("could not clear all peer wep keys (%d)\n", ret); @@ -1198,6 +1230,8 @@ static int ath10k_update_channel_list(struct ath10k *ar) int ret; int i; + lockdep_assert_held(&ar->conf_mutex); + bands = hw->wiphy->bands; for (band = 0; band < IEEE80211_NUM_BANDS; band++) { if (!bands[band]) @@ -1284,6 +1318,8 @@ static void ath10k_reg_notifier(struct wiphy *wiphy, struct ath10k *ar = hw->priv; int ret; + mutex_lock(&ar->conf_mutex); + ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory); ret = ath10k_update_channel_list(ar); @@ -1301,6 +1337,8 @@ static void ath10k_reg_notifier(struct wiphy *wiphy, regpair->reg_5ghz_ctl); if (ret) ath10k_warn("could not set pdev regdomain (%d)\n", ret); + + mutex_unlock(&ar->conf_mutex); } /***************/ @@ -1678,6 +1716,8 @@ static int ath10k_start(struct ieee80211_hw *hw) struct ath10k *ar = hw->priv; int ret; + mutex_lock(&ar->conf_mutex); + ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS, 1); if (ret) ath10k_warn("could not enable WMI_PDEV_PARAM_PMF_QOS (%d)\n", @@ -1688,6 +1728,7 @@ static int ath10k_start(struct ieee80211_hw *hw) ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n", ret); + mutex_unlock(&ar->conf_mutex); return 0; } @@ -1695,9 +1736,11 @@ static void ath10k_stop(struct ieee80211_hw *hw) { struct ath10k *ar = hw->priv; - /* avoid leaks in case FW never confirms scan for offchannel */ - cancel_work_sync(&ar->offchan_tx_work); + mutex_lock(&ar->conf_mutex); ath10k_offchan_tx_purge(ar); + mutex_unlock(&ar->conf_mutex); + + cancel_work_sync(&ar->offchan_tx_work); } static int ath10k_config(struct ieee80211_hw *hw, u32 changed) @@ -2381,6 +2424,8 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif, u32 value = 0; int ret = 0; + lockdep_assert_held(&ar->conf_mutex); + if 
(arvif->vdev_type != WMI_VDEV_TYPE_STA) return 0; @@ -2576,6 +2621,8 @@ static void ath10k_set_rts_iter(void *data, u8 *mac, struct ieee80211_vif *vif) struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); u32 rts = ar_iter->ar->hw->wiphy->rts_threshold; + lockdep_assert_held(&arvif->ar->conf_mutex); + rts = min_t(u32, rts, ATH10K_RTS_MAX); ar_iter->ret = ath10k_wmi_vdev_set_param(ar_iter->ar, arvif->vdev_id, @@ -2614,6 +2661,8 @@ static void ath10k_set_frag_iter(void *data, u8 *mac, struct ieee80211_vif *vif) u32 frag = ar_iter->ar->hw->wiphy->frag_threshold; int ret; + lockdep_assert_held(&arvif->ar->conf_mutex); + frag = clamp_t(u32, frag, ATH10K_FRAGMT_THRESHOLD_MIN, ATH10K_FRAGMT_THRESHOLD_MAX); @@ -2659,6 +2708,8 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop) if (drop) return; + mutex_lock(&ar->conf_mutex); + ret = wait_event_timeout(ar->htt.empty_tx_wq, ({ bool empty; spin_lock_bh(&ar->htt.tx_lock); @@ -2669,6 +2720,8 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop) }), ATH10K_FLUSH_TIMEOUT_HZ); if (ret <= 0) ath10k_warn("tx not flushed\n"); + + mutex_unlock(&ar->conf_mutex); } /* TODO: Implement this function properly -- cgit v0.10.2 From adb8c9b77cd182b1b3f2b145b8db4043623e3745 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Fri, 5 Jul 2013 16:15:16 +0300 Subject: ath10k: abort scan properly if wmi_scan_stop fails Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 278c968..5082503 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -1590,6 +1590,10 @@ static int ath10k_abort_scan(struct ath10k *ar) ret = ath10k_wmi_stop_scan(ar, &arg); if (ret) { ath10k_warn("could not submit wmi stop scan (%d)\n", ret); + spin_lock_bh(&ar->data_lock); + ar->scan.in_progress = false; + ath10k_offchan_tx_purge(ar); + spin_unlock_bh(&ar->data_lock); return -EIO; } -- cgit v0.10.2 From e0c508ab099bda4629768107d14bbe9afa5c6705 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Fri, 5 Jul 2013 16:15:17 +0300 Subject: ath10k: add missing debug prints Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 681e941..b7e7e45 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -1748,6 +1748,9 @@ int ath10k_wmi_vdev_install_key(struct ath10k *ar, if (arg->key_data) memcpy(cmd->key_data, arg->key_data, arg->key_len); + ath10k_dbg(ATH10K_DBG_WMI, + "wmi vdev install key idx %d cipher %d len %d\n", + arg->key_idx, arg->key_cipher, arg->key_len); return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_INSTALL_KEY_CMDID); } @@ -2011,6 +2014,9 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar, cmd->peer_vht_rates.tx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set); + ath10k_dbg(ATH10K_DBG_WMI, + "wmi peer assoc vdev %d addr %pM\n", + arg->vdev_id, arg->addr); return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_ASSOC_CMDID); } -- cgit v0.10.2 From 8c5c53682f0da87b91ff87d060a5d92df524c13d Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Tue, 16 Jul 2013 09:38:50 +0200 Subject: ath10k: decouple pci start/stop logic Split logic that prepares the device for BMI phase/cleans up related resources. This is necessary for ath10k to be able to restart hw on the fly without reloading the module. 
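A lifecycle sketch of how the new ops pair with the existing ones, pieced together from the hif.h comments in the hunks below; the wrapper function name here is made up for illustration and the exact call sites differ per code path:

    static int ath10k_hif_lifecycle_sketch(struct ath10k *ar)
    {
        int ret;

        ret = ath10k_hif_power_up(ar);    /* reset target, enter BMI mode */
        if (ret)
            return ret;

        /* ... BMI firmware download happens here ... */

        ret = ath10k_hif_start(ar);       /* leave BMI, start regular HTC traffic */
        if (ret) {
            ath10k_hif_power_down(ar);
            return ret;
        }

        /* ... normal operation ... */

        ath10k_hif_stop(ar);              /* undo start(); does not revert to BMI */
        ath10k_hif_power_down(ar);        /* undo power_up(), release resources */
        return 0;
    }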
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h index 010e265..00d2940 100644 --- a/drivers/net/wireless/ath/ath10k/hif.h +++ b/drivers/net/wireless/ath/ath10k/hif.h @@ -46,8 +46,11 @@ struct ath10k_hif_ops { void *request, u32 request_len, void *response, u32 *response_len); + /* Post BMI phase, after FW is loaded. Starts regular operation */ int (*start)(struct ath10k *ar); + /* Clean up what start() did. This does not revert to BMI phase. If + * desired so, call power_down() and power_up() */ void (*stop)(struct ath10k *ar); int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id, @@ -70,6 +73,13 @@ struct ath10k_hif_ops { struct ath10k_hif_cb *callbacks); u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id); + + /* Power up the device and enter BMI transfer mode for FW download */ + int (*power_up)(struct ath10k *ar); + + /* Power down the device and free up resources. stop() must be called + * before this if start() was called earlier */ + void (*power_down)(struct ath10k *ar); }; @@ -134,4 +144,14 @@ static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar, return ar->hif.ops->get_free_queue_number(ar, pipe_id); } +static inline int ath10k_hif_power_up(struct ath10k *ar) +{ + return ar->hif.ops->power_up(ar); +} + +static inline void ath10k_hif_power_down(struct ath10k *ar) +{ + ar->hif.ops->power_down(ar); +} + #endif /* _HIF_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 7da0983..c15b0c5 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -54,6 +54,8 @@ static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info, int num); static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info); static void ath10k_pci_stop_ce(struct ath10k *ar); +static void ath10k_pci_device_reset(struct ath10k *ar); +static int ath10k_pci_reset_target(struct ath10k *ar); static const struct ce_attr host_ce_config_wlan[] = { /* host->target HTC control and raw streams */ @@ -1734,6 +1736,66 @@ static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar) ath10k_pci_sleep(ar); } +static int ath10k_pci_hif_power_up(struct ath10k *ar) +{ + int ret; + + /* + * Bring the target up cleanly. + * + * The target may be in an undefined state with an AUX-powered Target + * and a Host in WoW mode. If the Host crashes, loses power, or is + * restarted (without unloading the driver) then the Target is left + * (aux) powered and running. On a subsequent driver load, the Target + * is in an unexpected state. We try to catch that here in order to + * reset the Target and retry the probe. 
+ */ + ath10k_pci_device_reset(ar); + + ret = ath10k_pci_reset_target(ar); + if (ret) + goto err; + + if (ath10k_target_ps) { + ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save enabled\n"); + } else { + /* Force AWAKE forever */ + ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save disabled\n"); + ath10k_do_pci_wake(ar); + } + + ret = ath10k_pci_ce_init(ar); + if (ret) + goto err_ps; + + ret = ath10k_pci_init_config(ar); + if (ret) + goto err_ce; + + ret = ath10k_pci_wake_target_cpu(ar); + if (ret) { + ath10k_err("could not wake up target CPU (%d)\n", ret); + goto err_ce; + } + + return 0; + +err_ce: + ath10k_pci_ce_deinit(ar); +err_ps: + if (!ath10k_target_ps) + ath10k_do_pci_sleep(ar); +err: + return ret; +} + +static void ath10k_pci_hif_power_down(struct ath10k *ar) +{ + ath10k_pci_ce_deinit(ar); + if (!ath10k_target_ps) + ath10k_do_pci_sleep(ar); +} + static const struct ath10k_hif_ops ath10k_pci_hif_ops = { .send_head = ath10k_pci_hif_send_head, .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg, @@ -1744,6 +1806,8 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = { .send_complete_check = ath10k_pci_hif_send_complete_check, .set_callbacks = ath10k_pci_hif_set_callbacks, .get_free_queue_number = ath10k_pci_hif_get_free_queue_number, + .power_up = ath10k_pci_hif_power_up, + .power_down = ath10k_pci_hif_power_down, }; static void ath10k_pci_ce_tasklet(unsigned long ptr) @@ -2245,54 +2309,22 @@ static int ath10k_pci_probe(struct pci_dev *pdev, goto err_iomap; } - /* - * Bring the target up cleanly. - * - * The target may be in an undefined state with an AUX-powered Target - * and a Host in WoW mode. If the Host crashes, loses power, or is - * restarted (without unloading the driver) then the Target is left - * (aux) powered and running. On a subsequent driver load, the Target - * is in an unexpected state. We try to catch that here in order to - * reset the Target and retry the probe. - */ - ath10k_pci_device_reset(ar); - - ret = ath10k_pci_reset_target(ar); - if (ret) - goto err_intr; - - if (ath10k_target_ps) { - ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save enabled\n"); - } else { - /* Force AWAKE forever */ - ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save disabled\n"); - ath10k_do_pci_wake(ar); - } - - ret = ath10k_pci_ce_init(ar); - if (ret) - goto err_intr; - - ret = ath10k_pci_init_config(ar); - if (ret) - goto err_ce; - - ret = ath10k_pci_wake_target_cpu(ar); + ret = ath10k_pci_hif_power_up(ar); if (ret) { - ath10k_err("could not wake up target CPU (%d)\n", ret); - goto err_ce; + ath10k_err("could not start pci hif (%d)\n", ret); + goto err_intr; } ret = ath10k_core_register(ar); if (ret) { ath10k_err("could not register driver core (%d)\n", ret); - goto err_ce; + goto err_hif; } return 0; -err_ce: - ath10k_pci_ce_deinit(ar); +err_hif: + ath10k_pci_hif_power_down(ar); err_intr: ath10k_pci_stop_intr(ar); err_iomap: @@ -2331,7 +2363,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev) tasklet_kill(&ar_pci->msi_fw_err); ath10k_core_unregister(ar); - ath10k_pci_ce_deinit(ar); + ath10k_pci_hif_power_down(ar); ath10k_pci_stop_intr(ar); pci_set_drvdata(pdev, NULL); -- cgit v0.10.2 From dd30a36e11a1315751c668832cbaa2c42f9e9002 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Tue, 16 Jul 2013 09:38:51 +0200 Subject: ath10k: decouple core start/stop logic Enables code reuse for proper hw reconfiguration that is in turn required for proper suspend/hibernation/wowlan support. 
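The practical effect, as an illustrative reading rather than patch text: register/unregister keep owning the mac80211 and debugfs lifetime, while the start/stop pair can later be reused on its own to bring WMI/HTC/HTT back up against a freshly booted target, along the lines of:

    /* hypothetical recovery helper; only ath10k_core_stop() and
     * ath10k_core_start() come from the patch itself */
    static int ath10k_core_restart_sketch(struct ath10k *ar)
    {
        ath10k_core_stop(ar);
        /* ... power-cycle the target and reload firmware here ... */
        return ath10k_core_start(ar);
    }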
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index f1312fa..dcddae4 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -502,8 +502,7 @@ void ath10k_core_destroy(struct ath10k *ar) } EXPORT_SYMBOL(ath10k_core_destroy); - -int ath10k_core_register(struct ath10k *ar) +int ath10k_core_start(struct ath10k *ar) { struct bmi_target_info target_info; int status; @@ -589,9 +588,36 @@ int ath10k_core_register(struct ath10k *ar) if (status) goto err_disconnect_htc; + return 0; + +err_disconnect_htc: + ath10k_htc_stop(&ar->htc); +err_htt_detach: + ath10k_htt_detach(&ar->htt); +err_wmi_detach: + ath10k_wmi_detach(ar); +err: + return status; +} + +void ath10k_core_stop(struct ath10k *ar) +{ + ath10k_htc_stop(&ar->htc); + ath10k_htt_detach(&ar->htt); + ath10k_wmi_detach(ar); +} + +int ath10k_core_register(struct ath10k *ar) +{ + int status; + + status = ath10k_core_start(ar); + if (status) + goto err; + status = ath10k_mac_register(ar); if (status) - goto err_disconnect_htc; + goto err_core_stop; status = ath10k_debug_create(ar); if (status) { @@ -603,12 +629,8 @@ int ath10k_core_register(struct ath10k *ar) err_unregister_mac: ath10k_mac_unregister(ar); -err_disconnect_htc: - ath10k_htc_stop(&ar->htc); -err_htt_detach: - ath10k_htt_detach(&ar->htt); -err_wmi_detach: - ath10k_wmi_detach(ar); +err_core_stop: + ath10k_core_stop(ar); err: return status; } @@ -620,9 +642,7 @@ void ath10k_core_unregister(struct ath10k *ar) * Otherwise we will fail to submit commands to FW and mac80211 will be * unhappy about callback failures. */ ath10k_mac_unregister(ar); - ath10k_htc_stop(&ar->htc); - ath10k_htt_detach(&ar->htt); - ath10k_wmi_detach(ar); + ath10k_core_stop(ar); } EXPORT_SYMBOL(ath10k_core_unregister); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 2f7065c..5a0b2ce 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -353,6 +353,8 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev, const struct ath10k_hif_ops *hif_ops); void ath10k_core_destroy(struct ath10k *ar); +int ath10k_core_start(struct ath10k *ar); +void ath10k_core_stop(struct ath10k *ar); int ath10k_core_register(struct ath10k *ar); void ath10k_core_unregister(struct ath10k *ar); -- cgit v0.10.2 From f7843d7f1ac57fb388d5814cdcbd309fa18214be Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Tue, 16 Jul 2013 09:38:52 +0200 Subject: ath10k: allow deferred regd update Regulatory domain notification hook can be called regardless of the hw state (i.e. before start mac80211 callback). 
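That is, cfg80211 can deliver a regulatory hint while the device is still powered down, when there is no firmware to accept WMI commands. A minimal sketch of the deferral used here (assuming the ar->state field added in this series): the notifier only applies the hint to the ath_common regulatory state, and the actual WMI regdomain update is replayed from the start() hook once firmware is running:

	/* in the reg notifier */
	mutex_lock(&ar->conf_mutex);
	if (ar->state == ATH10K_STATE_ON)
		ath10k_regd_update(ar);	/* device is up, push to target now */
	mutex_unlock(&ar->conf_mutex);	/* otherwise ath10k_start() catches up later */
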
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 5a0b2ce..d5b2533 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -247,6 +247,11 @@ struct ath10k_debug { struct completion event_stats_compl; }; +enum ath10k_state { + ATH10K_STATE_OFF = 0, + ATH10K_STATE_ON, +}; + struct ath10k { struct ath_common ath_common; struct ieee80211_hw *hw; @@ -344,6 +349,8 @@ struct ath10k { struct completion offchan_tx_completed; struct sk_buff *offchan_tx_skb; + enum ath10k_state state; + #ifdef CONFIG_ATH10K_DEBUGFS struct ath10k_debug debug; #endif diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 5082503..4627c16 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -1310,23 +1310,19 @@ static int ath10k_update_channel_list(struct ath10k *ar) return ret; } -static void ath10k_reg_notifier(struct wiphy *wiphy, - struct regulatory_request *request) +static void ath10k_regd_update(struct ath10k *ar) { - struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct reg_dmn_pair_mapping *regpair; - struct ath10k *ar = hw->priv; int ret; - mutex_lock(&ar->conf_mutex); - - ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory); + lockdep_assert_held(&ar->conf_mutex); ret = ath10k_update_channel_list(ar); if (ret) ath10k_warn("could not update channel list (%d)\n", ret); regpair = ar->ath_common.regulatory.regpair; + /* Target allows setting up per-band regdomain but ath_common provides * a combined one only */ ret = ath10k_wmi_pdev_set_regdomain(ar, @@ -1337,7 +1333,19 @@ static void ath10k_reg_notifier(struct wiphy *wiphy, regpair->reg_5ghz_ctl); if (ret) ath10k_warn("could not set pdev regdomain (%d)\n", ret); +} +static void ath10k_reg_notifier(struct wiphy *wiphy, + struct regulatory_request *request) +{ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct ath10k *ar = hw->priv; + + ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory); + + mutex_lock(&ar->conf_mutex); + if (ar->state == ATH10K_STATE_ON) + ath10k_regd_update(ar); mutex_unlock(&ar->conf_mutex); } @@ -1732,6 +1740,9 @@ static int ath10k_start(struct ieee80211_hw *hw) ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n", ret); + ar->state = ATH10K_STATE_ON; + ath10k_regd_update(ar); + mutex_unlock(&ar->conf_mutex); return 0; } @@ -1742,6 +1753,7 @@ static void ath10k_stop(struct ieee80211_hw *hw) mutex_lock(&ar->conf_mutex); ath10k_offchan_tx_purge(ar); + ar->state = ATH10K_STATE_OFF; mutex_unlock(&ar->conf_mutex); cancel_work_sync(&ar->offchan_tx_work); -- cgit v0.10.2 From 64d151d47030d0d73d82bb6fa7bfe1e29385ed43 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Tue, 16 Jul 2013 09:38:53 +0200 Subject: ath10k: reset BMI state upon init This is necessary if we want to be able to restart hw on-the-fly. 
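The reason (hedged, based on how the BMI helpers in this driver behave): once ath10k_bmi_done() has been issued, ar->bmi.done_sent stays set and further BMI commands are refused by a guard along these lines (message illustrative):

	/* guard used by the BMI command helpers (illustrative) */
	if (ar->bmi.done_sent) {
		ath10k_warn("BMI communication already finished\n");
		return -EBUSY;
	}

so a second power_up/core_start cycle could not download firmware unless ath10k_bmi_start() clears the flag first.
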
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c index aeae029..744da6d 100644 --- a/drivers/net/wireless/ath/ath10k/bmi.c +++ b/drivers/net/wireless/ath/ath10k/bmi.c @@ -20,6 +20,12 @@ #include "debug.h" #include "htc.h" +void ath10k_bmi_start(struct ath10k *ar) +{ + ath10k_dbg(ATH10K_DBG_CORE, "BMI started\n"); + ar->bmi.done_sent = false; +} + int ath10k_bmi_done(struct ath10k *ar) { struct bmi_cmd cmd; diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h index 32c56aa..8d81ce1 100644 --- a/drivers/net/wireless/ath/ath10k/bmi.h +++ b/drivers/net/wireless/ath/ath10k/bmi.h @@ -184,6 +184,7 @@ struct bmi_target_info { #define BMI_CE_NUM_TO_TARG 0 #define BMI_CE_NUM_TO_HOST 1 +void ath10k_bmi_start(struct ath10k *ar); int ath10k_bmi_done(struct ath10k *ar); int ath10k_bmi_get_target_info(struct ath10k *ar, struct bmi_target_info *target_info); diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index dcddae4..3d75c6a 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -507,6 +507,8 @@ int ath10k_core_start(struct ath10k *ar) struct bmi_target_info target_info; int status; + ath10k_bmi_start(ar); + memset(&target_info, 0, sizeof(target_info)); status = ath10k_bmi_get_target_info(ar, &target_info); if (status) -- cgit v0.10.2 From 8cd13cad1caf94ba66f626a94887b795fe23f939 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Tue, 16 Jul 2013 09:38:54 +0200 Subject: ath10k: decouple suspend code Split up fw-related and hw-related suspension code. Although we don't advertise WoW support to mac80211 yet it's useful to keep the code in suspend/resume hooks. At this point there's no need to keep pci pm ops. In case of WoW mac80211 calls ath10k_suspend() which should take care of entering low-power mode. In case WoW is not available mac80211 will go through regular interface teradown and use start/stop. 
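For reference, the ordering implemented by the new mac80211 suspend hook (simplified from the code below):

	ret = ath10k_wmi_pdev_suspend_target(ar);	/* 1. quiesce the firmware */
	if (ret)
		return 1;

	wait_event_interruptible_timeout(ar->event_queue,
					 ar->is_target_paused == true, 1 * HZ);

	ret = ath10k_hif_suspend(ar);			/* 2. put the PCI side to sleep */

Resume runs in the opposite order: ath10k_hif_resume() first, then ath10k_wmi_pdev_resume_target().
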
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 3d75c6a..01c1d82 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -648,34 +648,6 @@ void ath10k_core_unregister(struct ath10k *ar) } EXPORT_SYMBOL(ath10k_core_unregister); -int ath10k_core_target_suspend(struct ath10k *ar) -{ - int ret; - - ath10k_dbg(ATH10K_DBG_CORE, "%s: called", __func__); - - ret = ath10k_wmi_pdev_suspend_target(ar); - if (ret) - ath10k_warn("could not suspend target (%d)\n", ret); - - return ret; -} -EXPORT_SYMBOL(ath10k_core_target_suspend); - -int ath10k_core_target_resume(struct ath10k *ar) -{ - int ret; - - ath10k_dbg(ATH10K_DBG_CORE, "%s: called", __func__); - - ret = ath10k_wmi_pdev_resume_target(ar); - if (ret) - ath10k_warn("could not resume target (%d)\n", ret); - - return ret; -} -EXPORT_SYMBOL(ath10k_core_target_resume); - MODULE_AUTHOR("Qualcomm Atheros"); MODULE_DESCRIPTION("Core module for QCA988X PCIe devices."); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index d5b2533..5c94ea0 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -365,7 +365,4 @@ void ath10k_core_stop(struct ath10k *ar); int ath10k_core_register(struct ath10k *ar); void ath10k_core_unregister(struct ath10k *ar); -int ath10k_core_target_suspend(struct ath10k *ar); -int ath10k_core_target_resume(struct ath10k *ar); - #endif /* _CORE_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h index 00d2940..dcdea68 100644 --- a/drivers/net/wireless/ath/ath10k/hif.h +++ b/drivers/net/wireless/ath/ath10k/hif.h @@ -80,6 +80,9 @@ struct ath10k_hif_ops { /* Power down the device and free up resources. 
stop() must be called * before this if start() was called earlier */ void (*power_down)(struct ath10k *ar); + + int (*suspend)(struct ath10k *ar); + int (*resume)(struct ath10k *ar); }; @@ -154,4 +157,20 @@ static inline void ath10k_hif_power_down(struct ath10k *ar) ar->hif.ops->power_down(ar); } +static inline int ath10k_hif_suspend(struct ath10k *ar) +{ + if (!ar->hif.ops->suspend) + return -EOPNOTSUPP; + + return ar->hif.ops->suspend(ar); +} + +static inline int ath10k_hif_resume(struct ath10k *ar) +{ + if (!ar->hif.ops->resume) + return -EOPNOTSUPP; + + return ar->hif.ops->resume(ar); +} + #endif /* _HIF_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 4627c16..febba7d 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -20,6 +20,7 @@ #include #include +#include "hif.h" #include "core.h" #include "debug.h" #include "wmi.h" @@ -2749,6 +2750,67 @@ static int ath10k_tx_last_beacon(struct ieee80211_hw *hw) return 1; } +#ifdef CONFIG_PM +static int ath10k_suspend(struct ieee80211_hw *hw, + struct cfg80211_wowlan *wowlan) +{ + struct ath10k *ar = hw->priv; + int ret; + + ar->is_target_paused = false; + + ret = ath10k_wmi_pdev_suspend_target(ar); + if (ret) { + ath10k_warn("could not suspend target (%d)\n", ret); + return 1; + } + + ret = wait_event_interruptible_timeout(ar->event_queue, + ar->is_target_paused == true, + 1 * HZ); + if (ret < 0) { + ath10k_warn("suspend interrupted (%d)\n", ret); + goto resume; + } else if (ret == 0) { + ath10k_warn("suspend timed out - target pause event never came\n"); + goto resume; + } + + ret = ath10k_hif_suspend(ar); + if (ret) { + ath10k_warn("could not suspend hif (%d)\n", ret); + goto resume; + } + + return 0; +resume: + ret = ath10k_wmi_pdev_resume_target(ar); + if (ret) + ath10k_warn("could not resume target (%d)\n", ret); + return 1; +} + +static int ath10k_resume(struct ieee80211_hw *hw) +{ + struct ath10k *ar = hw->priv; + int ret; + + ret = ath10k_hif_resume(ar); + if (ret) { + ath10k_warn("could not resume hif (%d)\n", ret); + return 1; + } + + ret = ath10k_wmi_pdev_resume_target(ar); + if (ret) { + ath10k_warn("could not resume target (%d)\n", ret); + return 1; + } + + return 0; +} +#endif + static const struct ieee80211_ops ath10k_ops = { .tx = ath10k_tx, .start = ath10k_start, @@ -2769,6 +2831,10 @@ static const struct ieee80211_ops ath10k_ops = { .set_frag_threshold = ath10k_set_frag_threshold, .flush = ath10k_flush, .tx_last_beacon = ath10k_tx_last_beacon, +#ifdef CONFIG_PM + .suspend = ath10k_suspend, + .resume = ath10k_resume, +#endif }; #define RATETAB_ENT(_rate, _rateid, _flags) { \ diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index c15b0c5..75f5427 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -1796,6 +1796,55 @@ static void ath10k_pci_hif_power_down(struct ath10k *ar) ath10k_do_pci_sleep(ar); } +#ifdef CONFIG_PM + +#define ATH10K_PCI_PM_CONTROL 0x44 + +static int ath10k_pci_hif_suspend(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct pci_dev *pdev = ar_pci->pdev; + u32 val; + + pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val); + + if ((val & 0x000000ff) != 0x3) { + pci_save_state(pdev); + pci_disable_device(pdev); + pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL, + (val & 0xffffff00) | 0x03); + } + + return 0; +} + +static int ath10k_pci_hif_resume(struct ath10k *ar) +{ + struct ath10k_pci 
*ar_pci = ath10k_pci_priv(ar); + struct pci_dev *pdev = ar_pci->pdev; + u32 val; + + pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val); + + if ((val & 0x000000ff) != 0) { + pci_restore_state(pdev); + pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL, + val & 0xffffff00); + /* + * Suspend/Resume resets the PCI configuration space, + * so we have to re-disable the RETRY_TIMEOUT register (0x41) + * to keep PCI Tx retries from interfering with C3 CPU state + */ + pci_read_config_dword(pdev, 0x40, &val); + + if ((val & 0x0000ff00) != 0) + pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); + } + + return 0; +} +#endif + static const struct ath10k_hif_ops ath10k_pci_hif_ops = { .send_head = ath10k_pci_hif_send_head, .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg, @@ -1808,6 +1857,10 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = { .get_free_queue_number = ath10k_pci_hif_get_free_queue_number, .power_up = ath10k_pci_hif_power_up, .power_down = ath10k_pci_hif_power_down, +#ifdef CONFIG_PM + .suspend = ath10k_pci_hif_suspend, + .resume = ath10k_pci_hif_resume, +#endif }; static void ath10k_pci_ce_tasklet(unsigned long ptr) @@ -2376,128 +2429,6 @@ static void ath10k_pci_remove(struct pci_dev *pdev) kfree(ar_pci); } -#if defined(CONFIG_PM_SLEEP) - -#define ATH10K_PCI_PM_CONTROL 0x44 - -static int ath10k_pci_suspend(struct device *device) -{ - struct pci_dev *pdev = to_pci_dev(device); - struct ath10k *ar = pci_get_drvdata(pdev); - struct ath10k_pci *ar_pci; - u32 val; - int ret, retval; - - ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); - - if (!ar) - return -ENODEV; - - ar_pci = ath10k_pci_priv(ar); - if (!ar_pci) - return -ENODEV; - - if (ath10k_core_target_suspend(ar)) - return -EBUSY; - - ret = wait_event_interruptible_timeout(ar->event_queue, - ar->is_target_paused == true, - 1 * HZ); - if (ret < 0) { - ath10k_warn("suspend interrupted (%d)\n", ret); - retval = ret; - goto resume; - } else if (ret == 0) { - ath10k_warn("suspend timed out - target pause event never came\n"); - retval = EIO; - goto resume; - } - - /* - * reset is_target_paused and host can check that in next time, - * or it will always be TRUE and host just skip the waiting - * condition, it causes target assert due to host already - * suspend - */ - ar->is_target_paused = false; - - pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val); - - if ((val & 0x000000ff) != 0x3) { - pci_save_state(pdev); - pci_disable_device(pdev); - pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL, - (val & 0xffffff00) | 0x03); - } - - return 0; -resume: - ret = ath10k_core_target_resume(ar); - if (ret) - ath10k_warn("could not resume (%d)\n", ret); - - return retval; -} - -static int ath10k_pci_resume(struct device *device) -{ - struct pci_dev *pdev = to_pci_dev(device); - struct ath10k *ar = pci_get_drvdata(pdev); - struct ath10k_pci *ar_pci; - int ret; - u32 val; - - ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); - - if (!ar) - return -ENODEV; - ar_pci = ath10k_pci_priv(ar); - - if (!ar_pci) - return -ENODEV; - - ret = pci_enable_device(pdev); - if (ret) { - ath10k_warn("cannot enable PCI device: %d\n", ret); - return ret; - } - - pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val); - - if ((val & 0x000000ff) != 0) { - pci_restore_state(pdev); - pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL, - val & 0xffffff00); - /* - * Suspend/Resume resets the PCI configuration space, - * so we have to re-disable the RETRY_TIMEOUT register (0x41) - * to keep PCI Tx retries from interfering with C3 CPU state - */ - 
pci_read_config_dword(pdev, 0x40, &val); - - if ((val & 0x0000ff00) != 0) - pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); - } - - ret = ath10k_core_target_resume(ar); - if (ret) - ath10k_warn("target resume failed: %d\n", ret); - - return ret; -} - -static SIMPLE_DEV_PM_OPS(ath10k_dev_pm_ops, - ath10k_pci_suspend, - ath10k_pci_resume); - -#define ATH10K_PCI_PM_OPS (&ath10k_dev_pm_ops) - -#else - -#define ATH10K_PCI_PM_OPS NULL - -#endif /* CONFIG_PM_SLEEP */ - MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table); static struct pci_driver ath10k_pci_driver = { @@ -2505,7 +2436,6 @@ static struct pci_driver ath10k_pci_driver = { .id_table = ath10k_pci_id_table, .probe = ath10k_pci_probe, .remove = ath10k_pci_remove, - .driver.pm = ATH10K_PCI_PM_OPS, }; static int __init ath10k_pci_init(void) -- cgit v0.10.2 From 1a1b8a889d1fad1a0b48c07c26b894085fcc692c Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Tue, 16 Jul 2013 09:38:55 +0200 Subject: ath10k: move free_vdev_map initialization This is necessary for hw reconfiguration to work. Since mac80211 is not calling remove_interface() is such case we must reset free_vdev_map. Also use a define instead of a hardcoded value for vdev map initialization. Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 01c1d82..0e4a704 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -458,8 +458,6 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev, ar->hif.priv = hif_priv; ar->hif.ops = hif_ops; - ar->free_vdev_map = 0xFF; /* 8 vdevs */ - init_completion(&ar->scan.started); init_completion(&ar->scan.completed); init_completion(&ar->scan.on_channel); @@ -590,6 +588,8 @@ int ath10k_core_start(struct ath10k *ar) if (status) goto err_disconnect_htc; + ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1; + return 0; err_disconnect_htc: -- cgit v0.10.2 From a96d7745592274077e517173ec2cdac2a22d5b79 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Tue, 16 Jul 2013 09:38:56 +0200 Subject: ath10k: make sure all resources are freed upon ath10k_stop() This is necessary for proper hw reconfiguration and to avoid memory leaks. 
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index febba7d..b9663e9 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -369,6 +369,20 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id) spin_unlock_bh(&ar->data_lock); } +static void ath10k_peer_cleanup_all(struct ath10k *ar) +{ + struct ath10k_peer *peer, *tmp; + + lockdep_assert_held(&ar->conf_mutex); + + spin_lock_bh(&ar->data_lock); + list_for_each_entry_safe(peer, tmp, &ar->peers, list) { + list_del(&peer->list); + kfree(peer); + } + spin_unlock_bh(&ar->data_lock); +} + /************************/ /* Interface management */ /************************/ @@ -1753,7 +1767,18 @@ static void ath10k_stop(struct ieee80211_hw *hw) struct ath10k *ar = hw->priv; mutex_lock(&ar->conf_mutex); + del_timer_sync(&ar->scan.timeout); ath10k_offchan_tx_purge(ar); + ath10k_peer_cleanup_all(ar); + + spin_lock_bh(&ar->data_lock); + if (ar->scan.in_progress) { + del_timer(&ar->scan.timeout); + ar->scan.in_progress = false; + ieee80211_scan_completed(ar->hw, true); + } + spin_unlock_bh(&ar->data_lock); + ar->state = ATH10K_STATE_OFF; mutex_unlock(&ar->conf_mutex); -- cgit v0.10.2 From 818bdd16b229919cfd07447d261154a1343871e1 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Tue, 16 Jul 2013 09:38:57 +0200 Subject: ath10k: defer hw setup to start/stop mac80211 hooks This fixes suspend-to-disk. The hardware is now re-initialized upon freeze/thaw properly. This also makes suspend/resume re-initialize the hardware as WoWLAN support is not done yet. With some little work it should be possible to support hw reconfiguration for hw/fw recovery. HW must be initialized once before registering to mac80211 because FW determinates what hw capabilities can be advertised. Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 0e4a704..1d4c245 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -601,6 +601,7 @@ err_wmi_detach: err: return status; } +EXPORT_SYMBOL(ath10k_core_start); void ath10k_core_stop(struct ath10k *ar) { @@ -608,18 +609,49 @@ void ath10k_core_stop(struct ath10k *ar) ath10k_htt_detach(&ar->htt); ath10k_wmi_detach(ar); } +EXPORT_SYMBOL(ath10k_core_stop); + +/* mac80211 manages fw/hw initialization through start/stop hooks. 
However in + * order to know what hw capabilities should be advertised to mac80211 it is + * necessary to load the firmware (and tear it down immediately since start + * hook will try to init it again) before registering */ +static int ath10k_core_probe_fw(struct ath10k *ar) +{ + int ret; + + ret = ath10k_hif_power_up(ar); + if (ret) { + ath10k_err("could not start pci hif (%d)\n", ret); + return ret; + } + + ret = ath10k_core_start(ar); + if (ret) { + ath10k_err("could not init core (%d)\n", ret); + ath10k_hif_power_down(ar); + return ret; + } + + ath10k_core_stop(ar); + ath10k_hif_power_down(ar); + return 0; +} int ath10k_core_register(struct ath10k *ar) { int status; - status = ath10k_core_start(ar); - if (status) - goto err; + status = ath10k_core_probe_fw(ar); + if (status) { + ath10k_err("could not probe fw (%d)\n", status); + return status; + } status = ath10k_mac_register(ar); - if (status) - goto err_core_stop; + if (status) { + ath10k_err("could not register to mac80211 (%d)\n", status); + return status; + } status = ath10k_debug_create(ar); if (status) { @@ -631,9 +663,6 @@ int ath10k_core_register(struct ath10k *ar) err_unregister_mac: ath10k_mac_unregister(ar); -err_core_stop: - ath10k_core_stop(ar); -err: return status; } EXPORT_SYMBOL(ath10k_core_register); @@ -644,7 +673,6 @@ void ath10k_core_unregister(struct ath10k *ar) * Otherwise we will fail to submit commands to FW and mac80211 will be * unhappy about callback failures. */ ath10k_mac_unregister(ar); - ath10k_core_stop(ar); } EXPORT_SYMBOL(ath10k_core_unregister); diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index b9663e9..2dfd446 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -1738,13 +1738,52 @@ static void ath10k_tx(struct ieee80211_hw *hw, /* * Initialize various parameters with default vaules. 
*/ +static void ath10k_halt(struct ath10k *ar) +{ + lockdep_assert_held(&ar->conf_mutex); + + del_timer_sync(&ar->scan.timeout); + ath10k_offchan_tx_purge(ar); + ath10k_peer_cleanup_all(ar); + ath10k_core_stop(ar); + ath10k_hif_power_down(ar); + + spin_lock_bh(&ar->data_lock); + if (ar->scan.in_progress) { + del_timer(&ar->scan.timeout); + ar->scan.in_progress = false; + ieee80211_scan_completed(ar->hw, true); + } + spin_unlock_bh(&ar->data_lock); +} + static int ath10k_start(struct ieee80211_hw *hw) { struct ath10k *ar = hw->priv; - int ret; + int ret = 0; mutex_lock(&ar->conf_mutex); + if (ar->state != ATH10K_STATE_OFF) { + ret = -EINVAL; + goto exit; + } + + ret = ath10k_hif_power_up(ar); + if (ret) { + ath10k_err("could not init hif (%d)\n", ret); + ar->state = ATH10K_STATE_OFF; + goto exit; + } + + ret = ath10k_core_start(ar); + if (ret) { + ath10k_err("could not init core (%d)\n", ret); + ath10k_hif_power_down(ar); + ar->state = ATH10K_STATE_OFF; + goto exit; + } + ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS, 1); if (ret) ath10k_warn("could not enable WMI_PDEV_PARAM_PMF_QOS (%d)\n", @@ -1755,9 +1794,9 @@ static int ath10k_start(struct ieee80211_hw *hw) ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n", ret); - ar->state = ATH10K_STATE_ON; ath10k_regd_update(ar); +exit: mutex_unlock(&ar->conf_mutex); return 0; } @@ -1767,17 +1806,8 @@ static void ath10k_stop(struct ieee80211_hw *hw) struct ath10k *ar = hw->priv; mutex_lock(&ar->conf_mutex); - del_timer_sync(&ar->scan.timeout); - ath10k_offchan_tx_purge(ar); - ath10k_peer_cleanup_all(ar); - - spin_lock_bh(&ar->data_lock); - if (ar->scan.in_progress) { - del_timer(&ar->scan.timeout); - ar->scan.in_progress = false; - ieee80211_scan_completed(ar->hw, true); - } - spin_unlock_bh(&ar->data_lock); + if (ar->state == ATH10K_STATE_ON) + ath10k_halt(ar); ar->state = ATH10K_STATE_OFF; mutex_unlock(&ar->conf_mutex); diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 75f5427..bfe8561 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -2362,22 +2362,14 @@ static int ath10k_pci_probe(struct pci_dev *pdev, goto err_iomap; } - ret = ath10k_pci_hif_power_up(ar); - if (ret) { - ath10k_err("could not start pci hif (%d)\n", ret); - goto err_intr; - } - ret = ath10k_core_register(ar); if (ret) { ath10k_err("could not register driver core (%d)\n", ret); - goto err_hif; + goto err_intr; } return 0; -err_hif: - ath10k_pci_hif_power_down(ar); err_intr: ath10k_pci_stop_intr(ar); err_iomap: @@ -2416,7 +2408,6 @@ static void ath10k_pci_remove(struct pci_dev *pdev) tasklet_kill(&ar_pci->msi_fw_err); ath10k_core_unregister(ar); - ath10k_pci_hif_power_down(ar); ath10k_pci_stop_intr(ar); pci_set_drvdata(pdev, NULL); -- cgit v0.10.2 From 293850575d0cba068913bcb22fc082286650516c Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Tue, 16 Jul 2013 09:38:58 +0200 Subject: ath10k: store firmware files in memory Different FW versions may provide different functions thus mean different hw capabilities advertised to mac80211. It is safe to swap firmware files on disk during driver/device runtime without worries. 
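A minimal sketch of the caching pattern (simplified; the driver goes through its ath10k_fetch_fw_file() wrapper and also caches board data and OTP, the file name below is only illustrative, and ar->dev is assumed to be the device pointer stored at core create time):

	#include <linux/firmware.h>

	/* probe time: pin the image in memory */
	ret = request_firmware(&ar->firmware, "firmware.bin", ar->dev);
	if (ret)
		return ret;

	/* every (re)start: download from the cached copy */
	ret = ath10k_bmi_fast_download(ar, address,
				       ar->firmware->data, ar->firmware->size);

	/* unregister: drop the reference */
	release_firmware(ar->firmware);
	ar->firmware = NULL;
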
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 1d4c245..c37f79f 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -247,19 +247,11 @@ static int ath10k_push_board_ext_data(struct ath10k *ar, static int ath10k_download_board_data(struct ath10k *ar) { + const struct firmware *fw = ar->board_data; u32 board_data_size = QCA988X_BOARD_DATA_SZ; u32 address; - const struct firmware *fw; int ret; - fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, - ar->hw_params.fw.board); - if (IS_ERR(fw)) { - ath10k_err("could not fetch board data fw file (%ld)\n", - PTR_ERR(fw)); - return PTR_ERR(fw); - } - ret = ath10k_push_board_ext_data(ar, fw); if (ret) { ath10k_err("could not push board ext data (%d)\n", ret); @@ -286,32 +278,20 @@ static int ath10k_download_board_data(struct ath10k *ar) } exit: - release_firmware(fw); return ret; } static int ath10k_download_and_run_otp(struct ath10k *ar) { - const struct firmware *fw; - u32 address; + const struct firmware *fw = ar->otp; + u32 address = ar->hw_params.patch_load_addr; u32 exec_param; int ret; /* OTP is optional */ - if (ar->hw_params.fw.otp == NULL) { - ath10k_info("otp file not defined\n"); - return 0; - } - - address = ar->hw_params.patch_load_addr; - - fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, - ar->hw_params.fw.otp); - if (IS_ERR(fw)) { - ath10k_warn("could not fetch otp (%ld)\n", PTR_ERR(fw)); + if (!ar->otp) return 0; - } ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size); if (ret) { @@ -327,28 +307,17 @@ static int ath10k_download_and_run_otp(struct ath10k *ar) } exit: - release_firmware(fw); return ret; } static int ath10k_download_fw(struct ath10k *ar) { - const struct firmware *fw; + const struct firmware *fw = ar->firmware; u32 address; int ret; - if (ar->hw_params.fw.fw == NULL) - return -EINVAL; - address = ar->hw_params.patch_load_addr; - fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, - ar->hw_params.fw.fw); - if (IS_ERR(fw)) { - ath10k_err("could not fetch fw (%ld)\n", PTR_ERR(fw)); - return PTR_ERR(fw); - } - ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size); if (ret) { ath10k_err("could not write fw (%d)\n", ret); @@ -356,7 +325,74 @@ static int ath10k_download_fw(struct ath10k *ar) } exit: - release_firmware(fw); + return ret; +} + +static void ath10k_core_free_firmware_files(struct ath10k *ar) +{ + if (ar->board_data && !IS_ERR(ar->board_data)) + release_firmware(ar->board_data); + + if (ar->otp && !IS_ERR(ar->otp)) + release_firmware(ar->otp); + + if (ar->firmware && !IS_ERR(ar->firmware)) + release_firmware(ar->firmware); + + ar->board_data = NULL; + ar->otp = NULL; + ar->firmware = NULL; +} + +static int ath10k_core_fetch_firmware_files(struct ath10k *ar) +{ + int ret = 0; + + if (ar->hw_params.fw.fw == NULL) { + ath10k_err("firmware file not defined\n"); + return -EINVAL; + } + + if (ar->hw_params.fw.board == NULL) { + ath10k_err("board data file not defined"); + return -EINVAL; + } + + ar->board_data = ath10k_fetch_fw_file(ar, + ar->hw_params.fw.dir, + ar->hw_params.fw.board); + if (IS_ERR(ar->board_data)) { + ret = PTR_ERR(ar->board_data); + ath10k_err("could not fetch board data (%d)\n", ret); + goto err; + } + + ar->firmware = ath10k_fetch_fw_file(ar, + ar->hw_params.fw.dir, + ar->hw_params.fw.fw); + if (IS_ERR(ar->firmware)) { + ret = PTR_ERR(ar->firmware); + ath10k_err("could not fetch firmware (%d)\n", ret); + goto err; + } + + 
/* OTP may be undefined. If so, don't fetch it at all */ + if (ar->hw_params.fw.otp == NULL) + return 0; + + ar->otp = ath10k_fetch_fw_file(ar, + ar->hw_params.fw.dir, + ar->hw_params.fw.otp); + if (IS_ERR(ar->otp)) { + ret = PTR_ERR(ar->otp); + ath10k_err("could not fetch otp (%d)\n", ret); + goto err; + } + + return 0; + +err: + ath10k_core_free_firmware_files(ar); return ret; } @@ -502,23 +538,10 @@ EXPORT_SYMBOL(ath10k_core_destroy); int ath10k_core_start(struct ath10k *ar) { - struct bmi_target_info target_info; int status; ath10k_bmi_start(ar); - memset(&target_info, 0, sizeof(target_info)); - status = ath10k_bmi_get_target_info(ar, &target_info); - if (status) - goto err; - - ar->target_version = target_info.version; - ar->hw->wiphy->hw_version = target_info.version; - - status = ath10k_init_hw_params(ar); - if (status) - goto err; - if (ath10k_init_configure_target(ar)) { status = -EINVAL; goto err; @@ -617,7 +640,8 @@ EXPORT_SYMBOL(ath10k_core_stop); * hook will try to init it again) before registering */ static int ath10k_core_probe_fw(struct ath10k *ar) { - int ret; + struct bmi_target_info target_info; + int ret = 0; ret = ath10k_hif_power_up(ar); if (ret) { @@ -625,9 +649,35 @@ static int ath10k_core_probe_fw(struct ath10k *ar) return ret; } + memset(&target_info, 0, sizeof(target_info)); + ret = ath10k_bmi_get_target_info(ar, &target_info); + if (ret) { + ath10k_err("could not get target info (%d)\n", ret); + ath10k_hif_power_down(ar); + return ret; + } + + ar->target_version = target_info.version; + ar->hw->wiphy->hw_version = target_info.version; + + ret = ath10k_init_hw_params(ar); + if (ret) { + ath10k_err("could not get hw params (%d)\n", ret); + ath10k_hif_power_down(ar); + return ret; + } + + ret = ath10k_core_fetch_firmware_files(ar); + if (ret) { + ath10k_err("could not fetch firmware files (%d)\n", ret); + ath10k_hif_power_down(ar); + return ret; + } + ret = ath10k_core_start(ar); if (ret) { ath10k_err("could not init core (%d)\n", ret); + ath10k_core_free_firmware_files(ar); ath10k_hif_power_down(ar); return ret; } @@ -650,7 +700,7 @@ int ath10k_core_register(struct ath10k *ar) status = ath10k_mac_register(ar); if (status) { ath10k_err("could not register to mac80211 (%d)\n", status); - return status; + goto err_release_fw; } status = ath10k_debug_create(ar); @@ -663,6 +713,8 @@ int ath10k_core_register(struct ath10k *ar) err_unregister_mac: ath10k_mac_unregister(ar); +err_release_fw: + ath10k_core_free_firmware_files(ar); return status; } EXPORT_SYMBOL(ath10k_core_register); @@ -673,6 +725,7 @@ void ath10k_core_unregister(struct ath10k *ar) * Otherwise we will fail to submit commands to FW and mac80211 will be * unhappy about callback failures. 
*/ ath10k_mac_unregister(ar); + ath10k_core_free_firmware_files(ar); } EXPORT_SYMBOL(ath10k_core_unregister); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 5c94ea0..413f1c5 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -300,6 +300,10 @@ struct ath10k { } fw; } hw_params; + const struct firmware *board_data; + const struct firmware *otp; + const struct firmware *firmware; + struct { struct completion started; struct completion completed; -- cgit v0.10.2 From 87571bf0b8b27d4a97848ce48de34fa6d3b12db8 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Tue, 16 Jul 2013 09:38:59 +0200 Subject: ath10k: skip fw stats debugfs interface if device is down If the device is not running then there may be no FW at all to send the query to. If the FW is already there it might still trigger a crash if the command is sent before the device is fully initialized. Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 499034b..65279f5 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -161,7 +161,7 @@ void ath10k_debug_read_target_stats(struct ath10k *ar, struct wmi_pdev_stats *ps; int i; - mutex_lock(&ar->conf_mutex); + spin_lock_bh(&ar->data_lock); stats = &ar->debug.target_stats; @@ -259,6 +259,7 @@ void ath10k_debug_read_target_stats(struct ath10k *ar, } } + spin_unlock_bh(&ar->data_lock); mutex_unlock(&ar->conf_mutex); complete(&ar->debug.event_stats_compl); } @@ -268,35 +269,35 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf, { struct ath10k *ar = file->private_data; struct ath10k_target_stats *fw_stats; - char *buf; + char *buf = NULL; unsigned int len = 0, buf_len = 2500; - ssize_t ret_cnt; + ssize_t ret_cnt = 0; long left; int i; int ret; fw_stats = &ar->debug.target_stats; + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_ON) + goto exit; + buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) - return -ENOMEM; + goto exit; ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT); if (ret) { ath10k_warn("could not request stats (%d)\n", ret); - kfree(buf); - return -EIO; + goto exit; } left = wait_for_completion_timeout(&ar->debug.event_stats_compl, 1*HZ); + if (left <= 0) + goto exit; - if (left <= 0) { - kfree(buf); - return -ETIMEDOUT; - } - - mutex_lock(&ar->conf_mutex); - + spin_lock_bh(&ar->data_lock); len += scnprintf(buf + len, buf_len - len, "\n"); len += scnprintf(buf + len, buf_len - len, "%30s\n", "ath10k PDEV stats"); @@ -424,14 +425,15 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf, fw_stats->peer_stat[i].peer_tx_rate); len += scnprintf(buf + len, buf_len - len, "\n"); } + spin_unlock_bh(&ar->data_lock); if (len > buf_len) len = buf_len; ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); +exit: mutex_unlock(&ar->conf_mutex); - kfree(buf); return ret_cnt; } -- cgit v0.10.2 From affd321733eebc92b12cd329505f63e94ae80c93 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Tue, 16 Jul 2013 09:54:35 +0200 Subject: ath10k: implement device recovery Restart the hardware if FW crashes. If FW crashes during recovery we leave the hardware in a "wedged" state to avoid recursive recoveries. When in "wedged" state userspace may bring interfaces down (to issue stop()) and then bring one interface (to issue start()) to reload hardware manually. 
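In practice (interface name illustrative) that manual reload amounts to something like: ip link set wlan0 down && ip link set wlan0 up. Taking the last interface down makes mac80211 invoke the driver's stop() hook, which also moves a wedged device back to the OFF state; bringing an interface up invokes start(), which powers the hardware up and downloads firmware again.
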
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index c37f79f..7226c23 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -476,6 +476,34 @@ static int ath10k_init_hw_params(struct ath10k *ar) return 0; } +static void ath10k_core_restart(struct work_struct *work) +{ + struct ath10k *ar = container_of(work, struct ath10k, restart_work); + + mutex_lock(&ar->conf_mutex); + + switch (ar->state) { + case ATH10K_STATE_ON: + ath10k_halt(ar); + ar->state = ATH10K_STATE_RESTARTING; + ieee80211_restart_hw(ar->hw); + break; + case ATH10K_STATE_OFF: + /* this can happen if driver is being unloaded */ + ath10k_warn("cannot restart a device that hasn't been started\n"); + break; + case ATH10K_STATE_RESTARTING: + case ATH10K_STATE_RESTARTED: + ar->state = ATH10K_STATE_WEDGED; + /* fall through */ + case ATH10K_STATE_WEDGED: + ath10k_warn("device is wedged, will not restart\n"); + break; + } + + mutex_unlock(&ar->conf_mutex); +} + struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev, const struct ath10k_hif_ops *hif_ops) { @@ -519,6 +547,8 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev, init_waitqueue_head(&ar->event_queue); + INIT_WORK(&ar->restart_work, ath10k_core_restart); + return ar; err_wq: diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 413f1c5..9f21ecb 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -250,6 +250,23 @@ struct ath10k_debug { enum ath10k_state { ATH10K_STATE_OFF = 0, ATH10K_STATE_ON, + + /* When doing firmware recovery the device is first powered down. + * mac80211 is supposed to call in to start() hook later on. It is + * however possible that driver unloading and firmware crash overlap. + * mac80211 can wait on conf_mutex in stop() while the device is + * stopped in ath10k_core_restart() work holding conf_mutex. The state + * RESTARTED means that the device is up and mac80211 has started hw + * reconfiguration. Once mac80211 is done with the reconfiguration we + * set the state to STATE_ON in restart_complete(). */ + ATH10K_STATE_RESTARTING, + ATH10K_STATE_RESTARTED, + + /* The device has crashed while restarting hw. This state is like ON + * but commands are blocked in HTC and -ECOMM response is given. This + * prevents completion timeouts and makes the driver more responsive to + * userspace commands. This is also prevents recursive recovery. 
*/ + ATH10K_STATE_WEDGED, }; struct ath10k { @@ -355,6 +372,8 @@ struct ath10k { enum ath10k_state state; + struct work_struct restart_work; + #ifdef CONFIG_ATH10K_DEBUGFS struct ath10k_debug debug; #endif diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c index 7d5a366..72e072c 100644 --- a/drivers/net/wireless/ath/ath10k/htc.c +++ b/drivers/net/wireless/ath/ath10k/htc.c @@ -246,6 +246,9 @@ int ath10k_htc_send(struct ath10k_htc *htc, { struct ath10k_htc_ep *ep = &htc->endpoint[eid]; + if (htc->ar->state == ATH10K_STATE_WEDGED) + return -ECOMM; + if (eid >= ATH10K_HTC_EP_COUNT) { ath10k_warn("Invalid endpoint id: %d\n", eid); return -ENOENT; diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 2dfd446..b1bb318 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -1738,7 +1738,7 @@ static void ath10k_tx(struct ieee80211_hw *hw, /* * Initialize various parameters with default vaules. */ -static void ath10k_halt(struct ath10k *ar) +void ath10k_halt(struct ath10k *ar) { lockdep_assert_held(&ar->conf_mutex); @@ -1764,7 +1764,8 @@ static int ath10k_start(struct ieee80211_hw *hw) mutex_lock(&ar->conf_mutex); - if (ar->state != ATH10K_STATE_OFF) { + if (ar->state != ATH10K_STATE_OFF && + ar->state != ATH10K_STATE_RESTARTING) { ret = -EINVAL; goto exit; } @@ -1784,6 +1785,11 @@ static int ath10k_start(struct ieee80211_hw *hw) goto exit; } + if (ar->state == ATH10K_STATE_OFF) + ar->state = ATH10K_STATE_ON; + else if (ar->state == ATH10K_STATE_RESTARTING) + ar->state = ATH10K_STATE_RESTARTED; + ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS, 1); if (ret) ath10k_warn("could not enable WMI_PDEV_PARAM_PMF_QOS (%d)\n", @@ -1806,22 +1812,47 @@ static void ath10k_stop(struct ieee80211_hw *hw) struct ath10k *ar = hw->priv; mutex_lock(&ar->conf_mutex); - if (ar->state == ATH10K_STATE_ON) + if (ar->state == ATH10K_STATE_ON || + ar->state == ATH10K_STATE_RESTARTED || + ar->state == ATH10K_STATE_WEDGED) ath10k_halt(ar); ar->state = ATH10K_STATE_OFF; mutex_unlock(&ar->conf_mutex); cancel_work_sync(&ar->offchan_tx_work); + cancel_work_sync(&ar->restart_work); } -static int ath10k_config(struct ieee80211_hw *hw, u32 changed) +static void ath10k_config_ps(struct ath10k *ar) { struct ath10k_generic_iter ar_iter; + + lockdep_assert_held(&ar->conf_mutex); + + /* During HW reconfiguration mac80211 reports all interfaces that were + * running until reconfiguration was started. Since FW doesn't have any + * vdevs at this point we must not iterate over this interface list. + * This setting will be updated upon add_interface(). 
*/ + if (ar->state == ATH10K_STATE_RESTARTED) + return; + + memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter)); + ar_iter.ar = ar; + + ieee80211_iterate_active_interfaces_atomic( + ar->hw, IEEE80211_IFACE_ITER_NORMAL, + ath10k_ps_iter, &ar_iter); + + if (ar_iter.ret) + ath10k_warn("failed to set ps config (%d)\n", ar_iter.ret); +} + +static int ath10k_config(struct ieee80211_hw *hw, u32 changed) +{ struct ath10k *ar = hw->priv; struct ieee80211_conf *conf = &hw->conf; int ret = 0; - u32 flags; mutex_lock(&ar->conf_mutex); @@ -1833,18 +1864,8 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed) spin_unlock_bh(&ar->data_lock); } - if (changed & IEEE80211_CONF_CHANGE_PS) { - memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter)); - ar_iter.ar = ar; - flags = IEEE80211_IFACE_ITER_RESUME_ALL; - - ieee80211_iterate_active_interfaces_atomic(hw, - flags, - ath10k_ps_iter, - &ar_iter); - - ret = ar_iter.ret; - } + if (changed & IEEE80211_CONF_CHANGE_PS) + ath10k_config_ps(ar); if (changed & IEEE80211_CONF_CHANGE_MONITOR) { if (conf->flags & IEEE80211_CONF_MONITOR) @@ -1853,6 +1874,7 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed) ret = ath10k_monitor_destroy(ar); } + ath10k_wmi_flush_tx(ar); mutex_unlock(&ar->conf_mutex); return ret; } @@ -2695,6 +2717,13 @@ static void ath10k_set_rts_iter(void *data, u8 *mac, struct ieee80211_vif *vif) lockdep_assert_held(&arvif->ar->conf_mutex); + /* During HW reconfiguration mac80211 reports all interfaces that were + * running until reconfiguration was started. Since FW doesn't have any + * vdevs at this point we must not iterate over this interface list. + * This setting will be updated upon add_interface(). */ + if (ar_iter->ar->state == ATH10K_STATE_RESTARTED) + return; + rts = min_t(u32, rts, ATH10K_RTS_MAX); ar_iter->ret = ath10k_wmi_vdev_set_param(ar_iter->ar, arvif->vdev_id, @@ -2735,6 +2764,13 @@ static void ath10k_set_frag_iter(void *data, u8 *mac, struct ieee80211_vif *vif) lockdep_assert_held(&arvif->ar->conf_mutex); + /* During HW reconfiguration mac80211 reports all interfaces that were + * running until reconfiguration was started. Since FW doesn't have any + * vdevs at this point we must not iterate over this interface list. + * This setting will be updated upon add_interface(). 
*/ + if (ar_iter->ar->state == ATH10K_STATE_RESTARTED) + return; + frag = clamp_t(u32, frag, ATH10K_FRAGMT_THRESHOLD_MIN, ATH10K_FRAGMT_THRESHOLD_MAX); @@ -2773,6 +2809,7 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value) static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop) { struct ath10k *ar = hw->priv; + bool skip; int ret; /* mac80211 doesn't care if we really xmit queued frames or not @@ -2782,17 +2819,26 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop) mutex_lock(&ar->conf_mutex); + if (ar->state == ATH10K_STATE_WEDGED) + goto skip; + ret = wait_event_timeout(ar->htt.empty_tx_wq, ({ bool empty; + spin_lock_bh(&ar->htt.tx_lock); empty = bitmap_empty(ar->htt.used_msdu_ids, ar->htt.max_num_pending_tx); spin_unlock_bh(&ar->htt.tx_lock); - (empty); + + skip = (ar->state == ATH10K_STATE_WEDGED); + + (empty || skip); }), ATH10K_FLUSH_TIMEOUT_HZ); - if (ret <= 0) + + if (ret <= 0 || skip) ath10k_warn("tx not flushed\n"); +skip: mutex_unlock(&ar->conf_mutex); } @@ -2866,6 +2912,22 @@ static int ath10k_resume(struct ieee80211_hw *hw) } #endif +static void ath10k_restart_complete(struct ieee80211_hw *hw) +{ + struct ath10k *ar = hw->priv; + + mutex_lock(&ar->conf_mutex); + + /* If device failed to restart it will be in a different state, e.g. + * ATH10K_STATE_WEDGED */ + if (ar->state == ATH10K_STATE_RESTARTED) { + ath10k_info("device successfully recovered\n"); + ar->state = ATH10K_STATE_ON; + } + + mutex_unlock(&ar->conf_mutex); +} + static const struct ieee80211_ops ath10k_ops = { .tx = ath10k_tx, .start = ath10k_start, @@ -2886,6 +2948,7 @@ static const struct ieee80211_ops ath10k_ops = { .set_frag_threshold = ath10k_set_frag_threshold, .flush = ath10k_flush, .tx_last_beacon = ath10k_tx_last_beacon, + .restart_complete = ath10k_restart_complete, #ifdef CONFIG_PM .suspend = ath10k_suspend, .resume = ath10k_resume, diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h index 27fc92e..6fce9bf 100644 --- a/drivers/net/wireless/ath/ath10k/mac.h +++ b/drivers/net/wireless/ath/ath10k/mac.h @@ -34,6 +34,7 @@ struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id); void ath10k_reset_scan(unsigned long ptr); void ath10k_offchan_tx_purge(struct ath10k *ar); void ath10k_offchan_tx_work(struct work_struct *work); +void ath10k_halt(struct ath10k *ar); static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif) { diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index bfe8561..c71b488 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -720,6 +720,8 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar) reg_dump_values[i + 1], reg_dump_values[i + 2], reg_dump_values[i + 3]); + + ieee80211_queue_work(ar->hw, &ar->restart_work); } static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index b7e7e45..0d25cd7 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -27,6 +27,13 @@ void ath10k_wmi_flush_tx(struct ath10k *ar) { int ret; + lockdep_assert_held(&ar->conf_mutex); + + if (ar->state == ATH10K_STATE_WEDGED) { + ath10k_warn("wmi flush skipped - device is wedged anyway\n"); + return; + } + ret = wait_event_timeout(ar->wmi.wq, atomic_read(&ar->wmi.pending_tx_count) == 0, 5*HZ); -- cgit v0.10.2 From 
9cfbce75c806e151b11a5d2c04b3aea3e920b3c1 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Tue, 16 Jul 2013 09:54:36 +0200 Subject: ath10k: implement fw crash simulation command This can be useful to test FW crash handling. Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 0d25cd7..5e42460 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -2092,3 +2092,22 @@ int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id) ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id); return ath10k_wmi_cmd_send(ar, skb, WMI_REQUEST_STATS_CMDID); } + +int ath10k_wmi_force_fw_hang(struct ath10k *ar, + enum wmi_force_fw_hang_type type, u32 delay_ms) +{ + struct wmi_force_fw_hang_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_force_fw_hang_cmd *)skb->data; + cmd->type = __cpu_to_le32(type); + cmd->delay_ms = __cpu_to_le32(delay_ms); + + ath10k_dbg(ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n", + type, delay_ms); + return ath10k_wmi_cmd_send(ar, skb, WMI_FORCE_FW_HANG_CMDID); +} diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 9555f5a..da3b2bc 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -416,6 +416,7 @@ enum wmi_cmd_id { WMI_PDEV_FTM_INTG_CMDID, WMI_VDEV_SET_KEEPALIVE_CMDID, WMI_VDEV_GET_KEEPALIVE_CMDID, + WMI_FORCE_FW_HANG_CMDID, /* GPIO Configuration */ WMI_GPIO_CONFIG_CMDID = WMI_CMD_GRP(WMI_GRP_GPIO), @@ -2972,6 +2973,22 @@ struct wmi_sta_keepalive_cmd { struct wmi_sta_keepalive_arp_resp arp_resp; } __packed; +enum wmi_force_fw_hang_type { + WMI_FORCE_FW_HANG_ASSERT = 1, + WMI_FORCE_FW_HANG_NO_DETECT, + WMI_FORCE_FW_HANG_CTRL_EP_FULL, + WMI_FORCE_FW_HANG_EMPTY_POINT, + WMI_FORCE_FW_HANG_STACK_OVERFLOW, + WMI_FORCE_FW_HANG_INFINITE_LOOP, +}; + +#define WMI_FORCE_FW_HANG_RANDOM_TIME 0xFFFFFFFF + +struct wmi_force_fw_hang_cmd { + __le32 type; + __le32 delay_ms; +} __packed; + #define ATH10K_RTS_MAX 2347 #define ATH10K_FRAGMT_THRESHOLD_MIN 540 #define ATH10K_FRAGMT_THRESHOLD_MAX 2346 @@ -3048,5 +3065,7 @@ int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg); int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar, const struct wmi_pdev_set_wmm_params_arg *arg); int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id); +int ath10k_wmi_force_fw_hang(struct ath10k *ar, + enum wmi_force_fw_hang_type type, u32 delay_ms); #endif /* _WMI_H_ */ -- cgit v0.10.2 From cf84bd4defe22c7359bd3e4d6978bd88af1f8f90 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Tue, 16 Jul 2013 11:04:54 +0200 Subject: ath10k: fix NULL dereference for injected packets Tx processing functions dereference vif and caused NULL to be dereferenced for injected frames. Don't call these functions at all for injected frames. It doesn't make much sense to do so anyway. 
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index b1bb318..07e5f7d 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -1710,10 +1710,14 @@ static void ath10k_tx(struct ieee80211_hw *hw, tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; } - ath10k_tx_h_qos_workaround(hw, control, skb); - ath10k_tx_h_update_wep_key(skb); - ath10k_tx_h_add_p2p_noa_ie(ar, skb); - ath10k_tx_h_seq_no(skb); + /* it makes no sense to process injected frames like that */ + if (info->control.vif && + info->control.vif->type != NL80211_IFTYPE_MONITOR) { + ath10k_tx_h_qos_workaround(hw, control, skb); + ath10k_tx_h_update_wep_key(skb); + ath10k_tx_h_add_p2p_noa_ie(ar, skb); + ath10k_tx_h_seq_no(skb); + } memset(ATH10K_SKB_CB(skb), 0, sizeof(*ATH10K_SKB_CB(skb))); ATH10K_SKB_CB(skb)->htt.vdev_id = vdev_id; -- cgit v0.10.2 From 278c4a85e6267979ab164c0a8b57447005cf388c Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Mon, 22 Jul 2013 14:08:51 +0200 Subject: ath10k: create debugfs interface to trigger fw crash This can be useful for testing. To perform a forced firmware crash write 'crash' to 'simulate_fw_crash' debugfs file. E.g. echo crash > /sys/kernel/debug/ieee80211/phy1/ath10k/simulate_fw_crash Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 65279f5..3d65594 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -445,6 +445,60 @@ static const struct file_operations fops_fw_stats = { .llseek = default_llseek, }; +static ssize_t ath10k_read_simulate_fw_crash(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + const char buf[] = "To simulate firmware crash write the keyword" + " `crash` to this file.\nThis will force firmware" + " to report a crash to the host system.\n"; + return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); +} + +static ssize_t ath10k_write_simulate_fw_crash(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + char buf[32] = {}; + int ret; + + mutex_lock(&ar->conf_mutex); + + simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); + if (strcmp(buf, "crash") && strcmp(buf, "crash\n")) { + ret = -EINVAL; + goto exit; + } + + if (ar->state != ATH10K_STATE_ON && + ar->state != ATH10K_STATE_RESTARTED) { + ret = -ENETDOWN; + goto exit; + } + + ath10k_info("simulating firmware crash\n"); + + ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0); + if (ret) + ath10k_warn("failed to force fw hang (%d)\n", ret); + + if (ret == 0) + ret = count; + +exit: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static const struct file_operations fops_simulate_fw_crash = { + .read = ath10k_read_simulate_fw_crash, + .write = ath10k_write_simulate_fw_crash, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + int ath10k_debug_create(struct ath10k *ar) { ar->debug.debugfs_phy = debugfs_create_dir("ath10k", @@ -461,6 +515,9 @@ int ath10k_debug_create(struct ath10k *ar) debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar, &fops_wmi_services); + debugfs_create_file("simulate_fw_crash", S_IRUSR, ar->debug.debugfs_phy, + ar, &fops_simulate_fw_crash); + return 0; } #endif /* CONFIG_ATH10K_DEBUGFS */ -- cgit v0.10.2 From 
08fe9b40d055d4ace995a5e1e93c7c17573f17a5 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Mon, 22 Jul 2013 14:13:28 +0200 Subject: ath10k: prevent HTC from being used after stopping It was possible to submit new HTC commands after/while HTC stopped. This led to memory corruption in some rare cases. Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c index 72e072c..47b7752 100644 --- a/drivers/net/wireless/ath/ath10k/htc.c +++ b/drivers/net/wireless/ath/ath10k/htc.c @@ -254,10 +254,14 @@ int ath10k_htc_send(struct ath10k_htc *htc, return -ENOENT; } - skb_push(skb, sizeof(struct ath10k_htc_hdr)); - spin_lock_bh(&htc->tx_lock); + if (htc->stopped) { + spin_unlock_bh(&htc->tx_lock); + return -ESHUTDOWN; + } + __skb_queue_tail(&ep->tx_queue, skb); + skb_push(skb, sizeof(struct ath10k_htc_hdr)); spin_unlock_bh(&htc->tx_lock); queue_work(htc->ar->workqueue, &ep->send_work); @@ -270,23 +274,17 @@ static int ath10k_htc_tx_completion_handler(struct ath10k *ar, { struct ath10k_htc *htc = &ar->htc; struct ath10k_htc_ep *ep = &htc->endpoint[eid]; - bool stopping; ath10k_htc_notify_tx_completion(ep, skb); /* the skb now belongs to the completion handler */ + /* note: when using TX credit flow, the re-checking of queues happens + * when credits flow back from the target. in the non-TX credit case, + * we recheck after the packet completes */ spin_lock_bh(&htc->tx_lock); - stopping = htc->stopping; - spin_unlock_bh(&htc->tx_lock); - - if (!ep->tx_credit_flow_enabled && !stopping) - /* - * note: when using TX credit flow, the re-checking of - * queues happens when credits flow back from the target. - * in the non-TX credit case, we recheck after the packet - * completes - */ + if (!ep->tx_credit_flow_enabled && !htc->stopped) queue_work(ar->workqueue, &ep->send_work); + spin_unlock_bh(&htc->tx_lock); return 0; } @@ -951,7 +949,7 @@ void ath10k_htc_stop(struct ath10k_htc *htc) struct ath10k_htc_ep *ep; spin_lock_bh(&htc->tx_lock); - htc->stopping = true; + htc->stopped = true; spin_unlock_bh(&htc->tx_lock); for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) { @@ -972,6 +970,7 @@ int ath10k_htc_init(struct ath10k *ar) spin_lock_init(&htc->tx_lock); + htc->stopped = false; ath10k_htc_reset_endpoint_states(htc); /* setup HIF layer callbacks */ diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h index 1606c9f..e1dd8c7 100644 --- a/drivers/net/wireless/ath/ath10k/htc.h +++ b/drivers/net/wireless/ath/ath10k/htc.h @@ -335,7 +335,7 @@ struct ath10k_htc { struct ath10k *ar; struct ath10k_htc_ep endpoint[ATH10K_HTC_EP_COUNT]; - /* protects endpoint and stopping fields */ + /* protects endpoint and stopped fields */ spinlock_t tx_lock; struct ath10k_htc_ops htc_ops; @@ -349,7 +349,7 @@ struct ath10k_htc { struct ath10k_htc_svc_tx_credits service_tx_alloc[ATH10K_HTC_EP_COUNT]; int target_credit_size; - bool stopping; + bool stopped; }; int ath10k_htc_init(struct ath10k *ar); -- cgit v0.10.2 From 21bf9112b596cb91e4f63de859ea514f081031fd Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Mon, 22 Jul 2013 14:13:29 +0200 Subject: ath10k: don't reset HTC endpoints unnecessarily Endpoints are re-initialized upon HTC start anyway so there's no need to do that twice in case of restarting HTC (i.e. in case of hardware recovery). 
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c index 47b7752..ef3329e 100644 --- a/drivers/net/wireless/ath/ath10k/htc.c +++ b/drivers/net/wireless/ath/ath10k/htc.c @@ -958,7 +958,6 @@ void ath10k_htc_stop(struct ath10k_htc *htc) } ath10k_hif_stop(htc->ar); - ath10k_htc_reset_endpoint_states(htc); } /* registered target arrival callback from the HIF layer */ -- cgit v0.10.2 From d6015b27f7dd6e167cd90b590dc9ed5dfbc68086 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Mon, 22 Jul 2013 14:13:30 +0200 Subject: ath10k: fix memleak in mac setup In some cases channel arrays were never freed. The patch also unifies error handling in the mac setup function. Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 07e5f7d..6144b3b 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -3225,8 +3225,10 @@ int ath10k_mac_register(struct ath10k *ar) channels = kmemdup(ath10k_2ghz_channels, sizeof(ath10k_2ghz_channels), GFP_KERNEL); - if (!channels) - return -ENOMEM; + if (!channels) { + ret = -ENOMEM; + goto err_free; + } band = &ar->mac.sbands[IEEE80211_BAND_2GHZ]; band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels); @@ -3245,11 +3247,8 @@ int ath10k_mac_register(struct ath10k *ar) sizeof(ath10k_5ghz_channels), GFP_KERNEL); if (!channels) { - if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) { - band = &ar->mac.sbands[IEEE80211_BAND_2GHZ]; - kfree(band->channels); - } - return -ENOMEM; + ret = -ENOMEM; + goto err_free; } band = &ar->mac.sbands[IEEE80211_BAND_5GHZ]; @@ -3313,25 +3312,30 @@ int ath10k_mac_register(struct ath10k *ar) ath10k_reg_notifier); if (ret) { ath10k_err("Regulatory initialization failed\n"); - return ret; + goto err_free; } ret = ieee80211_register_hw(ar->hw); if (ret) { ath10k_err("ieee80211 registration failed: %d\n", ret); - return ret; + goto err_free; } if (!ath_is_world_regd(&ar->ath_common.regulatory)) { ret = regulatory_hint(ar->hw->wiphy, ar->ath_common.regulatory.alpha2); if (ret) - goto exit; + goto err_unregister; } return 0; -exit: + +err_unregister: ieee80211_unregister_hw(ar->hw); +err_free: + kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels); + kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels); + return ret; } -- cgit v0.10.2 From 424121c365aed6ec93bbc8b515548df79c5af61c Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Mon, 22 Jul 2013 14:13:31 +0200 Subject: ath10k: fix rts/fragmentation threshold setup If RTS and fragmentation threshold values are 0xFFFFFFFF they should be considered disabled and no min/max limits must be applied. This fixes some issues with throughput issues, especially with VHT. 
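Concretely, the new ath10k_mac_set_rts()/ath10k_mac_set_frag() helpers are meant to pass the 0xFFFFFFFF "disabled" value through to the firmware untouched and clamp only real thresholds. A minimal sketch of that intent (the helper name below is illustrative and not part of the patch):

    static u32 ath10k_frag_value(u32 value)
    {
            /* 0xFFFFFFFF means "fragmentation disabled": do not clamp it */
            if (value == 0xFFFFFFFF)
                    return value;
            return clamp_t(u32, value, ATH10K_FRAGMT_THRESHOLD_MIN,
                           ATH10K_FRAGMT_THRESHOLD_MAX);
    }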
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 6144b3b..d0a7761 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -332,6 +332,29 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr) return 0; } +static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value) +{ + if (value != 0xFFFFFFFF) + value = min_t(u32, arvif->ar->hw->wiphy->rts_threshold, + ATH10K_RTS_MAX); + + return ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, + WMI_VDEV_PARAM_RTS_THRESHOLD, + value); +} + +static int ath10k_mac_set_frag(struct ath10k_vif *arvif, u32 value) +{ + if (value != 0xFFFFFFFF) + value = clamp_t(u32, arvif->ar->hw->wiphy->frag_threshold, + ATH10K_FRAGMT_THRESHOLD_MIN, + ATH10K_FRAGMT_THRESHOLD_MAX); + + return ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, + WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD, + value); +} + static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr) { int ret; @@ -1897,7 +1920,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); enum wmi_sta_powersave_param param; int ret = 0; - u32 value, rts, frag; + u32 value; int bit; mutex_lock(&ar->conf_mutex); @@ -2000,20 +2023,12 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, ath10k_warn("Failed to set PSPOLL count: %d\n", ret); } - rts = min_t(u32, ar->hw->wiphy->rts_threshold, ATH10K_RTS_MAX); - ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, - WMI_VDEV_PARAM_RTS_THRESHOLD, - rts); + ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold); if (ret) ath10k_warn("failed to set rts threshold for vdev %d (%d)\n", arvif->vdev_id, ret); - frag = clamp_t(u32, ar->hw->wiphy->frag_threshold, - ATH10K_FRAGMT_THRESHOLD_MIN, - ATH10K_FRAGMT_THRESHOLD_MAX); - ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, - WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD, - frag); + ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold); if (ret) ath10k_warn("failed to set frag threshold for vdev %d (%d)\n", arvif->vdev_id, ret); @@ -2728,11 +2743,7 @@ static void ath10k_set_rts_iter(void *data, u8 *mac, struct ieee80211_vif *vif) if (ar_iter->ar->state == ATH10K_STATE_RESTARTED) return; - rts = min_t(u32, rts, ATH10K_RTS_MAX); - - ar_iter->ret = ath10k_wmi_vdev_set_param(ar_iter->ar, arvif->vdev_id, - WMI_VDEV_PARAM_RTS_THRESHOLD, - rts); + ar_iter->ret = ath10k_mac_set_rts(arvif, rts); if (ar_iter->ret) ath10k_warn("Failed to set RTS threshold for VDEV: %d\n", arvif->vdev_id); @@ -2764,7 +2775,6 @@ static void ath10k_set_frag_iter(void *data, u8 *mac, struct ieee80211_vif *vif) struct ath10k_generic_iter *ar_iter = data; struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); u32 frag = ar_iter->ar->hw->wiphy->frag_threshold; - int ret; lockdep_assert_held(&arvif->ar->conf_mutex); @@ -2775,15 +2785,7 @@ static void ath10k_set_frag_iter(void *data, u8 *mac, struct ieee80211_vif *vif) if (ar_iter->ar->state == ATH10K_STATE_RESTARTED) return; - frag = clamp_t(u32, frag, - ATH10K_FRAGMT_THRESHOLD_MIN, - ATH10K_FRAGMT_THRESHOLD_MAX); - - ret = ath10k_wmi_vdev_set_param(ar_iter->ar, arvif->vdev_id, - WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD, - frag); - - ar_iter->ret = ret; + ar_iter->ret = ath10k_mac_set_frag(arvif, frag); if (ar_iter->ret) ath10k_warn("Failed to set frag threshold for VDEV: %d\n", arvif->vdev_id); -- cgit v0.10.2 From 60ff779c4abba37a31bd8624ef45026f7fb1b70c 
Mon Sep 17 00:00:00 2001 From: Andi Shyti Date: Thu, 25 Jul 2013 10:54:24 +0200 Subject: 9p: client: remove unused code and any reference to "cancelled" function This patch reverts commit 80b45261a0b263536b043c5ccfc4ba4fc27c2acc which was implementing a 'cancelled' functionality to notify that a cancelled request will not be replied. This implementation was not used anywhere and therefore removed. Signed-off-by: Andi Shyti Signed-off-by: David S. Miller diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h index d9fa68f..9a36d92 100644 --- a/include/net/9p/transport.h +++ b/include/net/9p/transport.h @@ -40,8 +40,6 @@ * @close: member function to discard a connection on this transport * @request: member function to issue a request to the transport * @cancel: member function to cancel a request (if it hasn't been sent) - * @cancelled: member function to notify that a cancelled request will not - * not receive a reply * * This is the basic API for a transport module which is registered by the * transport module with the 9P core network module and used by the client @@ -60,7 +58,6 @@ struct p9_trans_module { void (*close) (struct p9_client *); int (*request) (struct p9_client *, struct p9_req_t *req); int (*cancel) (struct p9_client *, struct p9_req_t *req); - int (*cancelled)(struct p9_client *, struct p9_req_t *req); int (*zc_request)(struct p9_client *, struct p9_req_t *, char *, char *, int , int, int, int); }; diff --git a/net/9p/client.c b/net/9p/client.c index 8b93cae..ba93bda 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -658,17 +658,12 @@ static int p9_client_flush(struct p9_client *c, struct p9_req_t *oldreq) /* * if we haven't received a response for oldreq, - * remove it from the list, and notify the transport - * layer that the reply will never arrive. + * remove it from the list */ - spin_lock(&c->lock); if (oldreq->status == REQ_STATUS_FLSH) { + spin_lock(&c->lock); list_del(&oldreq->req_list); spin_unlock(&c->lock); - if (c->trans_mod->cancelled) - c->trans_mod->cancelled(c, req); - } else { - spin_unlock(&c->lock); } p9_free_req(c, req); -- cgit v0.10.2 From d68e2d3bd229721ad3ea0f6bec7d3407cf8bf1f9 Mon Sep 17 00:00:00 2001 From: Chris Metcalf Date: Thu, 25 Jul 2013 12:41:15 -0400 Subject: tile: handle 64-bit statistics in tilepro network driver Signed-off-by: Chris Metcalf Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c index 3643549..f66ac20 100644 --- a/drivers/net/ethernet/tile/tilepro.c +++ b/drivers/net/ethernet/tile/tilepro.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -156,10 +157,13 @@ struct tile_netio_queue { * Statistics counters for a specific cpu and device. */ struct tile_net_stats_t { - u32 rx_packets; - u32 rx_bytes; - u32 tx_packets; - u32 tx_bytes; + struct u64_stats_sync syncp; + u64 rx_packets; /* total packets received */ + u64 tx_packets; /* total packets transmitted */ + u64 rx_bytes; /* total bytes received */ + u64 tx_bytes; /* total bytes transmitted */ + u64 rx_errors; /* packets truncated or marked bad by hw */ + u64 rx_dropped; /* packets not for us or intf not up */ }; @@ -218,8 +222,6 @@ struct tile_net_priv { int network_cpus_count; /* Credits per network cpu. */ int network_cpus_credits; - /* Network stats. */ - struct net_device_stats stats; /* For NetIO bringup retries. */ struct delayed_work retry_work; /* Quick access to per cpu data. 
*/ @@ -847,6 +849,8 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index) } } + u64_stats_update_begin(&stats->syncp); + if (filter) { /* ISSUE: Update "drop" statistics? */ @@ -881,6 +885,8 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index) stats->rx_bytes += len; } + u64_stats_update_end(&stats->syncp); + /* ISSUE: It would be nice to defer this until the packet has */ /* actually been processed. */ tile_net_return_credit(info); @@ -1907,8 +1913,10 @@ busy: kfree_skb(olds[i]); /* Update stats. */ + u64_stats_update_begin(&stats->syncp); stats->tx_packets += num_segs; stats->tx_bytes += (num_segs * sh_len) + d_len; + u64_stats_update_end(&stats->syncp); /* Make sure the egress timer is scheduled. */ tile_net_schedule_egress_timer(info); @@ -2089,8 +2097,10 @@ busy: kfree_skb(olds[i]); /* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */ + u64_stats_update_begin(&stats->syncp); stats->tx_packets++; stats->tx_bytes += ((len >= ETH_ZLEN) ? len : ETH_ZLEN); + u64_stats_update_end(&stats->syncp); /* Make sure the egress timer is scheduled. */ tile_net_schedule_egress_timer(info); @@ -2127,30 +2137,51 @@ static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) * * Returns the address of the device statistics structure. */ -static struct net_device_stats *tile_net_get_stats(struct net_device *dev) +static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct tile_net_priv *priv = netdev_priv(dev); - u32 rx_packets = 0; - u32 tx_packets = 0; - u32 rx_bytes = 0; - u32 tx_bytes = 0; + u64 rx_packets = 0, tx_packets = 0; + u64 rx_bytes = 0, tx_bytes = 0; + u64 rx_errors = 0, rx_dropped = 0; int i; for_each_online_cpu(i) { - if (priv->cpu[i]) { - rx_packets += priv->cpu[i]->stats.rx_packets; - rx_bytes += priv->cpu[i]->stats.rx_bytes; - tx_packets += priv->cpu[i]->stats.tx_packets; - tx_bytes += priv->cpu[i]->stats.tx_bytes; - } + struct tile_net_stats_t *cpu_stats; + u64 trx_packets, ttx_packets, trx_bytes, ttx_bytes; + u64 trx_errors, trx_dropped; + unsigned int start; + + if (priv->cpu[i] == NULL) + continue; + cpu_stats = &priv->cpu[i]->stats; + + do { + start = u64_stats_fetch_begin_bh(&cpu_stats->syncp); + trx_packets = cpu_stats->rx_packets; + ttx_packets = cpu_stats->tx_packets; + trx_bytes = cpu_stats->rx_bytes; + ttx_bytes = cpu_stats->tx_bytes; + trx_errors = cpu_stats->rx_errors; + trx_dropped = cpu_stats->rx_dropped; + } while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start)); + + rx_packets += trx_packets; + tx_packets += ttx_packets; + rx_bytes += trx_bytes; + tx_bytes += ttx_bytes; + rx_errors += trx_errors; + rx_dropped += trx_dropped; } - priv->stats.rx_packets = rx_packets; - priv->stats.rx_bytes = rx_bytes; - priv->stats.tx_packets = tx_packets; - priv->stats.tx_bytes = tx_bytes; + stats->rx_packets = rx_packets; + stats->tx_packets = tx_packets; + stats->rx_bytes = rx_bytes; + stats->tx_bytes = tx_bytes; + stats->rx_errors = rx_errors; + stats->rx_dropped = rx_dropped; - return &priv->stats; + return stats; } @@ -2287,7 +2318,7 @@ static const struct net_device_ops tile_net_ops = { .ndo_stop = tile_net_stop, .ndo_start_xmit = tile_net_tx, .ndo_do_ioctl = tile_net_ioctl, - .ndo_get_stats = tile_net_get_stats, + .ndo_get_stats64 = tile_net_get_stats64, .ndo_change_mtu = tile_net_change_mtu, .ndo_tx_timeout = tile_net_tx_timeout, .ndo_set_mac_address = tile_net_set_mac_address, -- cgit v0.10.2 From dcfd8d5830f8cc9062eb7040f455c034e8d160e6 Mon Sep 17 
00:00:00 2001 From: Mugunthan V N Date: Thu, 25 Jul 2013 23:44:01 +0530 Subject: drivers: net: cpsw: Add support for set MAC address Adding support for setting MAC address to cpsw device via ndo_set_mac_address Signed-off-by: Mugunthan V N Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 1cd1c00..f51fcd4 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1424,6 +1424,33 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev) } +static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct sockaddr *addr = (struct sockaddr *)p; + int flags = 0; + u16 vid = 0; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (priv->data.dual_emac) { + vid = priv->slaves[priv->emac_port].port_vlan; + flags = ALE_VLAN; + } + + cpsw_ale_del_ucast(priv->ale, priv->mac_addr, priv->host_port, + flags, vid); + cpsw_ale_add_ucast(priv->ale, addr->sa_data, priv->host_port, + flags, vid); + + memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); + memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN); + for_each_slave(priv, cpsw_set_slave_mac, priv); + + return 0; +} + static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev) { struct cpsw_priv *priv = netdev_priv(ndev); @@ -1518,6 +1545,7 @@ static const struct net_device_ops cpsw_netdev_ops = { .ndo_stop = cpsw_ndo_stop, .ndo_start_xmit = cpsw_ndo_start_xmit, .ndo_change_rx_flags = cpsw_ndo_change_rx_flags, + .ndo_set_mac_address = cpsw_ndo_set_mac_address, .ndo_do_ioctl = cpsw_ndo_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = eth_change_mtu, -- cgit v0.10.2 From 5ad37d5deee1ff7150a2d0602370101de158ad86 Mon Sep 17 00:00:00 2001 From: Hannes Frederic Sowa Date: Fri, 26 Jul 2013 17:43:23 +0200 Subject: tcp: add tcp_syncookies mode to allow unconditionally generation of syncookies | If you want to test which effects syncookies have to your | network connections you can set this knob to 2 to enable | unconditionally generation of syncookies. Original idea and first implementation by Eric Dumazet. Cc: Florian Westphal Cc: David Miller Signed-off-by: Eric Dumazet Signed-off-by: Hannes Frederic Sowa Signed-off-by: David S. Miller diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 53cea9b..36be26b 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -440,6 +440,10 @@ tcp_syncookies - BOOLEAN SYN flood warnings in logs not being really flooded, your server is seriously misconfigured. + If you want to test which effects syncookies have to your + network connections you can set this knob to 2 to enable + unconditionally generation of syncookies. + tcp_fastopen - INTEGER Enable TCP Fast Open feature (draft-ietf-tcpm-fastopen) to send data in the opening SYN packet. To use this feature, the client application diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 2a5d5c4..280efe5 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -890,7 +890,7 @@ bool tcp_syn_flood_action(struct sock *sk, NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP); lopt = inet_csk(sk)->icsk_accept_queue.listen_opt; - if (!lopt->synflood_warned) { + if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) { lopt->synflood_warned = 1; pr_info("%s: Possible SYN flooding on port %d. %s. 
Check SNMP counters.\n", proto, ntohs(tcp_hdr(skb)->dest), msg); @@ -1462,7 +1462,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) * limitations, they conserve resources and peer is * evidently real one. */ - if (inet_csk_reqsk_queue_is_full(sk) && !isn) { + if ((sysctl_tcp_syncookies == 2 || + inet_csk_reqsk_queue_is_full(sk)) && !isn) { want_cookie = tcp_syn_flood_action(sk, skb, "TCP"); if (!want_cookie) goto drop; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index b792e87..38c196c 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -963,7 +963,8 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) if (!ipv6_unicast_destination(skb)) goto drop; - if (inet_csk_reqsk_queue_is_full(sk) && !isn) { + if ((sysctl_tcp_syncookies == 2 || + inet_csk_reqsk_queue_is_full(sk)) && !isn) { want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6"); if (!want_cookie) goto drop; -- cgit v0.10.2 From ffd756b3174e496cf6f3c5458c434e31d2cd48b0 Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Mon, 29 Jul 2013 13:44:15 +0200 Subject: pktgen: Require CONFIG_INET due to use of IPv4 checksum function Unlike for IPv6, the IPv4 checksum functions are only available if CONFIG_INET is set. Reported-by: kbuild test robot Signed-off-by: Thomas Graf Signed-off-by: David S. Miller diff --git a/net/Kconfig b/net/Kconfig index 3770249..37162eb 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -281,7 +281,7 @@ menu "Network testing" config NET_PKTGEN tristate "Packet Generator (USE WITH CAUTION)" - depends on PROC_FS + depends on INET && PROC_FS ---help--- This module will inject preconfigured packets, at a configurable rate, out of a given interface. It is used for network interface -- cgit v0.10.2 From 6083ed448b812e7b318820d7ec83c2a0e250dd57 Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Mon, 29 Jul 2013 15:21:27 +0200 Subject: net: mvneta: move the RX and TX desc macros outside of the structs The macros used for the various fields of the RX and TX descriptions are currently declared next to those fields within the structure definitions of the RX and TX descriptors. However, in order to support big endian, we'll have to use the "swap descriptors" features of the hardware, which swaps every byte within each 64 bits word of the descriptors. This requires a separate definition of the RX and TX descriptor structures for little and big endian, as is done in the mv643xx_eth. Those macros can therefore no longer be defined inside those structures. Signed-off-by: Thomas Petazzoni Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 712779f..649162e 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -262,8 +262,7 @@ struct mvneta_port { * layout of the transmit and reception DMA descriptors, and their * layout is therefore defined by the hardware design */ -struct mvneta_tx_desc { - u32 command; /* Options used by HW for packet transmitting.*/ + #define MVNETA_TX_L3_OFF_SHIFT 0 #define MVNETA_TX_IP_HLEN_SHIFT 8 #define MVNETA_TX_L4_UDP BIT(16) @@ -278,15 +277,6 @@ struct mvneta_tx_desc { #define MVNETA_TX_L4_CSUM_FULL BIT(30) #define MVNETA_TX_L4_CSUM_NOT BIT(31) - u16 reserverd1; /* csum_l4 (for future use) */ - u16 data_size; /* Data size of transmitted packet in bytes */ - u32 buf_phys_addr; /* Physical addr of transmitted buffer */ - u32 reserved2; /* hw_cmd - (for future use, PMT) */ - u32 reserved3[4]; /* Reserved - (for future use) */ -}; - -struct mvneta_rx_desc { - u32 status; /* Info about received packet */ #define MVNETA_RXD_ERR_CRC 0x0 #define MVNETA_RXD_ERR_SUMMARY BIT(16) #define MVNETA_RXD_ERR_OVERRUN BIT(17) @@ -297,13 +287,27 @@ struct mvneta_rx_desc { #define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27)) #define MVNETA_RXD_L4_CSUM_OK BIT(30) +struct mvneta_tx_desc { + u32 command; /* Options used by HW for packet transmitting.*/ + u16 reserverd1; /* csum_l4 (for future use) */ + u16 data_size; /* Data size of transmitted packet in bytes */ + u32 buf_phys_addr; /* Physical addr of transmitted buffer */ + u32 reserved2; /* hw_cmd - (for future use, PMT) */ + u32 reserved3[4]; /* Reserved - (for future use) */ +}; + +struct mvneta_rx_desc { + u32 status; /* Info about received packet */ u16 reserved1; /* pnc_info - (for future use, PnC) */ u16 data_size; /* Size of received packet in bytes */ + u32 buf_phys_addr; /* Physical address of the buffer */ u32 reserved2; /* pnc_flow_id (for future use, PnC) */ + u32 buf_cookie; /* cookie for access to RX buffer in rx path */ u16 reserved3; /* prefetch_cmd, for future use */ u16 reserved4; /* csum_l4 - (for future use, PnC) */ + u32 reserved5; /* pnc_extra PnC (for future use, PnC) */ u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */ }; -- cgit v0.10.2 From 9ad8fef6c61a9dfcaa8444e526020de88c540fa9 Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Mon, 29 Jul 2013 15:21:28 +0200 Subject: net: mvneta: support big endian Use the "swap descriptor" feature of the hardware to properly swap the descriptors when running in big endian mode. Since the swapping occurs on 64 bits words, we also need to provide a separate structure layout for the DMA descriptors between little endian and big endian mode, like is done in the mv643xx_eth driver. Signed-off-by: Thomas Petazzoni Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 649162e..9647976 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -79,10 +79,10 @@ #define MVNETA_MAC_ADDR_HIGH 0x2418 #define MVNETA_SDMA_CONFIG 0x241c #define MVNETA_SDMA_BRST_SIZE_16 4 -#define MVNETA_NO_DESC_SWAP 0x0 #define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1) #define MVNETA_RX_NO_DATA_SWAP BIT(4) #define MVNETA_TX_NO_DATA_SWAP BIT(5) +#define MVNETA_DESC_SWAP BIT(6) #define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22) #define MVNETA_PORT_STATUS 0x2444 #define MVNETA_TX_IN_PRGRS BIT(1) @@ -287,6 +287,7 @@ struct mvneta_port { #define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27)) #define MVNETA_RXD_L4_CSUM_OK BIT(30) +#if defined(__LITTLE_ENDIAN) struct mvneta_tx_desc { u32 command; /* Options used by HW for packet transmitting.*/ u16 reserverd1; /* csum_l4 (for future use) */ @@ -311,6 +312,32 @@ struct mvneta_rx_desc { u32 reserved5; /* pnc_extra PnC (for future use, PnC) */ u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */ }; +#else +struct mvneta_tx_desc { + u16 data_size; /* Data size of transmitted packet in bytes */ + u16 reserverd1; /* csum_l4 (for future use) */ + u32 command; /* Options used by HW for packet transmitting.*/ + u32 reserved2; /* hw_cmd - (for future use, PMT) */ + u32 buf_phys_addr; /* Physical addr of transmitted buffer */ + u32 reserved3[4]; /* Reserved - (for future use) */ +}; + +struct mvneta_rx_desc { + u16 data_size; /* Size of received packet in bytes */ + u16 reserved1; /* pnc_info - (for future use, PnC) */ + u32 status; /* Info about received packet */ + + u32 reserved2; /* pnc_flow_id (for future use, PnC) */ + u32 buf_phys_addr; /* Physical address of the buffer */ + + u16 reserved4; /* csum_l4 - (for future use, PnC) */ + u16 reserved3; /* prefetch_cmd, for future use */ + u32 buf_cookie; /* cookie for access to RX buffer in rx path */ + + u32 reserved5; /* pnc_extra PnC (for future use, PnC) */ + u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */ +}; +#endif struct mvneta_tx_queue { /* Number of this TX queue, in the range 0-7 */ @@ -908,9 +935,11 @@ static void mvneta_defaults_set(struct mvneta_port *pp) /* Default burst size */ val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16); val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16); + val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP; - val |= (MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP | - MVNETA_NO_DESC_SWAP); +#if defined(__BIG_ENDIAN) + val |= MVNETA_DESC_SWAP; +#endif /* Assign port SDMA configuration */ mvreg_write(pp, MVNETA_SDMA_CONFIG, val); -- cgit v0.10.2 From 670224f12929942cfd74638d31ec18124def65f6 Mon Sep 17 00:00:00 2001 From: Greg Rose Date: Fri, 22 Feb 2013 02:14:39 +0000 Subject: ixgbe: Retain VLAN filtering in promiscuous + VT mode When using the new bridge FDB interface to allow SR-IOV virtual function network devices to communicate with SW bridged network devices the physical function is placed into promiscuous mode and hardware VLAN filtering is disabled. This defeats the ability to use VLAN tagging to isolate user networks. When the device is in promiscuous mode and VT mode simultaneously ensure that VLAN hardware filtering remains enabled. 
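In essence, the promiscuous-mode path in ixgbe_set_rx_mode() now makes the following decision (sketch of the logic added by this patch):

    if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
                            IXGBE_FLAG_SRIOV_ENABLED)))
            ixgbe_vlan_filter_disable(adapter); /* no pools in use: old behaviour */
    else
            ixgbe_vlan_filter_enable(adapter);  /* VT mode: keep VLAN isolation */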
Signed-off-by: Greg Rose Tested-by: Stephen Ko Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index bad8f14..d837f81 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -3724,8 +3724,15 @@ void ixgbe_set_rx_mode(struct net_device *netdev) hw->addr_ctrl.user_set_promisc = true; fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE); - /* don't hardware filter vlans in promisc mode */ - ixgbe_vlan_filter_disable(adapter); + /* Only disable hardware filter vlans in promiscuous mode + * if SR-IOV and VMDQ are disabled - otherwise ensure + * that hardware VLAN filters remain enabled. + */ + if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED | + IXGBE_FLAG_SRIOV_ENABLED))) + ixgbe_vlan_filter_disable(adapter); + else + ixgbe_vlan_filter_enable(adapter); } else { if (netdev->flags & IFF_ALLMULTI) { fctrl |= IXGBE_FCTRL_MPE; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 1e7d587..6c624c9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -768,6 +768,29 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter, return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0; } +static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan) +{ + u32 vlvf; + s32 regindex; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* Search for the vlan id in the VLVF entries */ + for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) { + vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); + if ((vlvf & VLAN_VID_MASK) == vlan) + break; + } + + /* Return a negative value if not found */ + if (regindex >= IXGBE_VLVF_ENTRIES) + regindex = -1; + + return regindex; +} + static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) { @@ -775,6 +798,9 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, int add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; int vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); int err; + s32 reg_ndx; + u32 vlvf; + u32 bits; u8 tcs = netdev_get_num_tc(adapter->netdev); if (adapter->vfinfo[vf].pf_vlan || tcs) { @@ -790,10 +816,50 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, else if (adapter->vfinfo[vf].vlan_count) adapter->vfinfo[vf].vlan_count--; + /* in case of promiscuous mode any VLAN filter set for a VF must + * also have the PF pool added to it. + */ + if (add && adapter->netdev->flags & IFF_PROMISC) + err = ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); + err = ixgbe_set_vf_vlan(adapter, add, vid, vf); if (!err && adapter->vfinfo[vf].spoofchk_enabled) hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); + /* Go through all the checks to see if the VLAN filter should + * be wiped completely. + */ + if (!add && adapter->netdev->flags & IFF_PROMISC) { + reg_ndx = ixgbe_find_vlvf_entry(hw, vid); + if (reg_ndx < 0) + goto out; + vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_ndx)); + /* See if any other pools are set for this VLAN filter + * entry other than the PF. 
+ */ + if (VMDQ_P(0) < 32) { + bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2)); + bits &= ~(1 << VMDQ_P(0)); + bits |= IXGBE_READ_REG(hw, + IXGBE_VLVFB(reg_ndx * 2) + 1); + } else { + bits = IXGBE_READ_REG(hw, + IXGBE_VLVFB(reg_ndx * 2) + 1); + bits &= ~(1 << (VMDQ_P(0) - 32)); + bits |= IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2)); + } + + /* If the filter was removed then ensure PF pool bit + * is cleared if the PF only added itself to the pool + * because the PF is in promiscuous mode. + */ + if ((vlvf & VLAN_VID_MASK) == vid && + !test_bit(vid, adapter->active_vlans) && !bits) + ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); + } + +out: + return err; } -- cgit v0.10.2 From e507d0cdb3d8b79d806645e68aa310ddb12bfe76 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 26 Mar 2013 00:03:21 +0000 Subject: ixgbe: Use pci_vfs_assigned instead of ixgbe_vfs_are_assigned This change makes it so that the ixgbe driver uses the generic helper pci_vfs_assigned instead of the ixgbe specific function ixgbe_vfs_are_assigned. Signed-off-by: Alexander Duyck Tested-by: Stephen Ko Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 6c624c9..73c8e73 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -173,39 +173,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter) ixgbe_disable_sriov(adapter); } -static bool ixgbe_vfs_are_assigned(struct ixgbe_adapter *adapter) -{ - struct pci_dev *pdev = adapter->pdev; - struct pci_dev *vfdev; - int dev_id; - - switch (adapter->hw.mac.type) { - case ixgbe_mac_82599EB: - dev_id = IXGBE_DEV_ID_82599_VF; - break; - case ixgbe_mac_X540: - dev_id = IXGBE_DEV_ID_X540_VF; - break; - default: - return false; - } - - /* loop through all the VFs to see if we own any that are assigned */ - vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL); - while (vfdev) { - /* if we don't own it we don't care */ - if (vfdev->is_virtfn && vfdev->physfn == pdev) { - /* if it is assigned we cannot release it */ - if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) - return true; - } - - vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev); - } - - return false; -} - #endif /* #ifdef CONFIG_PCI_IOV */ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) { @@ -235,7 +202,7 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) * without causing issues, so just leave the hardware * available but disabled */ - if (ixgbe_vfs_are_assigned(adapter)) { + if (pci_vfs_assigned(adapter->pdev)) { e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n"); return -EPERM; } -- cgit v0.10.2 From 73d80953dfd1d5a92948005798c857c311c2834b Mon Sep 17 00:00:00 2001 From: Don Skidmore Date: Wed, 31 Jul 2013 02:19:24 +0000 Subject: ixgbe: fix fc autoneg ethtool reporting. Originally ixgbe_device_supports_autoneg_fc() was only expected to be called by copper devices. This would lead to false information to be displayed via ethtool. v2: changed ixgbe_device_supports_autoneg_fc() to a bool function, it returns bool. 
Based on feedback from David Miller Signed-off-by: Don Skidmore Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 9bcdeb8..ddbc3ee 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -65,17 +65,41 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); * function check the device id to see if the associated phy supports * autoneg flow control. **/ -s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) +bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) { + bool supported = false; + ixgbe_link_speed speed; + bool link_up; - switch (hw->device_id) { - case IXGBE_DEV_ID_X540T: - case IXGBE_DEV_ID_X540T1: - case IXGBE_DEV_ID_82599_T3_LOM: - return 0; + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber: + hw->mac.ops.check_link(hw, &speed, &link_up, false); + /* if link is down, assume supported */ + if (link_up) + supported = speed == IXGBE_LINK_SPEED_1GB_FULL ? + true : false; + else + supported = true; + break; + case ixgbe_media_type_backplane: + supported = true; + break; + case ixgbe_media_type_copper: + /* only some copper devices support flow control autoneg */ + switch (hw->device_id) { + case IXGBE_DEV_ID_82599_T3_LOM: + case IXGBE_DEV_ID_X540T: + case IXGBE_DEV_ID_X540T1: + supported = true; + break; + default: + break; + } default: - return IXGBE_ERR_FC_NOT_SUPPORTED; + break; } + + return supported; } /** @@ -234,7 +258,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw) IXGBE_GSSR_MAC_CSR_SM); } else if ((hw->phy.media_type == ixgbe_media_type_copper) && - (ixgbe_device_supports_autoneg_fc(hw) == 0)) { + ixgbe_device_supports_autoneg_fc(hw)) { hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, reg_cu); } @@ -2392,7 +2416,7 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw) /* Autoneg flow control on copper adapters */ case ixgbe_media_type_copper: - if (ixgbe_device_supports_autoneg_fc(hw) == 0) + if (ixgbe_device_supports_autoneg_fc(hw)) ret_val = ixgbe_fc_autoneg_copper(hw); break; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index 22eee38..1315b8a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -80,7 +80,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw); s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw); s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw); -s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); +bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); void ixgbe_fc_autoneg(struct ixgbe_hw *hw); s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 24e2e7a..ae58a92 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -355,10 +355,11 @@ static void ixgbe_get_pauseparam(struct net_device *netdev, struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; - if (hw->fc.disable_fc_autoneg) - pause->autoneg = 0; - else + if (ixgbe_device_supports_autoneg_fc(hw) && + !hw->fc.disable_fc_autoneg) pause->autoneg = 1; + else + pause->autoneg = 0; if (hw->fc.current_mode == ixgbe_fc_rx_pause) { pause->rx_pause = 1; @@ -384,7 +385,7 @@ static int 
ixgbe_set_pauseparam(struct net_device *netdev, /* some devices do not support autoneg of link flow control */ if ((pause->autoneg == AUTONEG_ENABLE) && - (ixgbe_device_supports_autoneg_fc(hw) != 0)) + !ixgbe_device_supports_autoneg_fc(hw)) return -EINVAL; fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index d837f81..531ee44 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -4721,8 +4721,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) ixgbe_pbthresh_setup(adapter); hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; hw->fc.send_xon = true; - hw->fc.disable_fc_autoneg = - (ixgbe_device_supports_autoneg_fc(hw) == 0) ? false : true; + hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw); #ifdef CONFIG_PCI_IOV /* assign number of SR-IOV VFs */ -- cgit v0.10.2 From 66b52b0dc82c5c88d769dc1c7d44cf45d0deb07c Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 29 Jul 2013 18:16:49 +0200 Subject: net: add ndo to get id of physical port of the device This patch adds a ndo for getting physical port of the device. Driver which is aware of being virtual function of some physical port should implement this ndo. This is applicable not only for IOV, but for other solutions (NPAR, multichannel) as well. Basically if there is possible to have multiple netdevs on the single hw port. Signed-off-by: Jiri Pirko Acked-by: Ben Hutchings Signed-off-by: David S. Miller diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3ca60b0..875f869 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -728,6 +728,16 @@ struct netdev_fcoe_hbainfo { }; #endif +#define MAX_PHYS_PORT_ID_LEN 32 + +/* This structure holds a unique identifier to identify the + * physical port used by a netdevice. + */ +struct netdev_phys_port_id { + unsigned char id[MAX_PHYS_PORT_ID_LEN]; + unsigned char id_len; +}; + /* * This structure defines the management hooks for network devices. * The following hooks can be defined; unless noted otherwise, they are @@ -932,6 +942,12 @@ struct netdev_fcoe_hbainfo { * that determine carrier state from physical hardware properties (eg * network cables) or protocol-dependent mechanisms (eg * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function. + * + * int (*ndo_get_phys_port_id)(struct net_device *dev, + * struct netdev_phys_port_id *ppid); + * Called to get ID of physical port of this device. If driver does + * not implement this, it is assumed that the hw is not able to have + * multiple net devices on single physical port. 
*/ struct net_device_ops { int (*ndo_init)(struct net_device *dev); @@ -1060,6 +1076,8 @@ struct net_device_ops { struct nlmsghdr *nlh); int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); + int (*ndo_get_phys_port_id)(struct net_device *dev, + struct netdev_phys_port_id *ppid); }; /* @@ -2315,6 +2333,8 @@ extern int dev_set_mac_address(struct net_device *, struct sockaddr *); extern int dev_change_carrier(struct net_device *, bool new_carrier); +extern int dev_get_phys_port_id(struct net_device *dev, + struct netdev_phys_port_id *ppid); extern int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq); diff --git a/net/core/dev.c b/net/core/dev.c index dfd9f5d..58eb802 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4989,6 +4989,24 @@ int dev_change_carrier(struct net_device *dev, bool new_carrier) EXPORT_SYMBOL(dev_change_carrier); /** + * dev_get_phys_port_id - Get device physical port ID + * @dev: device + * @ppid: port ID + * + * Get device physical port ID + */ +int dev_get_phys_port_id(struct net_device *dev, + struct netdev_phys_port_id *ppid) +{ + const struct net_device_ops *ops = dev->netdev_ops; + + if (!ops->ndo_get_phys_port_id) + return -EOPNOTSUPP; + return ops->ndo_get_phys_port_id(dev, ppid); +} +EXPORT_SYMBOL(dev_get_phys_port_id); + +/** * dev_new_index - allocate an ifindex * @net: the applicable net namespace * -- cgit v0.10.2 From 66cae9ed6bc46b8cc57a9693f99f69926f3cc7ef Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 29 Jul 2013 18:16:50 +0200 Subject: rtnl: export physical port id via RT netlink Signed-off-by: Jiri Pirko Acked-by: Ben Hutchings Signed-off-by: Narendra K Signed-off-by: David S. Miller diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 03f6170..04c0e7a 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -143,6 +143,7 @@ enum { IFLA_NUM_TX_QUEUES, IFLA_NUM_RX_QUEUES, IFLA_CARRIER, + IFLA_PHYS_PORT_ID, __IFLA_MAX }; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 3de7408..0b2972c 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -767,7 +767,8 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev, + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */ + rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */ + rtnl_link_get_size(dev) /* IFLA_LINKINFO */ - + rtnl_link_get_af_size(dev); /* IFLA_AF_SPEC */ + + rtnl_link_get_af_size(dev) /* IFLA_AF_SPEC */ + + nla_total_size(MAX_PHYS_PORT_ID_LEN); /* IFLA_PHYS_PORT_ID */ } static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev) @@ -846,6 +847,24 @@ static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev) return 0; } +static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev) +{ + int err; + struct netdev_phys_port_id ppid; + + err = dev_get_phys_port_id(dev, &ppid); + if (err) { + if (err == -EOPNOTSUPP) + return 0; + return err; + } + + if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id)) + return -EMSGSIZE; + + return 0; +} + static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, int type, u32 pid, u32 seq, u32 change, unsigned int flags, u32 ext_filter_mask) @@ -913,6 +932,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, goto nla_put_failure; } + if (rtnl_phys_port_id_fill(skb, dev)) + goto nla_put_failure; + attr = nla_reserve(skb, IFLA_STATS, sizeof(struct rtnl_link_stats)); if (attr == NULL) @@ -1113,6 +1135,7 @@ const struct 
nla_policy ifla_policy[IFLA_MAX+1] = { [IFLA_PROMISCUITY] = { .type = NLA_U32 }, [IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 }, [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 }, + [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_PORT_ID_LEN }, }; EXPORT_SYMBOL(ifla_policy); -- cgit v0.10.2 From ff80e519ab1b3a6abb2c6bbf684b98be07111879 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 29 Jul 2013 18:16:51 +0200 Subject: net: export physical port id via sysfs Signed-off-by: Jiri Pirko Acked-by: Ben Hutchings Signed-off-by: Narendra K Signed-off-by: David S. Miller diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 981fed3..8826b0d 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -334,6 +334,27 @@ static ssize_t store_group(struct device *dev, struct device_attribute *attr, return netdev_store(dev, attr, buf, len, change_group); } +static ssize_t show_phys_port_id(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_dev(dev); + ssize_t ret = -EINVAL; + + if (!rtnl_trylock()) + return restart_syscall(); + + if (dev_isalive(netdev)) { + struct netdev_phys_port_id ppid; + + ret = dev_get_phys_port_id(netdev, &ppid); + if (!ret) + ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id); + } + rtnl_unlock(); + + return ret; +} + static struct device_attribute net_class_attributes[] = { __ATTR(addr_assign_type, S_IRUGO, show_addr_assign_type, NULL), __ATTR(addr_len, S_IRUGO, show_addr_len, NULL), @@ -355,6 +376,7 @@ static struct device_attribute net_class_attributes[] = { __ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len, store_tx_queue_len), __ATTR(netdev_group, S_IRUGO | S_IWUSR, show_group, store_group), + __ATTR(phys_port_id, S_IRUGO, show_phys_port_id, NULL), {} }; -- cgit v0.10.2 From 4e8e1bca6e2584f2b29744f4cfdbb9bbf151d177 Mon Sep 17 00:00:00 2001 From: Don Skidmore Date: Wed, 31 Jul 2013 02:17:40 +0000 Subject: ixgbe: add new media type. This patch adds support for a new media type fiber_fixed. This is useful to avoid all the SFP+ hot plug support path on devices who's fix fiber need not worry about such things. This patch is needed for a following patch that adds support for "fiber_fixed" devices. v2: cleaned up logging message based on feedback from David Miller Signed-off-by: Don Skidmore Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 0b82d38..9d627fd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -527,6 +527,75 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) } /** + * ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber + * @hw: pointer to hardware structure + * @speed: link speed to set + * + * We set the module speed differently for fixed fiber. For other + * multi-speed devices we don't have an error value so here if we + * detect an error we just log it and exit. 
+ */ +static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw, + ixgbe_link_speed speed) +{ + s32 status; + u8 rs, eeprom_data; + + switch (speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + /* one bit mask same as setting on */ + rs = IXGBE_SFF_SOFT_RS_SELECT_10G; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + rs = IXGBE_SFF_SOFT_RS_SELECT_1G; + break; + default: + hw_dbg(hw, "Invalid fixed module speed\n"); + return; + } + + /* Set RS0 */ + status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + &eeprom_data); + if (status) { + hw_dbg(hw, "Failed to read Rx Rate Select RS0\n"); + goto out; + } + + eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs; + + status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + eeprom_data); + if (status) { + hw_dbg(hw, "Failed to write Rx Rate Select RS0\n"); + goto out; + } + + /* Set RS1 */ + status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + &eeprom_data); + if (status) { + hw_dbg(hw, "Failed to read Rx Rate Select RS1\n"); + goto out; + } + + eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs; + + status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + eeprom_data); + if (status) { + hw_dbg(hw, "Failed to write Rx Rate Select RS1\n"); + goto out; + } +out: + return; +} + +/** * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed * @hw: pointer to hardware structure * @speed: new link speed @@ -573,9 +642,14 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, goto out; /* Set the module link speed */ - esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); - IXGBE_WRITE_FLUSH(hw); + if (hw->phy.media_type == ixgbe_media_type_fiber_fixed) { + ixgbe_set_fiber_fixed_speed(hw, + IXGBE_LINK_SPEED_10GB_FULL); + } else { + esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); + } /* Allow module to change analog characteristics (1G->10G) */ msleep(40); @@ -625,10 +699,15 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, goto out; /* Set the module link speed */ - esdp_reg &= ~IXGBE_ESDP_SDP5; - esdp_reg |= IXGBE_ESDP_SDP5_DIR; - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); - IXGBE_WRITE_FLUSH(hw); + if (hw->phy.media_type == ixgbe_media_type_fiber_fixed) { + ixgbe_set_fiber_fixed_speed(hw, + IXGBE_LINK_SPEED_1GB_FULL); + } else { + esdp_reg &= ~IXGBE_ESDP_SDP5; + esdp_reg |= IXGBE_ESDP_SDP5_DIR; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); + } /* Allow module to change analog characteristics (10G->1G) */ msleep(40); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index ddbc3ee..0d09711 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -72,6 +72,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) bool link_up; switch (hw->phy.media_type) { + case ixgbe_media_type_fiber_fixed: case ixgbe_media_type_fiber: hw->mac.ops.check_link(hw, &speed, &link_up, false); /* if link is down, assume supported */ @@ -138,6 +139,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw) * we link at 10G, the 1G advertisement is harmless and vice versa. 
*/ switch (hw->phy.media_type) { + case ixgbe_media_type_fiber_fixed: case ixgbe_media_type_fiber: case ixgbe_media_type_backplane: reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); @@ -2404,6 +2406,7 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw) switch (hw->phy.media_type) { /* Autoneg flow control on fiber adapters */ + case ixgbe_media_type_fiber_fixed: case ixgbe_media_type_fiber: if (speed == IXGBE_LINK_SPEED_1GB_FULL) ret_val = ixgbe_fc_autoneg_fiber(hw); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index 886a343..2d3433f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -44,6 +44,8 @@ #define IXGBE_SFF_CABLE_SPEC_COMP 0x3C #define IXGBE_SFF_SFF_8472_SWAP 0x5C #define IXGBE_SFF_SFF_8472_COMP 0x5E +#define IXGBE_SFF_SFF_8472_OSCB 0x6E +#define IXGBE_SFF_SFF_8472_ESCB 0x76 /* Bitmasks */ #define IXGBE_SFF_DA_PASSIVE_CABLE 0x4 @@ -54,6 +56,9 @@ #define IXGBE_SFF_1GBASET_CAPABLE 0x8 #define IXGBE_SFF_10GBASESR_CAPABLE 0x10 #define IXGBE_SFF_10GBASELR_CAPABLE 0x20 +#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8 +#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8 +#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0 #define IXGBE_SFF_ADDRESSING_MODE 0x4 #define IXGBE_I2C_EEPROM_READ_MASK 0x100 #define IXGBE_I2C_EEPROM_STATUS_MASK 0x3 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 70c6aa3..8968ea0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -2622,6 +2622,7 @@ enum ixgbe_sfp_type { enum ixgbe_media_type { ixgbe_media_type_unknown = 0, ixgbe_media_type_fiber, + ixgbe_media_type_fiber_fixed, ixgbe_media_type_fiber_lco, ixgbe_media_type_copper, ixgbe_media_type_backplane, -- cgit v0.10.2 From 93ac03be0d5981ecb36292cd956b90ade44f8bd6 Mon Sep 17 00:00:00 2001 From: Don Skidmore Date: Wed, 15 May 2013 07:34:50 +0000 Subject: ixgbe: bump version number Bump the version number to better match with a similar version of the out of tree driver. Signed-off-by: Don Skidmore Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 531ee44..61fa0c3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -63,7 +63,7 @@ char ixgbe_default_device_descr[] = static char ixgbe_default_device_descr[] = "Intel(R) 10 Gigabit Network Connection"; #endif -#define DRV_VERSION "3.13.10-k" +#define DRV_VERSION "3.15.1-k" const char ixgbe_driver_version[] = DRV_VERSION; static const char ixgbe_copyright[] = "Copyright (c) 1999-2013 Intel Corporation."; -- cgit v0.10.2 From 3dcc2f4142610cc7212a757348d45f1e815927c9 Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Wed, 29 May 2013 06:23:05 +0000 Subject: ixgbe: fix semaphore lock for I2C read/writes on 82598 ixgbe_read/write_i2c_phy_82598() does not hold the SWFW_SYNC semaphore for the entire function. Instead the lock is held only during the phy.ops.read/write_reg operations. As result when the function is being called simultaneously the I2C read/writes can be corrupted. The following patch introduces the SWFW_SYNC semaphore for the entire ixgbe_read/write_i2c_phy_82598() function. 
To accomplish this I had to create 2 separate functions: ixgbe_read_phy_reg_mdi() ixgbe_write_phy_reg_mdi() Those functions are identical to ixgbe_read/write_phy_reg_generic() sans the locking, and can be used in ixgbe_read/write_i2c_phy_82598() with the SWFW_SYNC semaphore being held. Signed-off-by: Emil Tantilov Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index 4a5bfb6..d011d5c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -1018,8 +1018,17 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr, u16 sfp_addr = 0; u16 sfp_data = 0; u16 sfp_stat = 0; + u16 gssr; u32 i; + if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) + gssr = IXGBE_GSSR_PHY1_SM; + else + gssr = IXGBE_GSSR_PHY0_SM; + + if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0) + return IXGBE_ERR_SWFW_SYNC; + if (hw->phy.type == ixgbe_phy_nl) { /* * phy SDA/SCL registers are at addresses 0xC30A to @@ -1028,17 +1037,17 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr, */ sfp_addr = (dev_addr << 8) + byte_offset; sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK); - hw->phy.ops.write_reg(hw, - IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR, - MDIO_MMD_PMAPMD, - sfp_addr); + hw->phy.ops.write_reg_mdi(hw, + IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR, + MDIO_MMD_PMAPMD, + sfp_addr); /* Poll status */ for (i = 0; i < 100; i++) { - hw->phy.ops.read_reg(hw, - IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT, - MDIO_MMD_PMAPMD, - &sfp_stat); + hw->phy.ops.read_reg_mdi(hw, + IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT, + MDIO_MMD_PMAPMD, + &sfp_stat); sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK; if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS) break; @@ -1052,8 +1061,8 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr, } /* Read data */ - hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA, - MDIO_MMD_PMAPMD, &sfp_data); + hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA, + MDIO_MMD_PMAPMD, &sfp_data); *eeprom_data = (u8)(sfp_data >> 8); } else { @@ -1061,6 +1070,7 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr, } out: + hw->mac.ops.release_swfw_sync(hw, gssr); return status; } @@ -1326,6 +1336,8 @@ static struct ixgbe_phy_operations phy_ops_82598 = { .reset = &ixgbe_reset_phy_generic, .read_reg = &ixgbe_read_phy_reg_generic, .write_reg = &ixgbe_write_phy_reg_generic, + .read_reg_mdi = &ixgbe_read_phy_reg_mdi, + .write_reg_mdi = &ixgbe_write_phy_reg_mdi, .setup_link = &ixgbe_setup_phy_link_generic, .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index e5691cc..71f554a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -204,7 +204,83 @@ out: } /** + * ixgbe_read_phy_mdi - Reads a value from a specified PHY register without + * the SWFW lock + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @phy_data: Pointer to read data from PHY register + **/ +s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 *phy_data) +{ + u32 i, data, command; + + /* Setup and write the address cycle command */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + 
(hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* Check every 10 usec to see if the address cycle completed. + * The MDI Command bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + udelay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } + + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + hw_dbg(hw, "PHY address command did not complete.\n"); + return IXGBE_ERR_PHY; + } + + /* Address cycle complete, setup and write the read + * command + */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* Check every 10 usec to see if the address cycle + * completed. The MDI Command bit will clear when the + * operation is complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + udelay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + hw_dbg(hw, "PHY read command didn't complete\n"); + return IXGBE_ERR_PHY; + } + + /* Read operation is complete. Get the data + * from MSRWD + */ + data = IXGBE_READ_REG(hw, IXGBE_MSRWD); + data >>= IXGBE_MSRWD_READ_DATA_SHIFT; + *phy_data = (u16)(data); + + return 0; +} + +/** * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register + * using the SWFW lock - this function is needed in most cases * @hw: pointer to hardware structure * @reg_addr: 32 bit address of PHY register to read * @phy_data: Pointer to read data from PHY register @@ -212,10 +288,7 @@ out: s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 *phy_data) { - u32 command; - u32 i; - u32 data; - s32 status = 0; + s32 status; u16 gssr; if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) @@ -223,86 +296,93 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, else gssr = IXGBE_GSSR_PHY0_SM; - if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0) + if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) { + status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type, + phy_data); + hw->mac.ops.release_swfw_sync(hw, gssr); + } else { status = IXGBE_ERR_SWFW_SYNC; + } - if (status == 0) { - /* Setup and write the address cycle command */ - command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | - (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | - (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); + return status; +} - IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); +/** + * ixgbe_write_phy_reg_mdi - Writes a value to specified PHY register + * without SWFW lock + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 5 bit device type + * @phy_data: Data to write to the PHY register + **/ +s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + u32 i, command; - /* - * Check every 10 usec to see if the address cycle completed. 
- * The MDI Command bit will clear when the operation is - * complete - */ - for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { - udelay(10); + /* Put the data in the MDI single read and write data register*/ + IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data); - command = IXGBE_READ_REG(hw, IXGBE_MSCA); + /* Setup and write the address cycle command */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); - if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) - break; - } + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); - if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { - hw_dbg(hw, "PHY address command did not complete.\n"); - status = IXGBE_ERR_PHY; - } + /* + * Check every 10 usec to see if the address cycle completed. + * The MDI Command bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + udelay(10); - if (status == 0) { - /* - * Address cycle complete, setup and write the read - * command - */ - command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | - (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.mdio.prtad << - IXGBE_MSCA_PHY_ADDR_SHIFT) | - (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND)); - - IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); - - /* - * Check every 10 usec to see if the address cycle - * completed. The MDI Command bit will clear when the - * operation is complete - */ - for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { - udelay(10); - - command = IXGBE_READ_REG(hw, IXGBE_MSCA); - - if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) - break; - } + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } - if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { - hw_dbg(hw, "PHY read command didn't complete\n"); - status = IXGBE_ERR_PHY; - } else { - /* - * Read operation is complete. Get the data - * from MSRWD - */ - data = IXGBE_READ_REG(hw, IXGBE_MSRWD); - data >>= IXGBE_MSRWD_READ_DATA_SHIFT; - *phy_data = (u16)(data); - } - } + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + hw_dbg(hw, "PHY address cmd didn't complete\n"); + return IXGBE_ERR_PHY; + } - hw->mac.ops.release_swfw_sync(hw, gssr); + /* + * Address cycle complete, setup and write the write + * command + */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* Check every 10 usec to see if the address cycle + * completed. 
The MDI Command bit will clear when the + * operation is complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + udelay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; } - return status; + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + hw_dbg(hw, "PHY write cmd didn't complete\n"); + return IXGBE_ERR_PHY; + } + + return 0; } /** * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register + * using SWFW lock- this function is needed in most cases * @hw: pointer to hardware structure * @reg_addr: 32 bit PHY register to write * @device_type: 5 bit device type @@ -311,9 +391,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 phy_data) { - u32 command; - u32 i; - s32 status = 0; + s32 status; u16 gssr; if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) @@ -321,74 +399,12 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, else gssr = IXGBE_GSSR_PHY0_SM; - if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0) - status = IXGBE_ERR_SWFW_SYNC; - - if (status == 0) { - /* Put the data in the MDI single read and write data register*/ - IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data); - - /* Setup and write the address cycle command */ - command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | - (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | - (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); - - IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); - - /* - * Check every 10 usec to see if the address cycle completed. - * The MDI Command bit will clear when the operation is - * complete - */ - for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { - udelay(10); - - command = IXGBE_READ_REG(hw, IXGBE_MSCA); - - if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) - break; - } - - if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { - hw_dbg(hw, "PHY address cmd didn't complete\n"); - status = IXGBE_ERR_PHY; - } - - if (status == 0) { - /* - * Address cycle complete, setup and write the write - * command - */ - command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | - (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.mdio.prtad << - IXGBE_MSCA_PHY_ADDR_SHIFT) | - (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND)); - - IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); - - /* - * Check every 10 usec to see if the address cycle - * completed. 
The MDI Command bit will clear when the - * operation is complete - */ - for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { - udelay(10); - - command = IXGBE_READ_REG(hw, IXGBE_MSCA); - - if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) - break; - } - - if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { - hw_dbg(hw, "PHY address cmd didn't complete\n"); - status = IXGBE_ERR_PHY; - } - } - + if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) { + status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, + phy_data); hw->mac.ops.release_swfw_sync(hw, gssr); + } else { + status = IXGBE_ERR_SWFW_SYNC; } return status; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index 2d3433f..647203a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -107,6 +107,10 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 *phy_data); s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 phy_data); +s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data); +s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data); s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, ixgbe_link_speed speed, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 8968ea0..4c91ea6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -2886,6 +2886,8 @@ struct ixgbe_phy_operations { s32 (*reset)(struct ixgbe_hw *); s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *); s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16); + s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *); + s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16); s32 (*setup_link)(struct ixgbe_hw *); s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool); s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); -- cgit v0.10.2 From a4b6fc6bc61e6aeb24af73c5b195a6aec27d88ff Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Wed, 29 May 2013 06:23:10 +0000 Subject: ixgbe: fix SFF data dumps of SFP+ modules This patch fixes several issues with the previous implementation of the SFF data dump of SFP+ modules: - removed the __IXGBE_READ_I2C flag - I2C access locking is handled in the HW specific routines - fixed the read loop to read data from ee->offset to ee->len - the reads fail if __IXGBE_IN_SFP_INIT is set in the process - this is needed because on some HW I2C operations can take long time and disrupt the SFP and link detection process Signed-off-by: Emil Tantilov Reported-by: Ben Hutchings Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 7be725c..d882278 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -754,7 +754,6 @@ enum ixgbe_state_t { __IXGBE_DOWN, __IXGBE_SERVICE_SCHED, __IXGBE_IN_SFP_INIT, - __IXGBE_READ_I2C, }; struct ixgbe_cb { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index ae58a92..da1ea28 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -2910,33 +2910,21 @@ static int ixgbe_get_module_info(struct 
net_device *dev, struct ixgbe_hw *hw = &adapter->hw; u32 status; u8 sff8472_rev, addr_mode; - int ret_val = 0; bool page_swap = false; - /* avoid concurent i2c reads */ - while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) - msleep(100); - - /* used by the service task */ - set_bit(__IXGBE_READ_I2C, &adapter->state); - /* Check whether we support SFF-8472 or not */ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_SFF_8472_COMP, &sff8472_rev); - if (status != 0) { - ret_val = -EIO; - goto err_out; - } + if (status != 0) + return -EIO; /* addressing mode is not supported */ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_SFF_8472_SWAP, &addr_mode); - if (status != 0) { - ret_val = -EIO; - goto err_out; - } + if (status != 0) + return -EIO; if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) { e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n"); @@ -2953,9 +2941,7 @@ static int ixgbe_get_module_info(struct net_device *dev, modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; } -err_out: - clear_bit(__IXGBE_READ_I2C, &adapter->state); - return ret_val; + return 0; } static int ixgbe_get_module_eeprom(struct net_device *dev, @@ -2969,48 +2955,25 @@ static int ixgbe_get_module_eeprom(struct net_device *dev, int i = 0; int ret_val = 0; - /* ixgbe_get_module_info is called before this function in all - * cases, so we do not need any checks we already do above, - * and can trust ee->len to be a known value. - */ + if (ee->len == 0) + return -EINVAL; - while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) - msleep(100); - set_bit(__IXGBE_READ_I2C, &adapter->state); + for (i = ee->offset; i < ee->len; i++) { + /* I2C reads can take long time */ + if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) + return -EBUSY; - /* Read the first block, SFF-8079 */ - for (i = 0; i < ETH_MODULE_SFF_8079_LEN; i++) { - status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte); - if (status != 0) { - /* Error occured while reading module */ + if (i < ETH_MODULE_SFF_8079_LEN) + status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte); + else + status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte); + + if (status != 0) ret_val = -EIO; - goto err_out; - } - data[i] = databyte; - } - /* If the second block is requested, check if SFF-8472 is supported. 
*/ - if (ee->len == ETH_MODULE_SFF_8472_LEN) { - if (data[IXGBE_SFF_SFF_8472_COMP] == IXGBE_SFF_SFF_8472_UNSUP) - return -EOPNOTSUPP; - - /* Read the second block, SFF-8472 */ - for (i = ETH_MODULE_SFF_8079_LEN; - i < ETH_MODULE_SFF_8472_LEN; i++) { - status = hw->phy.ops.read_i2c_sff8472(hw, - i - ETH_MODULE_SFF_8079_LEN, &databyte); - if (status != 0) { - /* Error occured while reading module */ - ret_val = -EIO; - goto err_out; - } - data[i] = databyte; - } + data[i - ee->offset] = databyte; } -err_out: - clear_bit(__IXGBE_READ_I2C, &adapter->state); - return ret_val; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 61fa0c3..2d0e846 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -5832,10 +5832,6 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) return; - /* concurent i2c reads are not supported */ - if (test_bit(__IXGBE_READ_I2C, &adapter->state)) - return; - /* someone else is in init, wait until next service event */ if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) return; -- cgit v0.10.2 From 343e51ae6e3f64ed26d96f5560f4962529794c9f Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Wed, 31 Jul 2013 06:53:16 +0000 Subject: PCI: expose pcie_link_speed and pcix_bus_speed arrays pcie_link_speed and pcix_bus_speed are arrays used by probe.c to correctly convert lnksta register values into the pci_bus_speed enum. These static arrays are useful outside probe for this purpose. This patch makes these defines into conist arrays and exposes them with an extern header in drivers/pci/pci.h -v2- * move extern declarations to drivers/pci/pci.h CC: Bjorn Helgaas Signed-off-by: Jacob Keller Signed-off-by: Jeff Kirsher diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index d1182c4..948d1a0 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -6,6 +6,9 @@ #define PCI_CFG_SPACE_SIZE 256 #define PCI_CFG_SPACE_EXP_SIZE 4096 +extern const unsigned char pcix_bus_speed[]; +extern const unsigned char pcie_link_speed[]; + /* Functions internal to the PCI core code */ int pci_create_sysfs_dev_files(struct pci_dev *pdev); diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 46ada5c..496c5b0 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -513,7 +513,7 @@ static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b) return bridge; } -static unsigned char pcix_bus_speed[] = { +const unsigned char pcix_bus_speed[] = { PCI_SPEED_UNKNOWN, /* 0 */ PCI_SPEED_66MHz_PCIX, /* 1 */ PCI_SPEED_100MHz_PCIX, /* 2 */ @@ -532,7 +532,7 @@ static unsigned char pcix_bus_speed[] = { PCI_SPEED_133MHz_PCIX_533 /* F */ }; -static unsigned char pcie_link_speed[] = { +const unsigned char pcie_link_speed[] = { PCI_SPEED_UNKNOWN, /* 0 */ PCIE_SPEED_2_5GT, /* 1 */ PCIE_SPEED_5_0GT, /* 2 */ -- cgit v0.10.2 From 59da381ee2afc806f85becf3aa64ffc952355552 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Wed, 31 Jul 2013 06:53:21 +0000 Subject: PCI: move enum pcie_link_width into pci.h pcie_link_width is the enum used to define the link width values for a pcie device. 
This enum should not be contained solely in pci_hotplug.h, and this patch moves it next to pci_bus_speed in pci.h Signed-off-by: Jacob Keller Tested-by: Phil Schmitt Acked-by: Bjorn Helgaas Signed-off-by: Jeff Kirsher diff --git a/include/linux/pci.h b/include/linux/pci.h index 0fd1f15..a0bf22d 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -183,6 +183,19 @@ enum pci_bus_flags { PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2, }; +/* These values come from the PCI Express Spec */ +enum pcie_link_width { + PCIE_LNK_WIDTH_RESRV = 0x00, + PCIE_LNK_X1 = 0x01, + PCIE_LNK_X2 = 0x02, + PCIE_LNK_X4 = 0x04, + PCIE_LNK_X8 = 0x08, + PCIE_LNK_X12 = 0x0C, + PCIE_LNK_X16 = 0x10, + PCIE_LNK_X32 = 0x20, + PCIE_LNK_WIDTH_UNKNOWN = 0xFF, +}; + /* Based on the PCI Hotplug Spec, but some values are made up by us */ enum pci_bus_speed { PCI_SPEED_33MHz = 0x00, diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index 8db71dc..64e61e0 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h @@ -28,19 +28,6 @@ #ifndef _PCI_HOTPLUG_H #define _PCI_HOTPLUG_H -/* These values come from the PCI Express Spec */ -enum pcie_link_width { - PCIE_LNK_WIDTH_RESRV = 0x00, - PCIE_LNK_X1 = 0x01, - PCIE_LNK_X2 = 0x02, - PCIE_LNK_X4 = 0x04, - PCIE_LNK_X8 = 0x08, - PCIE_LNK_X12 = 0x0C, - PCIE_LNK_X16 = 0x10, - PCIE_LNK_X32 = 0x20, - PCIE_LNK_WIDTH_UNKNOWN = 0xFF, -}; - /** * struct hotplug_slot_ops -the callbacks that the hotplug pci core can use * @owner: The module owner of this structure -- cgit v0.10.2 From fca418955148e4f4555d7ce911e9eee3e7970a7f Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Mon, 29 Jul 2013 11:07:36 -0700 Subject: flow_dissector: clean up IPIP case Explicitly set proto to ETH_P_IP and jump directly to ip processing. Signed-off-by: Tom Herbert Signed-off-by: David S. Miller diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 00ee068..3259446 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -139,7 +139,8 @@ ipv6: break; } case IPPROTO_IPIP: - goto again; + proto = htons(ETH_P_IP); + goto ip; default: break; } -- cgit v0.10.2 From b438f940d3541f478c6b37106ed095f1be7959ef Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Mon, 29 Jul 2013 11:07:42 -0700 Subject: flow_dissector: add support for IPPROTO_IPV6 Support IPPROTO_IPV6 similar to IPPROTO_IPIP Signed-off-by: Tom Herbert Signed-off-by: David S. Miller diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 3259446..ade9ff1 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -141,6 +141,9 @@ ipv6: case IPPROTO_IPIP: proto = htons(ETH_P_IP); goto ip; + case IPPROTO_IPV6: + proto = htons(ETH_P_IPV6); + goto ipv6; default: break; } -- cgit v0.10.2 From 4299c8a94fa9d26b54222d7781244137623d4b39 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 29 Jul 2013 22:15:19 +0300 Subject: net: remove an unneeded check "ifa->ifa_label" is an array inside the in_ifaddr struct. It can never be NULL so we can remove this check. Signed-off-by: Dan Carpenter Reviewed-by: Jiri Pirko Signed-off-by: David S. 
Miller diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 8d48c39..1b7f7ae 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -1124,10 +1124,7 @@ static int inet_gifconf(struct net_device *dev, char __user *buf, int len) if (len < (int) sizeof(ifr)) break; memset(&ifr, 0, sizeof(struct ifreq)); - if (ifa->ifa_label) - strcpy(ifr.ifr_name, ifa->ifa_label); - else - strcpy(ifr.ifr_name, dev->name); + strcpy(ifr.ifr_name, ifa->ifa_label); (*(struct sockaddr_in *)&ifr.ifr_addr).sin_family = AF_INET; (*(struct sockaddr_in *)&ifr.ifr_addr).sin_addr.s_addr = -- cgit v0.10.2 From ba0ccd7affd777cb90ad7279de4143663ae4d485 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Mon, 22 Jul 2013 14:25:28 +0200 Subject: ath10k: improve tx throughput on slow machines It is more efficient to move just the 802.11 header instead of the whole payload in most cases. This has no measurable effect on modern hardware. It should improve performance by a few percent on hardware such as an Access Point that have a slow CPU compared to a typical desktop CPU. Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index d0a7761..6e01ef6 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -1406,9 +1406,9 @@ static void ath10k_tx_h_qos_workaround(struct ieee80211_hw *hw, return; qos_ctl = ieee80211_get_qos_ctl(hdr); - memmove(qos_ctl, qos_ctl + IEEE80211_QOS_CTL_LEN, - skb->len - ieee80211_hdrlen(hdr->frame_control)); - skb_trim(skb, skb->len - IEEE80211_QOS_CTL_LEN); + memmove(skb->data + IEEE80211_QOS_CTL_LEN, + skb->data, (void *)qos_ctl - (void *)skb->data); + skb_pull(skb, IEEE80211_QOS_CTL_LEN); } static void ath10k_tx_h_update_wep_key(struct sk_buff *skb) -- cgit v0.10.2 From 8865bee4835441d9b3220d779d254d5856a68af0 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Wed, 24 Jul 2013 12:36:46 +0200 Subject: ath10k: detect the number of spatial streams supported by hw Until now ath10k assumed 3 spatial streams. However some devices support only 2 spatial streams. This patch improves performance on devices that don't support 3 spatial streams. 
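In other words, the MCS maps are now derived from the chain count the firmware reports instead of assuming three streams. A small standalone sketch of that derivation, using the standard 2-bit-per-stream VHT encoding (the macro names below are local stand-ins for illustration, not the mac80211 IEEE80211_VHT_MCS_* definitions):

#include <stdint.h>
#include <stdio.h>

#define VHT_MCS_SUPPORT_0_9   2 /* 2-bit code: MCS 0-9 supported on this stream */
#define VHT_MCS_NOT_SUPPORTED 3 /* 2-bit code: stream not supported */

/* Build the 16-bit VHT MCS map for a device with num_rf_chains chains. */
static uint16_t build_vht_mcs_map(int num_rf_chains)
{
        uint16_t mcs_map = 0;
        int i;

        for (i = 0; i < 8; i++) {
                if (i < num_rf_chains)
                        mcs_map |= VHT_MCS_SUPPORT_0_9 << (i * 2);
                else
                        mcs_map |= VHT_MCS_NOT_SUPPORTED << (i * 2);
        }
        return mcs_map;
}

int main(void)
{
        /* A 2-chain device advertises MCS 0-9 on streams 1-2 only. */
        printf("2 chains -> 0x%04x\n", build_vht_mcs_map(2));
        /* A 3-chain device advertises MCS 0-9 on streams 1-3. */
        printf("3 chains -> 0x%04x\n", build_vht_mcs_map(3));
        return 0;
}

A 2-chain part thus ends up with an MCS map of 0xfffa rather than the previously hard-coded 3-stream map, and the HT rx_mask is filled for ar->num_rf_chains entries in the same spirit.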
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 9f21ecb..ff42bb7 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -285,6 +285,7 @@ struct ath10k { u32 hw_max_tx_power; u32 ht_cap_info; u32 vht_cap_info; + u32 num_rf_chains; struct targetdef *targetdef; struct hostdef *hostdef; diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 6e01ef6..344ad27 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -3093,19 +3093,18 @@ static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar) { struct ieee80211_sta_vht_cap vht_cap = {0}; u16 mcs_map; + int i; vht_cap.vht_supported = 1; vht_cap.cap = ar->vht_cap_info; - /* FIXME: check dynamically how many streams board supports */ - mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | - IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 | - IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 14; + mcs_map = 0; + for (i = 0; i < 8; i++) { + if (i < ar->num_rf_chains) + mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i*2); + else + mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i*2); + } vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); @@ -3168,7 +3167,7 @@ static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar) if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK) ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU; - for (i = 0; i < WMI_MAX_SPATIAL_STREAM; i++) + for (i = 0; i < ar->num_rf_chains; i++) ht_cap.mcs.rx_mask[i] = 0xFF; ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED; diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 5e42460..1cbcb2e 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -868,6 +868,13 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar, (__le32_to_cpu(ev->sw_version_1) & 0xffff0000) >> 16; ar->fw_version_build = (__le32_to_cpu(ev->sw_version_1) & 0x0000ffff); ar->phy_capability = __le32_to_cpu(ev->phy_capability); + ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains); + + if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) { + ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n", + ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM); + ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM; + } ar->ath_common.regulatory.current_rd = __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd); @@ -892,7 +899,7 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar, } ath10k_dbg(ATH10K_DBG_WMI, - "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u\n", + "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n", __le32_to_cpu(ev->sw_version), __le32_to_cpu(ev->sw_version_1), __le32_to_cpu(ev->abi_version), @@ -901,7 +908,8 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar, __le32_to_cpu(ev->vht_cap_info), __le32_to_cpu(ev->vht_supp_mcs), __le32_to_cpu(ev->sys_cap_info), - 
__le32_to_cpu(ev->num_mem_reqs)); + __le32_to_cpu(ev->num_mem_reqs), + __le32_to_cpu(ev->num_rf_chains)); complete(&ar->wmi.service_ready); } -- cgit v0.10.2 From 81377c8d3563e7aec5c8baaaacacb48034f430a0 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Wed, 31 Jul 2013 06:53:26 +0000 Subject: PCI: Add function to obtain minimum link width and speed A PCI Express device can potentially report a link width and speed which it will not properly fulfill due to being plugged into a slower link higher in the chain. This function walks up the PCI bus chain and calculates the minimum link width and speed of this entire chain. This can be useful to enable a device to determine if it has enough bandwidth for optimum functionality. Signed-off-by: Jacob Keller Tested-by: Phil Schmitt Acked-by: Bjorn Helgaas Signed-off-by: Jeff Kirsher diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index e37fea6..c71e78c 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -3579,6 +3579,49 @@ int pcie_set_mps(struct pci_dev *dev, int mps) } /** + * pcie_get_minimum_link - determine minimum link settings of a PCI device + * @dev: PCI device to query + * @speed: storage for minimum speed + * @width: storage for minimum width + * + * This function will walk up the PCI device chain and determine the minimum + * link width and speed of the device. + */ +int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + int ret; + + *speed = PCI_SPEED_UNKNOWN; + *width = PCIE_LNK_WIDTH_UNKNOWN; + + while (dev) { + u16 lnksta; + enum pci_bus_speed next_speed; + enum pcie_link_width next_width; + + ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); + if (ret) + return ret; + + next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; + next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT; + + if (next_speed < *speed) + *speed = next_speed; + + if (next_width < *width) + *width = next_width; + + dev = dev->bus->self; + } + + return 0; +} +EXPORT_SYMBOL(pcie_get_minimum_link); + +/** * pci_select_bars - Make BAR mask from the type of resource * @dev: the PCI device for which BAR mask is made * @flags: resource type mask to be selected diff --git a/include/linux/pci.h b/include/linux/pci.h index a0bf22d..2edbee6 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -934,6 +934,8 @@ int pcie_get_readrq(struct pci_dev *dev); int pcie_set_readrq(struct pci_dev *dev, int rq); int pcie_get_mps(struct pci_dev *dev); int pcie_set_mps(struct pci_dev *dev, int mps); +int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width); int __pci_reset_function(struct pci_dev *dev); int __pci_reset_function_locked(struct pci_dev *dev); int pci_reset_function(struct pci_dev *dev); -- cgit v0.10.2 From e027d1aec4bb49030646d2c186a721f94372d7f2 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Wed, 31 Jul 2013 06:53:31 +0000 Subject: ixgbe: call pcie_get_mimimum_link to check if device has enough bandwidth This patch uses the new pcie_get_minimum_link function to perform a check to ensure that the adapter is hooked into a slot which is capable of providing the necessary bandwidth. This check supersedes the original method which only checked the current pci device. The new method is capable of determining the minimum speed and link of an entire PCI chain. 
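The expected-versus-available comparison is plain arithmetic: usable throughput is the raw link rate times the negotiated width, minus line-encoding overhead (20% for the 8b/10b encoding used at 2.5 and 5.0 GT/s, about 1.5% for 128b/130b at 8.0 GT/s). A minimal standalone sketch of that calculation, with illustrative enum values rather than the kernel's pci_bus_speed constants:

#include <stdio.h>

/* Illustrative speed identifiers; not the kernel's enum pci_bus_speed. */
enum link_speed { GEN1_2_5GT, GEN2_5_0GT, GEN3_8_0GT };

/* Usable GT/s of a link after subtracting line-encoding overhead. */
static int usable_gts(enum link_speed speed, int width)
{
        switch (speed) {
        case GEN1_2_5GT: return 2 * width; /* 2.5 GT/s * 0.8 (8b/10b)    */
        case GEN2_5_0GT: return 4 * width; /* 5.0 GT/s * 0.8 (8b/10b)    */
        case GEN3_8_0GT: return 8 * width; /* 8.0 GT/s * 128/130 ~= 7.88 */
        }
        return 0;
}

int main(void)
{
        printf("x8 Gen2: %d GT/s usable\n", usable_gts(GEN2_5_0GT, 8)); /* 32 */
        printf("x4 Gen2: %d GT/s usable\n", usable_gts(GEN2_5_0GT, 4)); /* 16 */
        return 0;
}

A dual-port 10G adapter needs roughly 2 x 10 GT/s, so it fits comfortably in a x8 Gen2 slot but is under-provisioned at x4, which is exactly the case the warning added below is meant to catch.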
-v2- * update the error message to include encoding loss CC: Bjorn Helgaas Signed-off-by: Jacob Keller Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 2d0e846..69c6a49 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -195,6 +195,85 @@ static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter) return 0; } +/** + * ixgbe_check_from_parent - Determine whether PCIe info should come from parent + * @hw: hw specific details + * + * This function is used by probe to determine whether a device's PCI-Express + * bandwidth details should be gathered from the parent bus instead of from the + * device. Used to ensure that various locations all have the correct device ID + * checks. + */ +static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw) +{ + switch (hw->device_id) { + case IXGBE_DEV_ID_82599_SFP_SF_QP: + return true; + default: + return false; + } +} + +static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter, + int expected_gts) +{ + int max_gts = 0; + enum pci_bus_speed speed = PCI_SPEED_UNKNOWN; + enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN; + struct pci_dev *pdev; + + /* determine whether to use the the parent device + */ + if (ixgbe_pcie_from_parent(&adapter->hw)) + pdev = adapter->pdev->bus->parent->self; + else + pdev = adapter->pdev; + + if (pcie_get_minimum_link(pdev, &speed, &width) || + speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) { + e_dev_warn("Unable to determine PCI Express bandwidth.\n"); + return; + } + + switch (speed) { + case PCIE_SPEED_2_5GT: + /* 8b/10b encoding reduces max throughput by 20% */ + max_gts = 2 * width; + break; + case PCIE_SPEED_5_0GT: + /* 8b/10b encoding reduces max throughput by 20% */ + max_gts = 4 * width; + break; + case PCIE_SPEED_8_0GT: + /* 128b/130b encoding only reduces throughput by 1% */ + max_gts = 8 * width; + break; + default: + e_dev_warn("Unable to determine PCI Express bandwidth.\n"); + return; + } + + e_dev_info("PCI Express bandwidth of %dGT/s available\n", + max_gts); + e_dev_info("(Speed:%s, Width: x%d, Encoding Loss:%s)\n", + (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : + speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : + speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : + "Unknown"), + width, + (speed == PCIE_SPEED_2_5GT ? "20%" : + speed == PCIE_SPEED_5_0GT ? "20%" : + speed == PCIE_SPEED_8_0GT ? "N/a" : + "Unknown")); + + if (max_gts < expected_gts) { + e_dev_warn("This is not sufficient for optimal performance of this card.\n"); + e_dev_warn("For optimal performance, at least %dGT/s of bandwidth is required.\n", + expected_gts); + e_dev_warn("A slot with more lanes and/or higher speed is suggested.\n"); + } +} + static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter) { if (!test_bit(__IXGBE_DOWN, &adapter->state) && @@ -7249,6 +7328,41 @@ static const struct net_device_ops ixgbe_netdev_ops = { }; /** + * ixgbe_enumerate_functions - Get the number of ports this device has + * @adapter: adapter structure + * + * This function enumerates the phsyical functions co-located on a single slot, + * in order to determine how many ports a device has. This is most useful in + * determining the required GT/s of PCIe bandwidth necessary for optimal + * performance. 
+ **/ +static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct list_head *entry; + int physfns = 0; + + /* Some cards can not use the generic count PCIe functions method, and + * so must be hardcoded to the correct value. + */ + switch (hw->device_id) { + case IXGBE_DEV_ID_82599_SFP_SF_QP: + physfns = 4; + break; + default: + list_for_each(entry, &adapter->pdev->bus_list) { + struct pci_dev *pdev = + list_entry(entry, struct pci_dev, bus_list); + /* don't count virtual functions */ + if (!pdev->is_virtfn) + physfns++; + } + } + + return physfns; +} + +/** * ixgbe_wol_supported - Check whether device supports WoL * @hw: hw specific details * @device_id: the device ID @@ -7330,7 +7444,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct ixgbe_hw *hw; const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; static int cards_found; - int i, err, pci_using_dac; + int i, err, pci_using_dac, expected_gts; unsigned int indices = MAX_TX_QUEUES; u8 part_str[IXGBE_PBANUM_LENGTH]; #ifdef IXGBE_FCOE @@ -7619,7 +7733,7 @@ skip_sriov: /* pick up the PCI bus settings for reporting later */ hw->mac.ops.get_bus_info(hw); - if (hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) + if (ixgbe_pcie_from_parent(hw)) ixgbe_get_parent_bus_info(adapter); /* print bus type/speed/width info */ @@ -7645,12 +7759,20 @@ skip_sriov: e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n", hw->mac.type, hw->phy.type, part_str); - if (hw->bus.width <= ixgbe_bus_width_pcie_x4) { - e_dev_warn("PCI-Express bandwidth available for this card is " - "not sufficient for optimal performance.\n"); - e_dev_warn("For optimal performance a x8 PCI-Express slot " - "is required.\n"); + /* calculate the expected PCIe bandwidth required for optimal + * performance. Note that some older parts will never have enough + * bandwidth due to being older generation PCIe parts. We clamp these + * parts to ensure no warning is displayed if it can't be fixed. + */ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16); + break; + default: + expected_gts = ixgbe_enumerate_functions(adapter) * 10; + break; } + ixgbe_check_minimum_link(adapter, expected_gts); /* reset the hardware with the new settings */ err = hw->mac.ops.start_hw(hw); -- cgit v0.10.2 From 8fecf67c7eafad28bb6725e326aef8a34ee1c045 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 21 Jun 2013 08:14:32 +0000 Subject: ixgbe: fix lockdep annotation issue for ptp's work item This patch fixes a lockdep issue created due to ixgbe_ptp_stop always running cancel_work_sync even if the work item had not been created properly with INIT_WORK. This is caused because ixgbe_ptp_stop did not check to actually ensure PTP was running first. The new implementation introduces a state in the &adapter->state field which is used to indicate that PTP is running. (This replaces the IXGBE_FLAG2_PTP_ENABLED field). This state will use the atomic set_bit, test_bit, and test_and_clear_bit functions. ixgbe_ptp_stop will check to ensure that PTP was enabled, (and if not, it will not attempt to do any cleanup work from ixgbe_ptp_init). 
This resolves the lockdep annotation warning found by Stephen Hemminger Reported-by: Stephen Hemminger Signed-off-by: Jacob Keller Acked-by: Don Skidmore Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index d882278..6844f39 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -618,9 +618,8 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7) #define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8) #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9) -#define IXGBE_FLAG2_PTP_ENABLED (u32)(1 << 10) -#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 11) -#define IXGBE_FLAG2_BRIDGE_MODE_VEB (u32)(1 << 12) +#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10) +#define IXGBE_FLAG2_BRIDGE_MODE_VEB (u32)(1 << 11) /* Tx fast path data */ int num_tx_queues; @@ -754,6 +753,7 @@ enum ixgbe_state_t { __IXGBE_DOWN, __IXGBE_SERVICE_SCHED, __IXGBE_IN_SFP_INIT, + __IXGBE_PTP_RUNNING, }; struct ixgbe_cb { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 69c6a49..785eafa 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -4438,7 +4438,7 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) if (hw->mac.san_mac_rar_index) hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); - if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) + if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) ixgbe_ptp_reset(adapter); } @@ -5766,7 +5766,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) adapter->last_rx_ptp_check = jiffies; - if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) + if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) ixgbe_ptp_start_cyclecounter(adapter); e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", @@ -5812,7 +5812,7 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter) if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB) adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; - if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) + if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) ixgbe_ptp_start_cyclecounter(adapter); e_info(drv, "NIC Link is Down\n"); @@ -6119,7 +6119,7 @@ static void ixgbe_service_task(struct work_struct *work) ixgbe_fdir_reinit_subtask(adapter); ixgbe_check_hang_subtask(adapter); - if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) { + if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) { ixgbe_ptp_overflow_check(adapter); ixgbe_ptp_rx_hang(adapter); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 331987d..5184e2a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -885,8 +885,8 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter) ixgbe_ptp_reset(adapter); - /* set the flag that PTP has been enabled */ - adapter->flags2 |= IXGBE_FLAG2_PTP_ENABLED; + /* enter the IXGBE_PTP_RUNNING state */ + set_bit(__IXGBE_PTP_RUNNING, &adapter->state); return; } @@ -899,10 +899,12 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter) */ void ixgbe_ptp_stop(struct ixgbe_adapter *adapter) { - /* stop the overflow check task */ - adapter->flags2 &= ~(IXGBE_FLAG2_PTP_ENABLED | - IXGBE_FLAG2_PTP_PPS_ENABLED); + /* Leave the IXGBE_PTP_RUNNING state. 
*/ + if (!test_and_clear_bit(__IXGBE_PTP_RUNNING, &adapter->state)) + return; + /* stop the PPS signal */ + adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED; ixgbe_ptp_setup_sdp(adapter); cancel_work_sync(&adapter->ptp_tx_work); -- cgit v0.10.2 From 9c432adaa803c1a6f1065f907caab13248c844df Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Tue, 16 Jul 2013 07:57:46 +0000 Subject: ixgbe: rename LL_EXTENDED_STATS to use queue instead of q This patch renames the stats introduced by the busy poll feature so that they are more inline with the current statistics naming schemes. Signed-off-by: Jacob Keller Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index da1ea28..50c1e9b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1141,11 +1141,11 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, sprintf(p, "tx_queue_%u_bytes", i); p += ETH_GSTRING_LEN; #ifdef LL_EXTENDED_STATS - sprintf(p, "tx_q_%u_napi_yield", i); + sprintf(p, "tx_queue_%u_ll_napi_yield", i); p += ETH_GSTRING_LEN; - sprintf(p, "tx_q_%u_misses", i); + sprintf(p, "tx_queue_%u_ll_misses", i); p += ETH_GSTRING_LEN; - sprintf(p, "tx_q_%u_cleaned", i); + sprintf(p, "tx_queue_%u_ll_cleaned", i); p += ETH_GSTRING_LEN; #endif /* LL_EXTENDED_STATS */ } @@ -1155,11 +1155,11 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, sprintf(p, "rx_queue_%u_bytes", i); p += ETH_GSTRING_LEN; #ifdef LL_EXTENDED_STATS - sprintf(p, "rx_q_%u_ll_poll_yield", i); + sprintf(p, "rx_queue_%u_ll_poll_yield", i); p += ETH_GSTRING_LEN; - sprintf(p, "rx_q_%u_misses", i); + sprintf(p, "rx_queue_%u_ll_misses", i); p += ETH_GSTRING_LEN; - sprintf(p, "rx_q_%u_cleaned", i); + sprintf(p, "rx_queue_%u_ll_cleaned", i); p += ETH_GSTRING_LEN; #endif /* LL_EXTENDED_STATS */ } -- cgit v0.10.2 From 674c18b2ed65f9db04e5c2e9c9e4af8fa2af0f1b Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Tue, 23 Jul 2013 01:57:03 +0000 Subject: ixgbe: clear semaphore bits on timeouts This patch changes the error code path in ixgbe_acquire_swfw_sync() to deal with cases where acquiring SW semaphore times out. In cases where the SW/FW semaphore bits were set (i.e. due to a crash) the driver will hang on load. With this patch the driver will clear the stuck bits if the semaphore was not acquired in the allotted time. 
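Reduced to its control flow, the new acquire path is a bounded retry loop that takes the software lock bits on success and, once the retries are exhausted, clears whatever bits are left stuck before returning the error. A simplified standalone sketch of that flow (the register accessors below are stubs standing in for the real IXGBE_READ_REG/IXGBE_WRITE_REG and semaphore helpers):

#include <stdint.h>
#include <stdio.h>

/* Simulated SW_FW_SYNC register; in the driver this is a device register. */
static uint32_t gssr_reg;

static uint32_t read_gssr(void)        { return gssr_reg; }
static void write_gssr(uint32_t val)   { gssr_reg = val; }

/*
 * Try to take the software lock bits in swmask; the firmware owns the same
 * bits shifted left by 5.  Returns 0 on success, -1 on timeout.  On timeout,
 * any bits still stuck are cleared so that a later attempt can succeed.
 */
static int acquire_swfw_sync(uint32_t swmask, int retries)
{
        uint32_t fwmask = swmask << 5;
        uint32_t gssr = 0;
        int i;

        for (i = 0; i < retries; i++) {
                gssr = read_gssr();
                if (!(gssr & (fwmask | swmask))) {
                        write_gssr(gssr | swmask);      /* take the lock */
                        return 0;
                }
                /* resource held by FW or another thread: back off, retry */
        }

        /* Retries exhausted: clear the stuck bits and report the failure. */
        if (gssr & (fwmask | swmask))
                write_gssr(gssr & ~(fwmask | swmask));
        return -1;
}

int main(void)
{
        gssr_reg = 0x02;        /* pretend a crash left this lock bit set */
        printf("first attempt:  %d\n", acquire_swfw_sync(0x02, 3)); /* -1 */
        printf("second attempt: %d\n", acquire_swfw_sync(0x02, 3)); /*  0 */
        return 0;
}

The second attempt succeeds only because the first, failed one cleared the stuck bit, which is the load-time hang the patch removes.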
Signed-off-by: Emil Tantilov Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 0d09711..50e62a2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -2506,42 +2506,39 @@ out: **/ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) { - u32 gssr; + u32 gssr = 0; u32 swmask = mask; u32 fwmask = mask << 5; - s32 timeout = 200; + u32 timeout = 200; + u32 i; - while (timeout) { + for (i = 0; i < timeout; i++) { /* - * SW EEPROM semaphore bit is used for access to all - * SW_FW_SYNC/GSSR bits (not just EEPROM) + * SW NVM semaphore bit is used for access to all + * SW_FW_SYNC bits (not just NVM) */ if (ixgbe_get_eeprom_semaphore(hw)) return IXGBE_ERR_SWFW_SYNC; gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); - if (!(gssr & (fwmask | swmask))) - break; - - /* - * Firmware currently using resource (fwmask) or other software - * thread currently using resource (swmask) - */ - ixgbe_release_eeprom_semaphore(hw); - usleep_range(5000, 10000); - timeout--; - } - - if (!timeout) { - hw_dbg(hw, "Driver can't access resource, SW_FW_SYNC timeout.\n"); - return IXGBE_ERR_SWFW_SYNC; + if (!(gssr & (fwmask | swmask))) { + gssr |= swmask; + IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); + ixgbe_release_eeprom_semaphore(hw); + return 0; + } else { + /* Resource is currently in use by FW or SW */ + ixgbe_release_eeprom_semaphore(hw); + usleep_range(5000, 10000); + } } - gssr |= swmask; - IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); + /* If time expired clear the bits holding the lock and retry */ + if (gssr & (fwmask | swmask)) + ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask)); - ixgbe_release_eeprom_semaphore(hw); - return 0; + usleep_range(5000, 10000); + return IXGBE_ERR_SWFW_SYNC; } /** -- cgit v0.10.2 From a2d7b870d0c99abefe7c302a92f62f0a9aced3b3 Mon Sep 17 00:00:00 2001 From: Avri Altman Date: Tue, 9 Jul 2013 01:42:17 +0300 Subject: iwlwifi: mvm: new api to get signal strength A new API that replaces the rx signal strength calculation via agc & rssi. The energy is now calculated outside the driver and transferred by the fw. Signed-off-by: Avri Altman Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h index f26f126..46dc38a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw.h @@ -75,14 +75,16 @@ * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P. 
* @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS * @IWL_UCODE_TLV_FLAGS_UAPSD: This uCode image supports uAPSD + * @IWL_UCODE_TLV_FLAGS_RX_ENERGY_API: supports rx signal strength api */ enum iwl_ucode_tlv_flag { - IWL_UCODE_TLV_FLAGS_PAN = BIT(0), - IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1), - IWL_UCODE_TLV_FLAGS_MFP = BIT(2), - IWL_UCODE_TLV_FLAGS_P2P = BIT(3), - IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4), - IWL_UCODE_TLV_FLAGS_UAPSD = BIT(6), + IWL_UCODE_TLV_FLAGS_PAN = BIT(0), + IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1), + IWL_UCODE_TLV_FLAGS_MFP = BIT(2), + IWL_UCODE_TLV_FLAGS_P2P = BIT(3), + IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4), + IWL_UCODE_TLV_FLAGS_UAPSD = BIT(6), + IWL_UCODE_TLV_FLAGS_RX_ENERGY_API = BIT(8), }; /* The default calibrate table size if not specified by firmware file */ diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h index 44614f5..28cab82 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h @@ -768,6 +768,14 @@ struct iwl_phy_context_cmd { } __packed; /* PHY_CONTEXT_CMD_API_VER_1 */ #define IWL_RX_INFO_PHY_CNT 8 +#define IWL_RX_INFO_ENERGY_ANT_ABC_IDX 1 +#define IWL_RX_INFO_ENERGY_ANT_A_MSK 0x000000ff +#define IWL_RX_INFO_ENERGY_ANT_B_MSK 0x0000ff00 +#define IWL_RX_INFO_ENERGY_ANT_C_MSK 0x00ff0000 +#define IWL_RX_INFO_ENERGY_ANT_A_POS 0 +#define IWL_RX_INFO_ENERGY_ANT_B_POS 8 +#define IWL_RX_INFO_ENERGY_ANT_C_POS 16 + #define IWL_RX_INFO_AGC_IDX 1 #define IWL_RX_INFO_RSSI_AB_IDX 2 #define IWL_OFDM_AGC_A_MSK 0x0000007f diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c index e4930d5..c8e4af2 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/iwlwifi/mvm/rx.c @@ -124,10 +124,6 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, ieee80211_rx_ni(mvm->hw, skb); } -/* - * iwl_mvm_calc_rssi - calculate the rssi in dBm - * @phy_info: the phy information for the coming packet - */ static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm, struct iwl_rx_phy_info *phy_info) { @@ -136,12 +132,6 @@ static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm, u32 agc_a, agc_b, max_agc; u32 val; - /* Find max rssi among 2 possible receivers. - * These values are measured by the Digital Signal Processor (DSP). - * They should stay fairly constant even as the signal strength varies, - * if the radio's Automatic Gain Control (AGC) is working right. - * AGC value (see below) will provide the "interesting" info. 
- */ val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]); agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS; agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS; @@ -170,6 +160,32 @@ static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm, } /* + * iwl_mvm_get_signal_strength - use new rx PHY INFO API + */ +static int iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, + struct iwl_rx_phy_info *phy_info) +{ + int energy_a, energy_b, energy_c, max_energy; + u32 val; + + val = + le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]); + energy_a = -((val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >> + IWL_RX_INFO_ENERGY_ANT_A_POS); + energy_b = -((val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >> + IWL_RX_INFO_ENERGY_ANT_B_POS); + energy_c = -((val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >> + IWL_RX_INFO_ENERGY_ANT_C_POS); + max_energy = max(energy_a, energy_b); + max_energy = max(max_energy, energy_c); + + IWL_DEBUG_STATS(mvm, "energy In A %d B %d C %d , and max %d\n", + energy_a, energy_b, energy_c, max_energy); + + return max_energy; +} + +/* * iwl_mvm_set_mac80211_rx_flag - translate fw status to mac80211 format * @mvm: the mvm object * @hdr: 80211 header @@ -290,7 +306,10 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/ /* Find max signal strength (dBm) among 3 antenna/receiver chains */ - rx_status.signal = iwl_mvm_calc_rssi(mvm, phy_info); + if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_RX_ENERGY_API) + rx_status.signal = iwl_mvm_get_signal_strength(mvm, phy_info); + else + rx_status.signal = iwl_mvm_calc_rssi(mvm, phy_info); IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status.signal, (unsigned long long)rx_status.mactime); -- cgit v0.10.2 From 58fa2aad295579a0fcab699eb76bff79eb8df3a8 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Tue, 25 Jun 2013 23:01:28 +0300 Subject: iwlwifi: mvm: remove the default calibration values for 7000 The fw is now able to run TX_IQ_SKEW and RX_IQ_SKEW by itself. No need for default values any more. 
Signed-off-by: Emmanuel Grumbach Reviewed-by: Dor Shaish Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c index cd7c003..c76299a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/iwlwifi/mvm/fw.c @@ -78,22 +78,6 @@ #define UCODE_VALID_OK cpu_to_le32(0x1) -/* Default calibration values for WkP - set to INIT image w/o running */ -static const u8 wkp_calib_values_rx_iq_skew[] = { 0x00, 0x00, 0x01, 0x00 }; -static const u8 wkp_calib_values_tx_iq_skew[] = { 0x01, 0x00, 0x00, 0x00 }; - -struct iwl_calib_default_data { - u16 size; - void *data; -}; - -#define CALIB_SIZE_N_DATA(_buf) {.size = sizeof(_buf), .data = &_buf} - -static const struct iwl_calib_default_data wkp_calib_default_data[12] = { - [9] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq_skew), - [11] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq_skew), -}; - struct iwl_mvm_alive_data { bool valid; u32 scd_base_addr; @@ -248,40 +232,6 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) sizeof(phy_cfg_cmd), &phy_cfg_cmd); } -static int iwl_set_default_calibrations(struct iwl_mvm *mvm) -{ - u8 cmd_raw[16]; /* holds the variable size commands */ - struct iwl_set_calib_default_cmd *cmd = - (struct iwl_set_calib_default_cmd *)cmd_raw; - int ret, i; - - /* Setting default values for calibrations we don't run */ - for (i = 0; i < ARRAY_SIZE(wkp_calib_default_data); i++) { - u16 cmd_len; - - if (wkp_calib_default_data[i].size == 0) - continue; - - memset(cmd_raw, 0, sizeof(cmd_raw)); - cmd_len = wkp_calib_default_data[i].size + sizeof(cmd); - cmd->calib_index = cpu_to_le16(i); - cmd->length = cpu_to_le16(wkp_calib_default_data[i].size); - if (WARN_ONCE(cmd_len > sizeof(cmd_raw), - "Need to enlarge cmd_raw to %d\n", cmd_len)) - break; - memcpy(cmd->data, wkp_calib_default_data[i].data, - wkp_calib_default_data[i].size); - ret = iwl_mvm_send_cmd_pdu(mvm, SET_CALIB_DEFAULT_CMD, 0, - sizeof(*cmd) + - wkp_calib_default_data[i].size, - cmd); - if (ret) - return ret; - } - - return 0; -} - int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) { struct iwl_notification_wait calib_wait; @@ -342,11 +292,6 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) if (ret) goto error; - /* need to set default values */ - ret = iwl_set_default_calibrations(mvm); - if (ret) - goto error; - /* * Send phy configurations command to init uCode * to start the 16.0 uCode init image internal calibrations. -- cgit v0.10.2 From ac1ed4163b5a523728fa0e8c27c1ff4d182f40fd Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 4 Jul 2013 15:25:25 +0200 Subject: iwlwifi: mvm: reprobe device on firmware error during restart If we get a firmware error during restart, we currently abandon any hope and simply fail, getting stuck until the driver is reloaded. Unfortunately, there isn't really much else we can do since restart will likely continue to fail, and asking mac80211 for disconnection just causes more error. To allow the user to at least set up the device again completely from scratch, reprobe the device and in doing so completely destroy any mac80211/driver state. 
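Since a second restart attempt is pointless, the error path falls back to a full device re-probe from a work item, pinning the module first so the work function cannot run after the module code is gone. The shape of that pattern in plain form (the names reprobe_ctx and schedule_reprobe are illustrative; the mvm code below uses iwl_mvm_reprobe):

#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct reprobe_ctx {
        struct device *dev;
        struct work_struct work;
};

static void reprobe_wk(struct work_struct *wk)
{
        struct reprobe_ctx *ctx = container_of(wk, struct reprobe_ctx, work);

        if (device_reprobe(ctx->dev))
                dev_err(ctx->dev, "reprobe failed!\n");
        kfree(ctx);
        module_put(THIS_MODULE);        /* balances try_module_get() below */
}

/* Called from the firmware error path; may be atomic, hence GFP_ATOMIC. */
static void schedule_reprobe(struct device *dev)
{
        struct reprobe_ctx *ctx;

        if (!try_module_get(THIS_MODULE))       /* module is unloading, give up */
                return;

        ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
        if (!ctx) {
                module_put(THIS_MODULE);
                return;
        }

        ctx->dev = dev;
        INIT_WORK(&ctx->work, reprobe_wk);
        schedule_work(&ctx->work);
}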
Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index fa1e1ce..5d5dedd 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -644,6 +644,22 @@ static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) ieee80211_free_txskb(mvm->hw, skb); } +struct iwl_mvm_reprobe { + struct device *dev; + struct work_struct work; +}; + +static void iwl_mvm_reprobe_wk(struct work_struct *wk) +{ + struct iwl_mvm_reprobe *reprobe; + + reprobe = container_of(wk, struct iwl_mvm_reprobe, work); + if (device_reprobe(reprobe->dev)) + dev_err(reprobe->dev, "reprobe failed!\n"); + kfree(reprobe); + module_put(THIS_MODULE); +} + static void iwl_mvm_nic_restart(struct iwl_mvm *mvm) { iwl_abort_notification_waits(&mvm->notif_wait); @@ -655,7 +671,29 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm) * can't recover this since we're already half suspended. */ if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { - IWL_ERR(mvm, "Firmware error during reconfiguration! Abort.\n"); + struct iwl_mvm_reprobe *reprobe; + + IWL_ERR(mvm, + "Firmware error during reconfiguration - reprobe!\n"); + + /* + * get a module reference to avoid doing this while unloading + * anyway and to avoid scheduling a work with code that's + * being removed. + */ + if (!try_module_get(THIS_MODULE)) { + IWL_ERR(mvm, "Module is being unloaded - abort\n"); + return; + } + + reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC); + if (!reprobe) { + module_put(THIS_MODULE); + return; + } + reprobe->dev = mvm->trans->dev; + INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk); + schedule_work(&reprobe->work); } else if (mvm->cur_ucode == IWL_UCODE_REGULAR && iwlwifi_mod_params.restart_fw) { /* -- cgit v0.10.2 From 88f2fd7300da1b671255ef26469627206fe20a8e Mon Sep 17 00:00:00 2001 From: Matti Gottlieb Date: Tue, 9 Jul 2013 15:25:46 +0300 Subject: iwlwifi: mvm: Enable user set TX power Support Tx power limitations. These limitations can come from mac80211 for various reasons. Signed-off-by: Matti Gottlieb Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c index acd2665..b76a9a8 100644 --- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c @@ -118,6 +118,7 @@ static const u8 iwl_nvm_channels[] = { #define LAST_2GHZ_HT_PLUS 9 #define LAST_5GHZ_HT 161 +#define DEFAULT_MAX_TX_POWER 16 /* rate data (static) */ static struct ieee80211_rate iwl_cfg80211_rates[] = { @@ -232,8 +233,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, /* Initialize regulatory-based run-time data */ - /* TODO: read the real value from the NVM */ - channel->max_power = 0; + /* + * Default value - highest tx power value. max_power + * is not used in mvm, and is used for backwards compatibility + */ + channel->max_power = DEFAULT_MAX_TX_POWER; is_5ghz = channel->band == IEEE80211_BAND_5GHZ; IWL_DEBUG_EEPROM(dev, "Ch. 
%d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n", diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h index 28cab82..55854a3 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h @@ -159,6 +159,7 @@ enum { TX_ANT_CONFIGURATION_CMD = 0x98, BT_CONFIG = 0x9b, STATISTICS_NOTIFICATION = 0x9d, + REDUCE_TX_POWER_CMD = 0x9f, /* RF-KILL commands and notifications */ CARD_STATE_CMD = 0xa0, @@ -226,6 +227,19 @@ struct iwl_tx_ant_cfg_cmd { __le32 valid; } __packed; +/** + * struct iwl_reduce_tx_power_cmd - TX power reduction command + * REDUCE_TX_POWER_CMD = 0x9f + * @flags: (reserved for future implementation) + * @mac_context_id: id of the mac ctx for which we are reducing TX power. + * @pwr_restriction: TX power restriction in dBms. + */ +struct iwl_reduce_tx_power_cmd { + u8 flags; + u8 mac_context_id; + __le16 pwr_restriction; +} __packed; /* TX_REDUCED_POWER_API_S_VER_1 */ + /* * Calibration control struct. * Sent as part of the phy configuration command. diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 30319e0..fbb8c2d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -714,6 +714,20 @@ out_release: mutex_unlock(&mvm->mutex); } +static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + s8 tx_power) +{ + /* FW is in charge of regulatory enforcement */ + struct iwl_reduce_tx_power_cmd reduce_txpwr_cmd = { + .mac_context_id = iwl_mvm_vif_from_mac80211(vif)->id, + .pwr_restriction = cpu_to_le16(tx_power), + }; + + return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, CMD_SYNC, + sizeof(reduce_txpwr_cmd), + &reduce_txpwr_cmd); +} + static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed) { return 0; @@ -794,6 +808,11 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, if (ret) IWL_ERR(mvm, "failed to update power mode\n"); } + if (changes & BSS_CHANGED_TXPOWER) { + IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n", + bss_conf->txpower); + iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower); + } } static int iwl_mvm_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif) diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index 5d5dedd..70ac726 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -275,6 +275,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = { CMD(BEACON_NOTIFICATION), CMD(BEACON_TEMPLATE_CMD), CMD(STATISTICS_NOTIFICATION), + CMD(REDUCE_TX_POWER_CMD), CMD(TX_ANT_CONFIGURATION_CMD), CMD(D3_CONFIG_CMD), CMD(PROT_OFFLOAD_CONFIG_CMD), -- cgit v0.10.2 From 5369d6c167317a99caf2c4c87957f07cd77a1888 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 8 Jul 2013 11:17:06 +0200 Subject: iwlwifi: mvm: support six IPv6 addresses in D3 Newer firmware supports offloading more IPv6 addresses for NDP, adjust the code to send the correct command depending on the firmware capability. 
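This is the usual versioned-command arrangement: both layouts stay in the driver and the firmware's TLV capability flag decides which one is built and how many addresses fit. A trimmed-down standalone sketch of the selection (stand-in types and flag value; the real structures are iwl_proto_offload_cmd_v1/v2 in the diff that follows):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CAP_D3_6_IPV6_ADDRS (1u << 10)  /* stands in for the new TLV flag bit */

struct offload_cmd_v1 { uint8_t target_addr[2][16]; }; /* up to 2 IPv6 targets */
struct offload_cmd_v2 { uint8_t target_addr[6][16]; }; /* up to 6 IPv6 targets */

/* Fill whichever command layout the firmware advertises; return its size. */
static size_t build_offload_cmd(uint32_t fw_caps, uint8_t (*addrs)[16],
                                int n_addrs, void *buf)
{
        if (fw_caps & CAP_D3_6_IPV6_ADDRS) {
                struct offload_cmd_v2 *cmd = buf;
                int n = n_addrs < 6 ? n_addrs : 6;

                memcpy(cmd->target_addr, addrs, n * 16);
                return sizeof(*cmd);
        } else {
                struct offload_cmd_v1 *cmd = buf;
                int n = n_addrs < 2 ? n_addrs : 2;

                memcpy(cmd->target_addr, addrs, n * 16);
                return sizeof(*cmd);
        }
}

int main(void)
{
        uint8_t targets[3][16] = { { 0x20, 0x01 }, { 0xfe, 0x80 }, { 0xfd } };
        union { struct offload_cmd_v1 v1; struct offload_cmd_v2 v2; } cmd;

        /* Old firmware: only the first two addresses fit in the command. */
        printf("v1 payload: %zu bytes\n", build_offload_cmd(0, targets, 3, &cmd));
        /* Firmware with the new flag: all three addresses are offloaded. */
        printf("v2 payload: %zu bytes\n",
               build_offload_cmd(CAP_D3_6_IPV6_ADDRS, targets, 3, &cmd));
        return 0;
}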
Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h index 46dc38a..b766ee9 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw.h @@ -76,6 +76,8 @@ * @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS * @IWL_UCODE_TLV_FLAGS_UAPSD: This uCode image supports uAPSD * @IWL_UCODE_TLV_FLAGS_RX_ENERGY_API: supports rx signal strength api + * @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six + * (rather than two) IPv6 addresses */ enum iwl_ucode_tlv_flag { IWL_UCODE_TLV_FLAGS_PAN = BIT(0), @@ -85,6 +87,7 @@ enum iwl_ucode_tlv_flag { IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4), IWL_UCODE_TLV_FLAGS_UAPSD = BIT(6), IWL_UCODE_TLV_FLAGS_RX_ENERGY_API = BIT(8), + IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10), }; /* The default calibrate table size if not specified by firmware file */ diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c index 7e5e5c2..ebf7f94 100644 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/iwlwifi/mvm/d3.c @@ -105,7 +105,7 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw, list_for_each_entry(ifa, &idev->addr_list, if_list) { mvmvif->target_ipv6_addrs[idx] = ifa->addr; idx++; - if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS) + if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX) break; } read_unlock_bh(&idev->lock); @@ -373,36 +373,68 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm, static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { - struct iwl_proto_offload_cmd cmd = {}; + union { + struct iwl_proto_offload_cmd_v1 v1; + struct iwl_proto_offload_cmd_v2 v2; + } cmd = {}; + struct iwl_proto_offload_cmd_common *common; + u32 enabled = 0, size; #if IS_ENABLED(CONFIG_IPV6) struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int i; - if (mvmvif->num_target_ipv6_addrs) { - cmd.enabled |= cpu_to_le32(IWL_D3_PROTO_OFFLOAD_NS); - memcpy(cmd.ndp_mac_addr, vif->addr, ETH_ALEN); - } + if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) { + if (mvmvif->num_target_ipv6_addrs) { + enabled |= IWL_D3_PROTO_OFFLOAD_NS; + memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN); + } + + BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) != + sizeof(mvmvif->target_ipv6_addrs[0])); + + for (i = 0; i < min(mvmvif->num_target_ipv6_addrs, + IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++) + memcpy(cmd.v2.target_ipv6_addr[i], + &mvmvif->target_ipv6_addrs[i], + sizeof(cmd.v2.target_ipv6_addr[i])); + } else { + if (mvmvif->num_target_ipv6_addrs) { + enabled |= IWL_D3_PROTO_OFFLOAD_NS; + memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN); + } - BUILD_BUG_ON(sizeof(cmd.target_ipv6_addr[i]) != - sizeof(mvmvif->target_ipv6_addrs[i])); + BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) != + sizeof(mvmvif->target_ipv6_addrs[0])); - for (i = 0; i < mvmvif->num_target_ipv6_addrs; i++) - memcpy(cmd.target_ipv6_addr[i], - &mvmvif->target_ipv6_addrs[i], - sizeof(cmd.target_ipv6_addr[i])); + for (i = 0; i < min(mvmvif->num_target_ipv6_addrs, + IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++) + memcpy(cmd.v1.target_ipv6_addr[i], + &mvmvif->target_ipv6_addrs[i], + sizeof(cmd.v1.target_ipv6_addr[i])); + } #endif + if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) { + common = &cmd.v2.common; + size = sizeof(cmd.v2); + } else { + common = &cmd.v1.common; + size = sizeof(cmd.v1); + } + if (vif->bss_conf.arp_addr_cnt) { - 
cmd.enabled |= cpu_to_le32(IWL_D3_PROTO_OFFLOAD_ARP); - cmd.host_ipv4_addr = vif->bss_conf.arp_addr_list[0]; - memcpy(cmd.arp_mac_addr, vif->addr, ETH_ALEN); + enabled |= IWL_D3_PROTO_OFFLOAD_ARP; + common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0]; + memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN); } - if (!cmd.enabled) + if (!enabled) return 0; + common->enabled = cpu_to_le32(enabled); + return iwl_mvm_send_cmd_pdu(mvm, PROT_OFFLOAD_CONFIG_CMD, CMD_SYNC, - sizeof(cmd), &cmd); + size, &cmd); } enum iwl_mvm_tcp_packet_type { diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h index 6f8b2c1..df72fcdf 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h @@ -98,34 +98,63 @@ enum iwl_proto_offloads { IWL_D3_PROTO_OFFLOAD_NS = BIT(1), }; -#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS 2 +#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1 2 +#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2 6 +#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX 6 /** - * struct iwl_proto_offload_cmd - ARP/NS offload configuration + * struct iwl_proto_offload_cmd_common - ARP/NS offload common part * @enabled: enable flags * @remote_ipv4_addr: remote address to answer to (or zero if all) * @host_ipv4_addr: our IPv4 address to respond to queries for * @arp_mac_addr: our MAC address for ARP responses - * @remote_ipv6_addr: remote address to answer to (or zero if all) - * @solicited_node_ipv6_addr: broken -- solicited node address exists - * for each target address - * @target_ipv6_addr: our target addresses - * @ndp_mac_addr: neighbor soliciation response MAC address + * @reserved: unused */ -struct iwl_proto_offload_cmd { +struct iwl_proto_offload_cmd_common { __le32 enabled; __be32 remote_ipv4_addr; __be32 host_ipv4_addr; u8 arp_mac_addr[ETH_ALEN]; - __le16 reserved1; + __le16 reserved; +} __packed; +/** + * struct iwl_proto_offload_cmd_v1 - ARP/NS offload configuration + * @common: common/IPv4 configuration + * @remote_ipv6_addr: remote address to answer to (or zero if all) + * @solicited_node_ipv6_addr: broken -- solicited node address exists + * for each target address + * @target_ipv6_addr: our target addresses + * @ndp_mac_addr: neighbor soliciation response MAC address + */ +struct iwl_proto_offload_cmd_v1 { + struct iwl_proto_offload_cmd_common common; u8 remote_ipv6_addr[16]; u8 solicited_node_ipv6_addr[16]; - u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS][16]; + u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1][16]; u8 ndp_mac_addr[ETH_ALEN]; __le16 reserved2; } __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_1 */ +/** + * struct iwl_proto_offload_cmd_v2 - ARP/NS offload configuration + * @common: common/IPv4 configuration + * @remote_ipv6_addr: remote address to answer to (or zero if all) + * @solicited_node_ipv6_addr: broken -- solicited node address exists + * for each target address + * @target_ipv6_addr: our target addresses + * @ndp_mac_addr: neighbor soliciation response MAC address + */ +struct iwl_proto_offload_cmd_v2 { + struct iwl_proto_offload_cmd_common common; + u8 remote_ipv6_addr[16]; + u8 solicited_node_ipv6_addr[16]; + u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2][16]; + u8 ndp_mac_addr[ETH_ALEN]; + u8 numValidIPv6Addresses; + u8 reserved2[3]; +} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_2 */ + /* * WOWLAN_PATTERNS diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index caa6a175..879fb01 100644 --- 
a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -282,7 +282,7 @@ struct iwl_mvm_vif { #if IS_ENABLED(CONFIG_IPV6) /* IPv6 addresses for WoWLAN */ - struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS]; + struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX]; int num_target_ipv6_addrs; #endif #endif -- cgit v0.10.2 From 0f2ed58e6e185bb7f22a534d12b755b7459177ef Mon Sep 17 00:00:00 2001 From: Alexander Bondar Date: Tue, 2 Jul 2013 19:51:09 +0300 Subject: iwlwifi: mvm: Change AM->PSM timeout for EAPOL frames Currently after sending EAPOL frame FW transition to power saving mode within 10 or 100 msec (as specified by power table command). According to new requirement this timeout for a specific EAPOL frame must be controlled by the driver by setting tx_pm_timeout field of TX_CMD to 2 (PM_FRAME_ENUM_MGMT). This value corresponds to 32 msec timeout. Signed-off-by: Alexander Bondar Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index f0e96a9..7694b8f 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -123,6 +123,8 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, * it */ WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU); + } else if (skb->protocol == cpu_to_be16(ETH_P_PAE)) { + tx_cmd->pm_frame_timeout = cpu_to_le16(2); } else { tx_cmd->pm_frame_timeout = 0; } -- cgit v0.10.2 From fd11bd05552e8639abbdc2f1d478f70dfb9b5e3e Mon Sep 17 00:00:00 2001 From: Ilan Peer Date: Sun, 16 Jun 2013 12:18:11 +0300 Subject: iwlwifi: mvm: Return on inconsistency in add interface Return in case that HW restart is in progress but the added interface is not found during the iteration over all the interfaces. Signed-off-by: Ilan Peer Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c index 94aae9c..5fe23a5 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c @@ -264,7 +264,8 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, return 0; /* Therefore, in recovery, we can't get here */ - WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)); + if (WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) + return -EBUSY; mvmvif->id = find_first_bit(data.available_mac_ids, NUM_MAC_INDEX_DRIVER); -- cgit v0.10.2 From 7b8359cf2bda0103688f9cc623ce6c221a6004c1 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 10 Jul 2013 12:59:38 +0200 Subject: iwlwifi: mvm: assign quota per virtual interface Instead of assigning quota per used binding (channel) assign the same amount of quota for each virtual interface so that when there are more than two interfaces using more than one channel, we'll stay on the channels proportionally to the number of virtual interfaces using the channels. 
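For example, with 128 quota fragments, one channel hosting two interfaces and another hosting one, the first binding should receive 84 fragments and the second 42, rather than an even 64/64 split per binding. A minimal stand-alone sketch of that arithmetic (MAX_QUOTA and the interface counts below are made-up stand-ins for the driver's real values):

#include <stdio.h>

#define MAX_BINDINGS 4
#define MAX_QUOTA 128 /* stand-in for IWL_MVM_MAX_QUOTA */

int main(void)
{
	/* hypothetical setup: binding 0 carries two vifs, binding 1 carries one */
	int n_interfaces[MAX_BINDINGS] = { 2, 1, 0, 0 };
	int i, num_active_macs = 0, quota = 0;

	/* divide by the number of interfaces, not the number of bindings */
	for (i = 0; i < MAX_BINDINGS; i++)
		num_active_macs += n_interfaces[i];

	if (num_active_macs)
		quota = MAX_QUOTA / num_active_macs;

	/* each binding gets quota in proportion to the vifs it hosts */
	for (i = 0; i < MAX_BINDINGS; i++)
		if (n_interfaces[i])
			printf("binding %d: %d fragments\n",
			       i, quota * n_interfaces[i]);
	return 0;
}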
Reviewed-by: Emmanuel Grumbach Reviewed-by: Ilan Peer Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c index 29d49cf..1897387 100644 --- a/drivers/net/wireless/iwlwifi/mvm/quota.c +++ b/drivers/net/wireless/iwlwifi/mvm/quota.c @@ -132,7 +132,7 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac, int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif) { struct iwl_time_quota_cmd cmd; - int i, idx, ret, num_active_bindings, quota, quota_rem; + int i, idx, ret, num_active_macs, quota, quota_rem; struct iwl_mvm_quota_iterator_data data = { .n_interfaces = {}, .colors = { -1, -1, -1, -1 }, @@ -162,18 +162,17 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif) * IWL_MVM_MAX_QUOTA fragments. Divide these fragments * equally between all the bindings that require quota */ - num_active_bindings = 0; + num_active_macs = 0; for (i = 0; i < MAX_BINDINGS; i++) { cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID); - if (data.n_interfaces[i] > 0) - num_active_bindings++; + num_active_macs += data.n_interfaces[i]; } quota = 0; quota_rem = 0; - if (num_active_bindings) { - quota = IWL_MVM_MAX_QUOTA / num_active_bindings; - quota_rem = IWL_MVM_MAX_QUOTA % num_active_bindings; + if (num_active_macs) { + quota = IWL_MVM_MAX_QUOTA / num_active_macs; + quota_rem = IWL_MVM_MAX_QUOTA % num_active_macs; } for (idx = 0, i = 0; i < MAX_BINDINGS; i++) { @@ -187,7 +186,8 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif) cmd.quotas[idx].quota = cpu_to_le32(0); cmd.quotas[idx].max_duration = cpu_to_le32(0); } else { - cmd.quotas[idx].quota = cpu_to_le32(quota); + cmd.quotas[idx].quota = + cpu_to_le32(quota * data.n_interfaces[i]); cmd.quotas[idx].max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA); } -- cgit v0.10.2 From 99545924a10c2c8ee2cc04a0d1be38a4c5b3ef66 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 14 Jun 2013 13:36:21 +0200 Subject: iwlwifi: mvm: split constants into separate file To make maintaining some constant default values in the driver easier, declare them in a separate file. Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h new file mode 100644 index 0000000..64656e0 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/constants.h @@ -0,0 +1,71 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. 
+ * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __MVM_CONSTANTS_H +#define __MVM_CONSTANTS_H + +#define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC) +#define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC) +#define IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT (10 * USEC_PER_MSEC) +#define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC) + +#endif /* __MVM_CONSTANTS_H */ diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index 879fb01..8891c81 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -76,6 +76,7 @@ #include "iwl-trans.h" #include "sta.h" #include "fw-api.h" +#include "constants.h" #define IWL_INVALID_MAC80211_QUEUE 0xff #define IWL_MVM_MAX_ADDRESSES 5 diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c index 644bf47..8ed0403 100644 --- a/drivers/net/wireless/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/iwlwifi/mvm/power.c @@ -217,11 +217,15 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, } if (mvm->cur_ucode != IWL_UCODE_WOWLAN) { - cmd->rx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC); - cmd->tx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC); + cmd->rx_data_timeout = + cpu_to_le32(IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT); + cmd->tx_data_timeout = + cpu_to_le32(IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT); } else { - cmd->rx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC); - cmd->tx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC); + cmd->rx_data_timeout = + cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT); + cmd->tx_data_timeout = + cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT); } #ifdef CONFIG_IWLWIFI_DEBUGFS -- cgit v0.10.2 From 
77740cb433fd5c4394cad4bb948e5c550b027837 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Wed, 26 Jun 2013 23:51:41 +0300 Subject: iwlwifi: mvm: register vif debugfs for AP mode too The current registered the per-vif debugfs handler for STA mode only. Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index fbb8c2d..13cc772 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -526,6 +526,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, goto out_release; } + iwl_mvm_vif_dbgfs_register(mvm, vif); goto out_unlock; } -- cgit v0.10.2 From 5dca7c241e92a5c619260ad969b53b2c4849c340 Mon Sep 17 00:00:00 2001 From: Hila Gonen Date: Tue, 16 Jul 2013 11:15:35 +0300 Subject: iwlwifi: mvm: Change beacon filtering command Change beacon filtering command due to a change in the API. In case the FW supports the old API, we do not send the BF HCMD and assume that since the corresponding struct in the FW is zeroed by default then we don't need to disable it in the FW actively. Signed-off-by: Hila Gonen Signed-off-by: Dor Shaish Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h index b766ee9..bd335f0 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw.h @@ -78,6 +78,7 @@ * @IWL_UCODE_TLV_FLAGS_RX_ENERGY_API: supports rx signal strength api * @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six * (rather than two) IPv6 addresses + * @IWL_UCODE_TLV_FLAGS_BF_UPDATED: new beacon filtering API */ enum iwl_ucode_tlv_flag { IWL_UCODE_TLV_FLAGS_PAN = BIT(0), @@ -88,6 +89,7 @@ enum iwl_ucode_tlv_flag { IWL_UCODE_TLV_FLAGS_UAPSD = BIT(6), IWL_UCODE_TLV_FLAGS_RX_ENERGY_API = BIT(8), IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10), + IWL_UCODE_TLV_FLAGS_BF_UPDATED = BIT(11), }; /* The default calibrate table size if not specified by firmware file */ diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c index 5d669da..347aa1c 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c @@ -632,8 +632,14 @@ static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif, case MVM_DEBUGFS_BF_ROAMING_STATE: dbgfs_bf->bf_roaming_state = value; break; - case MVM_DEBUGFS_BF_TEMPERATURE_DELTA: - dbgfs_bf->bf_temperature_delta = value; + case MVM_DEBUGFS_BF_TEMP_THRESHOLD: + dbgfs_bf->bf_temp_threshold = value; + break; + case MVM_DEBUGFS_BF_TEMP_FAST_FILTER: + dbgfs_bf->bf_temp_fast_filter = value; + break; + case MVM_DEBUGFS_BF_TEMP_SLOW_FILTER: + dbgfs_bf->bf_temp_slow_filter = value; break; case MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER: dbgfs_bf->bf_enable_beacon_filter = value; @@ -692,13 +698,27 @@ static ssize_t iwl_dbgfs_bf_params_write(struct file *file, value > IWL_BF_ROAMING_STATE_MAX) return -EINVAL; param = MVM_DEBUGFS_BF_ROAMING_STATE; - } else if (!strncmp("bf_temperature_delta=", buf, 21)) { - if (sscanf(buf+21, "%d", &value) != 1) + } else if (!strncmp("bf_temp_threshold=", buf, 18)) { + if (sscanf(buf+18, "%d", &value) != 1) + return -EINVAL; + if (value < IWL_BF_TEMP_THRESHOLD_MIN || + value > IWL_BF_TEMP_THRESHOLD_MAX) + return -EINVAL; + param = MVM_DEBUGFS_BF_TEMP_THRESHOLD; + } else if (!strncmp("bf_temp_fast_filter=", buf, 20)) { + if (sscanf(buf+20, "%d", &value) != 1) + return -EINVAL; 
+ if (value < IWL_BF_TEMP_FAST_FILTER_MIN || + value > IWL_BF_TEMP_FAST_FILTER_MAX) + return -EINVAL; + param = MVM_DEBUGFS_BF_TEMP_FAST_FILTER; + } else if (!strncmp("bf_temp_slow_filter=", buf, 20)) { + if (sscanf(buf+20, "%d", &value) != 1) return -EINVAL; - if (value < IWL_BF_TEMPERATURE_DELTA_MIN || - value > IWL_BF_TEMPERATURE_DELTA_MAX) + if (value < IWL_BF_TEMP_SLOW_FILTER_MIN || + value > IWL_BF_TEMP_SLOW_FILTER_MAX) return -EINVAL; - param = MVM_DEBUGFS_BF_TEMPERATURE_DELTA; + param = MVM_DEBUGFS_BF_TEMP_SLOW_FILTER; } else if (!strncmp("bf_enable_beacon_filter=", buf, 24)) { if (sscanf(buf+24, "%d", &value) != 1) return -EINVAL; @@ -760,41 +780,41 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file, int pos = 0; const size_t bufsz = sizeof(buf); struct iwl_beacon_filter_cmd cmd = { - .bf_energy_delta = IWL_BF_ENERGY_DELTA_DEFAULT, - .bf_roaming_energy_delta = IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT, - .bf_roaming_state = IWL_BF_ROAMING_STATE_DEFAULT, - .bf_temperature_delta = IWL_BF_TEMPERATURE_DELTA_DEFAULT, - .bf_enable_beacon_filter = IWL_BF_ENABLE_BEACON_FILTER_DEFAULT, - .bf_debug_flag = IWL_BF_DEBUG_FLAG_DEFAULT, - .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT), - .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_DEFAULT), - .ba_enable_beacon_abort = IWL_BA_ENABLE_BEACON_ABORT_DEFAULT, + IWL_BF_CMD_CONFIG_DEFAULTS, + .bf_enable_beacon_filter = + cpu_to_le32(IWL_BF_ENABLE_BEACON_FILTER_DEFAULT), + .ba_enable_beacon_abort = + cpu_to_le32(IWL_BA_ENABLE_BEACON_ABORT_DEFAULT), }; iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd); if (mvmvif->bf_enabled) - cmd.bf_enable_beacon_filter = 1; + cmd.bf_enable_beacon_filter = cpu_to_le32(1); else cmd.bf_enable_beacon_filter = 0; pos += scnprintf(buf+pos, bufsz-pos, "bf_energy_delta = %d\n", - cmd.bf_energy_delta); + le32_to_cpu(cmd.bf_energy_delta)); pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_energy_delta = %d\n", - cmd.bf_roaming_energy_delta); + le32_to_cpu(cmd.bf_roaming_energy_delta)); pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_state = %d\n", - cmd.bf_roaming_state); - pos += scnprintf(buf+pos, bufsz-pos, "bf_temperature_delta = %d\n", - cmd.bf_temperature_delta); + le32_to_cpu(cmd.bf_roaming_state)); + pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_threshold = %d\n", + le32_to_cpu(cmd.bf_temp_threshold)); + pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_fast_filter = %d\n", + le32_to_cpu(cmd.bf_temp_fast_filter)); + pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_slow_filter = %d\n", + le32_to_cpu(cmd.bf_temp_slow_filter)); pos += scnprintf(buf+pos, bufsz-pos, "bf_enable_beacon_filter = %d\n", - cmd.bf_enable_beacon_filter); + le32_to_cpu(cmd.bf_enable_beacon_filter)); pos += scnprintf(buf+pos, bufsz-pos, "bf_debug_flag = %d\n", - cmd.bf_debug_flag); + le32_to_cpu(cmd.bf_debug_flag)); pos += scnprintf(buf+pos, bufsz-pos, "bf_escape_timer = %d\n", - cmd.bf_escape_timer); + le32_to_cpu(cmd.bf_escape_timer)); pos += scnprintf(buf+pos, bufsz-pos, "ba_escape_timer = %d\n", - cmd.ba_escape_timer); + le32_to_cpu(cmd.ba_escape_timer)); pos += scnprintf(buf+pos, bufsz-pos, "ba_enable_beacon_abort = %d\n", - cmd.ba_enable_beacon_abort); + le32_to_cpu(cmd.ba_enable_beacon_abort)); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h index 300211d..149347d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h @@ -216,11 +216,21 @@ 
struct iwl_mac_power_cmd { * calculated for current beacon is less than the threshold, use * Roaming Energy Delta Threshold, otherwise use normal Energy Delta * Threshold. Typical energy threshold is -72dBm. - * @bf_temperature_delta: Send Beacon to driver if delta in temperature values - * calculated for this and the last passed beacon is greater than this - * threshold. Zero value means that the temperature changeis ignored for + * @bf_temp_threshold: This threshold determines the type of temperature + * filtering (Slow or Fast) that is selected (Units are in Celsuis): + * If the current temperature is above this threshold - Fast filter + * will be used, If the current temperature is below this threshold - + * Slow filter will be used. + * @bf_temp_fast_filter: Send Beacon to driver if delta in temperature values + * calculated for this and the last passed beacon is greater than this + * threshold. Zero value means that the temperature change is ignored for * beacon filtering; beacons will not be forced to be sent to driver * regardless of whether its temerature has been changed. + * @bf_temp_slow_filter: Send Beacon to driver if delta in temperature values + * calculated for this and the last passed beacon is greater than this + * threshold. Zero value means that the temperature change is ignored for + * beacon filtering; beacons will not be forced to be sent to driver + * regardless of whether its temerature has been changed. * @bf_enable_beacon_filter: 1, beacon filtering is enabled; 0, disabled. * @bf_filter_escape_timer: Send beacons to to driver if no beacons were passed * for a specific period of time. Units: Beacons. @@ -229,17 +239,17 @@ struct iwl_mac_power_cmd { * @ba_enable_beacon_abort: 1, beacon abort is enabled; 0, disabled. */ struct iwl_beacon_filter_cmd { - u8 bf_energy_delta; - u8 bf_roaming_energy_delta; - u8 bf_roaming_state; - u8 bf_temperature_delta; - u8 bf_enable_beacon_filter; - u8 bf_debug_flag; - __le16 reserved1; + __le32 bf_energy_delta; + __le32 bf_roaming_energy_delta; + __le32 bf_roaming_state; + __le32 bf_temp_threshold; + __le32 bf_temp_fast_filter; + __le32 bf_temp_slow_filter; + __le32 bf_enable_beacon_filter; + __le32 bf_debug_flag; __le32 bf_escape_timer; __le32 ba_escape_timer; - u8 ba_enable_beacon_abort; - u8 reserved2[3]; + __le32 ba_enable_beacon_abort; } __packed; /* Beacon filtering and beacon abort */ @@ -255,9 +265,17 @@ struct iwl_beacon_filter_cmd { #define IWL_BF_ROAMING_STATE_MAX 255 #define IWL_BF_ROAMING_STATE_MIN 0 -#define IWL_BF_TEMPERATURE_DELTA_DEFAULT 5 -#define IWL_BF_TEMPERATURE_DELTA_MAX 255 -#define IWL_BF_TEMPERATURE_DELTA_MIN 0 +#define IWL_BF_TEMP_THRESHOLD_DEFAULT 112 +#define IWL_BF_TEMP_THRESHOLD_MAX 255 +#define IWL_BF_TEMP_THRESHOLD_MIN 0 + +#define IWL_BF_TEMP_FAST_FILTER_DEFAULT 1 +#define IWL_BF_TEMP_FAST_FILTER_MAX 255 +#define IWL_BF_TEMP_FAST_FILTER_MIN 0 + +#define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5 +#define IWL_BF_TEMP_SLOW_FILTER_MAX 255 +#define IWL_BF_TEMP_SLOW_FILTER_MIN 0 #define IWL_BF_ENABLE_BEACON_FILTER_DEFAULT 1 @@ -273,13 +291,16 @@ struct iwl_beacon_filter_cmd { #define IWL_BA_ENABLE_BEACON_ABORT_DEFAULT 1 -#define IWL_BF_CMD_CONFIG_DEFAULTS \ - .bf_energy_delta = IWL_BF_ENERGY_DELTA_DEFAULT, \ - .bf_roaming_energy_delta = IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT, \ - .bf_roaming_state = IWL_BF_ROAMING_STATE_DEFAULT, \ - .bf_temperature_delta = IWL_BF_TEMPERATURE_DELTA_DEFAULT, \ - .bf_debug_flag = IWL_BF_DEBUG_FLAG_DEFAULT, \ - .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT), \ +#define 
IWL_BF_CMD_CONFIG_DEFAULTS \ + .bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA_DEFAULT), \ + .bf_roaming_energy_delta = \ + cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT), \ + .bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE_DEFAULT), \ + .bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD_DEFAULT), \ + .bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER_DEFAULT), \ + .bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER_DEFAULT), \ + .bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG_DEFAULT), \ + .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT), \ .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_DEFAULT) #endif diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 13cc772..8e4e79a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -563,7 +563,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, /* beacon filtering */ if (!mvm->bf_allowed_vif && - vif->type == NL80211_IFTYPE_STATION && !vif->p2p){ + vif->type == NL80211_IFTYPE_STATION && !vif->p2p && + mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED){ mvm->bf_allowed_vif = mvmvif; vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; } diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index 8891c81..dda0cd0 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -195,24 +195,28 @@ enum iwl_dbgfs_bf_mask { MVM_DEBUGFS_BF_ENERGY_DELTA = BIT(0), MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA = BIT(1), MVM_DEBUGFS_BF_ROAMING_STATE = BIT(2), - MVM_DEBUGFS_BF_TEMPERATURE_DELTA = BIT(3), - MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER = BIT(4), - MVM_DEBUGFS_BF_DEBUG_FLAG = BIT(5), - MVM_DEBUGFS_BF_ESCAPE_TIMER = BIT(6), - MVM_DEBUGFS_BA_ESCAPE_TIMER = BIT(7), - MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT = BIT(8), + MVM_DEBUGFS_BF_TEMP_THRESHOLD = BIT(3), + MVM_DEBUGFS_BF_TEMP_FAST_FILTER = BIT(4), + MVM_DEBUGFS_BF_TEMP_SLOW_FILTER = BIT(5), + MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER = BIT(6), + MVM_DEBUGFS_BF_DEBUG_FLAG = BIT(7), + MVM_DEBUGFS_BF_ESCAPE_TIMER = BIT(8), + MVM_DEBUGFS_BA_ESCAPE_TIMER = BIT(9), + MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT = BIT(10), }; struct iwl_dbgfs_bf { - u8 bf_energy_delta; - u8 bf_roaming_energy_delta; - u8 bf_roaming_state; - u8 bf_temperature_delta; - u8 bf_enable_beacon_filter; - u8 bf_debug_flag; + u32 bf_energy_delta; + u32 bf_roaming_energy_delta; + u32 bf_roaming_state; + u32 bf_temp_threshold; + u32 bf_temp_fast_filter; + u32 bf_temp_slow_filter; + u32 bf_enable_beacon_filter; + u32 bf_debug_flag; u32 bf_escape_timer; u32 ba_escape_timer; - u8 ba_enable_beacon_abort; + u32 ba_enable_beacon_abort; int mask; }; #endif diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c index 8ed0403..30306a9 100644 --- a/drivers/net/wireless/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/iwlwifi/mvm/power.c @@ -85,23 +85,27 @@ int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm, if (!ret) { IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n", - cmd->ba_enable_beacon_abort); + le32_to_cpu(cmd->ba_enable_beacon_abort)); IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n", - cmd->ba_escape_timer); + le32_to_cpu(cmd->ba_escape_timer)); IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n", - cmd->bf_debug_flag); + le32_to_cpu(cmd->bf_debug_flag)); IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n", - cmd->bf_enable_beacon_filter); + 
le32_to_cpu(cmd->bf_enable_beacon_filter)); IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n", - cmd->bf_energy_delta); + le32_to_cpu(cmd->bf_energy_delta)); IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n", - cmd->bf_escape_timer); + le32_to_cpu(cmd->bf_escape_timer)); IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n", - cmd->bf_roaming_energy_delta); + le32_to_cpu(cmd->bf_roaming_energy_delta)); IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n", - cmd->bf_roaming_state); - IWL_DEBUG_POWER(mvm, "bf_temperature_delta is: %d\n", - cmd->bf_temperature_delta); + le32_to_cpu(cmd->bf_roaming_state)); + IWL_DEBUG_POWER(mvm, "bf_temp_threshold is: %d\n", + le32_to_cpu(cmd->bf_temp_threshold)); + IWL_DEBUG_POWER(mvm, "bf_temp_fast_filter is: %d\n", + le32_to_cpu(cmd->bf_temp_fast_filter)); + IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n", + le32_to_cpu(cmd->bf_temp_slow_filter)); } return ret; } @@ -112,8 +116,8 @@ int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_beacon_filter_cmd cmd = { IWL_BF_CMD_CONFIG_DEFAULTS, - .bf_enable_beacon_filter = 1, - .ba_enable_beacon_abort = enable, + .bf_enable_beacon_filter = cpu_to_le32(1), + .ba_enable_beacon_abort = cpu_to_le32(enable), }; if (!mvmvif->bf_enabled) @@ -369,22 +373,30 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf; if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ENERGY_DELTA) - cmd->bf_energy_delta = dbgfs_bf->bf_energy_delta; + cmd->bf_energy_delta = cpu_to_le32(dbgfs_bf->bf_energy_delta); if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA) cmd->bf_roaming_energy_delta = - dbgfs_bf->bf_roaming_energy_delta; + cpu_to_le32(dbgfs_bf->bf_roaming_energy_delta); if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_STATE) - cmd->bf_roaming_state = dbgfs_bf->bf_roaming_state; - if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMPERATURE_DELTA) - cmd->bf_temperature_delta = dbgfs_bf->bf_temperature_delta; + cmd->bf_roaming_state = cpu_to_le32(dbgfs_bf->bf_roaming_state); + if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_THRESHOLD) + cmd->bf_temp_threshold = + cpu_to_le32(dbgfs_bf->bf_temp_threshold); + if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_FAST_FILTER) + cmd->bf_temp_fast_filter = + cpu_to_le32(dbgfs_bf->bf_temp_fast_filter); + if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_SLOW_FILTER) + cmd->bf_temp_slow_filter = + cpu_to_le32(dbgfs_bf->bf_temp_slow_filter); if (dbgfs_bf->mask & MVM_DEBUGFS_BF_DEBUG_FLAG) - cmd->bf_debug_flag = dbgfs_bf->bf_debug_flag; + cmd->bf_debug_flag = cpu_to_le32(dbgfs_bf->bf_debug_flag); if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ESCAPE_TIMER) cmd->bf_escape_timer = cpu_to_le32(dbgfs_bf->bf_escape_timer); if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ESCAPE_TIMER) cmd->ba_escape_timer = cpu_to_le32(dbgfs_bf->ba_escape_timer); if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT) - cmd->ba_enable_beacon_abort = dbgfs_bf->ba_enable_beacon_abort; + cmd->ba_enable_beacon_abort = + cpu_to_le32(dbgfs_bf->ba_enable_beacon_abort); } #endif @@ -394,7 +406,7 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_beacon_filter_cmd cmd = { IWL_BF_CMD_CONFIG_DEFAULTS, - .bf_enable_beacon_filter = 1, + .bf_enable_beacon_filter = cpu_to_le32(1), }; int ret; @@ -418,7 +430,8 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; - if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) + if 
(!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED) || + vif->type != NL80211_IFTYPE_STATION || vif->p2p) return 0; ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd); -- cgit v0.10.2 From 291aa7c4a4c01ef925b366174fc05ff2a757fad0 Mon Sep 17 00:00:00 2001 From: Eran Harary Date: Wed, 3 Jul 2013 11:00:06 +0300 Subject: iwlwifi: mvm: fix debugfs restart if fw_restart is disabled If fw_restart is disabled, using the fw_restart debugfs file will enable fw_restart and then send the failing command, but this still frequently fails restart because it resets fw_restart afterwards and is thus racy. Fix this by tracking fw_restart separately and allowing "always restart", "never restart" and "restart N times". Signed-off-by: Eran Harary Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c index 347aa1c..15d52ba 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c @@ -597,20 +597,19 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct file *file, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; - bool restart_fw = iwlwifi_mod_params.restart_fw; int ret; - iwlwifi_mod_params.restart_fw = true; - mutex_lock(&mvm->mutex); + /* allow one more restart that we're provoking here */ + if (mvm->restart_fw >= 0) + mvm->restart_fw++; + /* take the return value to make compiler happy - it will fail anyway */ ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, CMD_SYNC, 0, NULL); mutex_unlock(&mvm->mutex); - iwlwifi_mod_params.restart_fw = restart_fw; - return count; } diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index dda0cd0..5dc5dfd 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -477,6 +477,9 @@ struct iwl_mvm { */ u8 vif_count; + /* -1 for always, 0 for never, >0 for that many times */ + s8 restart_fw; + struct led_classdev led; struct ieee80211_vif *p2p_device_vif; diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index 70ac726..65e5fb8 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -342,6 +342,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->fw = fw; mvm->hw = hw; + mvm->restart_fw = iwlwifi_mod_params.restart_fw ? 
-1 : 0; + mutex_init(&mvm->mutex); spin_lock_init(&mvm->async_handlers_lock); INIT_LIST_HEAD(&mvm->time_event_list); @@ -695,8 +697,7 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm) reprobe->dev = mvm->trans->dev; INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk); schedule_work(&reprobe->work); - } else if (mvm->cur_ucode == IWL_UCODE_REGULAR && - iwlwifi_mod_params.restart_fw) { + } else if (mvm->cur_ucode == IWL_UCODE_REGULAR && mvm->restart_fw) { /* * This is a bit racy, but worst case we tell mac80211 about * a stopped/aborted (sched) scan when that was already done @@ -714,6 +715,8 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm) break; } + if (mvm->restart_fw > 0) + mvm->restart_fw--; ieee80211_restart_hw(mvm->hw); } } @@ -723,7 +726,7 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode) struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); iwl_mvm_dump_nic_error_log(mvm); - if (!iwlwifi_mod_params.restart_fw) + if (!mvm->restart_fw) iwl_mvm_dump_sram(mvm); iwl_mvm_nic_restart(mvm); -- cgit v0.10.2 From 226eb8cd22b6cddccd6e109a8400427ea6fed080 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 3 Jul 2013 11:55:17 +0200 Subject: iwlwifi: mvm: report per-chain signal to mac80211 Instead of reporting the maximum signal strength and the antenna bitmap in the antenna field (which is really just for radiotap and defined differently), report the signal strength values per chain and set the chain bitmap. Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c index c8e4af2..6fd7fae 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/iwlwifi/mvm/rx.c @@ -124,8 +124,9 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, ieee80211_rx_ni(mvm->hw, skb); } -static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm, - struct iwl_rx_phy_info *phy_info) +static void iwl_mvm_calc_rssi(struct iwl_mvm *mvm, + struct iwl_rx_phy_info *phy_info, + struct ieee80211_rx_status *rx_status) { int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm; int rssi_all_band_a, rssi_all_band_b; @@ -156,14 +157,20 @@ static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm, IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n", rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b); - return max_rssi_dbm; + rx_status->signal = max_rssi_dbm; + rx_status->chains = (le16_to_cpu(phy_info->phy_flags) & + RX_RES_PHY_FLAGS_ANTENNA) + >> RX_RES_PHY_FLAGS_ANTENNA_POS; + rx_status->chain_signal[0] = rssi_a_dbm; + rx_status->chain_signal[1] = rssi_b_dbm; } /* * iwl_mvm_get_signal_strength - use new rx PHY INFO API */ -static int iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, - struct iwl_rx_phy_info *phy_info) +static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, + struct iwl_rx_phy_info *phy_info, + struct ieee80211_rx_status *rx_status) { int energy_a, energy_b, energy_c, max_energy; u32 val; @@ -182,7 +189,13 @@ static int iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, IWL_DEBUG_STATS(mvm, "energy In A %d B %d C %d , and max %d\n", energy_a, energy_b, energy_c, max_energy); - return max_energy; + rx_status->signal = max_energy; + rx_status->chains = (le16_to_cpu(phy_info->phy_flags) & + RX_RES_PHY_FLAGS_ANTENNA) + >> RX_RES_PHY_FLAGS_ANTENNA_POS; + rx_status->chain_signal[0] = energy_a; + rx_status->chain_signal[1] = energy_b; + rx_status->chain_signal[2] = energy_c; } /* @@ -305,32 +318,14 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, 
*/ /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/ - /* Find max signal strength (dBm) among 3 antenna/receiver chains */ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_RX_ENERGY_API) - rx_status.signal = iwl_mvm_get_signal_strength(mvm, phy_info); + iwl_mvm_get_signal_strength(mvm, phy_info, &rx_status); else - rx_status.signal = iwl_mvm_calc_rssi(mvm, phy_info); + iwl_mvm_calc_rssi(mvm, phy_info, &rx_status); IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status.signal, (unsigned long long)rx_status.mactime); - /* - * "antenna number" - * - * It seems that the antenna field in the phy flags value - * is actually a bit field. This is undefined by radiotap, - * it wants an actual antenna number but I always get "7" - * for most legacy frames I receive indicating that the - * same frame was received on all three RX chains. - * - * I think this field should be removed in favor of a - * new 802.11n radiotap field "RX chains" that is defined - * as a bitmask. - */ - rx_status.antenna = (le16_to_cpu(phy_info->phy_flags) & - RX_RES_PHY_FLAGS_ANTENNA) - >> RX_RES_PHY_FLAGS_ANTENNA_POS; - /* set the preamble flag if appropriate */ if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE)) rx_status.flag |= RX_FLAG_SHORTPRE; -- cgit v0.10.2 From 913e25640ca10a82894378cab21a846f722e260c Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Sun, 21 Jul 2013 08:04:28 +0300 Subject: iwlwifi: mvm: remove obsolete flag in TX command API This API isn't valid any more. It wasn't used anyway. Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h index 700cce7..d606197 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h @@ -91,7 +91,6 @@ * @TX_CMD_FLG_RESP_TO_DRV: zero this if the response should go only to FW * @TX_CMD_FLG_CCMP_AGG: this frame uses CCMP for aggregation acceleration * @TX_CMD_FLG_TKIP_MIC_DONE: FW already performed TKIP MIC calculation - * @TX_CMD_FLG_CTS_ONLY: send CTS only, no data after that * @TX_CMD_FLG_DUR: disable duration overwriting used in PS-Poll Assoc-id * @TX_CMD_FLG_FW_DROP: FW should mark frame to be dropped * @TX_CMD_FLG_EXEC_PAPD: execute PAPD @@ -120,7 +119,6 @@ enum iwl_tx_flags { TX_CMD_FLG_RESP_TO_DRV = BIT(21), TX_CMD_FLG_CCMP_AGG = BIT(22), TX_CMD_FLG_TKIP_MIC_DONE = BIT(23), - TX_CMD_FLG_CTS_ONLY = BIT(24), TX_CMD_FLG_DUR = BIT(25), TX_CMD_FLG_FW_DROP = BIT(26), TX_CMD_FLG_EXEC_PAPD = BIT(27), -- cgit v0.10.2 From 2f2ad0d0722d4319f16161022da6e013a075f65b Mon Sep 17 00:00:00 2001 From: Eyal Shapira Date: Mon, 15 Jul 2013 14:57:28 +0300 Subject: iwlwifi: remove duplicate rate scale init code The rate scale windows are cleared twice as part of the init. Cleanup this duplication in both mvm and dvm. 
Signed-off-by: Eyal Shapira Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c index 1b69394..3395387 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/iwlwifi/dvm/rs.c @@ -2826,9 +2826,6 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i lq_sta->flush_timer = 0; lq_sta->supp_rates = sta->supp_rates[sband->band]; - for (j = 0; j < LQ_SIZE; j++) - for (i = 0; i < IWL_RATE_COUNT; i++) - rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]); IWL_DEBUG_RATE(priv, "LQ: *** rate scale station global init for station %d ***\n", sta_id); diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index 3856c28..e4a7b387 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@ -2688,9 +2688,6 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, lq_sta->flush_timer = 0; lq_sta->supp_rates = sta->supp_rates[sband->band]; - for (j = 0; j < LQ_SIZE; j++) - for (i = 0; i < IWL_RATE_COUNT; i++) - rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]); IWL_DEBUG_RATE(mvm, "LQ: *** rate scale station global init for station %d ***\n", -- cgit v0.10.2 From 8434925f20fbcac3c2699f33f069c8071bad1214 Mon Sep 17 00:00:00 2001 From: Alexander Bondar Date: Mon, 22 Jul 2013 15:39:26 +0300 Subject: iwlwifi: mvm: Change beacon abort escape time value Set beacon abort escape timer values - 6 beacons in D0 state, 9 beacons in D3 and D0i3. Signed-off-by: Alexander Bondar Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h index 149347d..060e630 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h @@ -285,7 +285,8 @@ struct iwl_beacon_filter_cmd { #define IWL_BF_ESCAPE_TIMER_MAX 1024 #define IWL_BF_ESCAPE_TIMER_MIN 0 -#define IWL_BA_ESCAPE_TIMER_DEFAULT 3 +#define IWL_BA_ESCAPE_TIMER_DEFAULT 6 +#define IWL_BA_ESCAPE_TIMER_D3 6 #define IWL_BA_ESCAPE_TIMER_MAX 1024 #define IWL_BA_ESCAPE_TIMER_MIN 0 diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c index 30306a9..4e7c9f2 100644 --- a/drivers/net/wireless/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/iwlwifi/mvm/power.c @@ -123,6 +123,9 @@ int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm, if (!mvmvif->bf_enabled) return 0; + if (mvm->cur_ucode == IWL_UCODE_WOWLAN) + cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3); + iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd); return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd); } -- cgit v0.10.2 From 6161b02b934bcf013f3bd7468222e24d4d597fbf Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 23 Jul 2013 14:03:39 +0200 Subject: iwlwifi: list Emmanuel in maintainers file Wey-Yi has moved on a while ago, but we forgot to update the maintainers file. Since Emmanuel is de facto maintaining the driver along with myself now, list him instead. 
Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/MAINTAINERS b/MAINTAINERS index bf61e04..d44a5e8 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4362,7 +4362,7 @@ F: drivers/net/wireless/iwlegacy/ INTEL WIRELESS WIFI LINK (iwlwifi) M: Johannes Berg -M: Wey-Yi Guy +M: Emmanuel Grumbach M: Intel Linux Wireless L: linux-wireless@vger.kernel.org W: http://intellinuxwireless.org -- cgit v0.10.2 From 6be497f29e207dcfb5d00a692c4e1d4e0fdcac3c Mon Sep 17 00:00:00 2001 From: Eytan Lifshitz Date: Wed, 24 Jul 2013 14:49:18 +0300 Subject: iwlwifi: mvm: add high temperature SKU thermal throttling parameters When the NIC is expected to operate in high temperature, it is advisable to put more aggresive thermal throttling parameters, in order to prevent CT-kill. Signed-off-by: eytan lifshitz Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index 7f61674..76e14c0 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c @@ -127,6 +127,16 @@ const struct iwl_cfg iwl7260_2ac_cfg = { .nvm_calib_ver = IWL7260_TX_POWER_VERSION, }; +const struct iwl_cfg iwl7260_2ac_cfg_high_temp = { + .name = "Intel(R) Dual Band Wireless AC 7260", + .fw_name_pre = IWL7260_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7000_ht_params, + .nvm_ver = IWL7260_NVM_VERSION, + .nvm_calib_ver = IWL7260_TX_POWER_VERSION, + .high_temp = true, +}; + const struct iwl_cfg iwl7260_2n_cfg = { .name = "Intel(R) Dual Band Wireless N 7260", .fw_name_pre = IWL7260_FW_PRE, diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h index 87fe955..e4d370b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/iwlwifi/iwl-config.h @@ -206,6 +206,7 @@ struct iwl_eeprom_params { * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off) * @rx_with_siso_diversity: 1x1 device with rx antenna diversity * @internal_wimax_coex: internal wifi/wimax combo device + * @high_temp: Is this NIC is designated to be in high temperature. * * We enable the driver to be backward compatible wrt. hardware features. 
* API differences in uCode shouldn't be handled here but through TLVs @@ -234,6 +235,7 @@ struct iwl_cfg { enum iwl_led_mode led_mode; const bool rx_with_siso_diversity; const bool internal_wimax_coex; + bool high_temp; }; /* @@ -284,6 +286,7 @@ extern const struct iwl_cfg iwl135_bgn_cfg; #endif /* CONFIG_IWLDVM */ #if IS_ENABLED(CONFIG_IWLMVM) extern const struct iwl_cfg iwl7260_2ac_cfg; +extern const struct iwl_cfg iwl7260_2ac_cfg_high_temp; extern const struct iwl_cfg iwl7260_2n_cfg; extern const struct iwl_cfg iwl7260_n_cfg; extern const struct iwl_cfg iwl3160_2ac_cfg; diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c index f5ba185..1f3282d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/iwlwifi/mvm/tt.c @@ -512,12 +512,39 @@ static const struct iwl_tt_params iwl7000_tt_params = { .support_tx_backoff = true, }; +static const struct iwl_tt_params iwl7000_high_temp_tt_params = { + .ct_kill_entry = 118, + .ct_kill_exit = 96, + .ct_kill_duration = 5, + .dynamic_smps_entry = 114, + .dynamic_smps_exit = 110, + .tx_protection_entry = 114, + .tx_protection_exit = 108, + .tx_backoff = { + {.temperature = 112, .backoff = 300}, + {.temperature = 113, .backoff = 800}, + {.temperature = 114, .backoff = 1500}, + {.temperature = 115, .backoff = 3000}, + {.temperature = 116, .backoff = 5000}, + {.temperature = 117, .backoff = 10000}, + }, + .support_ct_kill = true, + .support_dynamic_smps = true, + .support_tx_protection = true, + .support_tx_backoff = true, +}; + void iwl_mvm_tt_initialize(struct iwl_mvm *mvm) { struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle; IWL_DEBUG_TEMP(mvm, "Initialize Thermal Throttling\n"); - tt->params = &iwl7000_tt_params; + + if (mvm->cfg->high_temp) + tt->params = &iwl7000_high_temp_tt_params; + else + tt->params = &iwl7000_tt_params; + tt->throttle = false; INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill); } diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index 81f3ea5..db6be24 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -272,9 +272,9 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg_high_temp)}, + {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg_high_temp)}, + {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg_high_temp)}, {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)}, -- cgit v0.10.2 From 147fc9be81d10e6e863323c0b54e140b42fd1ed6 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Sun, 28 Jul 2013 11:00:52 +0300 Subject: iwlwifi: mvm: advertise support for DYNAMIC / STATIC SMPS This feature is fully supported by iwlwmvm, so advertise it. 
Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 8e4e79a..76cdba6 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -153,7 +153,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) IEEE80211_HW_SUPPORTS_DYNAMIC_PS | IEEE80211_HW_AMPDU_AGGREGATION | IEEE80211_HW_TIMING_BEACON_ONLY | - IEEE80211_HW_CONNECTION_MONITOR; + IEEE80211_HW_CONNECTION_MONITOR | + IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | + IEEE80211_HW_SUPPORTS_STATIC_SMPS; hw->queues = IWL_MVM_FIRST_AGG_QUEUE; hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE; -- cgit v0.10.2 From 8f58332b30c9a25794514bf612aff5372173afd8 Mon Sep 17 00:00:00 2001 From: Don Skidmore Date: Sat, 27 Jul 2013 06:25:38 +0000 Subject: ixgbe: add support for quad-port x520 adapter This is a x520 based quad-port (4x10Gbps) NIC with a single QSFP+ connector. Changes were required to our identify functions due to different eeprom address which is also included here. Signed-off-by: Don Skidmore Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index d011d5c..a26f3fe 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -1331,7 +1331,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82598 = { static struct ixgbe_phy_operations phy_ops_82598 = { .identify = &ixgbe_identify_phy_generic, - .identify_sfp = &ixgbe_identify_sfp_module_generic, + .identify_sfp = &ixgbe_identify_module_generic, .init = &ixgbe_init_phy_ops_82598, .reset = &ixgbe_reset_phy_generic, .read_reg = &ixgbe_read_phy_reg_generic, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 9d627fd..207f68f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -58,6 +58,10 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete); static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); +static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); static bool ixgbe_mng_enabled(struct ixgbe_hw *hw) { @@ -219,6 +223,25 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) struct ixgbe_mac_info *mac = &hw->mac; struct ixgbe_phy_info *phy = &hw->phy; s32 ret_val = 0; + u32 esdp; + + if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) { + /* Store flag indicating I2C bus access control unit. 
*/ + hw->phy.qsfp_shared_i2c_bus = true; + + /* Initialize access to QSFP+ I2C bus */ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp |= IXGBE_ESDP_SDP0_DIR; + esdp &= ~IXGBE_ESDP_SDP1_DIR; + esdp &= ~IXGBE_ESDP_SDP0; + esdp &= ~IXGBE_ESDP_SDP0_NATIVE; + esdp &= ~IXGBE_ESDP_SDP1_NATIVE; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + + phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_82599; + phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_82599; + } /* Identify the PHY or SFP module */ ret_val = phy->ops.identify(hw); @@ -397,6 +420,9 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) case IXGBE_DEV_ID_82599_LS: media_type = ixgbe_media_type_fiber_lco; break; + case IXGBE_DEV_ID_82599_QSFP_SF_QP: + media_type = ixgbe_media_type_fiber_qsfp; + break; default: media_type = ixgbe_media_type_unknown; break; @@ -1951,7 +1977,7 @@ static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) goto out; else - status = ixgbe_identify_sfp_module_generic(hw); + status = ixgbe_identify_module_generic(hw); } /* Set PHY type none if no PHY detected */ @@ -2057,10 +2083,12 @@ sfp_check: switch (hw->phy.type) { case ixgbe_phy_sfp_passive_tyco: case ixgbe_phy_sfp_passive_unknown: + case ixgbe_phy_qsfp_passive_unknown: physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; break; case ixgbe_phy_sfp_ftl_active: case ixgbe_phy_sfp_active_unknown: + case ixgbe_phy_qsfp_active_unknown: physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA; break; case ixgbe_phy_sfp_avago: @@ -2078,6 +2106,15 @@ sfp_check: else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T; break; + case ixgbe_phy_qsfp_intel: + case ixgbe_phy_qsfp_unknown: + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_10GBE_COMP, &comp_codes_10g); + if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; + else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; + break; default: break; } @@ -2315,6 +2352,112 @@ reset_pipeline_out: return ret_val; } +/** + * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + u32 esdp; + s32 status; + s32 timeout = 200; + + if (hw->phy.qsfp_shared_i2c_bus == true) { + /* Acquire I2C bus ownership. */ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp |= IXGBE_ESDP_SDP0; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + + while (timeout) { + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + if (esdp & IXGBE_ESDP_SDP1) + break; + + usleep_range(5000, 10000); + timeout--; + } + + if (!timeout) { + hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n"); + status = IXGBE_ERR_I2C; + goto release_i2c_access; + } + } + + status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data); + +release_i2c_access: + if (hw->phy.qsfp_shared_i2c_bus == true) { + /* Release I2C bus ownership. 
*/ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp &= ~IXGBE_ESDP_SDP0; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + } + + return status; +} + +/** + * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + u32 esdp; + s32 status; + s32 timeout = 200; + + if (hw->phy.qsfp_shared_i2c_bus == true) { + /* Acquire I2C bus ownership. */ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp |= IXGBE_ESDP_SDP0; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + + while (timeout) { + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + if (esdp & IXGBE_ESDP_SDP1) + break; + + usleep_range(5000, 10000); + timeout--; + } + + if (!timeout) { + hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n"); + status = IXGBE_ERR_I2C; + goto release_i2c_access; + } + } + + status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data); + +release_i2c_access: + if (hw->phy.qsfp_shared_i2c_bus == true) { + /* Release I2C bus ownership. */ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp &= ~IXGBE_ESDP_SDP0; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + } + + return status; +} + static struct ixgbe_mac_operations mac_ops_82599 = { .init_hw = &ixgbe_init_hw_generic, .reset_hw = &ixgbe_reset_hw_82599, @@ -2379,7 +2522,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82599 = { static struct ixgbe_phy_operations phy_ops_82599 = { .identify = &ixgbe_identify_phy_82599, - .identify_sfp = &ixgbe_identify_sfp_module_generic, + .identify_sfp = &ixgbe_identify_module_generic, .init = &ixgbe_init_phy_ops_82599, .reset = &ixgbe_reset_phy_generic, .read_reg = &ixgbe_read_phy_reg_generic, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 785eafa..136de7d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -109,6 +109,7 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 }, @@ -208,6 +209,7 @@ static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw) { switch (hw->device_id) { case IXGBE_DEV_ID_82599_SFP_SF_QP: + case IXGBE_DEV_ID_82599_QSFP_SF_QP: return true; default: return false; @@ -7347,6 +7349,7 @@ static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter) */ switch (hw->device_id) { case IXGBE_DEV_ID_82599_SFP_SF_QP: + case IXGBE_DEV_ID_82599_QSFP_SF_QP: physfns = 4; break; default: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 71f554a..369eef5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -841,9 +841,35 @@ out: } /** - * ixgbe_identify_sfp_module_generic - Identifies SFP modules + * ixgbe_identify_module_generic - Identifies 
module type * @hw: pointer to hardware structure * + * Determines HW type and calls appropriate function. + **/ +s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_SFP_NOT_PRESENT; + + switch (hw->mac.ops.get_media_type(hw)) { + case ixgbe_media_type_fiber: + status = ixgbe_identify_sfp_module_generic(hw); + break; + case ixgbe_media_type_fiber_qsfp: + status = ixgbe_identify_qsfp_module_generic(hw); + break; + default: + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + status = IXGBE_ERR_SFP_NOT_PRESENT; + break; + } + + return status; +} + +/** + * ixgbe_identify_sfp_module_generic - Identifies SFP modules + * @hw: pointer to hardware structure +* * Searches for and identifies the SFP module and assigns appropriate PHY type. **/ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) @@ -1122,6 +1148,156 @@ err_read_i2c_eeprom: } /** + * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules + * @hw: pointer to hardware structure + * + * Searches for and identifies the QSFP module and assigns appropriate PHY type + **/ +s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_adapter *adapter = hw->back; + s32 status = IXGBE_ERR_PHY_ADDR_INVALID; + u32 vendor_oui = 0; + enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; + u8 identifier = 0; + u8 comp_codes_1g = 0; + u8 comp_codes_10g = 0; + u8 oui_bytes[3] = {0, 0, 0}; + u16 enforce_sfp = 0; + + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) { + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + status = IXGBE_ERR_SFP_NOT_PRESENT; + goto out; + } + + status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, + &identifier); + + if (status != 0) + goto err_read_i2c_eeprom; + + if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) { + hw->phy.type = ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; + } + + hw->phy.id = identifier; + + /* LAN ID is needed for sfp_type determination */ + hw->mac.ops.set_lan_id(hw); + + status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP, + &comp_codes_10g); + + if (status != 0) + goto err_read_i2c_eeprom; + + if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) { + hw->phy.type = ixgbe_phy_qsfp_passive_unknown; + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core0; + else + hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core1; + } else if (comp_codes_10g & IXGBE_SFF_QSFP_DA_ACTIVE_CABLE) { + hw->phy.type = ixgbe_phy_qsfp_active_unknown; + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = ixgbe_sfp_type_da_act_lmt_core0; + else + hw->phy.sfp_type = ixgbe_sfp_type_da_act_lmt_core1; + } else if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE | + IXGBE_SFF_10GBASELR_CAPABLE)) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = ixgbe_sfp_type_srlr_core0; + else + hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1; + } else { + /* unsupported module type */ + hw->phy.type = ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; + } + + if (hw->phy.sfp_type != stored_sfp_type) + hw->phy.sfp_setup_needed = true; + + /* Determine if the QSFP+ PHY is dual speed or not. 
*/ + hw->phy.multispeed_fiber = false; + if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || + ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) + hw->phy.multispeed_fiber = true; + + /* Determine PHY vendor for optical modules */ + if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE | + IXGBE_SFF_10GBASELR_CAPABLE)) { + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0, + &oui_bytes[0]); + + if (status != 0) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1, + &oui_bytes[1]); + + if (status != 0) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2, + &oui_bytes[2]); + + if (status != 0) + goto err_read_i2c_eeprom; + + vendor_oui = + ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | + (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | + (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); + + if (vendor_oui == IXGBE_SFF_VENDOR_OUI_INTEL) + hw->phy.type = ixgbe_phy_qsfp_intel; + else + hw->phy.type = ixgbe_phy_qsfp_unknown; + + hw->mac.ops.get_device_caps(hw, &enforce_sfp); + if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) { + /* Make sure we're a supported PHY type */ + if (hw->phy.type == ixgbe_phy_qsfp_intel) { + status = 0; + } else { + if (hw->allow_unsupported_sfp == true) { + e_warn(hw, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n"); + status = 0; + } else { + hw_dbg(hw, + "QSFP module not supported\n"); + hw->phy.type = + ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + } + } + } else { + status = 0; + } + } + +out: + return status; + +err_read_i2c_eeprom: + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + hw->phy.id = 0; + hw->phy.type = ixgbe_phy_unknown; + + return IXGBE_ERR_SFP_NOT_PRESENT; +} + +/** * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence * @hw: pointer to hardware structure * @list_offset: offset to the SFP ID list diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index 647203a..138dadd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -33,19 +33,25 @@ #define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2 /* EEPROM byte offsets */ -#define IXGBE_SFF_IDENTIFIER 0x0 -#define IXGBE_SFF_IDENTIFIER_SFP 0x3 -#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25 -#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26 -#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27 -#define IXGBE_SFF_1GBE_COMP_CODES 0x6 -#define IXGBE_SFF_10GBE_COMP_CODES 0x3 -#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8 -#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C -#define IXGBE_SFF_SFF_8472_SWAP 0x5C -#define IXGBE_SFF_SFF_8472_COMP 0x5E -#define IXGBE_SFF_SFF_8472_OSCB 0x6E -#define IXGBE_SFF_SFF_8472_ESCB 0x76 +#define IXGBE_SFF_IDENTIFIER 0x0 +#define IXGBE_SFF_IDENTIFIER_SFP 0x3 +#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25 +#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26 +#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27 +#define IXGBE_SFF_1GBE_COMP_CODES 0x6 +#define IXGBE_SFF_10GBE_COMP_CODES 0x3 +#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8 +#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C +#define IXGBE_SFF_SFF_8472_SWAP 0x5C +#define 
IXGBE_SFF_SFF_8472_COMP 0x5E +#define IXGBE_SFF_SFF_8472_OSCB 0x6E +#define IXGBE_SFF_SFF_8472_ESCB 0x76 +#define IXGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD +#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0 0xA5 +#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1 0xA6 +#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2 0xA7 +#define IXGBE_SFF_QSFP_10GBE_COMP 0x83 +#define IXGBE_SFF_QSFP_1GBE_COMP 0x86 /* Bitmasks */ #define IXGBE_SFF_DA_PASSIVE_CABLE 0x4 @@ -60,6 +66,8 @@ #define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8 #define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0 #define IXGBE_SFF_ADDRESSING_MODE 0x4 +#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1 +#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8 #define IXGBE_I2C_EEPROM_READ_MASK 0x100 #define IXGBE_I2C_EEPROM_STATUS_MASK 0x3 #define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0 @@ -130,7 +138,9 @@ s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, u16 *firmware_version); s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); +s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw); s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); +s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw); s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, u16 *list_offset, u16 *data_offset); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 4c91ea6..161ff18 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -69,6 +69,7 @@ #define IXGBE_DEV_ID_82599_LS 0x154F #define IXGBE_DEV_ID_X540T 0x1528 #define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A +#define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558 #define IXGBE_DEV_ID_X540T1 0x1560 /* VF Device IDs */ @@ -1520,9 +1521,11 @@ enum { #define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */ #define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */ #define IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */ +#define IXGBE_ESDP_SDP1_DIR 0x00000200 /* SDP1 IO direction */ #define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */ #define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */ #define IXGBE_ESDP_SDP0_NATIVE 0x00010000 /* SDP0 Native Function */ +#define IXGBE_ESDP_SDP1_NATIVE 0x00020000 /* SDP1 IO mode */ /* LEDCTL Bit Masks */ #define IXGBE_LED_IVRT_BASE 0x00000040 @@ -2582,6 +2585,10 @@ enum ixgbe_phy_type { ixgbe_phy_sfp_ftl_active, ixgbe_phy_sfp_unknown, ixgbe_phy_sfp_intel, + ixgbe_phy_qsfp_passive_unknown, + ixgbe_phy_qsfp_active_unknown, + ixgbe_phy_qsfp_intel, + ixgbe_phy_qsfp_unknown, ixgbe_phy_sfp_unsupported, ixgbe_phy_generic }; @@ -2623,6 +2630,7 @@ enum ixgbe_media_type { ixgbe_media_type_unknown = 0, ixgbe_media_type_fiber, ixgbe_media_type_fiber_fixed, + ixgbe_media_type_fiber_qsfp, ixgbe_media_type_fiber_lco, ixgbe_media_type_copper, ixgbe_media_type_backplane, @@ -2956,6 +2964,7 @@ struct ixgbe_phy_info { bool smart_speed_active; bool multispeed_fiber; bool reset_if_overtemp; + bool qsfp_shared_i2c_bus; }; #include "ixgbe_mbx.h" -- cgit v0.10.2 From 5774c94aceade9eadc311957fe31322cc3ad2016 Mon Sep 17 00:00:00 2001 From: Phil Oester Date: Thu, 11 Jul 2013 12:06:58 -0700 Subject: netfilter: xt_addrtype: fix trivial typo Fix typo in error message. 
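(An illustrative aside, not part of the patch: this check fires when an IPv6 rule requests one of the unsupported address types, e.g. something like "ip6tables -A INPUT -m addrtype --dst-type PROHIBIT -j DROP", which is rejected with -EINVAL and logs the message corrected here.)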
Signed-off-by: Phil Oester Signed-off-by: Pablo Neira Ayuso diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c index 68ff29f..fab6eea 100644 --- a/net/netfilter/xt_addrtype.c +++ b/net/netfilter/xt_addrtype.c @@ -202,7 +202,7 @@ static int addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par) return -EINVAL; } if ((info->source | info->dest) >= XT_ADDRTYPE_PROHIBIT) { - pr_err("ipv6 PROHIBT (THROW, NAT ..) matching not supported\n"); + pr_err("ipv6 PROHIBIT (THROW, NAT ..) matching not supported\n"); return -EINVAL; } if ((info->source | info->dest) & XT_ADDRTYPE_BROADCAST) { -- cgit v0.10.2 From 6704af53fc3c5d4113d67d5ff40943d420966cd8 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Sun, 28 Jul 2013 22:54:07 +0200 Subject: netfilter: nf_conntrack: remove net_ratelimit() for LOG_INVALID() Logging of invalid packets has to be explicitly enabled. Rate-limiting these messages is inconsistent with other netfilter logging features and makes debugging harder. Signed-off-by: Patrick McHardy Signed-off-by: Pablo Neira Ayuso diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h index 914d8d9..b411d7b 100644 --- a/include/net/netfilter/nf_conntrack_l4proto.h +++ b/include/net/netfilter/nf_conntrack_l4proto.h @@ -148,17 +148,10 @@ extern int nf_ct_port_nlattr_tuple_size(void); extern const struct nla_policy nf_ct_port_nla_policy[]; #ifdef CONFIG_SYSCTL -#ifdef DEBUG_INVALID_PACKETS #define LOG_INVALID(net, proto) \ ((net)->ct.sysctl_log_invalid == (proto) || \ (net)->ct.sysctl_log_invalid == IPPROTO_RAW) #else -#define LOG_INVALID(net, proto) \ - (((net)->ct.sysctl_log_invalid == (proto) || \ - (net)->ct.sysctl_log_invalid == IPPROTO_RAW) \ - && net_ratelimit()) -#endif -#else static inline int LOG_INVALID(struct net *net, int proto) { return 0; } #endif /* CONFIG_SYSCTL */ -- cgit v0.10.2 From 312a0c16c1fa9dd7cb5af413cf73b2fe2806c962 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Sun, 28 Jul 2013 22:54:08 +0200 Subject: netfilter: nf_conntrack: constify sk_buff argument to nf_ct_attach() Signed-off-by: Patrick McHardy Signed-off-by: Pablo Neira Ayuso diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index de70f7b..f4bbf2c 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -314,8 +314,8 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) #endif /*CONFIG_NETFILTER*/ #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) -extern void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *) __rcu; -extern void nf_ct_attach(struct sk_buff *, struct sk_buff *); +extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu; +extern void nf_ct_attach(struct sk_buff *, const struct sk_buff *); extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu; struct nf_conn; diff --git a/net/netfilter/core.c b/net/netfilter/core.c index 2217363..593b16e 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -234,12 +234,13 @@ EXPORT_SYMBOL(skb_make_writable); /* This does not belong here, but locally generated errors need it if connection tracking in use: without this, connection may not be in hash table, and hence manufactured ICMP or RST packets will not be associated with it. 
*/ -void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *) __rcu __read_mostly; +void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) + __rcu __read_mostly; EXPORT_SYMBOL(ip_ct_attach); -void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) +void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb) { - void (*attach)(struct sk_buff *, struct sk_buff *); + void (*attach)(struct sk_buff *, const struct sk_buff *); if (skb->nfct) { rcu_read_lock(); diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 0283bae..d32afaf 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1192,7 +1192,7 @@ EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size); #endif /* Used by ipt_REJECT and ip6t_REJECT. */ -static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb) +static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb) { struct nf_conn *ct; enum ip_conntrack_info ctinfo; -- cgit v0.10.2 From 5813a8eb47915e051059562f22ffa521404f6e19 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 29 Jul 2013 15:41:50 +0200 Subject: netfilter: connlabels: remove unneeded includes leftovers from the (never merged) v1 patch. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso diff --git a/net/netfilter/nf_conntrack_labels.c b/net/netfilter/nf_conntrack_labels.c index 355d2ef..bb53f12 100644 --- a/net/netfilter/nf_conntrack_labels.c +++ b/net/netfilter/nf_conntrack_labels.c @@ -8,12 +8,8 @@ * published by the Free Software Foundation. */ -#include #include -#include -#include #include -#include #include #include -- cgit v0.10.2 From 957bec36855f97cc5797fbaaf68b11ac7454df2d Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 29 Jul 2013 15:41:51 +0200 Subject: netfilter: nf_queue: relax NFQA_CT attribute check Allow modifying attributes of the conntrack associated with a packet without first requesting ct data via CFG_F_CONNTRACK or extra nfnetlink_conntrack socket. Also remove unneded rcu_read_lock; the entire function is already protected by rcu. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c index 971ea14..ec9de12 100644 --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c @@ -987,8 +987,7 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb, if (entry == NULL) return -ENOENT; - rcu_read_lock(); - if (nfqa[NFQA_CT] && (queue->flags & NFQA_CFG_F_CONNTRACK)) + if (nfqa[NFQA_CT]) ct = nfqnl_ct_parse(entry->skb, nfqa[NFQA_CT], &ctinfo); if (nfqa[NFQA_PAYLOAD]) { @@ -1002,7 +1001,6 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb, if (ct) nfqnl_ct_seq_adjust(skb, ct, ctinfo, diff); } - rcu_read_unlock(); if (nfqa[NFQA_MARK]) entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK])); -- cgit v0.10.2 From fd158d79d33d3c8b693e3e2d8c0e3068d529c2dc Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 29 Jul 2013 15:41:52 +0200 Subject: netfilter: tproxy: remove nf_tproxy_core, keep tw sk assigned to skb The module was "permanent", due to the special tproxy skb->destructor. Nowadays we have tcp early demux and its sock_edemux destructor in networking core which can be used instead. 
Thanks to early demux changes the input path now also handles "skb->sk is tw socket" correctly, so this no longer needs the special handling introduced with commit d503b30bd648b3cb4e5f50b65d27e389960cc6d9 (netfilter: tproxy: do not assign timewait sockets to skb->sk). Thus: - move assign_sock function to where its needed - don't prevent timewait sockets from being assigned to the skb - remove nf_tproxy_core. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso diff --git a/Documentation/networking/tproxy.txt b/Documentation/networking/tproxy.txt index 7b5996d..ec11429 100644 --- a/Documentation/networking/tproxy.txt +++ b/Documentation/networking/tproxy.txt @@ -2,9 +2,8 @@ Transparent proxy support ========================= This feature adds Linux 2.2-like transparent proxy support to current kernels. -To use it, enable NETFILTER_TPROXY, the socket match and the TPROXY target in -your kernel config. You will need policy routing too, so be sure to enable that -as well. +To use it, enable the socket match and the TPROXY target in your kernel config. +You will need policy routing too, so be sure to enable that as well. 1. Making non-local sockets work diff --git a/include/net/netfilter/nf_tproxy_core.h b/include/net/netfilter/nf_tproxy_core.h index 36d9379..975ffa4 100644 --- a/include/net/netfilter/nf_tproxy_core.h +++ b/include/net/netfilter/nf_tproxy_core.h @@ -203,8 +203,4 @@ nf_tproxy_get_sock_v6(struct net *net, const u8 protocol, } #endif -/* assign a socket to the skb -- consumes sk */ -void -nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk); - #endif diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 56d22ca..c45fc1a 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -410,20 +410,6 @@ config NF_NAT_TFTP endif # NF_CONNTRACK -# transparent proxy support -config NETFILTER_TPROXY - tristate "Transparent proxying support" - depends on IP_NF_MANGLE - depends on NETFILTER_ADVANCED - help - This option enables transparent proxying support, that is, - support for handling non-locally bound IPv4 TCP and UDP sockets. - For it to work you will have to configure certain iptables rules - and use policy routing. For more information on how to set it up - see Documentation/networking/tproxy.txt. - - To compile it as a module, choose M here. If unsure, say N. - config NETFILTER_XTABLES tristate "Netfilter Xtables support (required for ip_tables)" default m if NETFILTER_ADVANCED=n @@ -720,10 +706,10 @@ config NETFILTER_XT_TARGET_TEE this clone be rerouted to another nexthop. config NETFILTER_XT_TARGET_TPROXY - tristate '"TPROXY" target support' - depends on NETFILTER_TPROXY + tristate '"TPROXY" target transparent proxying support' depends on NETFILTER_XTABLES depends on NETFILTER_ADVANCED + depends on IP_NF_MANGLE select NF_DEFRAG_IPV4 select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES help @@ -731,6 +717,9 @@ config NETFILTER_XT_TARGET_TPROXY REDIRECT. It can only be used in the mangle table and is useful to redirect traffic to a transparent proxy. It does _not_ depend on Netfilter connection tracking and NAT, unlike REDIRECT. + For it to work you will have to configure certain iptables rules + and use policy routing. For more information on how to set it up + see Documentation/networking/tproxy.txt. To compile it as a module, choose M here. If unsure, say N. 
@@ -1180,7 +1169,6 @@ config NETFILTER_XT_MATCH_SCTP config NETFILTER_XT_MATCH_SOCKET tristate '"socket" match support' - depends on NETFILTER_TPROXY depends on NETFILTER_XTABLES depends on NETFILTER_ADVANCED depends on !NF_CONNTRACK || NF_CONNTRACK diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index a1abf87..ebfa7dc 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -61,9 +61,6 @@ obj-$(CONFIG_NF_NAT_IRC) += nf_nat_irc.o obj-$(CONFIG_NF_NAT_SIP) += nf_nat_sip.o obj-$(CONFIG_NF_NAT_TFTP) += nf_nat_tftp.o -# transparent proxy support -obj-$(CONFIG_NETFILTER_TPROXY) += nf_tproxy_core.o - # generic X tables obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o diff --git a/net/netfilter/nf_tproxy_core.c b/net/netfilter/nf_tproxy_core.c deleted file mode 100644 index 474d621..0000000 --- a/net/netfilter/nf_tproxy_core.c +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Transparent proxy support for Linux/iptables - * - * Copyright (c) 2006-2007 BalaBit IT Ltd. - * Author: Balazs Scheidler, Krisztian Kovacs - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - */ - -#include - -#include -#include -#include -#include -#include - - -static void -nf_tproxy_destructor(struct sk_buff *skb) -{ - struct sock *sk = skb->sk; - - skb->sk = NULL; - skb->destructor = NULL; - - if (sk) - sock_put(sk); -} - -/* consumes sk */ -void -nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk) -{ - /* assigning tw sockets complicates things; most - * skb->sk->X checks would have to test sk->sk_state first */ - if (sk->sk_state == TCP_TIME_WAIT) { - inet_twsk_put(inet_twsk(sk)); - return; - } - - skb_orphan(skb); - skb->sk = sk; - skb->destructor = nf_tproxy_destructor; -} -EXPORT_SYMBOL_GPL(nf_tproxy_assign_sock); - -static int __init nf_tproxy_init(void) -{ - pr_info("NF_TPROXY: Transparent proxy support initialized, version 4.1.0\n"); - pr_info("NF_TPROXY: Copyright (c) 2006-2007 BalaBit IT Ltd.\n"); - return 0; -} - -module_init(nf_tproxy_init); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Krisztian Kovacs"); -MODULE_DESCRIPTION("Transparent proxy support core routines"); diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c index d7f1953..17c40de 100644 --- a/net/netfilter/xt_TPROXY.c +++ b/net/netfilter/xt_TPROXY.c @@ -117,6 +117,15 @@ tproxy_handle_time_wait4(struct sk_buff *skb, __be32 laddr, __be16 lport, return sk; } +/* assign a socket to the skb -- consumes sk */ +static void +nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk) +{ + skb_orphan(skb); + skb->sk = sk; + skb->destructor = sock_edemux; +} + static unsigned int tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport, u_int32_t mark_mask, u_int32_t mark_value) -- cgit v0.10.2 From 93742cf8af9dd3b053242b273040aa35fcbf93b3 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 29 Jul 2013 15:41:53 +0200 Subject: netfilter: tproxy: remove nf_tproxy_core.h We've removed nf_tproxy_core.ko, so also remove its header. The lookup helpers are split and then moved to tproxy target/socket match. 
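As a rough illustration (a sketch, not part of this patch) of the setup that the TPROXY Kconfig help above and Documentation/networking/tproxy.txt refer to, the socket match, policy routing and the TPROXY target are typically combined along these lines; the mark value, routing table number and port numbers are arbitrary examples:

# divert packets that already belong to a transparent socket
iptables -t mangle -N DIVERT
iptables -t mangle -A PREROUTING -p tcp -m socket -j DIVERT
iptables -t mangle -A DIVERT -j MARK --set-mark 1
iptables -t mangle -A DIVERT -j ACCEPT
# policy routing delivers marked packets locally
ip rule add fwmark 1 lookup 100
ip route add local 0.0.0.0/0 dev lo table 100
# redirect new port-80 connections to a local transparent proxy on port 50080
iptables -t mangle -A PREROUTING -p tcp --dport 80 -j TPROXY --tproxy-mark 0x1/0x1 --on-port 50080

The DIVERT chain short-circuits traffic for connections a transparent proxy already owns, so only new connections reach the TPROXY rule.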
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso diff --git a/include/net/netfilter/nf_tproxy_core.h b/include/net/netfilter/nf_tproxy_core.h deleted file mode 100644 index 975ffa4..0000000 --- a/include/net/netfilter/nf_tproxy_core.h +++ /dev/null @@ -1,206 +0,0 @@ -#ifndef _NF_TPROXY_CORE_H -#define _NF_TPROXY_CORE_H - -#include -#include -#include -#include -#include -#include -#include - -#define NFT_LOOKUP_ANY 0 -#define NFT_LOOKUP_LISTENER 1 -#define NFT_LOOKUP_ESTABLISHED 2 - -/* look up and get a reference to a matching socket */ - - -/* This function is used by the 'TPROXY' target and the 'socket' - * match. The following lookups are supported: - * - * Explicit TProxy target rule - * =========================== - * - * This is used when the user wants to intercept a connection matching - * an explicit iptables rule. In this case the sockets are assumed - * matching in preference order: - * - * - match: if there's a fully established connection matching the - * _packet_ tuple, it is returned, assuming the redirection - * already took place and we process a packet belonging to an - * established connection - * - * - match: if there's a listening socket matching the redirection - * (e.g. on-port & on-ip of the connection), it is returned, - * regardless if it was bound to 0.0.0.0 or an explicit - * address. The reasoning is that if there's an explicit rule, it - * does not really matter if the listener is bound to an interface - * or to 0. The user already stated that he wants redirection - * (since he added the rule). - * - * "socket" match based redirection (no specific rule) - * =================================================== - * - * There are connections with dynamic endpoints (e.g. FTP data - * connection) that the user is unable to add explicit rules - * for. These are taken care of by a generic "socket" rule. It is - * assumed that the proxy application is trusted to open such - * connections without explicit iptables rule (except of course the - * generic 'socket' rule). In this case the following sockets are - * matched in preference order: - * - * - match: if there's a fully established connection matching the - * _packet_ tuple - * - * - match: if there's a non-zero bound listener (possibly with a - * non-local address) We don't accept zero-bound listeners, since - * then local services could intercept traffic going through the - * box. - * - * Please note that there's an overlap between what a TPROXY target - * and a socket match will match. Normally if you have both rules the - * "socket" match will be the first one, effectively all packets - * belonging to established connections going through that one. 
- */ -static inline struct sock * -nf_tproxy_get_sock_v4(struct net *net, const u8 protocol, - const __be32 saddr, const __be32 daddr, - const __be16 sport, const __be16 dport, - const struct net_device *in, int lookup_type) -{ - struct sock *sk; - - /* look up socket */ - switch (protocol) { - case IPPROTO_TCP: - switch (lookup_type) { - case NFT_LOOKUP_ANY: - sk = __inet_lookup(net, &tcp_hashinfo, - saddr, sport, daddr, dport, - in->ifindex); - break; - case NFT_LOOKUP_LISTENER: - sk = inet_lookup_listener(net, &tcp_hashinfo, - saddr, sport, - daddr, dport, - in->ifindex); - - /* NOTE: we return listeners even if bound to - * 0.0.0.0, those are filtered out in - * xt_socket, since xt_TPROXY needs 0 bound - * listeners too */ - - break; - case NFT_LOOKUP_ESTABLISHED: - sk = inet_lookup_established(net, &tcp_hashinfo, - saddr, sport, daddr, dport, - in->ifindex); - break; - default: - WARN_ON(1); - sk = NULL; - break; - } - break; - case IPPROTO_UDP: - sk = udp4_lib_lookup(net, saddr, sport, daddr, dport, - in->ifindex); - if (sk && lookup_type != NFT_LOOKUP_ANY) { - int connected = (sk->sk_state == TCP_ESTABLISHED); - int wildcard = (inet_sk(sk)->inet_rcv_saddr == 0); - - /* NOTE: we return listeners even if bound to - * 0.0.0.0, those are filtered out in - * xt_socket, since xt_TPROXY needs 0 bound - * listeners too */ - if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) || - (lookup_type == NFT_LOOKUP_LISTENER && connected)) { - sock_put(sk); - sk = NULL; - } - } - break; - default: - WARN_ON(1); - sk = NULL; - } - - pr_debug("tproxy socket lookup: proto %u %08x:%u -> %08x:%u, lookup type: %d, sock %p\n", - protocol, ntohl(saddr), ntohs(sport), ntohl(daddr), ntohs(dport), lookup_type, sk); - - return sk; -} - -#if IS_ENABLED(CONFIG_IPV6) -static inline struct sock * -nf_tproxy_get_sock_v6(struct net *net, const u8 protocol, - const struct in6_addr *saddr, const struct in6_addr *daddr, - const __be16 sport, const __be16 dport, - const struct net_device *in, int lookup_type) -{ - struct sock *sk; - - /* look up socket */ - switch (protocol) { - case IPPROTO_TCP: - switch (lookup_type) { - case NFT_LOOKUP_ANY: - sk = inet6_lookup(net, &tcp_hashinfo, - saddr, sport, daddr, dport, - in->ifindex); - break; - case NFT_LOOKUP_LISTENER: - sk = inet6_lookup_listener(net, &tcp_hashinfo, - saddr, sport, - daddr, ntohs(dport), - in->ifindex); - - /* NOTE: we return listeners even if bound to - * 0.0.0.0, those are filtered out in - * xt_socket, since xt_TPROXY needs 0 bound - * listeners too */ - - break; - case NFT_LOOKUP_ESTABLISHED: - sk = __inet6_lookup_established(net, &tcp_hashinfo, - saddr, sport, daddr, ntohs(dport), - in->ifindex); - break; - default: - WARN_ON(1); - sk = NULL; - break; - } - break; - case IPPROTO_UDP: - sk = udp6_lib_lookup(net, saddr, sport, daddr, dport, - in->ifindex); - if (sk && lookup_type != NFT_LOOKUP_ANY) { - int connected = (sk->sk_state == TCP_ESTABLISHED); - int wildcard = ipv6_addr_any(&inet6_sk(sk)->rcv_saddr); - - /* NOTE: we return listeners even if bound to - * 0.0.0.0, those are filtered out in - * xt_socket, since xt_TPROXY needs 0 bound - * listeners too */ - if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) || - (lookup_type == NFT_LOOKUP_LISTENER && connected)) { - sock_put(sk); - sk = NULL; - } - } - break; - default: - WARN_ON(1); - sk = NULL; - } - - pr_debug("tproxy socket lookup: proto %u %pI6:%u -> %pI6:%u, lookup type: %d, sock %p\n", - protocol, saddr, ntohs(sport), daddr, ntohs(dport), lookup_type, 
sk); - - return sk; -} -#endif - -#endif diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c index 17c40de..851383a 100644 --- a/net/netfilter/xt_TPROXY.c +++ b/net/netfilter/xt_TPROXY.c @@ -15,7 +15,9 @@ #include #include #include +#include #include +#include #include #include #include @@ -26,13 +28,18 @@ #define XT_TPROXY_HAVE_IPV6 1 #include #include +#include #include #include #endif -#include #include +enum nf_tproxy_lookup_t { + NFT_LOOKUP_LISTENER, + NFT_LOOKUP_ESTABLISHED, +}; + static bool tproxy_sk_is_transparent(struct sock *sk) { if (sk->sk_state != TCP_TIME_WAIT) { @@ -68,6 +75,157 @@ tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr) return laddr ? laddr : daddr; } +/* + * This is used when the user wants to intercept a connection matching + * an explicit iptables rule. In this case the sockets are assumed + * matching in preference order: + * + * - match: if there's a fully established connection matching the + * _packet_ tuple, it is returned, assuming the redirection + * already took place and we process a packet belonging to an + * established connection + * + * - match: if there's a listening socket matching the redirection + * (e.g. on-port & on-ip of the connection), it is returned, + * regardless if it was bound to 0.0.0.0 or an explicit + * address. The reasoning is that if there's an explicit rule, it + * does not really matter if the listener is bound to an interface + * or to 0. The user already stated that he wants redirection + * (since he added the rule). + * + * Please note that there's an overlap between what a TPROXY target + * and a socket match will match. Normally if you have both rules the + * "socket" match will be the first one, effectively all packets + * belonging to established connections going through that one. 
+ */ +static inline struct sock * +nf_tproxy_get_sock_v4(struct net *net, const u8 protocol, + const __be32 saddr, const __be32 daddr, + const __be16 sport, const __be16 dport, + const struct net_device *in, + const enum nf_tproxy_lookup_t lookup_type) +{ + struct sock *sk; + + switch (protocol) { + case IPPROTO_TCP: + switch (lookup_type) { + case NFT_LOOKUP_LISTENER: + sk = inet_lookup_listener(net, &tcp_hashinfo, + saddr, sport, + daddr, dport, + in->ifindex); + + /* NOTE: we return listeners even if bound to + * 0.0.0.0, those are filtered out in + * xt_socket, since xt_TPROXY needs 0 bound + * listeners too + */ + break; + case NFT_LOOKUP_ESTABLISHED: + sk = inet_lookup_established(net, &tcp_hashinfo, + saddr, sport, daddr, dport, + in->ifindex); + break; + default: + BUG(); + } + break; + case IPPROTO_UDP: + sk = udp4_lib_lookup(net, saddr, sport, daddr, dport, + in->ifindex); + if (sk) { + int connected = (sk->sk_state == TCP_ESTABLISHED); + int wildcard = (inet_sk(sk)->inet_rcv_saddr == 0); + + /* NOTE: we return listeners even if bound to + * 0.0.0.0, those are filtered out in + * xt_socket, since xt_TPROXY needs 0 bound + * listeners too + */ + if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) || + (lookup_type == NFT_LOOKUP_LISTENER && connected)) { + sock_put(sk); + sk = NULL; + } + } + break; + default: + WARN_ON(1); + sk = NULL; + } + + pr_debug("tproxy socket lookup: proto %u %08x:%u -> %08x:%u, lookup type: %d, sock %p\n", + protocol, ntohl(saddr), ntohs(sport), ntohl(daddr), ntohs(dport), lookup_type, sk); + + return sk; +} + +#if IS_ENABLED(CONFIG_IPV6) +static inline struct sock * +nf_tproxy_get_sock_v6(struct net *net, const u8 protocol, + const struct in6_addr *saddr, const struct in6_addr *daddr, + const __be16 sport, const __be16 dport, + const struct net_device *in, + const enum nf_tproxy_lookup_t lookup_type) +{ + struct sock *sk; + + switch (protocol) { + case IPPROTO_TCP: + switch (lookup_type) { + case NFT_LOOKUP_LISTENER: + sk = inet6_lookup_listener(net, &tcp_hashinfo, + saddr, sport, + daddr, ntohs(dport), + in->ifindex); + + /* NOTE: we return listeners even if bound to + * 0.0.0.0, those are filtered out in + * xt_socket, since xt_TPROXY needs 0 bound + * listeners too + */ + break; + case NFT_LOOKUP_ESTABLISHED: + sk = __inet6_lookup_established(net, &tcp_hashinfo, + saddr, sport, daddr, ntohs(dport), + in->ifindex); + break; + default: + BUG(); + } + break; + case IPPROTO_UDP: + sk = udp6_lib_lookup(net, saddr, sport, daddr, dport, + in->ifindex); + if (sk) { + int connected = (sk->sk_state == TCP_ESTABLISHED); + int wildcard = ipv6_addr_any(&inet6_sk(sk)->rcv_saddr); + + /* NOTE: we return listeners even if bound to + * 0.0.0.0, those are filtered out in + * xt_socket, since xt_TPROXY needs 0 bound + * listeners too + */ + if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) || + (lookup_type == NFT_LOOKUP_LISTENER && connected)) { + sock_put(sk); + sk = NULL; + } + } + break; + default: + WARN_ON(1); + sk = NULL; + } + + pr_debug("tproxy socket lookup: proto %u %pI6:%u -> %pI6:%u, lookup type: %d, sock %p\n", + protocol, saddr, ntohs(sport), daddr, ntohs(dport), lookup_type, sk); + + return sk; +} +#endif + /** * tproxy_handle_time_wait4 - handle IPv4 TCP TIME_WAIT reopen redirections * @skb: The skb being processed. 
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index f8b7191..a7dd108 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c @@ -19,12 +19,12 @@ #include #include #include -#include #include #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) #define XT_SOCKET_HAVE_IPV6 1 #include +#include #include #endif @@ -101,6 +101,43 @@ extract_icmp4_fields(const struct sk_buff *skb, return 0; } +/* "socket" match based redirection (no specific rule) + * =================================================== + * + * There are connections with dynamic endpoints (e.g. FTP data + * connection) that the user is unable to add explicit rules + * for. These are taken care of by a generic "socket" rule. It is + * assumed that the proxy application is trusted to open such + * connections without explicit iptables rule (except of course the + * generic 'socket' rule). In this case the following sockets are + * matched in preference order: + * + * - match: if there's a fully established connection matching the + * _packet_ tuple + * + * - match: if there's a non-zero bound listener (possibly with a + * non-local address) We don't accept zero-bound listeners, since + * then local services could intercept traffic going through the + * box. + */ +static struct sock * +xt_socket_get_sock_v4(struct net *net, const u8 protocol, + const __be32 saddr, const __be32 daddr, + const __be16 sport, const __be16 dport, + const struct net_device *in) +{ + switch (protocol) { + case IPPROTO_TCP: + return __inet_lookup(net, &tcp_hashinfo, + saddr, sport, daddr, dport, + in->ifindex); + case IPPROTO_UDP: + return udp4_lib_lookup(net, saddr, sport, daddr, dport, + in->ifindex); + } + return NULL; +} + static bool socket_match(const struct sk_buff *skb, struct xt_action_param *par, const struct xt_socket_mtinfo1 *info) @@ -156,9 +193,9 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par, #endif if (!sk) - sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), protocol, + sk = xt_socket_get_sock_v4(dev_net(skb->dev), protocol, saddr, daddr, sport, dport, - par->in, NFT_LOOKUP_ANY); + par->in); if (sk) { bool wildcard; bool transparent = true; @@ -261,6 +298,25 @@ extract_icmp6_fields(const struct sk_buff *skb, return 0; } +static struct sock * +xt_socket_get_sock_v6(struct net *net, const u8 protocol, + const struct in6_addr *saddr, const struct in6_addr *daddr, + const __be16 sport, const __be16 dport, + const struct net_device *in) +{ + switch (protocol) { + case IPPROTO_TCP: + return inet6_lookup(net, &tcp_hashinfo, + saddr, sport, daddr, dport, + in->ifindex); + case IPPROTO_UDP: + return udp6_lib_lookup(net, saddr, sport, daddr, dport, + in->ifindex); + } + + return NULL; +} + static bool socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par) { @@ -298,9 +354,9 @@ socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par) } if (!sk) - sk = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto, + sk = xt_socket_get_sock_v6(dev_net(skb->dev), tproto, saddr, daddr, sport, dport, - par->in, NFT_LOOKUP_ANY); + par->in); if (sk) { bool wildcard; bool transparent = true; -- cgit v0.10.2 From 02982c27ba1e1bd9f9d4747214e19ca83aa88d0e Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 29 Jul 2013 15:41:54 +0200 Subject: netfilter: nf_conntrack: remove duplicate code in ctnetlink ctnetlink contains copy-paste code from death_by_timeout. 
In order to avoid changing both places in upcoming event delivery patch, export death_by_timeout functionality and use it in the ctnetlink code. Based on earlier patch from Pablo Neira. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 644d9c2..939aced 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -181,8 +181,7 @@ __nf_conntrack_find(struct net *net, u16 zone, const struct nf_conntrack_tuple *tuple); extern int nf_conntrack_hash_check_insert(struct nf_conn *ct); -extern void nf_ct_delete_from_lists(struct nf_conn *ct); -extern void nf_ct_dying_timeout(struct nf_conn *ct); +bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report); extern void nf_conntrack_flush_report(struct net *net, u32 portid, int report); diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index d32afaf..089e408 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -238,7 +238,7 @@ destroy_conntrack(struct nf_conntrack *nfct) nf_conntrack_free(ct); } -void nf_ct_delete_from_lists(struct nf_conn *ct) +static void nf_ct_delete_from_lists(struct nf_conn *ct) { struct net *net = nf_ct_net(ct); @@ -253,7 +253,6 @@ void nf_ct_delete_from_lists(struct nf_conn *ct) &net->ct.dying); spin_unlock_bh(&nf_conntrack_lock); } -EXPORT_SYMBOL_GPL(nf_ct_delete_from_lists); static void death_by_event(unsigned long ul_conntrack) { @@ -275,7 +274,7 @@ static void death_by_event(unsigned long ul_conntrack) nf_ct_put(ct); } -void nf_ct_dying_timeout(struct nf_conn *ct) +static void nf_ct_dying_timeout(struct nf_conn *ct) { struct net *net = nf_ct_net(ct); struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct); @@ -288,27 +287,33 @@ void nf_ct_dying_timeout(struct nf_conn *ct) (prandom_u32() % net->ct.sysctl_events_retry_timeout); add_timer(&ecache->timeout); } -EXPORT_SYMBOL_GPL(nf_ct_dying_timeout); -static void death_by_timeout(unsigned long ul_conntrack) +bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report) { - struct nf_conn *ct = (void *)ul_conntrack; struct nf_conn_tstamp *tstamp; tstamp = nf_conn_tstamp_find(ct); if (tstamp && tstamp->stop == 0) tstamp->stop = ktime_to_ns(ktime_get_real()); - if (!test_bit(IPS_DYING_BIT, &ct->status) && - unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) { + if (!nf_ct_is_dying(ct) && + unlikely(nf_conntrack_event_report(IPCT_DESTROY, ct, + portid, report) < 0)) { /* destroy event was not delivered */ nf_ct_delete_from_lists(ct); nf_ct_dying_timeout(ct); - return; + return false; } set_bit(IPS_DYING_BIT, &ct->status); nf_ct_delete_from_lists(ct); nf_ct_put(ct); + return true; +} +EXPORT_SYMBOL_GPL(nf_ct_delete); + +static void death_by_timeout(unsigned long ul_conntrack) +{ + nf_ct_delete((struct nf_conn *)ul_conntrack, 0, 0); } /* @@ -643,10 +648,7 @@ static noinline int early_drop(struct net *net, unsigned int hash) return dropped; if (del_timer(&ct->timeout)) { - death_by_timeout((unsigned long)ct); - /* Check if we indeed killed this entry. Reliable event - delivery may have inserted it into the dying list. */ - if (test_bit(IPS_DYING_BIT, &ct->status)) { + if (nf_ct_delete(ct, 0, 0)) { dropped = 1; NF_CT_STAT_INC_ATOMIC(net, early_drop); } @@ -1253,6 +1255,7 @@ void nf_ct_iterate_cleanup(struct net *net, /* Time to push up daises... */ if (del_timer(&ct->timeout)) death_by_timeout((unsigned long)ct); + /* ... else the timer will get him soon. 
*/ nf_ct_put(ct); diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index edc410e..e842c0d 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -1038,21 +1038,9 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb, } } - if (del_timer(&ct->timeout)) { - if (nf_conntrack_event_report(IPCT_DESTROY, ct, - NETLINK_CB(skb).portid, - nlmsg_report(nlh)) < 0) { - nf_ct_delete_from_lists(ct); - /* we failed to report the event, try later */ - nf_ct_dying_timeout(ct); - nf_ct_put(ct); - return 0; - } - /* death_by_timeout would report the event again */ - set_bit(IPS_DYING_BIT, &ct->status); - nf_ct_delete_from_lists(ct); - nf_ct_put(ct); - } + if (del_timer(&ct->timeout)) + nf_ct_delete(ct, NETLINK_CB(skb).portid, nlmsg_report(nlh)); + nf_ct_put(ct); return 0; -- cgit v0.10.2 From 0658cdc8f3babb4a441f5a803a0b644fafcbf9ef Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Sun, 28 Jul 2013 22:54:09 +0200 Subject: netfilter: nf_nat: fix locking in nf_nat_seq_adjust() nf_nat_seq_adjust() needs to grab nf_nat_seqofs_lock to protect against concurrent changes to the sequence adjustment data. Signed-off-by: Patrick McHardy Signed-off-by: Pablo Neira Ayuso diff --git a/net/netfilter/nf_nat_helper.c b/net/netfilter/nf_nat_helper.c index 85e20a9..a7262ed 100644 --- a/net/netfilter/nf_nat_helper.c +++ b/net/netfilter/nf_nat_helper.c @@ -373,6 +373,7 @@ nf_nat_seq_adjust(struct sk_buff *skb, s16 seqoff, ackoff; struct nf_conn_nat *nat = nfct_nat(ct); struct nf_nat_seq *this_way, *other_way; + int res; dir = CTINFO2DIR(ctinfo); @@ -383,6 +384,7 @@ nf_nat_seq_adjust(struct sk_buff *skb, return 0; tcph = (void *)skb->data + protoff; + spin_lock_bh(&nf_nat_seqofs_lock); if (after(ntohl(tcph->seq), this_way->correction_pos)) seqoff = this_way->offset_after; else @@ -407,7 +409,10 @@ nf_nat_seq_adjust(struct sk_buff *skb, tcph->seq = newseq; tcph->ack_seq = newack; - return nf_nat_sack_adjust(skb, protoff, tcph, ct, ctinfo); + res = nf_nat_sack_adjust(skb, protoff, tcph, ct, ctinfo); + spin_unlock_bh(&nf_nat_seqofs_lock); + + return res; } /* Setup NAT on this expected conntrack so it follows master. */ -- cgit v0.10.2 From 2d89c68ac78ae432038ef23371d2fa949d725d43 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Sun, 28 Jul 2013 22:54:10 +0200 Subject: netfilter: nf_nat: change sequence number adjustments to 32 bits Using 16 bits is too small, when many adjustments happen the offsets might overflow and break the connection. Signed-off-by: Patrick McHardy Signed-off-by: Pablo Neira Ayuso diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index f4bbf2c..655d5d1 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -330,7 +330,7 @@ extern struct nfq_ct_hook __rcu *nfq_ct_hook; struct nfq_ct_nat_hook { void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct, - u32 ctinfo, int off); + u32 ctinfo, s32 off); }; extern struct nfq_ct_nat_hook __rcu *nfq_ct_nat_hook; #else diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 939aced..e5eb8b6 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -234,7 +234,7 @@ static inline bool nf_ct_kill(struct nf_conn *ct) } /* These are for NAT. Icky. 
*/ -extern s16 (*nf_ct_nat_offset)(const struct nf_conn *ct, +extern s32 (*nf_ct_nat_offset)(const struct nf_conn *ct, enum ip_conntrack_dir dir, u32 seq); diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h index ad14a79..e244141 100644 --- a/include/net/netfilter/nf_nat.h +++ b/include/net/netfilter/nf_nat.h @@ -19,7 +19,7 @@ struct nf_nat_seq { u_int32_t correction_pos; /* sequence number offset before and after last modification */ - int16_t offset_before, offset_after; + int32_t offset_before, offset_after; }; #include diff --git a/include/net/netfilter/nf_nat_helper.h b/include/net/netfilter/nf_nat_helper.h index b4d6bfc..194c347 100644 --- a/include/net/netfilter/nf_nat_helper.h +++ b/include/net/netfilter/nf_nat_helper.h @@ -41,7 +41,7 @@ extern int nf_nat_mangle_udp_packet(struct sk_buff *skb, extern void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo, - __be32 seq, s16 off); + __be32 seq, s32 off); extern int nf_nat_seq_adjust(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, @@ -56,11 +56,11 @@ extern int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb, extern void nf_nat_follow_master(struct nf_conn *ct, struct nf_conntrack_expect *this); -extern s16 nf_nat_get_offset(const struct nf_conn *ct, +extern s32 nf_nat_get_offset(const struct nf_conn *ct, enum ip_conntrack_dir dir, u32 seq); extern void nf_nat_tcp_seq_adjust(struct sk_buff *skb, struct nf_conn *ct, - u32 dir, int off); + u32 dir, s32 off); #endif diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 089e408..0934611 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1695,7 +1695,7 @@ err_stat: return ret; } -s16 (*nf_ct_nat_offset)(const struct nf_conn *ct, +s32 (*nf_ct_nat_offset)(const struct nf_conn *ct, enum ip_conntrack_dir dir, u32 seq); EXPORT_SYMBOL_GPL(nf_ct_nat_offset); diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 7dcc376..8f308d8 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -496,7 +496,7 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff, } #ifdef CONFIG_NF_NAT_NEEDED -static inline s16 nat_offset(const struct nf_conn *ct, +static inline s32 nat_offset(const struct nf_conn *ct, enum ip_conntrack_dir dir, u32 seq) { @@ -525,7 +525,7 @@ static bool tcp_in_window(const struct nf_conn *ct, struct ip_ct_tcp_state *receiver = &state->seen[!dir]; const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple; __u32 seq, ack, sack, end, win, swin; - s16 receiver_offset; + s32 receiver_offset; bool res; /* diff --git a/net/netfilter/nf_nat_helper.c b/net/netfilter/nf_nat_helper.c index a7262ed..ff4a589 100644 --- a/net/netfilter/nf_nat_helper.c +++ b/net/netfilter/nf_nat_helper.c @@ -68,13 +68,13 @@ adjust_tcp_sequence(u32 seq, } /* Get the offset value, for conntrack */ -s16 nf_nat_get_offset(const struct nf_conn *ct, +s32 nf_nat_get_offset(const struct nf_conn *ct, enum ip_conntrack_dir dir, u32 seq) { struct nf_conn_nat *nat = nfct_nat(ct); struct nf_nat_seq *this_way; - s16 offset; + s32 offset; if (!nat) return 0; @@ -143,7 +143,7 @@ static int enlarge_skb(struct sk_buff *skb, unsigned int extra) } void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo, - __be32 seq, s16 off) + __be32 seq, s32 off) { if (!off) return; @@ -370,7 +370,7 @@ nf_nat_seq_adjust(struct sk_buff *skb, struct tcphdr *tcph; int dir; __be32 
newseq, newack; - s16 seqoff, ackoff; + s32 seqoff, ackoff; struct nf_conn_nat *nat = nfct_nat(ct); struct nf_nat_seq *this_way, *other_way; int res; -- cgit v0.10.2 From 12e7ada385eada77854174ecaf469a0791277ddd Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Sun, 28 Jul 2013 22:54:11 +0200 Subject: netfilter: nf_nat: use per-conntrack locking for sequence number adjustments Get rid of the global lock and use per-conntrack locks for protecting the sequencen number adjustment data. Additionally saves one lock/unlock operation for every TCP packet. Signed-off-by: Patrick McHardy Signed-off-by: Pablo Neira Ayuso diff --git a/net/netfilter/nf_nat_helper.c b/net/netfilter/nf_nat_helper.c index ff4a589..46b9baa 100644 --- a/net/netfilter/nf_nat_helper.c +++ b/net/netfilter/nf_nat_helper.c @@ -30,8 +30,6 @@ pr_debug("offset_before=%d, offset_after=%d, correction_pos=%u\n", \ x->offset_before, x->offset_after, x->correction_pos); -static DEFINE_SPINLOCK(nf_nat_seqofs_lock); - /* Setup TCP sequence correction given this change at this sequence */ static inline void adjust_tcp_sequence(u32 seq, @@ -49,7 +47,7 @@ adjust_tcp_sequence(u32 seq, pr_debug("adjust_tcp_sequence: Seq_offset before: "); DUMP_OFFSET(this_way); - spin_lock_bh(&nf_nat_seqofs_lock); + spin_lock_bh(&ct->lock); /* SYN adjust. If it's uninitialized, or this is after last * correction, record it: we don't handle more than one @@ -61,31 +59,26 @@ adjust_tcp_sequence(u32 seq, this_way->offset_before = this_way->offset_after; this_way->offset_after += sizediff; } - spin_unlock_bh(&nf_nat_seqofs_lock); + spin_unlock_bh(&ct->lock); pr_debug("adjust_tcp_sequence: Seq_offset after: "); DUMP_OFFSET(this_way); } -/* Get the offset value, for conntrack */ +/* Get the offset value, for conntrack. Caller must have the conntrack locked */ s32 nf_nat_get_offset(const struct nf_conn *ct, enum ip_conntrack_dir dir, u32 seq) { struct nf_conn_nat *nat = nfct_nat(ct); struct nf_nat_seq *this_way; - s32 offset; if (!nat) return 0; this_way = &nat->seq[dir]; - spin_lock_bh(&nf_nat_seqofs_lock); - offset = after(seq, this_way->correction_pos) + return after(seq, this_way->correction_pos) ? this_way->offset_after : this_way->offset_before; - spin_unlock_bh(&nf_nat_seqofs_lock); - - return offset; } /* Frobs data inside this packet, which is linear. */ @@ -384,7 +377,7 @@ nf_nat_seq_adjust(struct sk_buff *skb, return 0; tcph = (void *)skb->data + protoff; - spin_lock_bh(&nf_nat_seqofs_lock); + spin_lock_bh(&ct->lock); if (after(ntohl(tcph->seq), this_way->correction_pos)) seqoff = this_way->offset_after; else @@ -410,7 +403,7 @@ nf_nat_seq_adjust(struct sk_buff *skb, tcph->ack_seq = newack; res = nf_nat_sack_adjust(skb, protoff, tcph, ct, ctinfo); - spin_unlock_bh(&nf_nat_seqofs_lock); + spin_unlock_bh(&ct->lock); return res; } -- cgit v0.10.2 From c0155b2da4cd6583b1b729451249ca346e1c05a2 Mon Sep 17 00:00:00 2001 From: Dmitry Popov Date: Wed, 31 Jul 2013 13:39:45 +0400 Subject: tcp: Remove unused tcpct declarations and comments Remove declaration, 4 defines and confusing comment that are no longer used since 1a2c6181c4 ("tcp: Remove TCPCT"). Signed-off-by: Dmitry Popov Acked-by: Christoph Paasch Signed-off-by: David S. 
Miller diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 9640803..d686334 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -107,7 +107,6 @@ static inline void tcp_clear_options(struct tcp_options_received *rx_opt) * only four options will fit in a standard TCP header */ #define TCP_NUM_SACKS 4 -struct tcp_cookie_values; struct tcp_request_sock_ops; struct tcp_request_sock { diff --git a/include/net/tcp.h b/include/net/tcp.h index 18fc999..27b652f 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -192,10 +192,6 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo); #define TCPOLEN_TIMESTAMP 10 #define TCPOLEN_MD5SIG 18 #define TCPOLEN_EXP_FASTOPEN_BASE 4 -#define TCPOLEN_COOKIE_BASE 2 /* Cookie-less header extension */ -#define TCPOLEN_COOKIE_PAIR 3 /* Cookie pair header extension */ -#define TCPOLEN_COOKIE_MIN (TCPOLEN_COOKIE_BASE+TCP_COOKIE_MIN) -#define TCPOLEN_COOKIE_MAX (TCPOLEN_COOKIE_BASE+TCP_COOKIE_MAX) /* But this is what stacks really send out. */ #define TCPOLEN_TSTAMP_ALIGNED 12 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index c27e813..ab64eea 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -410,10 +410,6 @@ void tcp_init_sock(struct sock *sk) icsk->icsk_sync_mss = tcp_sync_mss; - /* Presumed zeroed, in order of appearance: - * cookie_in_always, cookie_out_never, - * s_data_constant, s_data_in, s_data_out - */ sk->sk_sndbuf = sysctl_tcp_wmem[1]; sk->sk_rcvbuf = sysctl_tcp_rmem[1]; -- cgit v0.10.2 From ba361cb3d4c977e2b94b5d97905f66b4d48964de Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 31 Jul 2013 16:42:11 +0900 Subject: sh_eth: r8a7790: Handle the RFE (Receive FIFO overflow Error) interrupt The RFE interrupt is enabled for the r8a7790 but isn't handled, resulting in the interrupts core noticing unhandled interrupts, and eventually disabling the ethernet IRQ. Fix it by adding RFE to the bitmask of error interrupts to be handled for r8a7790. Signed-off-by: Laurent Pinchart Signed-off-by: Simon Horman Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index fedc0a0..9e2afe8 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -403,8 +403,9 @@ static struct sh_eth_cpu_data r8a7790_data = { .eesipr_value = 0x01ff009f, .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, - .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | - EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | + EESR_ECI, .apr = 1, .mpr = 1, -- cgit v0.10.2 From ca4c3fc24e293719fe7410c4e63da9b6bc633b83 Mon Sep 17 00:00:00 2001 From: "fan.du" Date: Tue, 30 Jul 2013 08:33:53 +0800 Subject: net: split rt_genid for ipv4 and ipv6 Current net name space has only one genid for both IPv4 and IPv6, it has below drawbacks: - Add/delete an IPv4 address will invalidate all IPv6 routing table entries. - Insert/remove XFRM policy will also invalidate both IPv4/IPv6 routing table entries even when the policy is only applied for one address family. Thus, this patch attempt to split one genid for two to cater for IPv4 and IPv6 separately in a fine granularity. Signed-off-by: Fan Du Acked-by: Hannes Frederic Sowa Signed-off-by: David S. 
Miller diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 84e37b1..1313456 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -119,7 +119,6 @@ struct net { struct netns_ipvs *ipvs; #endif struct sock *diag_nlsk; - atomic_t rt_genid; atomic_t fnhe_genid; }; @@ -333,14 +332,42 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header) } #endif -static inline int rt_genid(struct net *net) +static inline int rt_genid_ipv4(struct net *net) { - return atomic_read(&net->rt_genid); + return atomic_read(&net->ipv4.rt_genid); } -static inline void rt_genid_bump(struct net *net) +static inline void rt_genid_bump_ipv4(struct net *net) { - atomic_inc(&net->rt_genid); + atomic_inc(&net->ipv4.rt_genid); +} + +#if IS_ENABLED(CONFIG_IPV6) +static inline int rt_genid_ipv6(struct net *net) +{ + return atomic_read(&net->ipv6.rt_genid); +} + +static inline void rt_genid_bump_ipv6(struct net *net) +{ + atomic_inc(&net->ipv6.rt_genid); +} +#else +static inline int rt_genid_ipv6(struct net *net) +{ + return 0; +} + +static inline void rt_genid_bump_ipv6(struct net *net) +{ +} +#endif + +/* For callers who don't really care about whether it's IPv4 or IPv6 */ +static inline void rt_genid_bump_all(struct net *net) +{ + rt_genid_bump_ipv4(net); + rt_genid_bump_ipv6(net); } static inline int fnhe_genid(struct net *net) diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 2ba9de8..bf2ec22 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -77,5 +77,6 @@ struct netns_ipv4 { struct fib_rules_ops *mr_rules_ops; #endif #endif + atomic_t rt_genid; }; #endif diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index 005e2c2..0fb2401 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -72,6 +72,7 @@ struct netns_ipv6 { #endif #endif atomic_t dev_addr_genid; + atomic_t rt_genid; }; #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) diff --git a/net/ipv4/route.c b/net/ipv4/route.c index a9a54a2..e805481 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -435,12 +435,12 @@ static inline int ip_rt_proc_init(void) static inline bool rt_is_expired(const struct rtable *rth) { - return rth->rt_genid != rt_genid(dev_net(rth->dst.dev)); + return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev)); } void rt_cache_flush(struct net *net) { - rt_genid_bump(net); + rt_genid_bump_ipv4(net); } static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, @@ -1458,7 +1458,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, #endif rth->dst.output = ip_rt_bug; - rth->rt_genid = rt_genid(dev_net(dev)); + rth->rt_genid = rt_genid_ipv4(dev_net(dev)); rth->rt_flags = RTCF_MULTICAST; rth->rt_type = RTN_MULTICAST; rth->rt_is_input= 1; @@ -1589,7 +1589,7 @@ static int __mkroute_input(struct sk_buff *skb, goto cleanup; } - rth->rt_genid = rt_genid(dev_net(rth->dst.dev)); + rth->rt_genid = rt_genid_ipv4(dev_net(rth->dst.dev)); rth->rt_flags = flags; rth->rt_type = res->type; rth->rt_is_input = 1; @@ -1760,7 +1760,7 @@ local_input: rth->dst.tclassid = itag; #endif - rth->rt_genid = rt_genid(net); + rth->rt_genid = rt_genid_ipv4(net); rth->rt_flags = flags|RTCF_LOCAL; rth->rt_type = res.type; rth->rt_is_input = 1; @@ -1945,7 +1945,7 @@ add: rth->dst.output = ip_output; - rth->rt_genid = rt_genid(dev_net(dev_out)); + rth->rt_genid = rt_genid_ipv4(dev_net(dev_out)); rth->rt_flags = flags; rth->rt_type = type; rth->rt_is_input = 0; @@ -2227,7 +2227,7 @@ struct dst_entry 
*ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or rt->rt_iif = ort->rt_iif; rt->rt_pmtu = ort->rt_pmtu; - rt->rt_genid = rt_genid(net); + rt->rt_genid = rt_genid_ipv4(net); rt->rt_flags = ort->rt_flags; rt->rt_type = ort->rt_type; rt->rt_gateway = ort->rt_gateway; @@ -2665,7 +2665,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = { static __net_init int rt_genid_init(struct net *net) { - atomic_set(&net->rt_genid, 0); + atomic_set(&net->ipv4.rt_genid, 0); atomic_set(&net->fnhe_genid, 0); get_random_bytes(&net->ipv4.dev_addr_genid, sizeof(net->ipv4.dev_addr_genid)); diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index a5ac969..0d1a9b1 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -766,6 +766,7 @@ static int __net_init inet6_net_init(struct net *net) net->ipv6.sysctl.bindv6only = 0; net->ipv6.sysctl.icmpv6_time = 1*HZ; + atomic_set(&net->ipv6.rt_genid, 0); err = ipv6_init_mibs(net); if (err) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 74ab1f7..ce96163 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -283,7 +283,7 @@ static inline struct rt6_info *ip6_dst_alloc(struct net *net, memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst)); rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers); - rt->rt6i_genid = rt_genid(net); + rt->rt6i_genid = rt_genid_ipv6(net); INIT_LIST_HEAD(&rt->rt6i_siblings); } return rt; @@ -1061,7 +1061,7 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie) * DST_OBSOLETE_FORCE_CHK which forces validation calls down * into this function always. */ - if (rt->rt6i_genid != rt_genid(dev_net(rt->dst.dev))) + if (rt->rt6i_genid != rt_genid_ipv6(dev_net(rt->dst.dev))) return NULL; if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index e52cab3..d8da6b8 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -660,7 +660,13 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) xfrm_pol_hold(policy); net->xfrm.policy_count[dir]++; atomic_inc(&flow_cache_genid); - rt_genid_bump(net); + + /* After previous checking, family can either be AF_INET or AF_INET6 */ + if (policy->family == AF_INET) + rt_genid_bump_ipv4(net); + else + rt_genid_bump_ipv6(net); + if (delpol) { xfrm_policy_requeue(delpol, policy); __xfrm_policy_unlink(delpol, dir); diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h index 65f67cb..6713f04 100644 --- a/security/selinux/include/xfrm.h +++ b/security/selinux/include/xfrm.h @@ -50,8 +50,13 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall); static inline void selinux_xfrm_notify_policyload(void) { + struct net *net; + atomic_inc(&flow_cache_genid); - rt_genid_bump(&init_net); + rtnl_lock(); + for_each_net(net) + rt_genid_bump_all(net); + rtnl_unlock(); } #else static inline int selinux_xfrm_enabled(void) -- cgit v0.10.2 From f2f872f9272a79a1048877ea14c15576f46c225e Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 30 Jul 2013 17:55:08 -0700 Subject: netem: Introduce skb_orphan_partial() helper Commit 547669d483e578 ("tcp: xps: fix reordering issues") added unexpected reorders in case netem is used in a MQ setup for high performance test bed. ETH=eth0 tc qd del dev $ETH root 2>/dev/null tc qd add dev $ETH root handle 1: mq for i in `seq 1 32` do tc qd add dev $ETH parent 1:$i netem delay 100ms done As all tcp packets are orphaned by netem, TCP stack believes it can set skb->ooo_okay on all packets. 
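For context, a rough sketch of the check involved (illustrative only; the exact condition in the kernel's TCP transmit path may differ, and the helper name below is invented for this example): once every skb on a socket has been fully orphaned, the socket's write-memory accounting looks empty, so the stack assumes nothing is in flight and feels free to pick a new TX queue for the next packet, even though older packets are still sitting inside netem.

    #include <net/sock.h>

    /* Illustrative sketch, not the actual kernel code. */
    static bool tx_queue_switch_looks_safe(const struct sock *sk)
    {
            /* sk_wmem_alloc_get() subtracts the baseline reference of 1,
             * so zero means "no transmitted skbs are still accounted to
             * this socket", which is what lets ooo_okay be set.
             */
            return sk_wmem_alloc_get(sk) == 0;
    }
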
In order to allow producers to send more packets, we want to keep sk_wmem_alloc from reaching sk_sndbuf limit. We can do that by accounting one byte per skb in netem queues, so that TCP stack is not fooled too much. Tested: With above MQ/netem setup, scaling number of concurrent flows gives linear results and no reorders/retransmits lpq83:~# for n in 1 10 20 30 40 50 60 70 80 90 100 do echo -n "n:$n " ; ./super_netperf $n -H 10.7.7.84; done n:1 198.46 n:10 2002.69 n:20 4000.98 n:30 6006.35 n:40 8020.93 n:50 10032.3 n:60 12081.9 n:70 13971.3 n:80 16009.7 n:90 17117.3 n:100 17425.5 Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller diff --git a/include/net/sock.h b/include/net/sock.h index b9f2b09..53d4714 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1520,6 +1520,7 @@ extern struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, gfp_t priority); extern void sock_wfree(struct sk_buff *skb); +extern void skb_orphan_partial(struct sk_buff *skb); extern void sock_rfree(struct sk_buff *skb); extern void sock_edemux(struct sk_buff *skb); diff --git a/net/core/sock.c b/net/core/sock.c index 85e8de1..a753d97 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1576,6 +1576,25 @@ void sock_wfree(struct sk_buff *skb) } EXPORT_SYMBOL(sock_wfree); +void skb_orphan_partial(struct sk_buff *skb) +{ + /* TCP stack sets skb->ooo_okay based on sk_wmem_alloc, + * so we do not completely orphan skb, but transfer all + * accounted bytes but one, to avoid unexpected reorders. + */ + if (skb->destructor == sock_wfree +#ifdef CONFIG_INET + || skb->destructor == tcp_wfree +#endif + ) { + atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc); + skb->truesize = 1; + } else { + skb_orphan(skb); + } +} +EXPORT_SYMBOL(skb_orphan_partial); + /* * Read buffer destructor automatically called from kfree_skb. */ diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 82f6016..a6d788d 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -412,12 +412,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) /* If a delay is expected, orphan the skb. (orphaning usually takes * place at TX completion time, so _before_ the link transit delay) - * Ideally, this orphaning should be done after the rate limiting - * module, because this breaks TCP Small Queue, and other mechanisms - * based on socket sk_wmem_alloc. */ if (q->latency || q->jitter) - skb_orphan(skb); + skb_orphan_partial(skb); /* * If we need to duplicate packet, then re-insert at top of the -- cgit v0.10.2 From c34a761231b56dea4bd205cb9c29ffe37475d232 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 30 Jul 2013 16:11:15 -0700 Subject: net: skb_orphan() changes It is illegal to set skb->sk without a corresponding destructor. It's therefore safe for skb_orphan() to not clear skb->sk if skb->destructor is not set. Also avoid clearing skb->destructor if already NULL. Signed-off-by: Eric Dumazet Signed-off-by: David S.
Miller diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 5afefa0..a95547a 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1805,10 +1805,11 @@ static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len) */ static inline void skb_orphan(struct sk_buff *skb) { - if (skb->destructor) + if (skb->destructor) { skb->destructor(skb); - skb->destructor = NULL; - skb->sk = NULL; + skb->destructor = NULL; + skb->sk = NULL; + } } /** -- cgit v0.10.2 From 5c15257f93234aaa9775291a041b49eeb38fd95a Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 30 Jul 2013 22:47:13 -0700 Subject: net: Remove extern from include/net/ scheduling prototypes There are a mix of function prototypes with and without extern in the kernel sources. Standardize on not using extern for function prototypes. Function prototypes don't need to be written with extern. extern is assumed by the compiler. Its use is as unnecessary as using auto to declare automatic/local variables in a block. Reflow modified prototypes to 80 columns. Signed-off-by: Joe Perches Signed-off-by: David S. Miller diff --git a/include/net/act_api.h b/include/net/act_api.h index b8ffac7..9e90fdf 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -82,36 +82,36 @@ struct tc_action_ops { int (*walk)(struct sk_buff *, struct netlink_callback *, int, struct tc_action *); }; -extern struct tcf_common *tcf_hash_lookup(u32 index, - struct tcf_hashinfo *hinfo); -extern void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo); -extern int tcf_hash_release(struct tcf_common *p, int bind, - struct tcf_hashinfo *hinfo); -extern int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb, - int type, struct tc_action *a); -extern u32 tcf_hash_new_index(u32 *idx_gen, struct tcf_hashinfo *hinfo); -extern int tcf_hash_search(struct tc_action *a, u32 index); -extern struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, - int bind, struct tcf_hashinfo *hinfo); -extern struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est, - struct tc_action *a, int size, - int bind, u32 *idx_gen, - struct tcf_hashinfo *hinfo); -extern void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo); +struct tcf_common *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo); +void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo); +int tcf_hash_release(struct tcf_common *p, int bind, + struct tcf_hashinfo *hinfo); +int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb, + int type, struct tc_action *a); +u32 tcf_hash_new_index(u32 *idx_gen, struct tcf_hashinfo *hinfo); +int tcf_hash_search(struct tc_action *a, u32 index); +struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, + int bind, struct tcf_hashinfo *hinfo); +struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est, + struct tc_action *a, int size, + int bind, u32 *idx_gen, + struct tcf_hashinfo *hinfo); +void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo); -extern int tcf_register_action(struct tc_action_ops *a); -extern int tcf_unregister_action(struct tc_action_ops *a); -extern void tcf_action_destroy(struct tc_action *a, int bind); -extern int tcf_action_exec(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res); -extern struct tc_action *tcf_action_init(struct net *net, struct nlattr *nla, - struct nlattr *est, char *n, int ovr, - int bind); -extern struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla, 
- struct nlattr *est, char *n, int ovr, - int bind); -extern int tcf_action_dump(struct sk_buff *skb, struct tc_action *a, int, int); -extern int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int); -extern int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int); -extern int tcf_action_copy_stats (struct sk_buff *,struct tc_action *, int); +int tcf_register_action(struct tc_action_ops *a); +int tcf_unregister_action(struct tc_action_ops *a); +void tcf_action_destroy(struct tc_action *a, int bind); +int tcf_action_exec(struct sk_buff *skb, const struct tc_action *a, + struct tcf_result *res); +struct tc_action *tcf_action_init(struct net *net, struct nlattr *nla, + struct nlattr *est, char *n, int ovr, + int bind); +struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla, + struct nlattr *est, char *n, int ovr, + int bind); +int tcf_action_dump(struct sk_buff *skb, struct tc_action *a, int, int); +int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int); +int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int); +int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int); #endif /* CONFIG_NET_CLS_ACT */ #endif diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 1317450..2ebef77 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -14,8 +14,8 @@ struct tcf_walker { int (*fn)(struct tcf_proto *, unsigned long node, struct tcf_walker *); }; -extern int register_tcf_proto_ops(struct tcf_proto_ops *ops); -extern int unregister_tcf_proto_ops(struct tcf_proto_ops *ops); +int register_tcf_proto_ops(struct tcf_proto_ops *ops); +int unregister_tcf_proto_ops(struct tcf_proto_ops *ops); static inline unsigned long __cls_set_class(unsigned long *clp, unsigned long cl) @@ -126,17 +126,17 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts, return 0; } -extern int tcf_exts_validate(struct net *net, struct tcf_proto *tp, - struct nlattr **tb, struct nlattr *rate_tlv, - struct tcf_exts *exts, - const struct tcf_ext_map *map); -extern void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts); -extern void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst, - struct tcf_exts *src); -extern int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts, - const struct tcf_ext_map *map); -extern int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts, - const struct tcf_ext_map *map); +int tcf_exts_validate(struct net *net, struct tcf_proto *tp, + struct nlattr **tb, struct nlattr *rate_tlv, + struct tcf_exts *exts, + const struct tcf_ext_map *map); +void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts); +void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst, + struct tcf_exts *src); +int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts, + const struct tcf_ext_map *map); +int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts, + const struct tcf_ext_map *map); /** * struct tcf_pkt_info - packet information @@ -239,14 +239,14 @@ struct tcf_ematch_ops { struct list_head link; }; -extern int tcf_em_register(struct tcf_ematch_ops *); -extern void tcf_em_unregister(struct tcf_ematch_ops *); -extern int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *, - struct tcf_ematch_tree *); -extern void tcf_em_tree_destroy(struct tcf_proto *, struct tcf_ematch_tree *); -extern int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int); -extern int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree 
*, - struct tcf_pkt_info *); +int tcf_em_register(struct tcf_ematch_ops *); +void tcf_em_unregister(struct tcf_ematch_ops *); +int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *, + struct tcf_ematch_tree *); +void tcf_em_tree_destroy(struct tcf_proto *, struct tcf_ematch_tree *); +int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int); +int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *, + struct tcf_pkt_info *); /** * tcf_em_tree_change - replace ematch tree of a running classifier diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 388bf8b..f7c24f8 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -64,8 +64,8 @@ struct qdisc_watchdog { struct Qdisc *qdisc; }; -extern void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc); -extern void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires); +void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc); +void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires); static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires) @@ -73,31 +73,31 @@ static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires)); } -extern void qdisc_watchdog_cancel(struct qdisc_watchdog *wd); +void qdisc_watchdog_cancel(struct qdisc_watchdog *wd); extern struct Qdisc_ops pfifo_qdisc_ops; extern struct Qdisc_ops bfifo_qdisc_ops; extern struct Qdisc_ops pfifo_head_drop_qdisc_ops; -extern int fifo_set_limit(struct Qdisc *q, unsigned int limit); -extern struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops, - unsigned int limit); - -extern int register_qdisc(struct Qdisc_ops *qops); -extern int unregister_qdisc(struct Qdisc_ops *qops); -extern void qdisc_list_del(struct Qdisc *q); -extern struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle); -extern struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle); -extern struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, - struct nlattr *tab); -extern void qdisc_put_rtab(struct qdisc_rate_table *tab); -extern void qdisc_put_stab(struct qdisc_size_table *tab); -extern void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc); -extern int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, - struct net_device *dev, struct netdev_queue *txq, - spinlock_t *root_lock); - -extern void __qdisc_run(struct Qdisc *q); +int fifo_set_limit(struct Qdisc *q, unsigned int limit); +struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops, + unsigned int limit); + +int register_qdisc(struct Qdisc_ops *qops); +int unregister_qdisc(struct Qdisc_ops *qops); +void qdisc_list_del(struct Qdisc *q); +struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle); +struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle); +struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, + struct nlattr *tab); +void qdisc_put_rtab(struct qdisc_rate_table *tab); +void qdisc_put_stab(struct qdisc_size_table *tab); +void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc); +int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, + struct net_device *dev, struct netdev_queue *txq, + spinlock_t *root_lock); + +void __qdisc_run(struct Qdisc *q); static inline void qdisc_run(struct Qdisc *q) { @@ -105,10 +105,10 @@ static inline void qdisc_run(struct Qdisc *q) __qdisc_run(q); } -extern int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp, - 
struct tcf_result *res); -extern int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp, +int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res); +int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp, + struct tcf_result *res); /* Calculate maximal size of packet seen by hard_start_xmit routine of this device. diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 6eab6336..ffc9d88 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -350,30 +350,32 @@ qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id) return NULL; } -extern int qdisc_class_hash_init(struct Qdisc_class_hash *); -extern void qdisc_class_hash_insert(struct Qdisc_class_hash *, struct Qdisc_class_common *); -extern void qdisc_class_hash_remove(struct Qdisc_class_hash *, struct Qdisc_class_common *); -extern void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *); -extern void qdisc_class_hash_destroy(struct Qdisc_class_hash *); - -extern void dev_init_scheduler(struct net_device *dev); -extern void dev_shutdown(struct net_device *dev); -extern void dev_activate(struct net_device *dev); -extern void dev_deactivate(struct net_device *dev); -extern void dev_deactivate_many(struct list_head *head); -extern struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, - struct Qdisc *qdisc); -extern void qdisc_reset(struct Qdisc *qdisc); -extern void qdisc_destroy(struct Qdisc *qdisc); -extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n); -extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, - struct Qdisc_ops *ops); -extern struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, - struct Qdisc_ops *ops, u32 parentid); -extern void __qdisc_calculate_pkt_len(struct sk_buff *skb, - const struct qdisc_size_table *stab); -extern void tcf_destroy(struct tcf_proto *tp); -extern void tcf_destroy_chain(struct tcf_proto **fl); +int qdisc_class_hash_init(struct Qdisc_class_hash *); +void qdisc_class_hash_insert(struct Qdisc_class_hash *, + struct Qdisc_class_common *); +void qdisc_class_hash_remove(struct Qdisc_class_hash *, + struct Qdisc_class_common *); +void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *); +void qdisc_class_hash_destroy(struct Qdisc_class_hash *); + +void dev_init_scheduler(struct net_device *dev); +void dev_shutdown(struct net_device *dev); +void dev_activate(struct net_device *dev); +void dev_deactivate(struct net_device *dev); +void dev_deactivate_many(struct list_head *head); +struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, + struct Qdisc *qdisc); +void qdisc_reset(struct Qdisc *qdisc); +void qdisc_destroy(struct Qdisc *qdisc); +void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n); +struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, + struct Qdisc_ops *ops); +struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, + struct Qdisc_ops *ops, u32 parentid); +void __qdisc_calculate_pkt_len(struct sk_buff *skb, + const struct qdisc_size_table *stab); +void tcf_destroy(struct tcf_proto *tp); +void tcf_destroy_chain(struct tcf_proto **fl); /* Reset all TX qdiscs greater then index of a device. 
*/ static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i) @@ -692,7 +694,8 @@ static inline u64 psched_l2t_ns(const struct psched_ratecfg *r, return ((u64)(len + r->overhead) * r->mult) >> r->shift; } -extern void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf); +void psched_ratecfg_precompute(struct psched_ratecfg *r, + const struct tc_ratespec *conf); static inline void psched_ratecfg_getrate(struct tc_ratespec *res, const struct psched_ratecfg *r) -- cgit v0.10.2 From 7764a45a8f1fe74d4f7d301eaca2e558e7e2831a Mon Sep 17 00:00:00 2001 From: Stefan Tomanek Date: Thu, 1 Aug 2013 02:17:15 +0200 Subject: fib_rules: add .suppress operation This change adds a new operation to the fib_rules_ops struct; it allows the suppression of routing decisions if certain criteria are not met by its results. The first implemented constraint is a minimum prefix length added to the structures of routing rules. If a rule is added with a minimum prefix length >0, only routes meeting this threshold will be considered. Any other (more general) routing table entries will be ignored. When configuring a system with multiple network uplinks and default routes, it is often convinient to reference the main routing table multiple times - but omitting the default route. Using this patch and a modified "ip" utility, this can be achieved by using the following command sequence: $ ip route add table secuplink default via 10.42.23.1 $ ip rule add pref 100 table main prefixlength 1 $ ip rule add pref 150 fwmark 0xA table secuplink With this setup, packets marked 0xA will be processed by the additional routing table "secuplink", but only if no suitable route in the main routing table can be found. By using a minimal prefixlength of 1, the default route (/0) of the table "main" is hidden to packets processed by rule 100; packets traveling to destinations with more specific routing entries are processed as usual. Signed-off-by: Stefan Tomanek Signed-off-by: David S. 
Miller diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h index e361f48..2f286dc 100644 --- a/include/net/fib_rules.h +++ b/include/net/fib_rules.h @@ -18,6 +18,7 @@ struct fib_rule { u32 pref; u32 flags; u32 table; + u8 table_prefixlen_min; u8 action; u32 target; struct fib_rule __rcu *ctarget; @@ -46,6 +47,8 @@ struct fib_rules_ops { int (*action)(struct fib_rule *, struct flowi *, int, struct fib_lookup_arg *); + bool (*suppress)(struct fib_rule *, + struct fib_lookup_arg *); int (*match)(struct fib_rule *, struct flowi *, int); int (*configure)(struct fib_rule *, @@ -80,6 +83,7 @@ struct fib_rules_ops { [FRA_FWMARK] = { .type = NLA_U32 }, \ [FRA_FWMASK] = { .type = NLA_U32 }, \ [FRA_TABLE] = { .type = NLA_U32 }, \ + [FRA_TABLE_PREFIXLEN_MIN] = { .type = NLA_U8 }, \ [FRA_GOTO] = { .type = NLA_U32 } static inline void fib_rule_get(struct fib_rule *rule) diff --git a/include/uapi/linux/fib_rules.h b/include/uapi/linux/fib_rules.h index 51da65b..59cd31b 100644 --- a/include/uapi/linux/fib_rules.h +++ b/include/uapi/linux/fib_rules.h @@ -45,7 +45,7 @@ enum { FRA_FLOW, /* flow/class id */ FRA_UNUSED6, FRA_UNUSED7, - FRA_UNUSED8, + FRA_TABLE_PREFIXLEN_MIN, FRA_TABLE, /* Extended table id */ FRA_FWMASK, /* mask for netfilter mark */ FRA_OIFNAME, diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 2173544..2ef5040 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -226,6 +226,9 @@ jumped: else err = ops->action(rule, fl, flags, arg); + if (!err && ops->suppress && ops->suppress(rule, arg)) + continue; + if (err != -EAGAIN) { if ((arg->flags & FIB_LOOKUP_NOREF) || likely(atomic_inc_not_zero(&rule->refcnt))) { @@ -337,6 +340,8 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh) rule->action = frh->action; rule->flags = frh->flags; rule->table = frh_get_table(frh, tb); + if (tb[FRA_TABLE_PREFIXLEN_MIN]) + rule->table_prefixlen_min = nla_get_u8(tb[FRA_TABLE_PREFIXLEN_MIN]); if (!tb[FRA_PRIORITY] && ops->default_pref) rule->pref = ops->default_pref(ops); @@ -523,6 +528,7 @@ static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops, + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */ + nla_total_size(4) /* FRA_PRIORITY */ + nla_total_size(4) /* FRA_TABLE */ + + nla_total_size(1) /* FRA_TABLE_PREFIXLEN_MIN */ + nla_total_size(4) /* FRA_FWMARK */ + nla_total_size(4); /* FRA_FWMASK */ @@ -548,6 +554,8 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule, frh->table = rule->table; if (nla_put_u32(skb, FRA_TABLE, rule->table)) goto nla_put_failure; + if (nla_put_u8(skb, FRA_TABLE_PREFIXLEN_MIN, rule->table_prefixlen_min)) + goto nla_put_failure; frh->res1 = 0; frh->res2 = 0; frh->action = rule->action; diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index 26aa65d..9f29066 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -101,6 +101,19 @@ errout: return err; } +static bool fib4_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg) +{ + /* do not accept result if the route does + * not meet the required prefix length + */ + struct fib_result *result = (struct fib_result *) arg->result; + if (result->prefixlen < rule->table_prefixlen_min) { + if (!(arg->flags & FIB_LOOKUP_NOREF)) + fib_info_put(result->fi); + return true; + } + return false; +} static int fib4_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) { @@ -267,6 +280,7 @@ static const struct fib_rules_ops __net_initconst fib4_rules_ops_template = { .rule_size = sizeof(struct fib4_rule), .addr_size = sizeof(u32), .action = 
fib4_rule_action, + .suppress = fib4_rule_suppress, .match = fib4_rule_match, .configure = fib4_rule_configure, .delete = fib4_rule_delete, diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index 2e1a432..e64e6a5 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -111,6 +111,18 @@ out: return rt == NULL ? -EAGAIN : 0; } +static bool fib6_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg) +{ + struct rt6_info *rt = (struct rt6_info *) arg->result; + /* do not accept result if the route does + * not meet the required prefix length + */ + if (rt->rt6i_dst.plen < rule->table_prefixlen_min) { + ip6_rt_put(rt); + return true; + } + return false; +} static int fib6_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) { @@ -244,6 +256,7 @@ static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = { .addr_size = sizeof(struct in6_addr), .action = fib6_rule_action, .match = fib6_rule_match, + .suppress = fib6_rule_suppress, .configure = fib6_rule_configure, .compare = fib6_rule_compare, .fill = fib6_rule_fill, -- cgit v0.10.2 From 49dfe76261e427f5521b40321fbc3d947350165d Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Wed, 31 Jul 2013 15:16:20 -0400 Subject: Documentation: add networking/netdev-FAQ.txt A collection of expectations and operational details about how networking development takes place in the context of the netdev mailing list. The content is meant to capture specific items that are unique to netdev workflow, and not re-document generic linux expectations that are already captured elsewhere. This was originally proposed[1] as a regular posting mailing list FAQ, but it probably is more universally accessible here in tree. [1] https://lwn.net/Articles/559211/ Signed-off-by: Paul Gortmaker Signed-off-by: David S. Miller diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX index 32dfbd9..18b64b2 100644 --- a/Documentation/networking/00-INDEX +++ b/Documentation/networking/00-INDEX @@ -124,6 +124,8 @@ multiqueue.txt - HOWTO for multiqueue network device support. netconsole.txt - The network console module netconsole.ko: configuration and notes. +netdev-FAQ.txt + - FAQ describing how to submit net changes to netdev mailing list. netdev-features.txt - Network interface features API description. netdevices.txt diff --git a/Documentation/networking/netdev-FAQ.txt b/Documentation/networking/netdev-FAQ.txt new file mode 100644 index 0000000..d9112f0 --- /dev/null +++ b/Documentation/networking/netdev-FAQ.txt @@ -0,0 +1,224 @@ + +Information you need to know about netdev +----------------------------------------- + +Q: What is netdev? + +A: It is a mailing list for all network related linux stuff. This includes + anything found under net/ (i.e. core code like IPv6) and drivers/net + (i.e. hardware specific drivers) in the linux source tree. + + Note that some subsystems (e.g. wireless drivers) which have a high volume + of traffic have their own specific mailing lists. + + The netdev list is managed (like many other linux mailing lists) through + VGER ( http://vger.kernel.org/ ) and archives can be found below: + + http://marc.info/?l=linux-netdev + http://www.spinics.net/lists/netdev/ + + Aside from subsystems like that mentioned above, all network related linux + development (i.e. RFC, review, comments, etc) takes place on netdev. + +Q: How do the changes posted to netdev make their way into linux? + +A: There are always two trees (git repositories) in play. 
Both are driven + by David Miller, the main network maintainer. There is the "net" tree, + and the "net-next" tree. As you can probably guess from the names, the + net tree is for fixes to existing code already in the mainline tree from + Linus, and net-next is where the new code goes for the future release. + You can find the trees here: + + http://git.kernel.org/?p=linux/kernel/git/davem/net.git + http://git.kernel.org/?p=linux/kernel/git/davem/net-next.git + +Q: How often do changes from these trees make it to the mainline Linus tree? + +A: To understand this, you need to know a bit of background information + on the cadence of linux development. Each new release starts off with + a two week "merge window" where the main maintainers feed their new + stuff to Linus for merging into the mainline tree. After the two weeks, + the merge window is closed, and it is called/tagged "-rc1". No new + features get mainlined after this -- only fixes to the rc1 content + are expected. After roughly a week of collecting fixes to the rc1 + content, rc2 is released. This repeats on a roughly weekly basis + until rc7 (typically; sometimes rc6 if things are quiet, or rc8 if + things are in a state of churn), and a week after the last vX.Y-rcN + was done, the official "vX.Y" is released. + + Relating that to netdev: At the beginning of the 2 week merge window, + the net-next tree will be closed - no new changes/features. The + accumulated new content of the past ~10 weeks will be passed onto + mainline/Linus via a pull request for vX.Y -- at the same time, + the "net" tree will start accumulating fixes for this pulled content + relating to vX.Y + + An announcement indicating when net-next has been closed is usually + sent to netdev, but knowing the above, you can predict that in advance. + + IMPORTANT: Do not send new net-next content to netdev during the + period during which net-next tree is closed. + + Shortly after the two weeks have passed, (and vX.Y-rc1 is released) the + tree for net-next reopens to collect content for the next (vX.Y+1) release. + + If you aren't subscribed to netdev and/or are simply unsure if net-next + has re-opened yet, simply check the net-next git repository link above for + any new networking related commits. + + The "net" tree continues to collect fixes for the vX.Y content, and + is fed back to Linus at regular (~weekly) intervals. Meaning that the + focus for "net" is on stablilization and bugfixes. + + Finally, the vX.Y gets released, and the whole cycle starts over. + +Q: So where are we now in this cycle? + +A: Load the mainline (Linus) page here: + + http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git + + and note the top of the "tags" section. If it is rc1, it is early + in the dev cycle. If it was tagged rc7 a week ago, then a release + is probably imminent. + +Q: How do I indicate which tree (net vs. net-next) my patch should be in? + +A: Firstly, think whether you have a bug fix or new "next-like" content. + Then once decided, assuming that you use git, use the prefix flag, i.e. + + git format-patch --subject-prefix='PATCH net-next' start..finish + + Use "net" instead of "net-next" (always lower case) in the above for + bug-fix net content. If you don't use git, then note the only magic in + the above is just the subject text of the outgoing e-mail, and you can + manually change it yourself with whatever MUA you are comfortable with. + +Q: I sent a patch and I'm wondering what happened to it. How can I tell + whether it got merged? 
+ +A: Start by looking at the main patchworks queue for netdev: + + http://patchwork.ozlabs.org/project/netdev/list/ + + The "State" field will tell you exactly where things are at with + your patch. + +Q: The above only says "Under Review". How can I find out more? + +A: Generally speaking, the patches get triaged quickly (in less than 48h). + So be patient. Asking the maintainer for status updates on your + patch is a good way to ensure your patch is ignored or pushed to + the bottom of the priority list. + +Q: How can I tell what patches are queued up for backporting to the + various stable releases? + +A: Normally Greg Kroah-Hartman collects stable commits himself, but + for networking, Dave collects up patches he deems critical for the + networking subsystem, and then hands them off to Greg. + + There is a patchworks queue that you can see here: + http://patchwork.ozlabs.org/bundle/davem/stable/?state=* + + It contains the patches which Dave has selected, but not yet handed + off to Greg. If Greg already has the patch, then it will be here: + http://git.kernel.org/cgit/linux/kernel/git/stable/stable-queue.git + + A quick way to find whether the patch is in this stable-queue is + to simply clone the repo, and then git grep the mainline commit ID, e.g. + + stable-queue$ git grep -l 284041ef21fdf2e + releases/3.0.84/ipv6-fix-possible-crashes-in-ip6_cork_release.patch + releases/3.4.51/ipv6-fix-possible-crashes-in-ip6_cork_release.patch + releases/3.9.8/ipv6-fix-possible-crashes-in-ip6_cork_release.patch + stable/stable-queue$ + +Q: I see a network patch and I think it should be backported to stable. + Should I request it via "stable@vger.kernel.org" like the references in + the kernel's Documentation/stable_kernel_rules.txt file say? + +A: No, not for networking. Check the stable queues as per above 1st to see + if it is already queued. If not, then send a mail to netdev, listing + the upstream commit ID and why you think it should be a stable candidate. + + Before you jump to go do the above, do note that the normal stable rules + in Documentation/stable_kernel_rules.txt still apply. So you need to + explicitly indicate why it is a critical fix and exactly what users are + impacted. In addition, you need to convince yourself that you _really_ + think it has been overlooked, vs. having been considered and rejected. + + Generally speaking, the longer it has had a chance to "soak" in mainline, + the better the odds that it is an OK candidate for stable. So scrambling + to request a commit be added the day after it appears should be avoided. + +Q: I have created a network patch and I think it should be backported to + stable. Should I add a "Cc: stable@vger.kernel.org" like the references + in the kernel's Documentation/ directory say? + +A: No. See above answer. In short, if you think it really belongs in + stable, then ensure you write a decent commit log that describes who + gets impacted by the bugfix and how it manifests itself, and when the + bug was introduced. If you do that properly, then the commit will + get handled appropriately and most likely get put in the patchworks + stable queue if it really warrants it. + + If you think there is some valid information relating to it being in + stable that does _not_ belong in the commit log, then use the three + dash marker line as described in Documentation/SubmittingPatches to + temporarily embed that information into the patch that you send. 
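    For instance (a made-up patch footer, purely to illustrate the three-dash
    trick; the name, commit ID and file are invented), the note for the stable
    maintainers sits between the "---" line and the diffstat, so it travels
    with the mail but is not included in the commit when the patch is applied:

        Signed-off-by: Jane Developer <jane@example.com>
        ---
        Note for stable: the bug only bites after commit abcd1234
        ("hypothetical example commit"), so 3.10.y and later need this;
        earlier trees do not.

         net/core/dev.c | 2 +-
         1 file changed, 1 insertion(+), 1 deletion(-)
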
+ +Q: Someone said that the comment style and coding convention is different + for the networking content. Is this true? + +A: Yes, in a largely trivial way. Instead of this: + + /* + * foobar blah blah blah + * another line of text + */ + + it is requested that you make it look like this: + + /* foobar blah blah blah + * another line of text + */ + +Q: I am working in existing code that has the former comment style and not the + latter. Should I submit new code in the former style or the latter? + +A: Make it the latter style, so that eventually all code in the domain of + netdev is of this format. + +Q: I found a bug that might have possible security implications or similar. + Should I mail the main netdev maintainer off-list? + +A: No. The current netdev maintainer has consistently requested that people + use the mailing lists and not reach out directly. If you aren't OK with + that, then perhaps consider mailing "security@kernel.org" or reading about + http://oss-security.openwall.org/wiki/mailing-lists/distros + as possible alternative mechanisms. + +Q: What level of testing is expected before I submit my change? + +A: If your changes are against net-next, the expectation is that you + have tested by layering your changes on top of net-next. Ideally you + will have done run-time testing specific to your change, but at a + minimum, your changes should survive an "allyesconfig" and an + "allmodconfig" build without new warnings or failures. + +Q: Any other tips to help ensure my net/net-next patch gets OK'd? + +A: Attention to detail. Re-read your own work as if you were the + reviewer. You can start with using checkpatch.pl, perhaps even + with the "--strict" flag. But do not be mindlessly robotic in + doing so. If your change is a bug-fix, make sure your commit log + indicates the end-user visible symptom, the underlying reason as + to why it happens, and then if necessary, explain why the fix proposed + is the best way to get things done. Don't mangle whitespace, and as + is common, don't mis-indent function arguments that span multiple lines. + If it is your 1st patch, mail it to yourself so you can test apply + it to an unpatched tree to confirm infrastructure didn't mangle it. + + Finally, go back and read Documentation/SubmittingPatches to be + sure you are not repeating some common mistake documented there. -- cgit v0.10.2 From e8e54d3c13b35479294f3dceb6bd6ed7a7dc5bf8 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 31 Jul 2013 17:31:32 -0700 Subject: addrconf.h: Remove extern function prototypes There are a mix of function prototypes with and without extern in the kernel sources. Standardize on not using extern for function prototypes. Function prototypes don't need to be written with extern. extern is assumed by the compiler. Its use is as unnecessary as using auto to declare automatic/local variables in a block. Reflow modified prototypes to 80 columns. Signed-off-by: Joe Perches Signed-off-by: David S. 
Miller diff --git a/include/net/addrconf.h b/include/net/addrconf.h index c7b181c..43fa31a 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -53,51 +53,36 @@ struct prefix_info { #define IN6_ADDR_HSIZE_SHIFT 4 #define IN6_ADDR_HSIZE (1 << IN6_ADDR_HSIZE_SHIFT) -extern int addrconf_init(void); -extern void addrconf_cleanup(void); +int addrconf_init(void); +void addrconf_cleanup(void); -extern int addrconf_add_ifaddr(struct net *net, - void __user *arg); -extern int addrconf_del_ifaddr(struct net *net, - void __user *arg); -extern int addrconf_set_dstaddr(struct net *net, - void __user *arg); +int addrconf_add_ifaddr(struct net *net, void __user *arg); +int addrconf_del_ifaddr(struct net *net, void __user *arg); +int addrconf_set_dstaddr(struct net *net, void __user *arg); -extern int ipv6_chk_addr(struct net *net, - const struct in6_addr *addr, - const struct net_device *dev, - int strict); +int ipv6_chk_addr(struct net *net, const struct in6_addr *addr, + const struct net_device *dev, int strict); #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) -extern int ipv6_chk_home_addr(struct net *net, - const struct in6_addr *addr); +int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr); #endif -extern int ipv6_chk_prefix(const struct in6_addr *addr, - struct net_device *dev); - -extern struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, - const struct in6_addr *addr, - struct net_device *dev, - int strict); - -extern int ipv6_dev_get_saddr(struct net *net, - const struct net_device *dev, - const struct in6_addr *daddr, - unsigned int srcprefs, - struct in6_addr *saddr); -extern int __ipv6_get_lladdr(struct inet6_dev *idev, - struct in6_addr *addr, - unsigned char banned_flags); -extern int ipv6_get_lladdr(struct net_device *dev, - struct in6_addr *addr, - unsigned char banned_flags); -extern int ipv6_rcv_saddr_equal(const struct sock *sk, - const struct sock *sk2); -extern void addrconf_join_solict(struct net_device *dev, - const struct in6_addr *addr); -extern void addrconf_leave_solict(struct inet6_dev *idev, - const struct in6_addr *addr); +int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev); + +struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, + const struct in6_addr *addr, + struct net_device *dev, int strict); + +int ipv6_dev_get_saddr(struct net *net, const struct net_device *dev, + const struct in6_addr *daddr, unsigned int srcprefs, + struct in6_addr *saddr); +int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr, + unsigned char banned_flags); +int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, + unsigned char banned_flags); +int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2); +void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr); +void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr); static inline unsigned long addrconf_timeout_fixup(u32 timeout, unsigned int unit) @@ -124,41 +109,38 @@ static inline int addrconf_finite_timeout(unsigned long timeout) /* * IPv6 Address Label subsystem (addrlabel.c) */ -extern int ipv6_addr_label_init(void); -extern void ipv6_addr_label_cleanup(void); -extern void ipv6_addr_label_rtnl_register(void); -extern u32 ipv6_addr_label(struct net *net, - const struct in6_addr *addr, - int type, int ifindex); +int ipv6_addr_label_init(void); +void ipv6_addr_label_cleanup(void); +void ipv6_addr_label_rtnl_register(void); +u32 ipv6_addr_label(struct net *net, const struct in6_addr *addr, 
+ int type, int ifindex); /* * multicast prototypes (mcast.c) */ -extern int ipv6_sock_mc_join(struct sock *sk, int ifindex, - const struct in6_addr *addr); -extern int ipv6_sock_mc_drop(struct sock *sk, int ifindex, - const struct in6_addr *addr); -extern void ipv6_sock_mc_close(struct sock *sk); -extern bool inet6_mc_check(struct sock *sk, - const struct in6_addr *mc_addr, - const struct in6_addr *src_addr); - -extern int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr); -extern int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr); -extern int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr); -extern void ipv6_mc_up(struct inet6_dev *idev); -extern void ipv6_mc_down(struct inet6_dev *idev); -extern void ipv6_mc_unmap(struct inet6_dev *idev); -extern void ipv6_mc_remap(struct inet6_dev *idev); -extern void ipv6_mc_init_dev(struct inet6_dev *idev); -extern void ipv6_mc_destroy_dev(struct inet6_dev *idev); -extern void addrconf_dad_failure(struct inet6_ifaddr *ifp); - -extern bool ipv6_chk_mcast_addr(struct net_device *dev, - const struct in6_addr *group, - const struct in6_addr *src_addr); - -extern void ipv6_mc_dad_complete(struct inet6_dev *idev); +int ipv6_sock_mc_join(struct sock *sk, int ifindex, + const struct in6_addr *addr); +int ipv6_sock_mc_drop(struct sock *sk, int ifindex, + const struct in6_addr *addr); +void ipv6_sock_mc_close(struct sock *sk); +bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr, + const struct in6_addr *src_addr); + +int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr); +int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr); +int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr); +void ipv6_mc_up(struct inet6_dev *idev); +void ipv6_mc_down(struct inet6_dev *idev); +void ipv6_mc_unmap(struct inet6_dev *idev); +void ipv6_mc_remap(struct inet6_dev *idev); +void ipv6_mc_init_dev(struct inet6_dev *idev); +void ipv6_mc_destroy_dev(struct inet6_dev *idev); +void addrconf_dad_failure(struct inet6_ifaddr *ifp); + +bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group, + const struct in6_addr *src_addr); + +void ipv6_mc_dad_complete(struct inet6_dev *idev); /* * identify MLD packets for MLD filter exceptions */ @@ -184,29 +166,31 @@ static inline bool ipv6_is_mld(struct sk_buff *skb, int nexthdr, int offset) return false; } -extern void addrconf_prefix_rcv(struct net_device *dev, - u8 *opt, int len, bool sllao); +void addrconf_prefix_rcv(struct net_device *dev, + u8 *opt, int len, bool sllao); /* * anycast prototypes (anycast.c) */ -extern int ipv6_sock_ac_join(struct sock *sk,int ifindex, const struct in6_addr *addr); -extern int ipv6_sock_ac_drop(struct sock *sk,int ifindex, const struct in6_addr *addr); -extern void ipv6_sock_ac_close(struct sock *sk); - -extern int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr); -extern int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr); -extern bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev, +int ipv6_sock_ac_join(struct sock *sk, int ifindex, + const struct in6_addr *addr); +int ipv6_sock_ac_drop(struct sock *sk, int ifindex, + const struct in6_addr *addr); +void ipv6_sock_ac_close(struct sock *sk); + +int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr); +int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr); +bool ipv6_chk_acast_addr(struct net *net, struct 
net_device *dev, const struct in6_addr *addr); /* Device notifier */ -extern int register_inet6addr_notifier(struct notifier_block *nb); -extern int unregister_inet6addr_notifier(struct notifier_block *nb); -extern int inet6addr_notifier_call_chain(unsigned long val, void *v); +int register_inet6addr_notifier(struct notifier_block *nb); +int unregister_inet6addr_notifier(struct notifier_block *nb); +int inet6addr_notifier_call_chain(unsigned long val, void *v); -extern void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex, - struct ipv6_devconf *devconf); +void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex, + struct ipv6_devconf *devconf); /** * __in6_dev_get - get inet6_dev pointer from netdevice @@ -240,7 +224,7 @@ static inline struct inet6_dev *in6_dev_get(const struct net_device *dev) return idev; } -extern void in6_dev_finish_destroy(struct inet6_dev *idev); +void in6_dev_finish_destroy(struct inet6_dev *idev); static inline void in6_dev_put(struct inet6_dev *idev) { @@ -258,7 +242,7 @@ static inline void in6_dev_hold(struct inet6_dev *idev) atomic_inc(&idev->refcnt); } -extern void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp); +void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp); static inline void in6_ifa_put(struct inet6_ifaddr *ifp) { @@ -340,8 +324,8 @@ static inline bool ipv6_addr_is_solict_mult(const struct in6_addr *addr) } #ifdef CONFIG_PROC_FS -extern int if6_proc_init(void); -extern void if6_proc_exit(void); +int if6_proc_init(void); +void if6_proc_exit(void); #endif #endif -- cgit v0.10.2 From b60a8280ba2a34351dd8b98664df3785c27528dd Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 31 Jul 2013 17:31:33 -0700 Subject: af_unix.h: Remove extern from function prototypes There are a mix of function prototypes with and without extern in the kernel sources. Standardize on not using extern for function prototypes. Function prototypes don't need to be written with extern. extern is assumed by the compiler. Its use is as unnecessary as using auto to declare automatic/local variables in a block. Reflow modified prototypes to 80 columns. Signed-off-by: Joe Perches Signed-off-by: David S. 
Miller diff --git a/include/net/af_unix.h b/include/net/af_unix.h index dbdfd2b..05442df 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -6,12 +6,12 @@ #include #include -extern void unix_inflight(struct file *fp); -extern void unix_notinflight(struct file *fp); -extern void unix_gc(void); -extern void wait_for_unix_gc(void); -extern struct sock *unix_get_socket(struct file *filp); -extern struct sock *unix_peer_get(struct sock *); +void unix_inflight(struct file *fp); +void unix_notinflight(struct file *fp); +void unix_gc(void); +void wait_for_unix_gc(void); +struct sock *unix_get_socket(struct file *filp); +struct sock *unix_peer_get(struct sock *); #define UNIX_HASH_SIZE 256 #define UNIX_HASH_BITS 8 @@ -71,8 +71,8 @@ long unix_inq_len(struct sock *sk); long unix_outq_len(struct sock *sk); #ifdef CONFIG_SYSCTL -extern int unix_sysctl_register(struct net *net); -extern void unix_sysctl_unregister(struct net *net); +int unix_sysctl_register(struct net *net); +void unix_sysctl_unregister(struct net *net); #else static inline int unix_sysctl_register(struct net *net) { return 0; } static inline void unix_sysctl_unregister(struct net *net) {} -- cgit v0.10.2 From cd2cf63a567fb45be445ccc0aed8e551d86f0314 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 31 Jul 2013 17:31:34 -0700 Subject: af_rxrpc.h: Remove extern from function prototypes There are a mix of function prototypes with and without extern in the kernel sources. Standardize on not using extern for function prototypes. Function prototypes don't need to be written with extern. extern is assumed by the compiler. Its use is as unnecessary as using auto to declare automatic/local variables in a block. Reflow modified prototypes to 80 columns. Signed-off-by: Joe Perches Signed-off-by: David S. 
Miller diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h index 03e6e94..e797d45 100644 --- a/include/net/af_rxrpc.h +++ b/include/net/af_rxrpc.h @@ -31,24 +31,21 @@ enum { typedef void (*rxrpc_interceptor_t)(struct sock *, unsigned long, struct sk_buff *); -extern void rxrpc_kernel_intercept_rx_messages(struct socket *, - rxrpc_interceptor_t); -extern struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *, - struct sockaddr_rxrpc *, - struct key *, - unsigned long, - gfp_t); -extern int rxrpc_kernel_send_data(struct rxrpc_call *, struct msghdr *, - size_t); -extern void rxrpc_kernel_abort_call(struct rxrpc_call *, u32); -extern void rxrpc_kernel_end_call(struct rxrpc_call *); -extern bool rxrpc_kernel_is_data_last(struct sk_buff *); -extern u32 rxrpc_kernel_get_abort_code(struct sk_buff *); -extern int rxrpc_kernel_get_error_number(struct sk_buff *); -extern void rxrpc_kernel_data_delivered(struct sk_buff *); -extern void rxrpc_kernel_free_skb(struct sk_buff *); -extern struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *, - unsigned long); -extern int rxrpc_kernel_reject_call(struct socket *); +void rxrpc_kernel_intercept_rx_messages(struct socket *, rxrpc_interceptor_t); +struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *, + struct sockaddr_rxrpc *, + struct key *, + unsigned long, + gfp_t); +int rxrpc_kernel_send_data(struct rxrpc_call *, struct msghdr *, size_t); +void rxrpc_kernel_abort_call(struct rxrpc_call *, u32); +void rxrpc_kernel_end_call(struct rxrpc_call *); +bool rxrpc_kernel_is_data_last(struct sk_buff *); +u32 rxrpc_kernel_get_abort_code(struct sk_buff *); +int rxrpc_kernel_get_error_number(struct sk_buff *); +void rxrpc_kernel_data_delivered(struct sk_buff *); +void rxrpc_kernel_free_skb(struct sk_buff *); +struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *, unsigned long); +int rxrpc_kernel_reject_call(struct socket *); #endif /* _NET_RXRPC_H */ -- cgit v0.10.2 From 90972b22116bc22181717517ad5b0c7043edb178 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 31 Jul 2013 17:31:35 -0700 Subject: arp/neighbour.h: Remove extern from function prototypes There are a mix of function prototypes with and without extern in the kernel sources. Standardize on not using extern for function prototypes. Function prototypes don't need to be written with extern. extern is assumed by the compiler. Its use is as unnecessary as using auto to declare automatic/local variables in a block. Reflow modified prototypes to 80 columns. Signed-off-by: Joe Perches Signed-off-by: David S. 
Miller diff --git a/include/net/arp.h b/include/net/arp.h index b630dae..7509d9d 100644 --- a/include/net/arp.h +++ b/include/net/arp.h @@ -46,22 +46,22 @@ static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32 return n; } -extern void arp_init(void); -extern int arp_find(unsigned char *haddr, struct sk_buff *skb); -extern int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg); -extern void arp_send(int type, int ptype, __be32 dest_ip, - struct net_device *dev, __be32 src_ip, - const unsigned char *dest_hw, - const unsigned char *src_hw, const unsigned char *th); -extern int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir); -extern void arp_ifdown(struct net_device *dev); +void arp_init(void); +int arp_find(unsigned char *haddr, struct sk_buff *skb); +int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg); +void arp_send(int type, int ptype, __be32 dest_ip, + struct net_device *dev, __be32 src_ip, + const unsigned char *dest_hw, + const unsigned char *src_hw, const unsigned char *th); +int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir); +void arp_ifdown(struct net_device *dev); -extern struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, - struct net_device *dev, __be32 src_ip, - const unsigned char *dest_hw, - const unsigned char *src_hw, - const unsigned char *target_hw); -extern void arp_xmit(struct sk_buff *skb); +struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, + struct net_device *dev, __be32 src_ip, + const unsigned char *dest_hw, + const unsigned char *src_hw, + const unsigned char *target_hw); +void arp_xmit(struct sk_buff *skb); int arp_invalidate(struct net_device *dev, __be32 ip); #endif /* _ARP_H */ diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 7e748ad..536501a 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -195,68 +195,67 @@ static inline void *neighbour_priv(const struct neighbour *n) #define NEIGH_UPDATE_F_ISROUTER 0x40000000 #define NEIGH_UPDATE_F_ADMIN 0x80000000 -extern void neigh_table_init(struct neigh_table *tbl); -extern int neigh_table_clear(struct neigh_table *tbl); -extern struct neighbour * neigh_lookup(struct neigh_table *tbl, - const void *pkey, - struct net_device *dev); -extern struct neighbour * neigh_lookup_nodev(struct neigh_table *tbl, - struct net *net, - const void *pkey); -extern struct neighbour * __neigh_create(struct neigh_table *tbl, - const void *pkey, - struct net_device *dev, - bool want_ref); +void neigh_table_init(struct neigh_table *tbl); +int neigh_table_clear(struct neigh_table *tbl); +struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey, + struct net_device *dev); +struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net, + const void *pkey); +struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey, + struct net_device *dev, bool want_ref); static inline struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey, struct net_device *dev) { return __neigh_create(tbl, pkey, dev, true); } -extern void neigh_destroy(struct neighbour *neigh); -extern int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb); -extern int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, - u32 flags); -extern void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev); -extern int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev); -extern int neigh_resolve_output(struct neighbour 
*neigh, struct sk_buff *skb); -extern int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb); -extern int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb); -extern int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb); -extern struct neighbour *neigh_event_ns(struct neigh_table *tbl, +void neigh_destroy(struct neighbour *neigh); +int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb); +int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags); +void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev); +int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev); +int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb); +int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb); +int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb); +int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb); +struct neighbour *neigh_event_ns(struct neigh_table *tbl, u8 *lladdr, void *saddr, struct net_device *dev); -extern struct neigh_parms *neigh_parms_alloc(struct net_device *dev, struct neigh_table *tbl); -extern void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms); +struct neigh_parms *neigh_parms_alloc(struct net_device *dev, + struct neigh_table *tbl); +void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms); static inline -struct net *neigh_parms_net(const struct neigh_parms *parms) +struct net *neigh_parms_net(const struct neigh_parms *parms) { return read_pnet(&parms->net); } -extern unsigned long neigh_rand_reach_time(unsigned long base); +unsigned long neigh_rand_reach_time(unsigned long base); -extern void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p, - struct sk_buff *skb); -extern struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, struct net *net, const void *key, struct net_device *dev, int creat); -extern struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl, - struct net *net, - const void *key, - struct net_device *dev); -extern int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key, struct net_device *dev); +void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p, + struct sk_buff *skb); +struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, struct net *net, + const void *key, struct net_device *dev, + int creat); +struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl, struct net *net, + const void *key, struct net_device *dev); +int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key, + struct net_device *dev); -static inline -struct net *pneigh_net(const struct pneigh_entry *pneigh) +static inline struct net *pneigh_net(const struct pneigh_entry *pneigh) { return read_pnet(&pneigh->net); } -extern void neigh_app_ns(struct neighbour *n); -extern void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie); -extern void __neigh_for_each_release(struct neigh_table *tbl, int (*cb)(struct neighbour *)); -extern void pneigh_for_each(struct neigh_table *tbl, void (*cb)(struct pneigh_entry *)); +void neigh_app_ns(struct neighbour *n); +void neigh_for_each(struct neigh_table *tbl, + void (*cb)(struct neighbour *, void *), void *cookie); +void __neigh_for_each_release(struct neigh_table *tbl, + int (*cb)(struct neighbour *)); +void pneigh_for_each(struct neigh_table *tbl, + void (*cb)(struct pneigh_entry *)); struct neigh_seq_state { struct seq_net_private 
p; @@ -270,15 +269,14 @@ struct neigh_seq_state { #define NEIGH_SEQ_IS_PNEIGH 0x00000002 #define NEIGH_SEQ_SKIP_NOARP 0x00000004 }; -extern void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *, unsigned int); -extern void *neigh_seq_next(struct seq_file *, void *, loff_t *); -extern void neigh_seq_stop(struct seq_file *, void *); - -extern int neigh_sysctl_register(struct net_device *dev, - struct neigh_parms *p, - char *p_name, - proc_handler *proc_handler); -extern void neigh_sysctl_unregister(struct neigh_parms *p); +void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *, + unsigned int); +void *neigh_seq_next(struct seq_file *, void *, loff_t *); +void neigh_seq_stop(struct seq_file *, void *); + +int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, + char *p_name, proc_handler *proc_handler); +void neigh_sysctl_unregister(struct neigh_parms *p); static inline void __neigh_parms_put(struct neigh_parms *parms) { -- cgit v0.10.2 From c1d8f8041cd0357643d5d051d1425a17b75e9d2c Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 31 Jul 2013 17:31:36 -0700 Subject: ax25.h: Remove extern from function prototypes There are a mix of function prototypes with and without extern in the kernel sources. Standardize on not using extern for function prototypes. Function prototypes don't need to be written with extern. extern is assumed by the compiler. Its use is as unnecessary as using auto to declare automatic/local variables in a block. Reflow modified prototypes to 80 columns. Signed-off-by: Joe Perches Signed-off-by: David S. Miller diff --git a/include/net/ax25.h b/include/net/ax25.h index 89ed9ac..bf0396e 100644 --- a/include/net/ax25.h +++ b/include/net/ax25.h @@ -195,7 +195,7 @@ static inline void ax25_hold_route(ax25_route *ax25_rt) atomic_inc(&ax25_rt->refcount); } -extern void __ax25_put_route(ax25_route *ax25_rt); +void __ax25_put_route(ax25_route *ax25_rt); static inline void ax25_put_route(ax25_route *ax25_rt) { @@ -272,30 +272,31 @@ static inline __be16 ax25_type_trans(struct sk_buff *skb, struct net_device *dev /* af_ax25.c */ extern struct hlist_head ax25_list; extern spinlock_t ax25_list_lock; -extern void ax25_cb_add(ax25_cb *); +void ax25_cb_add(ax25_cb *); struct sock *ax25_find_listener(ax25_address *, int, struct net_device *, int); struct sock *ax25_get_socket(ax25_address *, ax25_address *, int); -extern ax25_cb *ax25_find_cb(ax25_address *, ax25_address *, ax25_digi *, struct net_device *); -extern void ax25_send_to_raw(ax25_address *, struct sk_buff *, int); -extern void ax25_destroy_socket(ax25_cb *); -extern ax25_cb * __must_check ax25_create_cb(void); -extern void ax25_fillin_cb(ax25_cb *, ax25_dev *); -extern struct sock *ax25_make_new(struct sock *, struct ax25_dev *); +ax25_cb *ax25_find_cb(ax25_address *, ax25_address *, ax25_digi *, + struct net_device *); +void ax25_send_to_raw(ax25_address *, struct sk_buff *, int); +void ax25_destroy_socket(ax25_cb *); +ax25_cb * __must_check ax25_create_cb(void); +void ax25_fillin_cb(ax25_cb *, ax25_dev *); +struct sock *ax25_make_new(struct sock *, struct ax25_dev *); /* ax25_addr.c */ extern const ax25_address ax25_bcast; extern const ax25_address ax25_defaddr; extern const ax25_address null_ax25_address; -extern char *ax2asc(char *buf, const ax25_address *); -extern void asc2ax(ax25_address *addr, const char *callsign); -extern int ax25cmp(const ax25_address *, const ax25_address *); -extern int ax25digicmp(const ax25_digi *, const ax25_digi *); -extern const unsigned 
char *ax25_addr_parse(const unsigned char *, int, +char *ax2asc(char *buf, const ax25_address *); +void asc2ax(ax25_address *addr, const char *callsign); +int ax25cmp(const ax25_address *, const ax25_address *); +int ax25digicmp(const ax25_digi *, const ax25_digi *); +const unsigned char *ax25_addr_parse(const unsigned char *, int, ax25_address *, ax25_address *, ax25_digi *, int *, int *); -extern int ax25_addr_build(unsigned char *, const ax25_address *, - const ax25_address *, const ax25_digi *, int, int); -extern int ax25_addr_size(const ax25_digi *); -extern void ax25_digi_invert(const ax25_digi *, ax25_digi *); +int ax25_addr_build(unsigned char *, const ax25_address *, + const ax25_address *, const ax25_digi *, int, int); +int ax25_addr_size(const ax25_digi *); +void ax25_digi_invert(const ax25_digi *, ax25_digi *); /* ax25_dev.c */ extern ax25_dev *ax25_dev_list; @@ -306,33 +307,33 @@ static inline ax25_dev *ax25_dev_ax25dev(struct net_device *dev) return dev->ax25_ptr; } -extern ax25_dev *ax25_addr_ax25dev(ax25_address *); -extern void ax25_dev_device_up(struct net_device *); -extern void ax25_dev_device_down(struct net_device *); -extern int ax25_fwd_ioctl(unsigned int, struct ax25_fwd_struct *); -extern struct net_device *ax25_fwd_dev(struct net_device *); -extern void ax25_dev_free(void); +ax25_dev *ax25_addr_ax25dev(ax25_address *); +void ax25_dev_device_up(struct net_device *); +void ax25_dev_device_down(struct net_device *); +int ax25_fwd_ioctl(unsigned int, struct ax25_fwd_struct *); +struct net_device *ax25_fwd_dev(struct net_device *); +void ax25_dev_free(void); /* ax25_ds_in.c */ -extern int ax25_ds_frame_in(ax25_cb *, struct sk_buff *, int); +int ax25_ds_frame_in(ax25_cb *, struct sk_buff *, int); /* ax25_ds_subr.c */ -extern void ax25_ds_nr_error_recovery(ax25_cb *); -extern void ax25_ds_enquiry_response(ax25_cb *); -extern void ax25_ds_establish_data_link(ax25_cb *); -extern void ax25_dev_dama_off(ax25_dev *); -extern void ax25_dama_on(ax25_cb *); -extern void ax25_dama_off(ax25_cb *); +void ax25_ds_nr_error_recovery(ax25_cb *); +void ax25_ds_enquiry_response(ax25_cb *); +void ax25_ds_establish_data_link(ax25_cb *); +void ax25_dev_dama_off(ax25_dev *); +void ax25_dama_on(ax25_cb *); +void ax25_dama_off(ax25_cb *); /* ax25_ds_timer.c */ -extern void ax25_ds_setup_timer(ax25_dev *); -extern void ax25_ds_set_timer(ax25_dev *); -extern void ax25_ds_del_timer(ax25_dev *); -extern void ax25_ds_timer(ax25_cb *); -extern void ax25_ds_t1_timeout(ax25_cb *); -extern void ax25_ds_heartbeat_expiry(ax25_cb *); -extern void ax25_ds_t3timer_expiry(ax25_cb *); -extern void ax25_ds_idletimer_expiry(ax25_cb *); +void ax25_ds_setup_timer(ax25_dev *); +void ax25_ds_set_timer(ax25_dev *); +void ax25_ds_del_timer(ax25_dev *); +void ax25_ds_timer(ax25_cb *); +void ax25_ds_t1_timeout(ax25_cb *); +void ax25_ds_heartbeat_expiry(ax25_cb *); +void ax25_ds_t3timer_expiry(ax25_cb *); +void ax25_ds_idletimer_expiry(ax25_cb *); /* ax25_iface.c */ @@ -342,107 +343,109 @@ struct ax25_protocol { int (*func)(struct sk_buff *, ax25_cb *); }; -extern void ax25_register_pid(struct ax25_protocol *ap); -extern void ax25_protocol_release(unsigned int); +void ax25_register_pid(struct ax25_protocol *ap); +void ax25_protocol_release(unsigned int); struct ax25_linkfail { struct hlist_node lf_node; void (*func)(ax25_cb *, int); }; -extern void ax25_linkfail_register(struct ax25_linkfail *lf); -extern void ax25_linkfail_release(struct ax25_linkfail *lf); -extern int __must_check ax25_listen_register(ax25_address *, 
- struct net_device *); -extern void ax25_listen_release(ax25_address *, struct net_device *); -extern int (*ax25_protocol_function(unsigned int))(struct sk_buff *, ax25_cb *); -extern int ax25_listen_mine(ax25_address *, struct net_device *); -extern void ax25_link_failed(ax25_cb *, int); -extern int ax25_protocol_is_registered(unsigned int); +void ax25_linkfail_register(struct ax25_linkfail *lf); +void ax25_linkfail_release(struct ax25_linkfail *lf); +int __must_check ax25_listen_register(ax25_address *, struct net_device *); +void ax25_listen_release(ax25_address *, struct net_device *); +int(*ax25_protocol_function(unsigned int))(struct sk_buff *, ax25_cb *); +int ax25_listen_mine(ax25_address *, struct net_device *); +void ax25_link_failed(ax25_cb *, int); +int ax25_protocol_is_registered(unsigned int); /* ax25_in.c */ -extern int ax25_rx_iframe(ax25_cb *, struct sk_buff *); -extern int ax25_kiss_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); +int ax25_rx_iframe(ax25_cb *, struct sk_buff *); +int ax25_kiss_rcv(struct sk_buff *, struct net_device *, struct packet_type *, + struct net_device *); /* ax25_ip.c */ -extern int ax25_hard_header(struct sk_buff *, struct net_device *, - unsigned short, const void *, - const void *, unsigned int); -extern int ax25_rebuild_header(struct sk_buff *); +int ax25_hard_header(struct sk_buff *, struct net_device *, unsigned short, + const void *, const void *, unsigned int); +int ax25_rebuild_header(struct sk_buff *); extern const struct header_ops ax25_header_ops; /* ax25_out.c */ -extern ax25_cb *ax25_send_frame(struct sk_buff *, int, ax25_address *, ax25_address *, ax25_digi *, struct net_device *); -extern void ax25_output(ax25_cb *, int, struct sk_buff *); -extern void ax25_kick(ax25_cb *); -extern void ax25_transmit_buffer(ax25_cb *, struct sk_buff *, int); -extern void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev); -extern int ax25_check_iframes_acked(ax25_cb *, unsigned short); +ax25_cb *ax25_send_frame(struct sk_buff *, int, ax25_address *, ax25_address *, + ax25_digi *, struct net_device *); +void ax25_output(ax25_cb *, int, struct sk_buff *); +void ax25_kick(ax25_cb *); +void ax25_transmit_buffer(ax25_cb *, struct sk_buff *, int); +void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev); +int ax25_check_iframes_acked(ax25_cb *, unsigned short); /* ax25_route.c */ -extern void ax25_rt_device_down(struct net_device *); -extern int ax25_rt_ioctl(unsigned int, void __user *); +void ax25_rt_device_down(struct net_device *); +int ax25_rt_ioctl(unsigned int, void __user *); extern const struct file_operations ax25_route_fops; -extern ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev); -extern int ax25_rt_autobind(ax25_cb *, ax25_address *); -extern struct sk_buff *ax25_rt_build_path(struct sk_buff *, ax25_address *, ax25_address *, ax25_digi *); -extern void ax25_rt_free(void); +ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev); +int ax25_rt_autobind(ax25_cb *, ax25_address *); +struct sk_buff *ax25_rt_build_path(struct sk_buff *, ax25_address *, + ax25_address *, ax25_digi *); +void ax25_rt_free(void); /* ax25_std_in.c */ -extern int ax25_std_frame_in(ax25_cb *, struct sk_buff *, int); +int ax25_std_frame_in(ax25_cb *, struct sk_buff *, int); /* ax25_std_subr.c */ -extern void ax25_std_nr_error_recovery(ax25_cb *); -extern void ax25_std_establish_data_link(ax25_cb *); -extern void ax25_std_transmit_enquiry(ax25_cb *); -extern void 
ax25_std_enquiry_response(ax25_cb *); -extern void ax25_std_timeout_response(ax25_cb *); +void ax25_std_nr_error_recovery(ax25_cb *); +void ax25_std_establish_data_link(ax25_cb *); +void ax25_std_transmit_enquiry(ax25_cb *); +void ax25_std_enquiry_response(ax25_cb *); +void ax25_std_timeout_response(ax25_cb *); /* ax25_std_timer.c */ -extern void ax25_std_heartbeat_expiry(ax25_cb *); -extern void ax25_std_t1timer_expiry(ax25_cb *); -extern void ax25_std_t2timer_expiry(ax25_cb *); -extern void ax25_std_t3timer_expiry(ax25_cb *); -extern void ax25_std_idletimer_expiry(ax25_cb *); +void ax25_std_heartbeat_expiry(ax25_cb *); +void ax25_std_t1timer_expiry(ax25_cb *); +void ax25_std_t2timer_expiry(ax25_cb *); +void ax25_std_t3timer_expiry(ax25_cb *); +void ax25_std_idletimer_expiry(ax25_cb *); /* ax25_subr.c */ -extern void ax25_clear_queues(ax25_cb *); -extern void ax25_frames_acked(ax25_cb *, unsigned short); -extern void ax25_requeue_frames(ax25_cb *); -extern int ax25_validate_nr(ax25_cb *, unsigned short); -extern int ax25_decode(ax25_cb *, struct sk_buff *, int *, int *, int *); -extern void ax25_send_control(ax25_cb *, int, int, int); -extern void ax25_return_dm(struct net_device *, ax25_address *, ax25_address *, ax25_digi *); -extern void ax25_calculate_t1(ax25_cb *); -extern void ax25_calculate_rtt(ax25_cb *); -extern void ax25_disconnect(ax25_cb *, int); +void ax25_clear_queues(ax25_cb *); +void ax25_frames_acked(ax25_cb *, unsigned short); +void ax25_requeue_frames(ax25_cb *); +int ax25_validate_nr(ax25_cb *, unsigned short); +int ax25_decode(ax25_cb *, struct sk_buff *, int *, int *, int *); +void ax25_send_control(ax25_cb *, int, int, int); +void ax25_return_dm(struct net_device *, ax25_address *, ax25_address *, + ax25_digi *); +void ax25_calculate_t1(ax25_cb *); +void ax25_calculate_rtt(ax25_cb *); +void ax25_disconnect(ax25_cb *, int); /* ax25_timer.c */ -extern void ax25_setup_timers(ax25_cb *); -extern void ax25_start_heartbeat(ax25_cb *); -extern void ax25_start_t1timer(ax25_cb *); -extern void ax25_start_t2timer(ax25_cb *); -extern void ax25_start_t3timer(ax25_cb *); -extern void ax25_start_idletimer(ax25_cb *); -extern void ax25_stop_heartbeat(ax25_cb *); -extern void ax25_stop_t1timer(ax25_cb *); -extern void ax25_stop_t2timer(ax25_cb *); -extern void ax25_stop_t3timer(ax25_cb *); -extern void ax25_stop_idletimer(ax25_cb *); -extern int ax25_t1timer_running(ax25_cb *); -extern unsigned long ax25_display_timer(struct timer_list *); +void ax25_setup_timers(ax25_cb *); +void ax25_start_heartbeat(ax25_cb *); +void ax25_start_t1timer(ax25_cb *); +void ax25_start_t2timer(ax25_cb *); +void ax25_start_t3timer(ax25_cb *); +void ax25_start_idletimer(ax25_cb *); +void ax25_stop_heartbeat(ax25_cb *); +void ax25_stop_t1timer(ax25_cb *); +void ax25_stop_t2timer(ax25_cb *); +void ax25_stop_t3timer(ax25_cb *); +void ax25_stop_idletimer(ax25_cb *); +int ax25_t1timer_running(ax25_cb *); +unsigned long ax25_display_timer(struct timer_list *); /* ax25_uid.c */ extern int ax25_uid_policy; -extern ax25_uid_assoc *ax25_findbyuid(kuid_t); -extern int __must_check ax25_uid_ioctl(int, struct sockaddr_ax25 *); +ax25_uid_assoc *ax25_findbyuid(kuid_t); +int __must_check ax25_uid_ioctl(int, struct sockaddr_ax25 *); extern const struct file_operations ax25_uid_fops; -extern void ax25_uid_free(void); +void ax25_uid_free(void); /* sysctl_net_ax25.c */ #ifdef CONFIG_SYSCTL -extern int ax25_register_dev_sysctl(ax25_dev *ax25_dev); -extern void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev); +int 
ax25_register_dev_sysctl(ax25_dev *ax25_dev); +void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev); #else static inline int ax25_register_dev_sysctl(ax25_dev *ax25_dev) { return 0; } static inline void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev) {} -- cgit v0.10.2 From 10dd9b7ce37e359066b9ab59592f81684360561a Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 31 Jul 2013 17:31:37 -0700 Subject: cfg80211.h/mac80211.h: Remove extern from function prototypes There are a mix of function prototypes with and without extern in the kernel sources. Standardize on not using extern for function prototypes. Function prototypes don't need to be written with extern. extern is assumed by the compiler. Its use is as unnecessary as using auto to declare automatic/local variables in a block. Reflow modified prototypes to 80 columns. Signed-off-by: Joe Perches Signed-off-by: David S. Miller diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 7b0730a..f49de28 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -2841,7 +2841,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv); * * Return: A non-negative wiphy index or a negative error code. */ -extern int wiphy_register(struct wiphy *wiphy); +int wiphy_register(struct wiphy *wiphy); /** * wiphy_unregister - deregister a wiphy from cfg80211 @@ -2852,14 +2852,14 @@ extern int wiphy_register(struct wiphy *wiphy); * pointer, but the call may sleep to wait for an outstanding * request that is being handled. */ -extern void wiphy_unregister(struct wiphy *wiphy); +void wiphy_unregister(struct wiphy *wiphy); /** * wiphy_free - free wiphy * * @wiphy: The wiphy to free */ -extern void wiphy_free(struct wiphy *wiphy); +void wiphy_free(struct wiphy *wiphy); /* internal structs */ struct cfg80211_conn; @@ -3014,14 +3014,14 @@ static inline void *wdev_priv(struct wireless_dev *wdev) * @band: band, necessary due to channel number overlap * Return: The corresponding frequency (in MHz), or 0 if the conversion failed. */ -extern int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band); +int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band); /** * ieee80211_frequency_to_channel - convert frequency to channel number * @freq: center frequency * Return: The corresponding channel, or 0 if the conversion failed. */ -extern int ieee80211_frequency_to_channel(int freq); +int ieee80211_frequency_to_channel(int freq); /* * Name indirection necessary because the ieee80211 code also has @@ -3030,8 +3030,8 @@ extern int ieee80211_frequency_to_channel(int freq); * to include both header files you'll (rightfully!) get a symbol * clash. 
*/ -extern struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy, - int freq); +struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy, + int freq); /** * ieee80211_get_channel - get channel struct from wiphy for specified frequency * @wiphy: the struct wiphy to get the channel for @@ -3141,13 +3141,14 @@ struct ieee80211_radiotap_iterator { int _reset_on_ext; }; -extern int ieee80211_radiotap_iterator_init( - struct ieee80211_radiotap_iterator *iterator, - struct ieee80211_radiotap_header *radiotap_header, - int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns); +int +ieee80211_radiotap_iterator_init(struct ieee80211_radiotap_iterator *iterator, + struct ieee80211_radiotap_header *radiotap_header, + int max_length, + const struct ieee80211_radiotap_vendor_namespaces *vns); -extern int ieee80211_radiotap_iterator_next( - struct ieee80211_radiotap_iterator *iterator); +int +ieee80211_radiotap_iterator_next(struct ieee80211_radiotap_iterator *iterator); extern const unsigned char rfc1042_header[6]; @@ -3307,7 +3308,7 @@ const u8 *cfg80211_find_vendor_ie(unsigned int oui, u8 oui_type, * * Return: 0 on success. -ENOMEM. */ -extern int regulatory_hint(struct wiphy *wiphy, const char *alpha2); +int regulatory_hint(struct wiphy *wiphy, const char *alpha2); /** * wiphy_apply_custom_regulatory - apply a custom driver regulatory domain @@ -3321,9 +3322,8 @@ extern int regulatory_hint(struct wiphy *wiphy, const char *alpha2); * default channel settings will be disregarded. If no rule is found for a * channel on the regulatory domain the channel will be disabled. */ -extern void wiphy_apply_custom_regulatory( - struct wiphy *wiphy, - const struct ieee80211_regdomain *regd); +void wiphy_apply_custom_regulatory(struct wiphy *wiphy, + const struct ieee80211_regdomain *regd); /** * freq_reg_info - get regulatory information for the given frequency diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 5b7a3da..e5f0290 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -2877,14 +2877,14 @@ enum ieee80211_tpt_led_trigger_flags { }; #ifdef CONFIG_MAC80211_LEDS -extern char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw); -extern char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw); -extern char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw); -extern char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw); -extern char *__ieee80211_create_tpt_led_trigger( - struct ieee80211_hw *hw, unsigned int flags, - const struct ieee80211_tpt_blink *blink_table, - unsigned int blink_table_len); +char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw); +char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw); +char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw); +char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw); +char *__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw, + unsigned int flags, + const struct ieee80211_tpt_blink *blink_table, + unsigned int blink_table_len); #endif /** * ieee80211_get_tx_led_name - get name of TX LED -- cgit v0.10.2 From 4fc7074703321e897cc05c0de858fba63e94e59f Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 31 Jul 2013 17:31:38 -0700 Subject: checksum: Remove extern from function prototypes There are a mix of function prototypes with and without extern in the kernel sources. Standardize on not using extern for function prototypes. Function prototypes don't need to be written with extern. extern is assumed by the compiler. 
Its use is as unnecessary as using auto to declare automatic/local variables in a block. Signed-off-by: Joe Perches Signed-off-by: David S. Miller diff --git a/include/net/checksum.h b/include/net/checksum.h index 600d1d7..8f59ca5 100644 --- a/include/net/checksum.h +++ b/include/net/checksum.h @@ -107,11 +107,11 @@ static inline void csum_replace2(__sum16 *sum, __be16 from, __be16 to) } struct sk_buff; -extern void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb, - __be32 from, __be32 to, int pseudohdr); -extern void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb, - const __be32 *from, const __be32 *to, - int pseudohdr); +void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb, + __be32 from, __be32 to, int pseudohdr); +void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb, + const __be32 *from, const __be32 *to, + int pseudohdr); static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb, __be16 from, __be16 to, -- cgit v0.10.2 From 378307217ed9c318212ec3050d38d0e34b77604c Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 31 Jul 2013 17:31:39 -0700 Subject: cls_cgroup.h netprio_cgroup.h: Remove extern from function prototypes There are a mix of function prototypes with and without extern in the kernel sources. Standardize on not using extern for function prototypes. Function prototypes don't need to be written with extern. extern is assumed by the compiler. Its use is as unnecessary as using auto to declare automatic/local variables in a block. Signed-off-by: Joe Perches Signed-off-by: David S. Miller diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h index 0fee061..f55c145 100644 --- a/include/net/cls_cgroup.h +++ b/include/net/cls_cgroup.h @@ -24,7 +24,7 @@ struct cgroup_cls_state u32 classid; }; -extern void sock_update_classid(struct sock *sk); +void sock_update_classid(struct sock *sk); #if IS_BUILTIN(CONFIG_NET_CLS_CGROUP) static inline u32 task_cls_classid(struct task_struct *p) diff --git a/include/net/netprio_cgroup.h b/include/net/netprio_cgroup.h index 50ab8c2..379dd5d 100644 --- a/include/net/netprio_cgroup.h +++ b/include/net/netprio_cgroup.h @@ -29,7 +29,7 @@ struct cgroup_netprio_state { struct cgroup_subsys_state css; }; -extern void sock_update_netprioidx(struct sock *sk); +void sock_update_netprioidx(struct sock *sk); #if IS_BUILTIN(CONFIG_NETPRIO_CGROUP) -- cgit v0.10.2 From 46b3a421903aa2a4bc9731ca4663cee3ea869dab Mon Sep 17 00:00:00 2001 From: Hannes Frederic Sowa Date: Thu, 1 Aug 2013 08:54:47 +0200 Subject: ipv6: fib6_rules should return exact return value With the addition of the suppress operation (7764a45a8f1fe74d4f7d301eaca2e558e7e2831a ("fib_rules: add .suppress operation") we rely on accurate error reporting of the fib_rules.actions. fib6_rule_action always returned -EAGAIN in case we could not find a matching route and 0 if a rule was matched. This also included a match for blackhole or prohibited rule actions which could get suppressed by the new logic. So adapt fib6_rule_action to always return the correct error code as its counterpart fib4_rule_action does. This also fixes a possiblity of nullptr-deref where we don't find a table, thus rt == NULL. Because the condition rt != ip6_null_entry still holdes it seems we could later get a nullptr bug on dereference rt->dst. v2: a) Fixed a brain fart in the commit msg (the rule => a table, etc). No changes to the patch. Cc: Stefan Tomanek Cc: Hideaki YOSHIFUJI Signed-off-by: Hannes Frederic Sowa Signed-off-by: David S. 
Miller diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index e64e6a5..554a4fb 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -55,26 +55,33 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, struct fib6_table *table; struct net *net = rule->fr_net; pol_lookup_t lookup = arg->lookup_ptr; + int err = 0; switch (rule->action) { case FR_ACT_TO_TBL: break; case FR_ACT_UNREACHABLE: + err = -ENETUNREACH; rt = net->ipv6.ip6_null_entry; goto discard_pkt; default: case FR_ACT_BLACKHOLE: + err = -EINVAL; rt = net->ipv6.ip6_blk_hole_entry; goto discard_pkt; case FR_ACT_PROHIBIT: + err = -EACCES; rt = net->ipv6.ip6_prohibit_entry; goto discard_pkt; } table = fib6_get_table(net, rule->table); - if (table) - rt = lookup(net, table, flp6, flags); + if (!table) { + err = -EAGAIN; + goto out; + } + rt = lookup(net, table, flp6, flags); if (rt != net->ipv6.ip6_null_entry) { struct fib6_rule *r = (struct fib6_rule *)rule; @@ -101,6 +108,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, } again: ip6_rt_put(rt); + err = -EAGAIN; rt = NULL; goto out; @@ -108,7 +116,7 @@ discard_pkt: dst_hold(&rt->dst); out: arg->result = rt; - return rt == NULL ? -EAGAIN : 0; + return err; } static bool fib6_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg) -- cgit v0.10.2 From e47f2509e5f182f4df144406de6f2bc78179d57e Mon Sep 17 00:00:00 2001 From: Fabio Baltieri Date: Thu, 25 Jul 2013 12:00:26 +0200 Subject: mac80211: use oneshot blink API for LED triggers Change mac80211 LED trigger code to use the generic led_trigger_blink_oneshot() API for transmit and receive activity indication. This gives a better feedback to the user, as with the new API each activity event results in a visible blink, while a constant traffic results in a continuous blink at constant rate. Signed-off-by: Fabio Baltieri [fix LED disabled build error] Signed-off-by: Johannes Berg diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 3d32df1..d779383 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1094,7 +1094,6 @@ struct ieee80211_local { u32 dot11TransmittedFrameCount; #ifdef CONFIG_MAC80211_LEDS - int tx_led_counter, rx_led_counter; struct led_trigger *tx_led, *rx_led, *assoc_led, *radio_led; struct tpt_led_trigger *tpt_led_trigger; char tx_led_name[32], rx_led_name[32], diff --git a/net/mac80211/led.c b/net/mac80211/led.c index bcffa69..e2b8364 100644 --- a/net/mac80211/led.c +++ b/net/mac80211/led.c @@ -12,27 +12,22 @@ #include #include "led.h" +#define MAC80211_BLINK_DELAY 50 /* ms */ + void ieee80211_led_rx(struct ieee80211_local *local) { + unsigned long led_delay = MAC80211_BLINK_DELAY; if (unlikely(!local->rx_led)) return; - if (local->rx_led_counter++ % 2 == 0) - led_trigger_event(local->rx_led, LED_OFF); - else - led_trigger_event(local->rx_led, LED_FULL); + led_trigger_blink_oneshot(local->rx_led, &led_delay, &led_delay, 0); } -/* q is 1 if a packet was enqueued, 0 if it has been transmitted */ -void ieee80211_led_tx(struct ieee80211_local *local, int q) +void ieee80211_led_tx(struct ieee80211_local *local) { + unsigned long led_delay = MAC80211_BLINK_DELAY; if (unlikely(!local->tx_led)) return; - /* not sure how this is supposed to work ... 
*/ - local->tx_led_counter += 2*q-1; - if (local->tx_led_counter % 2 == 0) - led_trigger_event(local->tx_led, LED_OFF); - else - led_trigger_event(local->tx_led, LED_FULL); + led_trigger_blink_oneshot(local->tx_led, &led_delay, &led_delay, 0); } void ieee80211_led_assoc(struct ieee80211_local *local, bool associated) diff --git a/net/mac80211/led.h b/net/mac80211/led.h index e0275d9..89f4344 100644 --- a/net/mac80211/led.h +++ b/net/mac80211/led.h @@ -13,7 +13,7 @@ #ifdef CONFIG_MAC80211_LEDS void ieee80211_led_rx(struct ieee80211_local *local); -void ieee80211_led_tx(struct ieee80211_local *local, int q); +void ieee80211_led_tx(struct ieee80211_local *local); void ieee80211_led_assoc(struct ieee80211_local *local, bool associated); void ieee80211_led_radio(struct ieee80211_local *local, @@ -27,7 +27,7 @@ void ieee80211_mod_tpt_led_trig(struct ieee80211_local *local, static inline void ieee80211_led_rx(struct ieee80211_local *local) { } -static inline void ieee80211_led_tx(struct ieee80211_local *local, int q) +static inline void ieee80211_led_tx(struct ieee80211_local *local) { } static inline void ieee80211_led_assoc(struct ieee80211_local *local, diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 6ad4c14..8eabfd9 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -564,7 +564,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) rcu_read_unlock(); - ieee80211_led_tx(local, 0); + ieee80211_led_tx(local); /* SNMP counters * Fragments are passed to low-level drivers as separate skbs, so these diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index be4d3ca..f65873f 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1300,7 +1300,6 @@ static bool __ieee80211_tx(struct ieee80211_local *local, txpending); ieee80211_tpt_led_trig_tx(local, fc, led_len); - ieee80211_led_tx(local, 1); WARN_ON_ONCE(!skb_queue_empty(skbs)); -- cgit v0.10.2 From dad9defd28912dc408763d461b9a6b1a762c07ea Mon Sep 17 00:00:00 2001 From: Antonio Quartulli Date: Fri, 26 Jul 2013 21:15:02 +0200 Subject: mac80211: ibss - remove not authorized station earlier A station which is not authorized has to be purged earlier to give it a chance to re-try to establish an IBSS/RSN session soon. Set the timeout to 10 seconds. Some refactoring has also been done to allow the IBSS submodule to have its own expiring function. 
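The heart of the change is a two-tier inactivity check: authorized peers keep the usual 60 second limit, while peers that never completed the IBSS/RSN handshake are dropped after 10 seconds so that a fresh authentication attempt can start sooner. A minimal, self-contained sketch of that condition (hypothetical helper name, not part of the patch):

#include <linux/types.h>
#include <linux/jiffies.h>

/* Hypothetical helper: decide whether an IBSS peer should be purged.
 * Fully authorized peers get the long inactivity limit; peers stuck
 * before IEEE80211_STA_AUTHORIZED are expired after only 10 seconds.
 */
static bool ibss_sta_should_expire(unsigned long last_rx, bool authorized)
{
        unsigned long limit = authorized ? (60 * HZ) : (10 * HZ);

        return time_after(jiffies, last_rx + limit);
}
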
Reported-by: Simon Wunderlich Signed-off-by: Antonio Quartulli Signed-off-by: Johannes Berg diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 5e6836c..e08387c 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -30,6 +30,7 @@ #define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ) #define IEEE80211_IBSS_INACTIVITY_LIMIT (60 * HZ) +#define IEEE80211_IBSS_RSN_INACTIVITY_LIMIT (10 * HZ) #define IEEE80211_IBSS_MAX_STA_ENTRIES 128 @@ -740,6 +741,33 @@ static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata) return active; } +static void ieee80211_ibss_sta_expire(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct sta_info *sta, *tmp; + unsigned long exp_time = IEEE80211_IBSS_INACTIVITY_LIMIT; + unsigned long exp_rsn_time = IEEE80211_IBSS_RSN_INACTIVITY_LIMIT; + + mutex_lock(&local->sta_mtx); + + list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { + if (sdata != sta->sdata) + continue; + + if (time_after(jiffies, sta->last_rx + exp_time) || + (time_after(jiffies, sta->last_rx + exp_rsn_time) && + sta->sta_state != IEEE80211_STA_AUTHORIZED)) { + sta_dbg(sta->sdata, "expiring inactive %sSTA %pM\n", + sta->sta_state != IEEE80211_STA_AUTHORIZED ? + "not authorized " : "", sta->sta.addr); + + WARN_ON(__sta_info_destroy(sta)); + } + } + + mutex_unlock(&local->sta_mtx); +} + /* * This function is called with state == IEEE80211_IBSS_MLME_JOINED */ @@ -754,7 +782,7 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata) mod_timer(&ifibss->timer, round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL)); - ieee80211_sta_expire(sdata, IEEE80211_IBSS_INACTIVITY_LIMIT); + ieee80211_ibss_sta_expire(sdata); if (time_before(jiffies, ifibss->last_scan_completed + IEEE80211_IBSS_MERGE_INTERVAL)) -- cgit v0.10.2 From dcb7a6ce0a08012a17cca34711a02017abed554b Mon Sep 17 00:00:00 2001 From: Avinash Patil Date: Fri, 26 Jul 2013 17:02:29 -0700 Subject: ieee80211: add definition for interworking support IEEE802.11u interworking support is advertised via extended capabilities IE bit 31. This is 7th bit of 4th byte of extended capabilities. Signed-off-by: Avinash Patil Signed-off-by: Bing Zhao Signed-off-by: Johannes Berg diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index b0dc87a..22c9094 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1860,6 +1860,11 @@ enum ieee80211_tdls_actioncode { WLAN_TDLS_DISCOVERY_REQUEST = 10, }; +/* Interworking capabilities are set in 7th bit of 4th byte of the + * @WLAN_EID_EXT_CAPABILITY information element + */ +#define WLAN_EXT_CAPA4_INTERWORKING_ENABLED BIT(7) + /* * TDLS capabililites to be enabled in the 5th byte of the * @WLAN_EID_EXT_CAPABILITY information element -- cgit v0.10.2 From 9e2bc79bce58a1ce0005015c9351b3bcaaa02e5c Mon Sep 17 00:00:00 2001 From: Fred Zhou Date: Thu, 1 Aug 2013 14:16:28 +0800 Subject: ieee80211: add definition for 802.11ac information elements Add element IDs for Extended BSS Load, VHT TX Power Envelope, AID, and Quiet Channel. 
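Consumers can locate the new elements with the existing cfg80211_find_ie() helper; a short, purely illustrative example (the wrapper function and its name are hypothetical):

#include <net/cfg80211.h>

/* Hypothetical example: find the VHT TX Power Envelope element in an
 * IE buffer (e.g. the variable part of a beacon or probe response).
 * Returns a pointer to the element header (ID, length, data) or NULL.
 */
static const u8 *find_vht_tx_power_envelope(const u8 *ies, int ies_len)
{
        return cfg80211_find_ie(WLAN_EID_VHT_TX_POWER_ENVELOPE, ies, ies_len);
}
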
Signed-off-by: Fred Zhou Signed-off-by: Johannes Berg diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 22c9094..b3ce299 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1709,6 +1709,10 @@ enum ieee80211_eid { WLAN_EID_OPMODE_NOTIF = 199, WLAN_EID_WIDE_BW_CHANNEL_SWITCH = 194, WLAN_EID_CHANNEL_SWITCH_WRAPPER = 196, + WLAN_EID_EXTENDED_BSS_LOAD = 193, + WLAN_EID_VHT_TX_POWER_ENVELOPE = 195, + WLAN_EID_AID = 197, + WLAN_EID_QUIET_CHANNEL = 198, /* 802.11ad */ WLAN_EID_NON_TX_BSSID_CAP = 83, -- cgit v0.10.2 From a824131017a2c3c8f275c6eb46740cfb8a43f7c5 Mon Sep 17 00:00:00 2001 From: Karl Beldan Date: Sat, 27 Jul 2013 11:47:04 +0200 Subject: mac80211: report some VHT radiotap infos for tx status The radiotap VHT info is 12 bytes (required to be aligned on 2) : u16 known - IEEE80211_RADIOTAP_VHT_KNOWN_* u8 flags - IEEE80211_RADIOTAP_VHT_FLAG_* u8 bandwidth u8 mcs_nss[4] u8 coding u8 group_id u16 partial_aid ATM mac80211 can handle IEEE80211_RADIOTAP_VHT_KNOWN_{GI,BANDWIDTH} and mcs_nss[0] (i.e single user) in simple cases. This is more a placeholder to let sniffers give more clues for VHT, since we don't have yet the proper infrastructure/conventions in mac80211 for complete feedback (e.g consider dynamic BW). Signed-off-by: Karl Beldan Signed-off-by: Johannes Berg diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 8eabfd9..368837f 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -235,7 +235,8 @@ static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info) /* IEEE80211_RADIOTAP_RATE rate */ if (info->status.rates[0].idx >= 0 && - !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS)) + !(info->status.rates[0].flags & (IEEE80211_TX_RC_MCS | + IEEE80211_TX_RC_VHT_MCS))) len += 2; /* IEEE80211_RADIOTAP_TX_FLAGS */ @@ -244,16 +245,21 @@ static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info) /* IEEE80211_RADIOTAP_DATA_RETRIES */ len += 1; - /* IEEE80211_TX_RC_MCS */ - if (info->status.rates[0].idx >= 0 && - info->status.rates[0].flags & IEEE80211_TX_RC_MCS) - len += 3; + /* IEEE80211_RADIOTAP_MCS + * IEEE80211_RADIOTAP_VHT */ + if (info->status.rates[0].idx >= 0) { + if (info->status.rates[0].flags & IEEE80211_TX_RC_MCS) + len += 3; + else if (info->status.rates[0].flags & IEEE80211_TX_RC_VHT_MCS) + len = ALIGN(len, 2) + 12; + } return len; } static void -ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band *sband, +ieee80211_add_tx_radiotap_header(struct ieee80211_local *local, + struct ieee80211_supported_band *sband, struct sk_buff *skb, int retry_count, int rtap_len, int shift) { @@ -280,7 +286,8 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band *sband, /* IEEE80211_RADIOTAP_RATE */ if (info->status.rates[0].idx >= 0 && - !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS)) { + !(info->status.rates[0].flags & (IEEE80211_TX_RC_MCS | + IEEE80211_TX_RC_VHT_MCS))) { u16 rate; rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE); @@ -310,9 +317,12 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band *sband, *pos = retry_count; pos++; - /* IEEE80211_TX_RC_MCS */ - if (info->status.rates[0].idx >= 0 && - info->status.rates[0].flags & IEEE80211_TX_RC_MCS) { + if (info->status.rates[0].idx < 0) + return; + + /* IEEE80211_RADIOTAP_MCS + * IEEE80211_RADIOTAP_VHT */ + if (info->status.rates[0].flags & IEEE80211_TX_RC_MCS) { rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS); pos[0] = IEEE80211_RADIOTAP_MCS_HAVE_MCS | 
IEEE80211_RADIOTAP_MCS_HAVE_GI | @@ -325,8 +335,48 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band *sband, pos[1] |= IEEE80211_RADIOTAP_MCS_FMT_GF; pos[2] = info->status.rates[0].idx; pos += 3; - } + } else if (info->status.rates[0].flags & IEEE80211_TX_RC_VHT_MCS) { + u16 known = local->hw.radiotap_vht_details & + (IEEE80211_RADIOTAP_VHT_KNOWN_GI | + IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH); + + rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT); + + /* required alignment from rthdr */ + pos = (u8 *)rthdr + ALIGN(pos - (u8 *)rthdr, 2); + /* u16 known - IEEE80211_RADIOTAP_VHT_KNOWN_* */ + put_unaligned_le16(known, pos); + pos += 2; + + /* u8 flags - IEEE80211_RADIOTAP_VHT_FLAG_* */ + if (info->status.rates[0].flags & IEEE80211_TX_RC_SHORT_GI) + *pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI; + pos++; + + /* u8 bandwidth */ + if (info->status.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + *pos = 1; + else if (info->status.rates[0].flags & IEEE80211_TX_RC_80_MHZ_WIDTH) + *pos = 4; + else if (info->status.rates[0].flags & IEEE80211_TX_RC_160_MHZ_WIDTH) + *pos = 11; + else /* IEEE80211_TX_RC_{20_MHZ_WIDTH,FIXME:DUP_DATA} */ + *pos = 0; + pos++; + + /* u8 mcs_nss[4] */ + *pos = (ieee80211_rate_get_vht_mcs(&info->status.rates[0]) << 4) | + ieee80211_rate_get_vht_nss(&info->status.rates[0]); + pos += 4; + + /* u8 coding */ + pos++; + /* u8 group_id */ + pos++; + /* u16 partial_aid */ + pos += 2; + } } static void ieee80211_report_used_skb(struct ieee80211_local *local, @@ -631,8 +681,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) dev_kfree_skb(skb); return; } - ieee80211_add_tx_radiotap_header(sband, skb, retry_count, rtap_len, - shift); + ieee80211_add_tx_radiotap_header(local, sband, skb, retry_count, + rtap_len, shift); /* XXX: is this sufficient for BPF? */ skb_set_mac_header(skb, 0); -- cgit v0.10.2 From 933e4af4c590c63509ffadeb77fcd1d329ac9155 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Mon, 22 Jul 2013 12:41:39 -0300 Subject: can: flexcan: Use devm_ioremap_resource() Using devm_ioremap_resource() can make the code simpler and smaller. Also, place alloc_candev() after of_match_device() to make error handling easier. 
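For reference, the managed-mapping pattern the driver is converted to looks roughly like this in a probe function (simplified sketch with generic names, not the flexcan code itself):

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

/* Simplified sketch of the devm_ioremap_resource() pattern: the region
 * request and the mapping happen in one call, errors (including a NULL
 * resource) come back as ERR_PTR values, and everything is released
 * automatically on driver unbind, so no release_mem_region()/iounmap()
 * is needed in remove() or in error paths.
 */
static int example_probe(struct platform_device *pdev)
{
        struct resource *mem;
        void __iomem *base;

        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, mem);
        if (IS_ERR(base))
                return PTR_ERR(base);

        /* ... use "base" for register access ... */
        return 0;
}
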
Signed-off-by: Fabio Estevam Signed-off-by: Marc Kleine-Budde diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 7b0be09..9ca27e2 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -1001,7 +1001,6 @@ static int flexcan_probe(struct platform_device *pdev) struct resource *mem; struct clk *clk_ipg = NULL, *clk_per = NULL; void __iomem *base; - resource_size_t mem_size; int err, irq; u32 clock_freq = 0; @@ -1013,43 +1012,25 @@ static int flexcan_probe(struct platform_device *pdev) clk_ipg = devm_clk_get(&pdev->dev, "ipg"); if (IS_ERR(clk_ipg)) { dev_err(&pdev->dev, "no ipg clock defined\n"); - err = PTR_ERR(clk_ipg); - goto failed_clock; + return PTR_ERR(clk_ipg); } clock_freq = clk_get_rate(clk_ipg); clk_per = devm_clk_get(&pdev->dev, "per"); if (IS_ERR(clk_per)) { dev_err(&pdev->dev, "no per clock defined\n"); - err = PTR_ERR(clk_per); - goto failed_clock; + return PTR_ERR(clk_per); } } mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); - if (!mem || irq <= 0) { - err = -ENODEV; - goto failed_get; - } - - mem_size = resource_size(mem); - if (!request_mem_region(mem->start, mem_size, pdev->name)) { - err = -EBUSY; - goto failed_get; - } + if (irq <= 0) + return -ENODEV; - base = ioremap(mem->start, mem_size); - if (!base) { - err = -ENOMEM; - goto failed_map; - } - - dev = alloc_candev(sizeof(struct flexcan_priv), 1); - if (!dev) { - err = -ENOMEM; - goto failed_alloc; - } + base = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(base)) + return PTR_ERR(base); of_id = of_match_device(flexcan_of_match, &pdev->dev); if (of_id) { @@ -1058,10 +1039,13 @@ static int flexcan_probe(struct platform_device *pdev) devtype_data = (struct flexcan_devtype_data *) pdev->id_entry->driver_data; } else { - err = -ENODEV; - goto failed_devtype; + return -ENODEV; } + dev = alloc_candev(sizeof(struct flexcan_priv), 1); + if (!dev) + return -ENOMEM; + dev->netdev_ops = &flexcan_netdev_ops; dev->irq = irq; dev->flags |= IFF_ECHO; @@ -1104,28 +1088,15 @@ static int flexcan_probe(struct platform_device *pdev) return 0; failed_register: - failed_devtype: free_candev(dev); - failed_alloc: - iounmap(base); - failed_map: - release_mem_region(mem->start, mem_size); - failed_get: - failed_clock: return err; } static int flexcan_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); - struct flexcan_priv *priv = netdev_priv(dev); - struct resource *mem; unregister_flexcandev(dev); - iounmap(priv->base); - - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(mem->start, resource_size(mem)); free_candev(dev); -- cgit v0.10.2 From aa10181bdac99a623517f0f2bfa14cd98c749246 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Mon, 22 Jul 2013 12:41:40 -0300 Subject: can: flexcan: Check the return value from clk_prepare_enable() clk_prepare_enable() may fail, so let's check its return value and propagate it in the case of error. 
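The resulting unwind pattern for a pair of clocks, shown as a minimal sketch with generic names (not the flexcan code itself):

#include <linux/clk.h>

/* Minimal sketch of checked clock enabling with goto-based unwind:
 * if the second clk_prepare_enable() fails, the first clock is
 * disabled again before the error is propagated to the caller.
 */
static int example_enable_clocks(struct clk *clk_ipg, struct clk *clk_per)
{
        int err;

        err = clk_prepare_enable(clk_ipg);
        if (err)
                return err;

        err = clk_prepare_enable(clk_per);
        if (err)
                goto out_disable_ipg;

        return 0;

out_disable_ipg:
        clk_disable_unprepare(clk_ipg);
        return err;
}
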
Signed-off-by: Fabio Estevam Signed-off-by: Marc Kleine-Budde diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 9ca27e2..c48174e 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -850,12 +850,17 @@ static int flexcan_open(struct net_device *dev) struct flexcan_priv *priv = netdev_priv(dev); int err; - clk_prepare_enable(priv->clk_ipg); - clk_prepare_enable(priv->clk_per); + err = clk_prepare_enable(priv->clk_ipg); + if (err) + return err; + + err = clk_prepare_enable(priv->clk_per); + if (err) + goto out_disable_ipg; err = open_candev(dev); if (err) - goto out; + goto out_disable_per; err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev); if (err) @@ -875,8 +880,9 @@ static int flexcan_open(struct net_device *dev) out_close: close_candev(dev); - out: + out_disable_per: clk_disable_unprepare(priv->clk_per); + out_disable_ipg: clk_disable_unprepare(priv->clk_ipg); return err; @@ -933,8 +939,13 @@ static int register_flexcandev(struct net_device *dev) struct flexcan_regs __iomem *regs = priv->base; u32 reg, err; - clk_prepare_enable(priv->clk_ipg); - clk_prepare_enable(priv->clk_per); + err = clk_prepare_enable(priv->clk_ipg); + if (err) + return err; + + err = clk_prepare_enable(priv->clk_per); + if (err) + goto out_disable_ipg; /* select "bus clock", chip must be disabled */ flexcan_chip_disable(priv); @@ -959,15 +970,16 @@ static int register_flexcandev(struct net_device *dev) if (!(reg & FLEXCAN_MCR_FEN)) { netdev_err(dev, "Could not enable RX FIFO, unsupported core\n"); err = -ENODEV; - goto out; + goto out_disable_per; } err = register_candev(dev); - out: + out_disable_per: /* disable core and turn off clocks */ flexcan_chip_disable(priv); clk_disable_unprepare(priv->clk_per); + out_disable_ipg: clk_disable_unprepare(priv->clk_ipg); return err; -- cgit v0.10.2 From 7cf1f14ecf1f5025abb0e30e22e8f7ad219fa32e Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 12 Jul 2013 10:40:31 +0200 Subject: mac80211: add debugfs for driver-buffered TID bitmap Add a per-station debugfs file indicating the TIDs (as a bitmap) that the driver has data buffered on. Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index 44e201d..19c54a4 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c @@ -455,6 +455,15 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta) DEBUGFS_ADD_COUNTER(tx_retry_count, tx_retry_count); DEBUGFS_ADD_COUNTER(wep_weak_iv_count, wep_weak_iv_count); + if (sizeof(sta->driver_buffered_tids) == sizeof(u32)) + debugfs_create_x32("driver_buffered_tids", 0400, + sta->debugfs.dir, + (u32 *)&sta->driver_buffered_tids); + else + debugfs_create_x64("driver_buffered_tids", 0400, + sta->debugfs.dir, + (u64 *)&sta->driver_buffered_tids); + drv_sta_add_debugfs(local, sdata, &sta->sta, sta->debugfs.dir); } -- cgit v0.10.2 From 16ef1fe272332b2f7fd99236017b891db48d9cd6 Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Thu, 11 Jul 2013 16:09:05 +0200 Subject: nl80211/cfg80211: add channel switch command To allow channel switch announcements within beacons, add the channel switch command to nl80211/cfg80211. This is implementation is intended for AP and (later) IBSS mode. 
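A driver opting in sets WIPHY_FLAG_HAS_CHANNEL_SWITCH on its wiphy and provides the new callback; a bare-bones, hypothetical sketch based only on the structures introduced below (not a real driver):

#include <net/cfg80211.h>

/* Hypothetical driver hook for the new channel_switch operation: the
 * driver is expected to beacon params->beacon_csa while counting down
 * params->count TBTTs, then move to params->chandef and install
 * params->beacon_after.
 */
static int example_channel_switch(struct wiphy *wiphy, struct net_device *dev,
                                  struct cfg80211_csa_settings *params)
{
        /* program CSA beacon, arm countdown, switch channel ... */
        return 0;
}

static const struct cfg80211_ops example_cfg80211_ops = {
        /* ... other callbacks ... */
        .channel_switch = example_channel_switch,
};

As the nl80211 code below enforces, the command is only accepted for AP/P2P-GO interfaces that are currently beaconing and that supplied the CSA IEs and counter offsets.
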
Signed-off-by: Simon Wunderlich Signed-off-by: Mathias Kretschmer Signed-off-by: Johannes Berg diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index aeaf6df..b7495c7 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -666,6 +666,30 @@ struct cfg80211_ap_settings { }; /** + * struct cfg80211_csa_settings - channel switch settings + * + * Used for channel switch + * + * @chandef: defines the channel to use after the switch + * @beacon_csa: beacon data while performing the switch + * @counter_offset_beacon: offset for the counter within the beacon (tail) + * @counter_offset_presp: offset for the counter within the probe response + * @beacon_after: beacon data to be used on the new channel + * @radar_required: whether radar detection is required on the new channel + * @block_tx: whether transmissions should be blocked while changing + * @count: number of beacons until switch + */ +struct cfg80211_csa_settings { + struct cfg80211_chan_def chandef; + struct cfg80211_beacon_data beacon_csa; + u16 counter_offset_beacon, counter_offset_presp; + struct cfg80211_beacon_data beacon_after; + bool radar_required; + bool block_tx; + u8 count; +}; + +/** * enum station_parameters_apply_mask - station parameter values to apply * @STATION_PARAM_APPLY_UAPSD: apply new uAPSD parameters (uapsd_queues, max_sp) * @STATION_PARAM_APPLY_CAPABILITY: apply new capability @@ -2139,6 +2163,8 @@ struct cfg80211_update_ft_ies_params { * @crit_proto_stop: Indicates critical protocol no longer needs increased link * reliability. This operation can not fail. * @set_coalesce: Set coalesce parameters. + * + * @channel_switch: initiate channel-switch procedure (with CSA) */ struct cfg80211_ops { int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); @@ -2376,6 +2402,10 @@ struct cfg80211_ops { struct wireless_dev *wdev); int (*set_coalesce)(struct wiphy *wiphy, struct cfg80211_coalesce *coalesce); + + int (*channel_switch)(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_csa_settings *params); }; /* @@ -2441,6 +2471,8 @@ struct cfg80211_ops { * @WIPHY_FLAG_OFFCHAN_TX: Device supports direct off-channel TX. * @WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL: Device supports remain-on-channel call. * @WIPHY_FLAG_SUPPORTS_5_10_MHZ: Device supports 5 MHz and 10 MHz channels. + * @WIPHY_FLAG_HAS_CHANNEL_SWITCH: Device supports channel switch in + * beaconing mode (AP, IBSS, Mesh, ...). */ enum wiphy_flags { WIPHY_FLAG_CUSTOM_REGULATORY = BIT(0), @@ -2465,6 +2497,7 @@ enum wiphy_flags { WIPHY_FLAG_OFFCHAN_TX = BIT(20), WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL = BIT(21), WIPHY_FLAG_SUPPORTS_5_10_MHZ = BIT(22), + WIPHY_FLAG_HAS_CHANNEL_SWITCH = BIT(23), }; /** diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index eb68735..1f42bc3 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -676,6 +676,16 @@ * @NL80211_CMD_GET_COALESCE: Get currently supported coalesce rules. * @NL80211_CMD_SET_COALESCE: Configure coalesce rules or clear existing rules. * + * @NL80211_CMD_CHANNEL_SWITCH: Perform a channel switch by announcing the + * the new channel information (Channel Switch Announcement - CSA) + * in the beacon for some time (as defined in the + * %NL80211_ATTR_CH_SWITCH_COUNT parameter) and then change to the + * new channel. Userspace provides the new channel information (using + * %NL80211_ATTR_WIPHY_FREQ and the attributes determining channel + * width). 
%NL80211_ATTR_CH_SWITCH_BLOCK_TX may be supplied to inform + * other station that transmission must be blocked until the channel + * switch is complete. + * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use */ @@ -841,6 +851,8 @@ enum nl80211_commands { NL80211_CMD_GET_COALESCE, NL80211_CMD_SET_COALESCE, + NL80211_CMD_CHANNEL_SWITCH, + /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ @@ -1469,6 +1481,18 @@ enum nl80211_commands { * * @NL80211_ATTR_COALESCE_RULE: Coalesce rule information. * + * @NL80211_ATTR_CH_SWITCH_COUNT: u32 attribute specifying the number of TBTT's + * until the channel switch event. + * @NL80211_ATTR_CH_SWITCH_BLOCK_TX: flag attribute specifying that transmission + * must be blocked on the current channel (before the channel switch + * operation). + * @NL80211_ATTR_CSA_IES: Nested set of attributes containing the IE information + * for the time while performing a channel switch. + * @NL80211_ATTR_CSA_C_OFF_BEACON: Offset of the channel switch counter + * field in the beacons tail (%NL80211_ATTR_BEACON_TAIL). + * @NL80211_ATTR_CSA_C_OFF_PRESP: Offset of the channel switch counter + * field in the probe response (%NL80211_ATTR_PROBE_RESP). + * * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use */ @@ -1771,6 +1795,12 @@ enum nl80211_attrs { NL80211_ATTR_COALESCE_RULE, + NL80211_ATTR_CH_SWITCH_COUNT, + NL80211_ATTR_CH_SWITCH_BLOCK_TX, + NL80211_ATTR_CSA_IES, + NL80211_ATTR_CSA_C_OFF_BEACON, + NL80211_ATTR_CSA_C_OFF_PRESP, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 03d4ef9..f7cb121 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -349,6 +349,11 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = { [NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN }, [NL80211_ATTR_PEER_AID] = { .type = NLA_U16 }, + [NL80211_ATTR_CH_SWITCH_COUNT] = { .type = NLA_U32 }, + [NL80211_ATTR_CH_SWITCH_BLOCK_TX] = { .type = NLA_FLAG }, + [NL80211_ATTR_CSA_IES] = { .type = NLA_NESTED }, + [NL80211_ATTR_CSA_C_OFF_BEACON] = { .type = NLA_U16 }, + [NL80211_ATTR_CSA_C_OFF_PRESP] = { .type = NLA_U16 }, }; /* policy for the key attributes */ @@ -1422,6 +1427,8 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev, if (state->split) { CMD(crit_proto_start, CRIT_PROTOCOL_START); CMD(crit_proto_stop, CRIT_PROTOCOL_STOP); + if (dev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH) + CMD(channel_switch, CHANNEL_SWITCH); } #ifdef CONFIG_NL80211_TESTMODE @@ -5613,6 +5620,111 @@ static int nl80211_start_radar_detection(struct sk_buff *skb, return err; } +static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev = info->user_ptr[0]; + struct net_device *dev = info->user_ptr[1]; + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_csa_settings params; + /* csa_attrs is defined static to avoid waste of stack size - this + * function is called under RTNL lock, so this should not be a problem. 
+ */ + static struct nlattr *csa_attrs[NL80211_ATTR_MAX+1]; + u8 radar_detect_width = 0; + int err; + + if (!rdev->ops->channel_switch || + !(rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)) + return -EOPNOTSUPP; + + /* may add IBSS support later */ + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && + dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) + return -EOPNOTSUPP; + + memset(¶ms, 0, sizeof(params)); + + if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] || + !info->attrs[NL80211_ATTR_CH_SWITCH_COUNT]) + return -EINVAL; + + /* only important for AP, IBSS and mesh create IEs internally */ + if (!info->attrs[NL80211_ATTR_CSA_IES]) + return -EINVAL; + + /* useless if AP is not running */ + if (!wdev->beacon_interval) + return -EINVAL; + + params.count = nla_get_u32(info->attrs[NL80211_ATTR_CH_SWITCH_COUNT]); + + err = nl80211_parse_beacon(info->attrs, ¶ms.beacon_after); + if (err) + return err; + + err = nla_parse_nested(csa_attrs, NL80211_ATTR_MAX, + info->attrs[NL80211_ATTR_CSA_IES], + nl80211_policy); + if (err) + return err; + + err = nl80211_parse_beacon(csa_attrs, ¶ms.beacon_csa); + if (err) + return err; + + if (!csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON]) + return -EINVAL; + + params.counter_offset_beacon = + nla_get_u16(csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON]); + if (params.counter_offset_beacon >= params.beacon_csa.tail_len) + return -EINVAL; + + /* sanity check - counters should be the same */ + if (params.beacon_csa.tail[params.counter_offset_beacon] != + params.count) + return -EINVAL; + + if (csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]) { + params.counter_offset_presp = + nla_get_u16(csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]); + if (params.counter_offset_presp >= + params.beacon_csa.probe_resp_len) + return -EINVAL; + + if (params.beacon_csa.probe_resp[params.counter_offset_presp] != + params.count) + return -EINVAL; + } + + err = nl80211_parse_chandef(rdev, info, ¶ms.chandef); + if (err) + return err; + + if (!cfg80211_reg_can_beacon(&rdev->wiphy, ¶ms.chandef)) + return -EINVAL; + + err = cfg80211_chandef_dfs_required(wdev->wiphy, ¶ms.chandef); + if (err < 0) { + return err; + } else if (err) { + radar_detect_width = BIT(params.chandef.width); + params.radar_required = true; + } + + err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype, + params.chandef.chan, + CHAN_MODE_SHARED, + radar_detect_width); + if (err) + return err; + + if (info->attrs[NL80211_ATTR_CH_SWITCH_BLOCK_TX]) + params.block_tx = true; + + return rdev_channel_switch(rdev, dev, ¶ms); +} + static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb, u32 seq, int flags, struct cfg80211_registered_device *rdev, @@ -9361,7 +9473,15 @@ static struct genl_ops nl80211_ops[] = { .flags = GENL_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL, - } + }, + { + .cmd = NL80211_CMD_CHANNEL_SWITCH, + .doit = nl80211_channel_switch, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | + NL80211_FLAG_NEED_RTNL, + }, }; static struct genl_multicast_group nl80211_mlme_mcgrp = { diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h index 9f15f0a..de870d4 100644 --- a/net/wireless/rdev-ops.h +++ b/net/wireless/rdev-ops.h @@ -923,4 +923,16 @@ static inline void rdev_crit_proto_stop(struct cfg80211_registered_device *rdev, trace_rdev_return_void(&rdev->wiphy); } +static inline int rdev_channel_switch(struct cfg80211_registered_device *rdev, + struct net_device *dev, + struct cfg80211_csa_settings *params) +{ + int 
ret; + + trace_rdev_channel_switch(&rdev->wiphy, dev, params); + ret = rdev->ops->channel_switch(&rdev->wiphy, dev, params); + trace_rdev_return_int(&rdev->wiphy, ret); + return ret; +} + #endif /* __CFG80211_RDEV_OPS */ diff --git a/net/wireless/trace.h b/net/wireless/trace.h index 09af6eb..f0ebdcd 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -1841,6 +1841,39 @@ TRACE_EVENT(rdev_crit_proto_stop, WIPHY_PR_ARG, WDEV_PR_ARG) ); +TRACE_EVENT(rdev_channel_switch, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, + struct cfg80211_csa_settings *params), + TP_ARGS(wiphy, netdev, params), + TP_STRUCT__entry( + WIPHY_ENTRY + NETDEV_ENTRY + CHAN_DEF_ENTRY + __field(u16, counter_offset_beacon) + __field(u16, counter_offset_presp) + __field(bool, radar_required) + __field(bool, block_tx) + __field(u8, count) + ), + TP_fast_assign( + WIPHY_ASSIGN; + NETDEV_ASSIGN; + CHAN_DEF_ASSIGN(¶ms->chandef); + __entry->counter_offset_beacon = params->counter_offset_beacon; + __entry->counter_offset_presp = params->counter_offset_presp; + __entry->radar_required = params->radar_required; + __entry->block_tx = params->block_tx; + __entry->count = params->count; + ), + TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT + ", block_tx: %d, count: %u, radar_required: %d" + ", counter offsets (beacon/presp): %u/%u", + WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG, + __entry->block_tx, __entry->count, __entry->radar_required, + __entry->counter_offset_beacon, + __entry->counter_offset_presp) +); + /************************************************************* * cfg80211 exported functions traces * *************************************************************/ -- cgit v0.10.2 From 73da7d5bab79ad7e16ff44d67c3fe8b9c0b33e5b Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Thu, 11 Jul 2013 16:09:06 +0200 Subject: mac80211: add channel switch command and beacon callbacks The count field in CSA must be decremented with each beacon transmitted. This patch implements the functionality for drivers using ieee80211_beacon_get(). Other drivers must call back manually after reaching count == 0. This patch also contains the handling and finish worker for the channel switch command, and mac80211/chanctx code to allow to change a channel definition of an active channel context. Signed-off-by: Simon Wunderlich Signed-off-by: Mathias Kretschmer [small cleanups, catch identical chandef] Signed-off-by: Johannes Berg diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 3124036..9cda372 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -152,11 +152,14 @@ struct ieee80211_low_level_stats { * @IEEE80211_CHANCTX_CHANGE_WIDTH: The channel width changed * @IEEE80211_CHANCTX_CHANGE_RX_CHAINS: The number of RX chains changed * @IEEE80211_CHANCTX_CHANGE_RADAR: radar detection flag changed + * @IEEE80211_CHANCTX_CHANGE_CHANNEL: switched to another operating channel, + * this is used only with channel switching with CSA */ enum ieee80211_chanctx_change { IEEE80211_CHANCTX_CHANGE_WIDTH = BIT(0), IEEE80211_CHANCTX_CHANGE_RX_CHAINS = BIT(1), IEEE80211_CHANCTX_CHANGE_RADAR = BIT(2), + IEEE80211_CHANCTX_CHANGE_CHANNEL = BIT(3), }; /** @@ -1084,6 +1087,7 @@ enum ieee80211_vif_flags { * @addr: address of this interface * @p2p: indicates whether this AP or STA interface is a p2p * interface, i.e. 
a GO or p2p-sta respectively + * @csa_active: marks whether a channel switch is going on * @driver_flags: flags/capabilities the driver has for this interface, * these need to be set (or cleared) when the interface is added * or, if supported by the driver, the interface type is changed @@ -1106,6 +1110,7 @@ struct ieee80211_vif { struct ieee80211_bss_conf bss_conf; u8 addr[ETH_ALEN]; bool p2p; + bool csa_active; u8 cab_queue; u8 hw_queue[IEEE80211_NUM_ACS]; @@ -2637,6 +2642,16 @@ enum ieee80211_roc_type { * @ipv6_addr_change: IPv6 address assignment on the given interface changed. * Currently, this is only called for managed or P2P client interfaces. * This callback is optional; it must not sleep. + * + * @channel_switch_beacon: Starts a channel switch to a new channel. + * Beacons are modified to include CSA or ECSA IEs before calling this + * function. The corresponding count fields in these IEs must be + * decremented, and when they reach zero the driver must call + * ieee80211_csa_finish(). Drivers which use ieee80211_beacon_get() + * get the csa counter decremented by mac80211, but must check if it is + * zero using ieee80211_csa_is_complete() after the beacon has been + * transmitted and then call ieee80211_csa_finish(). + * */ struct ieee80211_ops { void (*tx)(struct ieee80211_hw *hw, @@ -2824,6 +2839,9 @@ struct ieee80211_ops { struct ieee80211_vif *vif, struct inet6_dev *idev); #endif + void (*channel_switch_beacon)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_chan_def *chandef); }; /** @@ -3319,6 +3337,25 @@ static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, } /** + * ieee80211_csa_finish - notify mac80211 about channel switch + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * + * After a channel switch announcement was scheduled and the counter in this + * announcement hit zero, this function must be called by the driver to + * notify mac80211 that the channel can be changed. + */ +void ieee80211_csa_finish(struct ieee80211_vif *vif); + +/** + * ieee80211_csa_is_complete - find out if counters reached zero + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * + * This function returns whether the channel switch counters reached zero. + */ +bool ieee80211_csa_is_complete(struct ieee80211_vif *vif); + + +/** * ieee80211_proberesp_get - retrieve a Probe Response template * @hw: pointer obtained from ieee80211_alloc_hw(). * @vif: &struct ieee80211_vif pointer from the add_interface callback. 
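The kerneldoc added above for @channel_switch_beacon, ieee80211_csa_finish() and ieee80211_csa_is_complete() implies a simple driver-side flow for drivers that build beacons with ieee80211_beacon_get(): mac80211 decrements the CSA counter inside the returned template, and after the beacon has been transmitted the driver checks whether the countdown reached zero and, if so, tells mac80211 to finalize the switch. A minimal sketch of that flow follows; mydrv_beacon_work() and mydrv_tx_beacon() are illustrative placeholders only, while the three ieee80211_* calls are the real API added in this series.

#include <net/mac80211.h>

/*
 * Hypothetical driver beacon path (sketch, not part of this patch set).
 */
static void mydrv_beacon_work(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif)
{
	struct sk_buff *beacon;

	/* mac80211 decrements the CSA counter in the template for us */
	beacon = ieee80211_beacon_get(hw, vif);
	if (!beacon)
		return;

	mydrv_tx_beacon(hw, beacon);	/* placeholder for the driver's TX hook */

	/* after the beacon went out, check whether the countdown hit zero */
	if (ieee80211_csa_is_complete(vif))
		ieee80211_csa_finish(vif);
}

Drivers that do not use ieee80211_beacon_get() must instead decrement the counter in their own beacon templates and call ieee80211_csa_finish() once it reaches zero, as the commit message above notes.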
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index b82fff6..44449ce 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -860,8 +860,8 @@ static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata, return 0; } -static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata, - struct cfg80211_beacon_data *params) +int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata, + struct cfg80211_beacon_data *params) { struct beacon_data *new, *old; int new_head_len, new_tail_len; @@ -1024,6 +1024,12 @@ static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev, sdata = IEEE80211_DEV_TO_SUB_IF(dev); + /* don't allow changing the beacon while CSA is in place - offset + * of channel switch counter may change + */ + if (sdata->vif.csa_active) + return -EBUSY; + old = rtnl_dereference(sdata->u.ap.beacon); if (!old) return -ENOENT; @@ -1048,6 +1054,10 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev) return -ENOENT; old_probe_resp = rtnl_dereference(sdata->u.ap.probe_resp); + /* abort any running channel switch */ + sdata->vif.csa_active = false; + cancel_work_sync(&sdata->csa_finalize_work); + /* turn off carrier for this interface and dependent VLANs */ list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) netif_carrier_off(vlan->dev); @@ -2775,6 +2785,178 @@ static int ieee80211_start_radar_detection(struct wiphy *wiphy, return 0; } +static struct cfg80211_beacon_data * +cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon) +{ + struct cfg80211_beacon_data *new_beacon; + u8 *pos; + int len; + + len = beacon->head_len + beacon->tail_len + beacon->beacon_ies_len + + beacon->proberesp_ies_len + beacon->assocresp_ies_len + + beacon->probe_resp_len; + + new_beacon = kzalloc(sizeof(*new_beacon) + len, GFP_KERNEL); + if (!new_beacon) + return NULL; + + pos = (u8 *)(new_beacon + 1); + if (beacon->head_len) { + new_beacon->head_len = beacon->head_len; + new_beacon->head = pos; + memcpy(pos, beacon->head, beacon->head_len); + pos += beacon->head_len; + } + if (beacon->tail_len) { + new_beacon->tail_len = beacon->tail_len; + new_beacon->tail = pos; + memcpy(pos, beacon->tail, beacon->tail_len); + pos += beacon->tail_len; + } + if (beacon->beacon_ies_len) { + new_beacon->beacon_ies_len = beacon->beacon_ies_len; + new_beacon->beacon_ies = pos; + memcpy(pos, beacon->beacon_ies, beacon->beacon_ies_len); + pos += beacon->beacon_ies_len; + } + if (beacon->proberesp_ies_len) { + new_beacon->proberesp_ies_len = beacon->proberesp_ies_len; + new_beacon->proberesp_ies = pos; + memcpy(pos, beacon->proberesp_ies, beacon->proberesp_ies_len); + pos += beacon->proberesp_ies_len; + } + if (beacon->assocresp_ies_len) { + new_beacon->assocresp_ies_len = beacon->assocresp_ies_len; + new_beacon->assocresp_ies = pos; + memcpy(pos, beacon->assocresp_ies, beacon->assocresp_ies_len); + pos += beacon->assocresp_ies_len; + } + if (beacon->probe_resp_len) { + new_beacon->probe_resp_len = beacon->probe_resp_len; + beacon->probe_resp = pos; + memcpy(pos, beacon->probe_resp, beacon->probe_resp_len); + pos += beacon->probe_resp_len; + } + + return new_beacon; +} + +void ieee80211_csa_finalize_work(struct work_struct *work) +{ + struct ieee80211_sub_if_data *sdata = + container_of(work, struct ieee80211_sub_if_data, + csa_finalize_work); + struct ieee80211_local *local = sdata->local; + int err, changed; + + if (!ieee80211_sdata_running(sdata)) + return; + + if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP)) + return; + + sdata->radar_required = 
sdata->csa_radar_required; + err = ieee80211_vif_change_channel(sdata, &local->csa_chandef, + &changed); + if (WARN_ON(err < 0)) + return; + + err = ieee80211_assign_beacon(sdata, sdata->u.ap.next_beacon); + if (err < 0) + return; + + changed |= err; + kfree(sdata->u.ap.next_beacon); + sdata->u.ap.next_beacon = NULL; + sdata->vif.csa_active = false; + + ieee80211_wake_queues_by_reason(&sdata->local->hw, + IEEE80211_MAX_QUEUE_MAP, + IEEE80211_QUEUE_STOP_REASON_CSA); + + ieee80211_bss_info_change_notify(sdata, changed); + + cfg80211_ch_switch_notify(sdata->dev, &local->csa_chandef); +} + +static int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_csa_settings *params) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + struct ieee80211_chanctx_conf *chanctx_conf; + struct ieee80211_chanctx *chanctx; + int err, num_chanctx; + + if (!list_empty(&local->roc_list) || local->scanning) + return -EBUSY; + + if (sdata->wdev.cac_started) + return -EBUSY; + + if (cfg80211_chandef_identical(&params->chandef, + &sdata->vif.bss_conf.chandef)) + return -EINVAL; + + rcu_read_lock(); + chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); + if (!chanctx_conf) { + rcu_read_unlock(); + return -EBUSY; + } + + /* don't handle for multi-VIF cases */ + chanctx = container_of(chanctx_conf, struct ieee80211_chanctx, conf); + if (chanctx->refcount > 1) { + rcu_read_unlock(); + return -EBUSY; + } + num_chanctx = 0; + list_for_each_entry_rcu(chanctx, &local->chanctx_list, list) + num_chanctx++; + rcu_read_unlock(); + + if (num_chanctx > 1) + return -EBUSY; + + /* don't allow another channel switch if one is already active. */ + if (sdata->vif.csa_active) + return -EBUSY; + + /* only handle AP for now. 
*/ + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP: + break; + default: + return -EOPNOTSUPP; + } + + sdata->u.ap.next_beacon = cfg80211_beacon_dup(&params->beacon_after); + if (!sdata->u.ap.next_beacon) + return -ENOMEM; + + sdata->csa_counter_offset_beacon = params->counter_offset_beacon; + sdata->csa_counter_offset_presp = params->counter_offset_presp; + sdata->csa_radar_required = params->radar_required; + + if (params->block_tx) + ieee80211_stop_queues_by_reason(&local->hw, + IEEE80211_MAX_QUEUE_MAP, + IEEE80211_QUEUE_STOP_REASON_CSA); + + err = ieee80211_assign_beacon(sdata, &params->beacon_csa); + if (err < 0) + return err; + + local->csa_chandef = params->chandef; + sdata->vif.csa_active = true; + + ieee80211_bss_info_change_notify(sdata, err); + drv_channel_switch_beacon(sdata, &params->chandef); + + return 0; +} + static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, struct ieee80211_channel *chan, bool offchan, unsigned int wait, const u8 *buf, size_t len, @@ -3492,4 +3674,5 @@ struct cfg80211_ops mac80211_config_ops = { .get_et_strings = ieee80211_get_et_strings, .get_channel = ieee80211_cfg_get_channel, .start_radar_detection = ieee80211_start_radar_detection, + .channel_switch = ieee80211_channel_switch, }; diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index 03e8d2e..3a4764b 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c @@ -410,6 +410,64 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata, return ret; } +int ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata, + const struct cfg80211_chan_def *chandef, + u32 *changed) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_chanctx_conf *conf; + struct ieee80211_chanctx *ctx; + int ret; + u32 chanctx_changed = 0; + + /* should never be called if not performing a channel switch. 
*/ + if (WARN_ON(!sdata->vif.csa_active)) + return -EINVAL; + + if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef, + IEEE80211_CHAN_DISABLED)) + return -EINVAL; + + mutex_lock(&local->chanctx_mtx); + conf = rcu_dereference_protected(sdata->vif.chanctx_conf, + lockdep_is_held(&local->chanctx_mtx)); + if (!conf) { + ret = -EINVAL; + goto out; + } + + ctx = container_of(conf, struct ieee80211_chanctx, conf); + if (ctx->refcount != 1) { + ret = -EINVAL; + goto out; + } + + if (sdata->vif.bss_conf.chandef.width != chandef->width) { + chanctx_changed = IEEE80211_CHANCTX_CHANGE_WIDTH; + *changed |= BSS_CHANGED_BANDWIDTH; + } + + sdata->vif.bss_conf.chandef = *chandef; + ctx->conf.def = *chandef; + + chanctx_changed |= IEEE80211_CHANCTX_CHANGE_CHANNEL; + drv_change_chanctx(local, ctx, chanctx_changed); + + if (!local->use_chanctx) { + local->_oper_chandef = *chandef; + ieee80211_hw_config(local, 0); + } + + ieee80211_recalc_chanctx_chantype(local, ctx); + ieee80211_recalc_smps_chanctx(local, ctx); + ieee80211_recalc_radar_chanctx(local, ctx); + + ret = 0; + out: + mutex_unlock(&local->chanctx_mtx); + return ret; +} + int ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata, const struct cfg80211_chan_def *chandef, u32 *changed) diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index b931c96..b3ea11f 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -1072,4 +1072,17 @@ static inline void drv_ipv6_addr_change(struct ieee80211_local *local, } #endif +static inline void +drv_channel_switch_beacon(struct ieee80211_sub_if_data *sdata, + struct cfg80211_chan_def *chandef) +{ + struct ieee80211_local *local = sdata->local; + + if (local->ops->channel_switch_beacon) { + trace_drv_channel_switch_beacon(local, sdata, chandef); + local->ops->channel_switch_beacon(&local->hw, &sdata->vif, + chandef); + } +} + #endif /* __MAC80211_DRIVER_OPS */ diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index d779383..e94c840 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -259,6 +259,8 @@ struct ieee80211_if_ap { struct beacon_data __rcu *beacon; struct probe_resp __rcu *probe_resp; + /* to be used after channel switch. 
*/ + struct cfg80211_beacon_data *next_beacon; struct list_head vlans; struct ps_data ps; @@ -716,6 +718,11 @@ struct ieee80211_sub_if_data { struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS]; + struct work_struct csa_finalize_work; + int csa_counter_offset_beacon; + int csa_counter_offset_presp; + bool csa_radar_required; + /* used to reconfigure hardware SM PS */ struct work_struct recalc_smps; @@ -1372,6 +1379,9 @@ void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free); void ieee80211_sw_roc_work(struct work_struct *work); void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc); +/* channel switch handling */ +void ieee80211_csa_finalize_work(struct work_struct *work); + /* interface handling */ int ieee80211_iface_init(void); void ieee80211_iface_exit(void); @@ -1393,6 +1403,8 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local); bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata); void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata); +int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata, + struct cfg80211_beacon_data *params); static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata) { @@ -1654,6 +1666,11 @@ int __must_check ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata, const struct cfg80211_chan_def *chandef, u32 *changed); +/* NOTE: only use ieee80211_vif_change_channel() for channel switch */ +int __must_check +ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata, + const struct cfg80211_chan_def *chandef, + u32 *changed); void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata); void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata); void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata, diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 4c41c11..7ca534b 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -274,6 +274,12 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata, if (iftype == NL80211_IFTYPE_ADHOC && nsdata->vif.type == NL80211_IFTYPE_ADHOC) return -EBUSY; + /* + * will not add another interface while any channel + * switch is active. 
+ */ + if (nsdata->vif.csa_active) + return -EBUSY; /* * The remaining checks are only performed for interfaces @@ -804,6 +810,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, cancel_work_sync(&local->dynamic_ps_enable_work); cancel_work_sync(&sdata->recalc_smps); + sdata->vif.csa_active = false; + cancel_work_sync(&sdata->csa_finalize_work); cancel_delayed_work_sync(&sdata->dfs_cac_timer_work); @@ -1267,6 +1275,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, skb_queue_head_init(&sdata->skb_queue); INIT_WORK(&sdata->work, ieee80211_iface_work); INIT_WORK(&sdata->recalc_smps, ieee80211_recalc_smps_work); + INIT_WORK(&sdata->csa_finalize_work, ieee80211_csa_finalize_work); switch (type) { case NL80211_IFTYPE_P2P_GO: diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h index c215fafd7..1aba645 100644 --- a/net/mac80211/trace.h +++ b/net/mac80211/trace.h @@ -1906,6 +1906,32 @@ TRACE_EVENT(api_radar_detected, ) ); +TRACE_EVENT(drv_channel_switch_beacon, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct cfg80211_chan_def *chandef), + + TP_ARGS(local, sdata, chandef), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + CHANDEF_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + CHANDEF_ASSIGN(chandef); + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT " channel switch to " CHANDEF_PR_FMT, + LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG + ) +); + + #ifdef CONFIG_MAC80211_MESSAGE_TRACING #undef TRACE_SYSTEM #define TRACE_SYSTEM mac80211_msg diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index f65873f..0e42322 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -2338,6 +2338,81 @@ static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata, return 0; } +void ieee80211_csa_finish(struct ieee80211_vif *vif) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + + ieee80211_queue_work(&sdata->local->hw, + &sdata->csa_finalize_work); +} +EXPORT_SYMBOL(ieee80211_csa_finish); + +static void ieee80211_update_csa(struct ieee80211_sub_if_data *sdata, + struct beacon_data *beacon) +{ + struct probe_resp *resp; + int counter_offset_beacon = sdata->csa_counter_offset_beacon; + int counter_offset_presp = sdata->csa_counter_offset_presp; + + /* warn if the driver did not check for/react to csa completeness */ + if (WARN_ON(((u8 *)beacon->tail)[counter_offset_beacon] == 0)) + return; + + ((u8 *)beacon->tail)[counter_offset_beacon]--; + + if (sdata->vif.type == NL80211_IFTYPE_AP && + counter_offset_presp) { + rcu_read_lock(); + resp = rcu_dereference(sdata->u.ap.probe_resp); + + /* if nl80211 accepted the offset, this should not happen. 
*/ + if (WARN_ON(!resp)) { + rcu_read_unlock(); + return; + } + resp->data[counter_offset_presp]--; + rcu_read_unlock(); + } +} + +bool ieee80211_csa_is_complete(struct ieee80211_vif *vif) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct beacon_data *beacon = NULL; + u8 *beacon_data; + size_t beacon_data_len; + int counter_beacon = sdata->csa_counter_offset_beacon; + int ret = false; + + if (!ieee80211_sdata_running(sdata)) + return false; + + rcu_read_lock(); + if (vif->type == NL80211_IFTYPE_AP) { + struct ieee80211_if_ap *ap = &sdata->u.ap; + + beacon = rcu_dereference(ap->beacon); + if (WARN_ON(!beacon || !beacon->tail)) + goto out; + beacon_data = beacon->tail; + beacon_data_len = beacon->tail_len; + } else { + WARN_ON(1); + goto out; + } + + if (WARN_ON(counter_beacon > beacon_data_len)) + goto out; + + if (beacon_data[counter_beacon] == 0) + ret = true; + out: + rcu_read_unlock(); + + return ret; +} +EXPORT_SYMBOL(ieee80211_csa_is_complete); + struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 *tim_offset, u16 *tim_length) @@ -2368,6 +2443,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, struct beacon_data *beacon = rcu_dereference(ap->beacon); if (beacon) { + if (sdata->vif.csa_active) + ieee80211_update_csa(sdata, beacon); + /* * headroom, head length, * tail length and maximum TIM length -- cgit v0.10.2 From c3afd99fb5adfb31925f0b493a0d4152cd6447cc Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Tue, 30 Jul 2013 17:18:15 -0700 Subject: mwifiex: fix adapter pointer dereference issue It has introduced by recent commit 6b41f941d7cd: "mwifiex: handle driver initialization error paths" which adds error path handling for mwifiex_fw_dpc(). release_firmware(adapter->*) is called for success as well as failure paths. In failure paths, adapter is already freed at this point. The issue is fixed by moving mwifiex_free_adapter() call. Reported-by: Dan Carpenter Signed-off-by: Amitkumar Karwar Signed-off-by: Bing Zhao Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index 5644c7f..3402bff 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c @@ -414,6 +414,8 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) struct mwifiex_private *priv; struct mwifiex_adapter *adapter = context; struct mwifiex_fw_image fw; + struct semaphore *sem = adapter->card_sem; + bool init_failed = false; if (!firmware) { dev_err(adapter->dev, @@ -528,15 +530,20 @@ err_dnld_fw: } adapter->surprise_removed = true; mwifiex_terminate_workqueue(adapter); - mwifiex_free_adapter(adapter); + init_failed = true; done: if (adapter->cal_data) { release_firmware(adapter->cal_data); adapter->cal_data = NULL; } - release_firmware(adapter->firmware); + if (adapter->firmware) { + release_firmware(adapter->firmware); + adapter->firmware = NULL; + } complete(&adapter->fw_load); - up(adapter->card_sem); + if (init_failed) + mwifiex_free_adapter(adapter); + up(sem); return; } -- cgit v0.10.2 From 78dfca62f1a0a8d00b155e48197a565da18aebd9 Mon Sep 17 00:00:00 2001 From: Avinash Patil Date: Tue, 30 Jul 2013 17:18:56 -0700 Subject: mwifiex: populate rates in probe request using cfg80211_scan_request Whenever available, use cfg80211_scan_request to populates rates in outgoing probe request. 
This will help to advertise band specific rates and fix an issue where 11b rates were advertised in probe request going out on 11a band. This will also ensure that we do not advertise 11b rates while P2P scan is going on. Signed-off-by: Avinash Patil Signed-off-by: Bing Zhao Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c index 988552d..913bb63 100644 --- a/drivers/net/wireless/mwifiex/cfp.c +++ b/drivers/net/wireless/mwifiex/cfp.c @@ -404,11 +404,43 @@ mwifiex_is_rate_auto(struct mwifiex_private *priv) return false; } -/* - * This function gets the supported data rates. - * - * The function works in both Ad-Hoc and infra mode by printing the - * band and returning the data rates. +/* This function gets the supported data rates from bitmask inside + * cfg80211_scan_request. + */ +u32 mwifiex_get_rates_from_cfg80211(struct mwifiex_private *priv, + u8 *rates, u8 radio_type) +{ + struct wiphy *wiphy = priv->adapter->wiphy; + struct cfg80211_scan_request *request = priv->scan_request; + u32 num_rates, rate_mask; + struct ieee80211_supported_band *sband; + int i; + + if (radio_type) { + sband = wiphy->bands[IEEE80211_BAND_5GHZ]; + if (WARN_ON_ONCE(!sband)) + return 0; + rate_mask = request->rates[IEEE80211_BAND_5GHZ]; + } else { + sband = wiphy->bands[IEEE80211_BAND_2GHZ]; + if (WARN_ON_ONCE(!sband)) + return 0; + rate_mask = request->rates[IEEE80211_BAND_2GHZ]; + } + + num_rates = 0; + for (i = 0; i < sband->n_bitrates; i++) { + if ((BIT(i) & rate_mask) == 0) + continue; /* skip rate */ + rates[num_rates++] = (u8)(sband->bitrates[i].bitrate / 5); + } + + return num_rates; +} + +/* This function gets the supported data rates. The function works in + * both Ad-Hoc and infra mode by printing the band and returning the + * data rates. */ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates) { diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index 9ee3b1b..d2e5ccd 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h @@ -902,6 +902,8 @@ int mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv, u16 vsie_mask, u32 mwifiex_get_active_data_rates(struct mwifiex_private *priv, u8 *rates); u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates); +u32 mwifiex_get_rates_from_cfg80211(struct mwifiex_private *priv, + u8 *rates, u8 radio_type); u8 mwifiex_is_rate_auto(struct mwifiex_private *priv); extern u16 region_code_index[MWIFIEX_MAX_REGION_CODE]; void mwifiex_save_curr_bcn(struct mwifiex_private *priv); diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index c447d9b..8cf7d50 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c @@ -543,6 +543,37 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv, return chan_idx; } +/* This function appends rate TLV to scan config command. 
*/ +static int +mwifiex_append_rate_tlv(struct mwifiex_private *priv, + struct mwifiex_scan_cmd_config *scan_cfg_out, + u8 radio) +{ + struct mwifiex_ie_types_rates_param_set *rates_tlv; + u8 rates[MWIFIEX_SUPPORTED_RATES], *tlv_pos; + u32 rates_size; + + memset(rates, 0, sizeof(rates)); + + tlv_pos = (u8 *)scan_cfg_out->tlv_buf + scan_cfg_out->tlv_buf_len; + + if (priv->scan_request) + rates_size = mwifiex_get_rates_from_cfg80211(priv, rates, + radio); + else + rates_size = mwifiex_get_supported_rates(priv, rates); + + dev_dbg(priv->adapter->dev, "info: SCAN_CMD: Rates size = %d\n", + rates_size); + rates_tlv = (struct mwifiex_ie_types_rates_param_set *)tlv_pos; + rates_tlv->header.type = cpu_to_le16(WLAN_EID_SUPP_RATES); + rates_tlv->header.len = cpu_to_le16((u16) rates_size); + memcpy(rates_tlv->rates, rates, rates_size); + scan_cfg_out->tlv_buf_len += sizeof(rates_tlv->header) + rates_size; + + return rates_size; +} + /* * This function constructs and sends multiple scan config commands to * the firmware. @@ -564,9 +595,10 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv, struct mwifiex_chan_scan_param_set *tmp_chan_list; struct mwifiex_chan_scan_param_set *start_chan; - u32 tlv_idx; + u32 tlv_idx, rates_size; u32 total_scan_time; u32 done_early; + u8 radio_type; if (!scan_cfg_out || !chan_tlv_out || !scan_chan_list) { dev_dbg(priv->adapter->dev, @@ -591,6 +623,7 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv, tlv_idx = 0; total_scan_time = 0; + radio_type = 0; chan_tlv_out->header.len = 0; start_chan = tmp_chan_list; done_early = false; @@ -612,6 +645,7 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv, continue; } + radio_type = tmp_chan_list->radio_type; dev_dbg(priv->adapter->dev, "info: Scan: Chan(%3d), Radio(%d)," " Mode(%d, %d), Dur(%d)\n", @@ -692,6 +726,9 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv, break; } + rates_size = mwifiex_append_rate_tlv(priv, scan_cfg_out, + radio_type); + priv->adapter->scan_channels = start_chan; /* Send the scan command to the firmware with the specified @@ -699,6 +736,14 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv, ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SCAN, HostCmd_ACT_GEN_SET, 0, scan_cfg_out); + + /* rate IE is updated per scan command but same starting + * pointer is used each time so that rate IE from earlier + * scan_cfg_out->buf is overwritten with new one. + */ + scan_cfg_out->tlv_buf_len -= + sizeof(struct mwifiex_ie_types_header) + rates_size; + if (ret) break; } @@ -741,7 +786,6 @@ mwifiex_config_scan(struct mwifiex_private *priv, struct mwifiex_adapter *adapter = priv->adapter; struct mwifiex_ie_types_num_probes *num_probes_tlv; struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv; - struct mwifiex_ie_types_rates_param_set *rates_tlv; u8 *tlv_pos; u32 num_probes; u32 ssid_len; @@ -753,8 +797,6 @@ mwifiex_config_scan(struct mwifiex_private *priv, u8 radio_type; int i; u8 ssid_filter; - u8 rates[MWIFIEX_SUPPORTED_RATES]; - u32 rates_size; struct mwifiex_ie_types_htcap *ht_cap; /* The tlv_buf_len is calculated for each scan command. 
The TLVs added @@ -889,19 +931,6 @@ mwifiex_config_scan(struct mwifiex_private *priv, } - /* Append rates tlv */ - memset(rates, 0, sizeof(rates)); - - rates_size = mwifiex_get_supported_rates(priv, rates); - - rates_tlv = (struct mwifiex_ie_types_rates_param_set *) tlv_pos; - rates_tlv->header.type = cpu_to_le16(WLAN_EID_SUPP_RATES); - rates_tlv->header.len = cpu_to_le16((u16) rates_size); - memcpy(rates_tlv->rates, rates, rates_size); - tlv_pos += sizeof(rates_tlv->header) + rates_size; - - dev_dbg(adapter->dev, "info: SCAN_CMD: Rates size = %d\n", rates_size); - if (ISSUPP_11NENABLED(priv->adapter->fw_cap_info) && (priv->adapter->config_bands & BAND_GN || priv->adapter->config_bands & BAND_AN)) { -- cgit v0.10.2 From d28e5e5d96cde070d794553d16702ca6879bb7a2 Mon Sep 17 00:00:00 2001 From: Mark Schulte Date: Wed, 31 Jul 2013 19:30:14 -0700 Subject: rtlwifi: rtl8192cu: fix function declaration header Move rtl92cu_update_hal_rate_tbl declaration to hw.h to correspond with function definition in hw.c. Fixes sparse warning in hw.c warning: symbol 'rtl92cu_update_hal_rate_tbl' was not declared. Signed-off-by: Mark Schulte Acked-by: Larry Finger Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h index 8e3ec1e..0f7812e 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h @@ -109,5 +109,8 @@ void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus); void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id, u32 cmd_len, u8 *p_cmdbuffer); bool rtl92cu_phy_mac_config(struct ieee80211_hw *hw); +void rtl92cu_update_hal_rate_tbl(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + u8 rssi_level); #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h index 262e1e4..a1310ab 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h @@ -49,8 +49,5 @@ bool rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw, u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath, u32 regaddr, u32 bitmask); void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw); -void rtl92cu_update_hal_rate_tbl(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, - u8 rssi_level); #endif -- cgit v0.10.2 From 376c7311bdb6efea3322310333576a04d73fbe4c Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 1 Aug 2013 11:43:08 -0700 Subject: net: add a temporary sanity check in skb_orphan() David suggested to add a BUG_ON() to catch if some layer sets skb->sk pointer without a corresponding destructor. As skb can sit in a queue, it's mandatory to make sure the socket cannot disappear, and it's usually done by taking a reference on the socket, then releasing it from the skb destructor. This patch is a follow-up to commit c34a761231b5 ("net: skb_orphan() changes") and will be reverted after catching all possible offenders if any. Suggested-by: David Miller Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index a95547a..d48de68 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1809,6 +1809,8 @@ static inline void skb_orphan(struct sk_buff *skb) skb->destructor(skb); skb->destructor = NULL; skb->sk = NULL; + } else { + BUG_ON(skb->sk); } } -- cgit v0.10.2 From 092c46498e76904a3c21124131a29edff56ce3ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= Date: Tue, 25 Jun 2013 10:13:46 +0200 Subject: ssb: drop BROKEN from SSB_SFLASH MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With recent patches ssb can fetch info about serial flash and register it as a platform device. No more reasons to mark it BROKEN. Signed-off-by: Rafał Miłecki Cc: Hauke Mehrtens Signed-off-by: John W. Linville diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig index 36171fd..2cd9b0e 100644 --- a/drivers/ssb/Kconfig +++ b/drivers/ssb/Kconfig @@ -138,7 +138,7 @@ config SSB_DRIVER_MIPS config SSB_SFLASH bool "SSB serial flash support" - depends on SSB_DRIVER_MIPS && BROKEN + depends on SSB_DRIVER_MIPS default y # Assumption: We are on embedded, if we compile the MIPS core. diff --git a/drivers/ssb/driver_chipcommon_sflash.c b/drivers/ssb/driver_chipcommon_sflash.c index e84cf04..50328de 100644 --- a/drivers/ssb/driver_chipcommon_sflash.c +++ b/drivers/ssb/driver_chipcommon_sflash.c @@ -151,8 +151,8 @@ int ssb_sflash_init(struct ssb_chipcommon *cc) sflash->size = sflash->blocksize * sflash->numblocks; sflash->present = true; - pr_info("Found %s serial flash (blocksize: 0x%X, blocks: %d)\n", - e->name, e->blocksize, e->numblocks); + pr_info("Found %s serial flash (size: %dKiB, blocksize: 0x%X, blocks: %d)\n", + e->name, sflash->size / 1024, e->blocksize, e->numblocks); /* Prepare platform device, but don't register it yet. It's too early, * malloc (required by device_private_init) is not available yet. */ @@ -160,7 +160,5 @@ int ssb_sflash_init(struct ssb_chipcommon *cc) sflash->size; ssb_sflash_dev.dev.platform_data = sflash; - pr_err("Serial flash support is not implemented yet!\n"); - - return -ENOTSUPP; + return 0; } -- cgit v0.10.2 From c1de4a9557d9e25e41fc4ba034b9659152205539 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Mon, 1 Jul 2013 14:19:30 +0200 Subject: iwl4965: better skb management in rx path 4965 version of Eric patch "iwl3945: better skb management in rx path". It fixes several problems : 1) skb->truesize is underestimated. We really consume PAGE_SIZE bytes for a fragment, not the frame length. 2) 128 bytes of initial headroom is a bit low and forces reallocations. 3) We can avoid consuming a full page for small enough frames. Signed-off-by: Stanislaw Gruszka Acked-by: Eric Dumazet Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c index b9b2bb5..c4b22e1 100644 --- a/drivers/net/wireless/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/iwlegacy/4965-mac.c @@ -574,9 +574,11 @@ il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in) return decrypt_out; } +#define SMALL_PACKET_SIZE 256 + static void il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr, - u16 len, u32 ampdu_status, struct il_rx_buf *rxb, + u32 len, u32 ampdu_status, struct il_rx_buf *rxb, struct ieee80211_rx_status *stats) { struct sk_buff *skb; @@ -598,21 +600,25 @@ il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr, il_set_decrypted_flag(il, hdr, ampdu_status, stats)) return; - skb = dev_alloc_skb(128); + skb = dev_alloc_skb(SMALL_PACKET_SIZE); if (!skb) { IL_ERR("dev_alloc_skb failed\n"); return; } - skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len, - len); + if (len <= SMALL_PACKET_SIZE) { + memcpy(skb_put(skb, len), hdr, len); + } else { + skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), + len, PAGE_SIZE << il->hw_params.rx_page_order); + il->alloc_rxb_page--; + rxb->page = NULL; + } il_update_stats(il, false, fc, len); memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); ieee80211_rx(il->hw, skb); - il->alloc_rxb_page--; - rxb->page = NULL; } /* Called for N_RX (legacy ABG frames), or -- cgit v0.10.2 From 4eba10cc8086e3dde09f00e44b43ff4f8624d527 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Mon, 29 Jul 2013 16:04:49 +0530 Subject: ath9k: Add a debugfs file for antenna diversity Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index 87454f6..d8764ae 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -321,6 +321,68 @@ static const struct file_operations fops_ant_diversity = { .llseek = default_llseek, }; +static ssize_t read_file_antenna_diversity(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + struct ath_hw *ah = sc->sc_ah; + struct ath9k_hw_capabilities *pCap = &ah->caps; + struct ath_antenna_stats *as_main = &sc->debug.stats.ant_stats[ANT_MAIN]; + struct ath_antenna_stats *as_alt = &sc->debug.stats.ant_stats[ANT_ALT]; + unsigned int len = 0, size = 1024; + ssize_t retval = 0; + char *buf; + + buf = kzalloc(size, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + if (!(pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)) { + len += snprintf(buf + len, size - len, "%s\n", + "Antenna Diversity Combining is disabled"); + goto exit; + } + + len += snprintf(buf + len, size - len, "%30s%15s\n", + "MAIN", "ALT"); + len += snprintf(buf + len, size - len, "%-15s%15d%15d\n", + "RECV CNT", + as_main->recv_cnt, + as_alt->recv_cnt); + len += snprintf(buf + len, size - len, "%-15s%15d%15d\n", + "LNA1", + as_main->lna_config_cnt[ATH_ANT_DIV_COMB_LNA1], + as_alt->lna_config_cnt[ATH_ANT_DIV_COMB_LNA1]); + len += snprintf(buf + len, size - len, "%-15s%15d%15d\n", + "LNA2", + as_main->lna_config_cnt[ATH_ANT_DIV_COMB_LNA2], + as_alt->lna_config_cnt[ATH_ANT_DIV_COMB_LNA2]); + len += snprintf(buf + len, size - len, "%-15s%15d%15d\n", + "LNA1 + LNA2", + as_main->lna_config_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2], + as_alt->lna_config_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]); + len += snprintf(buf + len, size - len, "%-15s%15d%15d\n", + "LNA1 - LNA2", + 
as_main->lna_config_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2], + as_alt->lna_config_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]); +exit: + if (len > size) + len = size; + + retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + + return retval; +} + +static const struct file_operations fops_antenna_diversity = { + .read = read_file_antenna_diversity, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + static ssize_t read_file_dma(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { @@ -1816,6 +1878,8 @@ int ath9k_init_debug(struct ath_hw *ah) sc->debug.debugfs_phy, &sc->sc_ah->gpio_val); debugfs_create_file("diversity", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc, &fops_ant_diversity); + debugfs_create_file("antenna_diversity", S_IRUSR, + sc->debug.debugfs_phy, sc, &fops_antenna_diversity); #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT debugfs_create_file("btcoex", S_IRUSR, sc->debug.debugfs_phy, sc, &fops_btcoex); -- cgit v0.10.2 From 3fbaf4c55b2bf123085ab3adf5da97bc9a555060 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Thu, 1 Aug 2013 11:53:17 +0530 Subject: ath9k: Do a quick scan only when scan_not_start is true Right now, it is being done for all cases. Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c index 664844c..f2cd960 100644 --- a/drivers/net/wireless/ath/ath9k/antenna.c +++ b/drivers/net/wireless/ath/ath9k/antenna.c @@ -561,10 +561,16 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs) antcomb->total_pkt_count++; antcomb->main_total_rssi += main_rssi; antcomb->alt_total_rssi += alt_rssi; - if (main_ant_conf == rx_ant_conf) + + if (main_ant_conf == rx_ant_conf) { antcomb->main_recv_cnt++; - else + ANT_STAT_INC(ANT_MAIN, recv_cnt); + ANT_LNA_INC(ANT_MAIN, rx_ant_conf); + } else { antcomb->alt_recv_cnt++; + ANT_STAT_INC(ANT_ALT, recv_cnt); + ANT_LNA_INC(ANT_ALT, rx_ant_conf); + } } /* Short scan check */ @@ -753,14 +759,12 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs) } goto div_comb_done; } + ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf, + main_rssi_avg, alt_rssi_avg, + alt_ratio); + antcomb->quick_scan_cnt++; } - ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf, - main_rssi_avg, alt_rssi_avg, - alt_ratio); - - antcomb->quick_scan_cnt++; - div_comb_done: ath_ant_div_conf_fast_divbias(&div_ant_conf, antcomb, alt_ratio); ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf); diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 76e38d3..ed8f8ef 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -609,7 +609,7 @@ struct ath_ant_comb { int rssi_third; bool alt_good; int quick_scan_cnt; - int main_conf; + enum ath9k_ant_div_comb_lna_conf main_conf; enum ath9k_ant_div_comb_lna_conf first_quick_scan_conf; enum ath9k_ant_div_comb_lna_conf second_quick_scan_conf; bool first_ratio; diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h index fc67919..a879e45 100644 --- a/drivers/net/wireless/ath/ath9k/debug.h +++ b/drivers/net/wireless/ath/ath9k/debug.h @@ -28,9 +28,13 @@ struct fft_sample_tlv; #ifdef CONFIG_ATH9K_DEBUGFS #define TX_STAT_INC(q, c) sc->debug.stats.txstats[q].c++ #define RESET_STAT_INC(sc, type) sc->debug.stats.reset[type]++ +#define ANT_STAT_INC(i, c) sc->debug.stats.ant_stats[i].c++ +#define 
ANT_LNA_INC(i, c) sc->debug.stats.ant_stats[i].lna_config_cnt[c]++; #else #define TX_STAT_INC(q, c) do { } while (0) #define RESET_STAT_INC(sc, type) do { } while (0) +#define ANT_STAT_INC(i, c) do { } while (0) +#define ANT_LNA_INC(i, c) do { } while (0) #endif enum ath_reset_type { @@ -243,11 +247,20 @@ struct ath_rx_stats { u32 rx_spectral; }; +#define ANT_MAIN 0 +#define ANT_ALT 1 + +struct ath_antenna_stats { + u32 recv_cnt; + u32 lna_config_cnt[4]; +}; + struct ath_stats { struct ath_interrupt_stats istats; struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES]; struct ath_rx_stats rxstats; struct ath_dfs_stats dfs_stats; + struct ath_antenna_stats ant_stats[2]; u32 reset[__RESET_TYPE_MAX]; }; -- cgit v0.10.2 From 9383be420460908df0118d54894ef65317821f3a Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Thu, 1 Aug 2013 11:53:18 +0530 Subject: ath9k: Use a subroutine to check for short scan Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c index f2cd960..69674d4 100644 --- a/drivers/net/wireless/ath/ath9k/antenna.c +++ b/drivers/net/wireless/ath/ath9k/antenna.c @@ -540,6 +540,27 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf, } } +static bool ath_ant_short_scan_check(struct ath_ant_comb *antcomb) +{ + int alt_ratio; + + if (!antcomb->scan || !antcomb->alt_good) + return false; + + if (time_after(jiffies, antcomb->scan_start_time + + msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR))) + return true; + + if (antcomb->total_pkt_count == ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) { + alt_ratio = ((antcomb->alt_recv_cnt * 100) / + antcomb->total_pkt_count); + if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO) + return true; + } + + return false; +} + void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs) { struct ath_hw_antcomb_conf div_ant_conf; @@ -574,22 +595,10 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs) } /* Short scan check */ - if (antcomb->scan && antcomb->alt_good) { - if (time_after(jiffies, antcomb->scan_start_time + - msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR))) - short_scan = true; - else - if (antcomb->total_pkt_count == - ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) { - alt_ratio = ((antcomb->alt_recv_cnt * 100) / - antcomb->total_pkt_count); - if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO) - short_scan = true; - } - } + short_scan = ath_ant_short_scan_check(antcomb); if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) || - rs->rs_moreaggr) && !short_scan) + rs->rs_moreaggr) && !short_scan) return; if (antcomb->total_pkt_count) { -- cgit v0.10.2 From 552bde40dd4c5d5b3e48e950e6bf2bfb0de6ab1f Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Thu, 1 Aug 2013 11:53:19 +0530 Subject: ath9k: Add ALT check for cards with GROUP-3 config Signed-off-by: Sujith Manoharan Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c index 69674d4..cfb43cc 100644 --- a/drivers/net/wireless/ath/ath9k/antenna.c +++ b/drivers/net/wireless/ath/ath9k/antenna.c @@ -25,28 +25,45 @@ static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta, (alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50); } -static inline bool ath_ant_div_comb_alt_check(u8 div_group, int alt_ratio, - int curr_main_set, int curr_alt_set, - int alt_rssi_avg, int main_rssi_avg) +static inline bool ath_ant_div_comb_alt_check(struct ath_hw_antcomb_conf conf, + int alt_ratio, int alt_rssi_avg, + int main_rssi_avg) { - bool result = false; - switch (div_group) { + bool result, set1, set2; + + result = set1 = set2 = false; + + if (conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2 && + conf.alt_lna_conf == ATH_ANT_DIV_COMB_LNA1) + set1 = true; + + if (conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1 && + conf.alt_lna_conf == ATH_ANT_DIV_COMB_LNA2) + set2 = true; + + switch (conf.div_group) { case 0: if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) result = true; break; case 1: case 2: - if ((((curr_main_set == ATH_ANT_DIV_COMB_LNA2) && - (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) && - (alt_rssi_avg >= (main_rssi_avg - 5))) || - ((curr_main_set == ATH_ANT_DIV_COMB_LNA1) && - (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) && - (alt_rssi_avg >= (main_rssi_avg - 2)))) && - (alt_rssi_avg >= 4)) + if (alt_rssi_avg < 4) + break; + + if ((set1 && (alt_rssi_avg >= (main_rssi_avg - 5))) || + (set2 && (alt_rssi_avg >= (main_rssi_avg - 2)))) result = true; - else - result = false; + + break; + case 3: + if (alt_rssi_avg < 4) + break; + + if ((set1 && (alt_rssi_avg >= (main_rssi_avg - 3))) || + (set2 && (alt_rssi_avg >= (main_rssi_avg + 3)))) + result = true; + break; } @@ -632,9 +649,8 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs) } if (!antcomb->scan) { - if (ath_ant_div_comb_alt_check(div_ant_conf.div_group, - alt_ratio, curr_main_set, curr_alt_set, - alt_rssi_avg, main_rssi_avg)) { + if (ath_ant_div_comb_alt_check(div_ant_conf, alt_ratio, + alt_rssi_avg, main_rssi_avg)) { if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) { /* Switch main and alt LNA */ div_ant_conf.main_lna_conf = -- cgit v0.10.2 From ef999114ee1e39236fac49903c661e9c019b03ee Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Thu, 1 Aug 2013 11:53:20 +0530 Subject: ath9k: Use a subroutine to try LNA switch Signed-off-by: Sujith Manoharan Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c index cfb43cc..ee5f9be 100644 --- a/drivers/net/wireless/ath/ath9k/antenna.c +++ b/drivers/net/wireless/ath/ath9k/antenna.c @@ -25,7 +25,7 @@ static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta, (alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50); } -static inline bool ath_ant_div_comb_alt_check(struct ath_hw_antcomb_conf conf, +static inline bool ath_ant_div_comb_alt_check(struct ath_hw_antcomb_conf *conf, int alt_ratio, int alt_rssi_avg, int main_rssi_avg) { @@ -33,15 +33,15 @@ static inline bool ath_ant_div_comb_alt_check(struct ath_hw_antcomb_conf conf, result = set1 = set2 = false; - if (conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2 && - conf.alt_lna_conf == ATH_ANT_DIV_COMB_LNA1) + if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2 && + conf->alt_lna_conf == ATH_ANT_DIV_COMB_LNA1) set1 = true; - if (conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1 && - conf.alt_lna_conf == ATH_ANT_DIV_COMB_LNA2) + if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA1 && + conf->alt_lna_conf == ATH_ANT_DIV_COMB_LNA2) set2 = true; - switch (conf.div_group) { + switch (conf->div_group) { case 0: if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) result = true; @@ -557,6 +557,43 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf, } } +static bool ath_ant_try_switch(struct ath_hw_antcomb_conf *div_ant_conf, + int alt_ratio, int alt_rssi_avg, + int main_rssi_avg, int curr_main_set, + int curr_alt_set) +{ + bool ret = false; + + if (ath_ant_div_comb_alt_check(div_ant_conf, alt_ratio, + alt_rssi_avg, main_rssi_avg)) { + if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) { + /* + * Switch main and alt LNA. + */ + div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2; + div_ant_conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1; + } else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) { + div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1; + div_ant_conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2; + } + + ret = true; + } else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) && + (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) { + /* + Set alt to another LNA. 
+ */ + if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) + div_ant_conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1; + else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) + div_ant_conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2; + + ret = true; + } + + return ret; +} + static bool ath_ant_short_scan_check(struct ath_ant_comb *antcomb) { int alt_ratio; @@ -587,7 +624,7 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs) int main_rssi = rs->rs_rssi_ctl0; int alt_rssi = rs->rs_rssi_ctl1; int rx_ant_conf, main_ant_conf; - bool short_scan = false; + bool short_scan = false, ret; rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) & ATH_ANT_RX_MASK; @@ -627,11 +664,9 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs) antcomb->total_pkt_count); } - ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf); curr_alt_set = div_ant_conf.alt_lna_conf; curr_main_set = div_ant_conf.main_lna_conf; - antcomb->count++; if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) { @@ -649,40 +684,17 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs) } if (!antcomb->scan) { - if (ath_ant_div_comb_alt_check(div_ant_conf, alt_ratio, - alt_rssi_avg, main_rssi_avg)) { - if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) { - /* Switch main and alt LNA */ - div_ant_conf.main_lna_conf = - ATH_ANT_DIV_COMB_LNA2; - div_ant_conf.alt_lna_conf = - ATH_ANT_DIV_COMB_LNA1; - } else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) { - div_ant_conf.main_lna_conf = - ATH_ANT_DIV_COMB_LNA1; - div_ant_conf.alt_lna_conf = - ATH_ANT_DIV_COMB_LNA2; - } - - goto div_comb_done; - } else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) && - (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) { - /* Set alt to another LNA */ - if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) - div_ant_conf.alt_lna_conf = - ATH_ANT_DIV_COMB_LNA1; - else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) - div_ant_conf.alt_lna_conf = - ATH_ANT_DIV_COMB_LNA2; - - goto div_comb_done; - } - - if ((alt_rssi_avg < (main_rssi_avg + - div_ant_conf.lna1_lna2_delta))) + ret = ath_ant_try_switch(&div_ant_conf, alt_ratio, + alt_rssi_avg, main_rssi_avg, + curr_main_set, curr_alt_set); + if (ret) goto div_comb_done; } + if (!antcomb->scan && + (alt_rssi_avg < (main_rssi_avg + div_ant_conf.lna1_lna2_delta))) + goto div_comb_done; + if (!antcomb->scan_not_start) { switch (curr_alt_set) { case ATH_ANT_DIV_COMB_LNA2: -- cgit v0.10.2 From 5f800ffbbb29c51fbcd886080dc3b0a6ab20b9b9 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Thu, 1 Aug 2013 11:53:21 +0530 Subject: ath9k: Use a helper function for checking LNA options Signed-off-by: Sujith Manoharan Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c index ee5f9be..0b157ad 100644 --- a/drivers/net/wireless/ath/ath9k/antenna.c +++ b/drivers/net/wireless/ath/ath9k/antenna.c @@ -557,6 +557,79 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf, } } +static void ath_ant_try_scan(struct ath_ant_comb *antcomb, + struct ath_hw_antcomb_conf *conf, + int curr_alt_set, int alt_rssi_avg, + int main_rssi_avg) +{ + switch (curr_alt_set) { + case ATH_ANT_DIV_COMB_LNA2: + antcomb->rssi_lna2 = alt_rssi_avg; + antcomb->rssi_lna1 = main_rssi_avg; + antcomb->scan = true; + /* set to A+B */ + conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1; + conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; + break; + case ATH_ANT_DIV_COMB_LNA1: + antcomb->rssi_lna1 = alt_rssi_avg; + antcomb->rssi_lna2 = main_rssi_avg; + antcomb->scan = true; + /* set to A+B */ + conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2; + conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; + break; + case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2: + antcomb->rssi_add = alt_rssi_avg; + antcomb->scan = true; + /* set to A-B */ + conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2; + break; + case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2: + antcomb->rssi_sub = alt_rssi_avg; + antcomb->scan = false; + if (antcomb->rssi_lna2 > + (antcomb->rssi_lna1 + ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) { + /* use LNA2 as main LNA */ + if ((antcomb->rssi_add > antcomb->rssi_lna1) && + (antcomb->rssi_add > antcomb->rssi_sub)) { + /* set to A+B */ + conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2; + conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; + } else if (antcomb->rssi_sub > + antcomb->rssi_lna1) { + /* set to A-B */ + conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2; + conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2; + } else { + /* set to LNA1 */ + conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2; + conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1; + } + } else { + /* use LNA1 as main LNA */ + if ((antcomb->rssi_add > antcomb->rssi_lna2) && + (antcomb->rssi_add > antcomb->rssi_sub)) { + /* set to A+B */ + conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1; + conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; + } else if (antcomb->rssi_sub > + antcomb->rssi_lna1) { + /* set to A-B */ + conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1; + conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2; + } else { + /* set to LNA2 */ + conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1; + conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2; + } + } + break; + default: + break; + } +} + static bool ath_ant_try_switch(struct ath_hw_antcomb_conf *div_ant_conf, int alt_ratio, int alt_rssi_avg, int main_rssi_avg, int curr_main_set, @@ -696,103 +769,22 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs) goto div_comb_done; if (!antcomb->scan_not_start) { - switch (curr_alt_set) { - case ATH_ANT_DIV_COMB_LNA2: - antcomb->rssi_lna2 = alt_rssi_avg; - antcomb->rssi_lna1 = main_rssi_avg; - antcomb->scan = true; - /* set to A+B */ - div_ant_conf.main_lna_conf = - ATH_ANT_DIV_COMB_LNA1; - div_ant_conf.alt_lna_conf = - ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; - break; - case ATH_ANT_DIV_COMB_LNA1: - antcomb->rssi_lna1 = alt_rssi_avg; - antcomb->rssi_lna2 = main_rssi_avg; - antcomb->scan = true; - /* set to A+B */ - div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2; - div_ant_conf.alt_lna_conf = - ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; - break; - case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2: - antcomb->rssi_add = alt_rssi_avg; - antcomb->scan = true; - 
/* set to A-B */ - div_ant_conf.alt_lna_conf = - ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2; - break; - case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2: - antcomb->rssi_sub = alt_rssi_avg; - antcomb->scan = false; - if (antcomb->rssi_lna2 > - (antcomb->rssi_lna1 + - ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) { - /* use LNA2 as main LNA */ - if ((antcomb->rssi_add > antcomb->rssi_lna1) && - (antcomb->rssi_add > antcomb->rssi_sub)) { - /* set to A+B */ - div_ant_conf.main_lna_conf = - ATH_ANT_DIV_COMB_LNA2; - div_ant_conf.alt_lna_conf = - ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; - } else if (antcomb->rssi_sub > - antcomb->rssi_lna1) { - /* set to A-B */ - div_ant_conf.main_lna_conf = - ATH_ANT_DIV_COMB_LNA2; - div_ant_conf.alt_lna_conf = - ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2; - } else { - /* set to LNA1 */ - div_ant_conf.main_lna_conf = - ATH_ANT_DIV_COMB_LNA2; - div_ant_conf.alt_lna_conf = - ATH_ANT_DIV_COMB_LNA1; - } - } else { - /* use LNA1 as main LNA */ - if ((antcomb->rssi_add > antcomb->rssi_lna2) && - (antcomb->rssi_add > antcomb->rssi_sub)) { - /* set to A+B */ - div_ant_conf.main_lna_conf = - ATH_ANT_DIV_COMB_LNA1; - div_ant_conf.alt_lna_conf = - ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; - } else if (antcomb->rssi_sub > - antcomb->rssi_lna1) { - /* set to A-B */ - div_ant_conf.main_lna_conf = - ATH_ANT_DIV_COMB_LNA1; - div_ant_conf.alt_lna_conf = - ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2; - } else { - /* set to LNA2 */ - div_ant_conf.main_lna_conf = - ATH_ANT_DIV_COMB_LNA1; - div_ant_conf.alt_lna_conf = - ATH_ANT_DIV_COMB_LNA2; - } - } - break; - default: - break; - } + ath_ant_try_scan(antcomb, &div_ant_conf, curr_alt_set, + alt_rssi_avg, main_rssi_avg); } else { if (!antcomb->alt_good) { antcomb->scan_not_start = false; /* Set alt to another LNA */ if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) { div_ant_conf.main_lna_conf = - ATH_ANT_DIV_COMB_LNA2; + ATH_ANT_DIV_COMB_LNA2; div_ant_conf.alt_lna_conf = - ATH_ANT_DIV_COMB_LNA1; + ATH_ANT_DIV_COMB_LNA1; } else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) { div_ant_conf.main_lna_conf = - ATH_ANT_DIV_COMB_LNA1; + ATH_ANT_DIV_COMB_LNA1; div_ant_conf.alt_lna_conf = - ATH_ANT_DIV_COMB_LNA2; + ATH_ANT_DIV_COMB_LNA2; } goto div_comb_done; } -- cgit v0.10.2 From 9ddf03017917970c53da577b263e1686ef5c9652 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Thu, 1 Aug 2013 11:53:22 +0530 Subject: ath9k: Simplify checks in quick_scan There is a function to do a ratio comparison for ALT, so make use of it. Signed-off-by: Sujith Manoharan Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c index 0b157ad..6495fc6 100644 --- a/drivers/net/wireless/ath/ath9k/antenna.c +++ b/drivers/net/wireless/ath/ath9k/antenna.c @@ -164,11 +164,11 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb, else antcomb->first_ratio = false; } else { - if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) && - (alt_rssi_avg > main_rssi_avg + - ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) || - (alt_rssi_avg > main_rssi_avg)) && - (antcomb->total_pkt_count > 50)) + if (ath_is_alt_ant_ratio_better(alt_ratio, + ATH_ANT_DIV_COMB_LNA1_DELTA_HI, + 0, + main_rssi_avg, alt_rssi_avg, + antcomb->total_pkt_count)) antcomb->first_ratio = true; else antcomb->first_ratio = false; @@ -219,11 +219,11 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb, else antcomb->second_ratio = false; } else { - if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) && - (alt_rssi_avg > main_rssi_avg + - ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) || - (alt_rssi_avg > main_rssi_avg)) && - (antcomb->total_pkt_count > 50)) + if (ath_is_alt_ant_ratio_better(alt_ratio, + ATH_ANT_DIV_COMB_LNA1_DELTA_HI, + 0, + main_rssi_avg, alt_rssi_avg, + antcomb->total_pkt_count)) antcomb->second_ratio = true; else antcomb->second_ratio = false; -- cgit v0.10.2 From 37133002f5263153e71a509191fbb8951c1de3e0 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Thu, 1 Aug 2013 11:53:23 +0530 Subject: ath9k: Use a subroutine to calculate ALT ratio Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c index 6495fc6..ea4ee08 100644 --- a/drivers/net/wireless/ath/ath9k/antenna.c +++ b/drivers/net/wireless/ath/ath9k/antenna.c @@ -125,6 +125,74 @@ static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb, } } +static void ath_ant_set_alt_ratio(struct ath_ant_comb *antcomb, + struct ath_hw_antcomb_conf *conf) +{ + /* set alt to the conf with maximun ratio */ + if (antcomb->first_ratio && antcomb->second_ratio) { + if (antcomb->rssi_second > antcomb->rssi_third) { + /* first alt*/ + if ((antcomb->first_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1) || + (antcomb->first_quick_scan_conf == ATH_ANT_DIV_COMB_LNA2)) + /* Set alt LNA1 or LNA2*/ + if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2) + conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1; + else + conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2; + else + /* Set alt to A+B or A-B */ + conf->alt_lna_conf = + antcomb->first_quick_scan_conf; + } else if ((antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1) || + (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA2)) { + /* Set alt LNA1 or LNA2 */ + if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2) + conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1; + else + conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2; + } else { + /* Set alt to A+B or A-B */ + conf->alt_lna_conf = antcomb->second_quick_scan_conf; + } + } else if (antcomb->first_ratio) { + /* first alt */ + if ((antcomb->first_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1) || + (antcomb->first_quick_scan_conf == ATH_ANT_DIV_COMB_LNA2)) + /* Set alt LNA1 or LNA2 */ + if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2) + conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1; + else + conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2; + else + /* Set alt to A+B or A-B */ + conf->alt_lna_conf = antcomb->first_quick_scan_conf; + } else if (antcomb->second_ratio) { + /* second alt */ + if 
((antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1) || + (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA2)) + /* Set alt LNA1 or LNA2 */ + if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2) + conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1; + else + conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2; + else + /* Set alt to A+B or A-B */ + conf->alt_lna_conf = antcomb->second_quick_scan_conf; + } else { + /* main is largest */ + if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) || + (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)) + /* Set alt LNA1 or LNA2 */ + if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2) + conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1; + else + conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2; + else + /* Set alt to A+B or A-B */ + conf->alt_lna_conf = antcomb->main_conf; + } +} + static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb, struct ath_hw_antcomb_conf *div_ant_conf, int main_rssi_avg, int alt_rssi_avg, @@ -181,17 +249,21 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb, antcomb->rssi_first = main_rssi_avg; antcomb->rssi_third = alt_rssi_avg; - if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1) + switch(antcomb->second_quick_scan_conf) { + case ATH_ANT_DIV_COMB_LNA1: antcomb->rssi_lna1 = alt_rssi_avg; - else if (antcomb->second_quick_scan_conf == - ATH_ANT_DIV_COMB_LNA2) + break; + case ATH_ANT_DIV_COMB_LNA2: antcomb->rssi_lna2 = alt_rssi_avg; - else if (antcomb->second_quick_scan_conf == - ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) { + break; + case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2: if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) antcomb->rssi_lna2 = main_rssi_avg; else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) antcomb->rssi_lna1 = main_rssi_avg; + break; + default: + break; } if (antcomb->rssi_lna2 > antcomb->rssi_lna1 + @@ -229,95 +301,8 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb, antcomb->second_ratio = false; } - /* set alt to the conf with maximun ratio */ - if (antcomb->first_ratio && antcomb->second_ratio) { - if (antcomb->rssi_second > antcomb->rssi_third) { - /* first alt*/ - if ((antcomb->first_quick_scan_conf == - ATH_ANT_DIV_COMB_LNA1) || - (antcomb->first_quick_scan_conf == - ATH_ANT_DIV_COMB_LNA2)) - /* Set alt LNA1 or LNA2*/ - if (div_ant_conf->main_lna_conf == - ATH_ANT_DIV_COMB_LNA2) - div_ant_conf->alt_lna_conf = - ATH_ANT_DIV_COMB_LNA1; - else - div_ant_conf->alt_lna_conf = - ATH_ANT_DIV_COMB_LNA2; - else - /* Set alt to A+B or A-B */ - div_ant_conf->alt_lna_conf = - antcomb->first_quick_scan_conf; - } else if ((antcomb->second_quick_scan_conf == - ATH_ANT_DIV_COMB_LNA1) || - (antcomb->second_quick_scan_conf == - ATH_ANT_DIV_COMB_LNA2)) { - /* Set alt LNA1 or LNA2 */ - if (div_ant_conf->main_lna_conf == - ATH_ANT_DIV_COMB_LNA2) - div_ant_conf->alt_lna_conf = - ATH_ANT_DIV_COMB_LNA1; - else - div_ant_conf->alt_lna_conf = - ATH_ANT_DIV_COMB_LNA2; - } else { - /* Set alt to A+B or A-B */ - div_ant_conf->alt_lna_conf = - antcomb->second_quick_scan_conf; - } - } else if (antcomb->first_ratio) { - /* first alt */ - if ((antcomb->first_quick_scan_conf == - ATH_ANT_DIV_COMB_LNA1) || - (antcomb->first_quick_scan_conf == - ATH_ANT_DIV_COMB_LNA2)) - /* Set alt LNA1 or LNA2 */ - if (div_ant_conf->main_lna_conf == - ATH_ANT_DIV_COMB_LNA2) - div_ant_conf->alt_lna_conf = - ATH_ANT_DIV_COMB_LNA1; - else - div_ant_conf->alt_lna_conf = - ATH_ANT_DIV_COMB_LNA2; - else - /* Set alt to A+B or A-B */ - div_ant_conf->alt_lna_conf = - antcomb->first_quick_scan_conf; - } else 
if (antcomb->second_ratio) { - /* second alt */ - if ((antcomb->second_quick_scan_conf == - ATH_ANT_DIV_COMB_LNA1) || - (antcomb->second_quick_scan_conf == - ATH_ANT_DIV_COMB_LNA2)) - /* Set alt LNA1 or LNA2 */ - if (div_ant_conf->main_lna_conf == - ATH_ANT_DIV_COMB_LNA2) - div_ant_conf->alt_lna_conf = - ATH_ANT_DIV_COMB_LNA1; - else - div_ant_conf->alt_lna_conf = - ATH_ANT_DIV_COMB_LNA2; - else - /* Set alt to A+B or A-B */ - div_ant_conf->alt_lna_conf = - antcomb->second_quick_scan_conf; - } else { - /* main is largest */ - if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) || - (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)) - /* Set alt LNA1 or LNA2 */ - if (div_ant_conf->main_lna_conf == - ATH_ANT_DIV_COMB_LNA2) - div_ant_conf->alt_lna_conf = - ATH_ANT_DIV_COMB_LNA1; - else - div_ant_conf->alt_lna_conf = - ATH_ANT_DIV_COMB_LNA2; - else - /* Set alt to A+B or A-B */ - div_ant_conf->alt_lna_conf = antcomb->main_conf; - } + ath_ant_set_alt_ratio(antcomb, div_ant_conf); + break; default: break; -- cgit v0.10.2 From e3d5291436ff9efeeb968459724af5332305dded Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Thu, 1 Aug 2013 20:57:06 +0530 Subject: ath9k: Add statistics for antenna diversity Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c index ea4ee08..291ca01 100644 --- a/drivers/net/wireless/ath/ath9k/antenna.c +++ b/drivers/net/wireless/ath/ath9k/antenna.c @@ -695,15 +695,18 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs) antcomb->main_total_rssi += main_rssi; antcomb->alt_total_rssi += alt_rssi; - if (main_ant_conf == rx_ant_conf) { + if (main_ant_conf == rx_ant_conf) antcomb->main_recv_cnt++; - ANT_STAT_INC(ANT_MAIN, recv_cnt); - ANT_LNA_INC(ANT_MAIN, rx_ant_conf); - } else { + else antcomb->alt_recv_cnt++; - ANT_STAT_INC(ANT_ALT, recv_cnt); - ANT_LNA_INC(ANT_ALT, rx_ant_conf); - } + } + + if (main_ant_conf == rx_ant_conf) { + ANT_STAT_INC(ANT_MAIN, recv_cnt); + ANT_LNA_INC(ANT_MAIN, rx_ant_conf); + } else { + ANT_STAT_INC(ANT_ALT, recv_cnt); + ANT_LNA_INC(ANT_ALT, rx_ant_conf); } /* Short scan check */ @@ -782,6 +785,7 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs) div_comb_done: ath_ant_div_conf_fast_divbias(&div_ant_conf, antcomb, alt_ratio); ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf); + ath9k_debug_stat_ant(sc, &div_ant_conf, main_rssi_avg, alt_rssi_avg); antcomb->scan_start_time = jiffies; antcomb->total_pkt_count = 0; diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index d8764ae..e744d97 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -321,6 +321,20 @@ static const struct file_operations fops_ant_diversity = { .llseek = default_llseek, }; +void ath9k_debug_stat_ant(struct ath_softc *sc, + struct ath_hw_antcomb_conf *div_ant_conf, + int main_rssi_avg, int alt_rssi_avg) +{ + struct ath_antenna_stats *as_main = &sc->debug.stats.ant_stats[ANT_MAIN]; + struct ath_antenna_stats *as_alt = &sc->debug.stats.ant_stats[ANT_ALT]; + + as_main->lna_attempt_cnt[div_ant_conf->main_lna_conf]++; + as_alt->lna_attempt_cnt[div_ant_conf->alt_lna_conf]++; + + as_main->rssi_avg = main_rssi_avg; + as_alt->rssi_avg = alt_rssi_avg; +} + static ssize_t read_file_antenna_diversity(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -330,9 +344,14 @@ static ssize_t read_file_antenna_diversity(struct file *file, 
struct ath9k_hw_capabilities *pCap = &ah->caps; struct ath_antenna_stats *as_main = &sc->debug.stats.ant_stats[ANT_MAIN]; struct ath_antenna_stats *as_alt = &sc->debug.stats.ant_stats[ANT_ALT]; + struct ath_hw_antcomb_conf div_ant_conf; unsigned int len = 0, size = 1024; ssize_t retval = 0; char *buf; + char *lna_conf_str[4] = {"LNA1_MINUS_LNA2", + "LNA2", + "LNA1", + "LNA1_PLUS_LNA2"}; buf = kzalloc(size, GFP_KERNEL); if (buf == NULL) @@ -344,28 +363,66 @@ static ssize_t read_file_antenna_diversity(struct file *file, goto exit; } + ath9k_ps_wakeup(sc); + ath9k_hw_antdiv_comb_conf_get(ah, &div_ant_conf); + len += snprintf(buf + len, size - len, "Current MAIN config : %s\n", + lna_conf_str[div_ant_conf.main_lna_conf]); + len += snprintf(buf + len, size - len, "Current ALT config : %s\n", + lna_conf_str[div_ant_conf.alt_lna_conf]); + len += snprintf(buf + len, size - len, "Average MAIN RSSI : %d\n", + as_main->rssi_avg); + len += snprintf(buf + len, size - len, "Average ALT RSSI : %d\n\n", + as_alt->rssi_avg); + ath9k_ps_restore(sc); + + len += snprintf(buf + len, size - len, "Packet Receive Cnt:\n"); + len += snprintf(buf + len, size - len, "-------------------\n"); + len += snprintf(buf + len, size - len, "%30s%15s\n", "MAIN", "ALT"); - len += snprintf(buf + len, size - len, "%-15s%15d%15d\n", - "RECV CNT", + len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n", + "TOTAL COUNT", as_main->recv_cnt, as_alt->recv_cnt); - len += snprintf(buf + len, size - len, "%-15s%15d%15d\n", + len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n", + "LNA1", + as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1], + as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1]); + len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n", + "LNA2", + as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2], + as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2]); + len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n", + "LNA1 + LNA2", + as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2], + as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]); + len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n", + "LNA1 - LNA2", + as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2], + as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]); + + len += snprintf(buf + len, size - len, "\nLNA Config Attempts:\n"); + len += snprintf(buf + len, size - len, "--------------------\n"); + + len += snprintf(buf + len, size - len, "%30s%15s\n", + "MAIN", "ALT"); + len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n", "LNA1", - as_main->lna_config_cnt[ATH_ANT_DIV_COMB_LNA1], - as_alt->lna_config_cnt[ATH_ANT_DIV_COMB_LNA1]); - len += snprintf(buf + len, size - len, "%-15s%15d%15d\n", + as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1], + as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1]); + len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n", "LNA2", - as_main->lna_config_cnt[ATH_ANT_DIV_COMB_LNA2], - as_alt->lna_config_cnt[ATH_ANT_DIV_COMB_LNA2]); - len += snprintf(buf + len, size - len, "%-15s%15d%15d\n", + as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2], + as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2]); + len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n", "LNA1 + LNA2", - as_main->lna_config_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2], - as_alt->lna_config_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]); - len += snprintf(buf + len, size - len, "%-15s%15d%15d\n", + as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2], + as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]); + len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n", "LNA1 - 
LNA2", - as_main->lna_config_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2], - as_alt->lna_config_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]); + as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2], + as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]); + exit: if (len > size) len = size; diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h index a879e45..01c5c6a 100644 --- a/drivers/net/wireless/ath/ath9k/debug.h +++ b/drivers/net/wireless/ath/ath9k/debug.h @@ -29,7 +29,7 @@ struct fft_sample_tlv; #define TX_STAT_INC(q, c) sc->debug.stats.txstats[q].c++ #define RESET_STAT_INC(sc, type) sc->debug.stats.reset[type]++ #define ANT_STAT_INC(i, c) sc->debug.stats.ant_stats[i].c++ -#define ANT_LNA_INC(i, c) sc->debug.stats.ant_stats[i].lna_config_cnt[c]++; +#define ANT_LNA_INC(i, c) sc->debug.stats.ant_stats[i].lna_recv_cnt[c]++; #else #define TX_STAT_INC(q, c) do { } while (0) #define RESET_STAT_INC(sc, type) do { } while (0) @@ -252,7 +252,9 @@ struct ath_rx_stats { struct ath_antenna_stats { u32 recv_cnt; - u32 lna_config_cnt[4]; + u32 rssi_avg; + u32 lna_recv_cnt[4]; + u32 lna_attempt_cnt[4]; }; struct ath_stats { @@ -294,10 +296,11 @@ void ath9k_sta_remove_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct dentry *dir); - void ath_debug_send_fft_sample(struct ath_softc *sc, struct fft_sample_tlv *fft_sample); - +void ath9k_debug_stat_ant(struct ath_softc *sc, + struct ath_hw_antcomb_conf *div_ant_conf, + int main_rssi_avg, int alt_rssi_avg); #else #define RX_STAT_INC(c) /* NOP */ @@ -310,12 +313,10 @@ static inline int ath9k_init_debug(struct ath_hw *ah) static inline void ath9k_deinit_debug(struct ath_softc *sc) { } - static inline void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status) { } - static inline void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf, struct ath_tx_status *ts, @@ -323,11 +324,16 @@ static inline void ath_debug_stat_tx(struct ath_softc *sc, unsigned int flags) { } - static inline void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs) { } +static inline void ath9k_debug_stat_ant(struct ath_softc *sc, + struct ath_hw_antcomb_conf *div_ant_conf, + int main_rssi_avg, int alt_rssi_avg) +{ + +} #endif /* CONFIG_ATH9K_DEBUGFS */ -- cgit v0.10.2 From 84915c646271abc6b87fed0477dc72278fe4f8a3 Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Thu, 1 Aug 2013 10:07:05 +0300 Subject: gianfar: Remove unused field grp_id from gfar_priv_grp grp->grp_id is obsolete. It has no use in the current driver. Remove it from gfar_priv_grp and put the 'rstat' member in its place, in the 2nd cache line, as rstat needs fast access. Signed-off-by: Claudiu Manoil Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 8d2db7b..dbb34f7 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -593,7 +593,6 @@ static int gfar_parse_group(struct device_node *np, return -EINVAL; } - grp->grp_id = priv->num_grps; grp->priv = priv; spin_lock_init(&grp->grplock); if (priv->mode == MQ_MG_MODE) { diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index 04b552c..ee19f2c 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -1009,7 +1009,6 @@ struct gfar_irqinfo { * @napi: the napi poll function * @priv: back pointer to the priv structure * @regs: the ioremapped register space for this group - * @grp_id: group id for this group * @irqinfo: TX/RX/ER irq data for this group */ @@ -1018,11 +1017,10 @@ struct gfar_priv_grp { struct napi_struct napi; struct gfar_private *priv; struct gfar __iomem *regs; - unsigned int grp_id; + unsigned int rstat; unsigned long num_rx_queues; unsigned long rx_bit_map; /* cacheline 3 */ - unsigned int rstat; unsigned int tstat; unsigned long num_tx_queues; unsigned long tx_bit_map; -- cgit v0.10.2 From a8eaed55f86ec45a58414ffd04818be641b9e0de Mon Sep 17 00:00:00 2001 From: Chris Metcalf Date: Thu, 1 Aug 2013 11:36:42 -0400 Subject: tile: set hw_features and vlan_features in setup This change allows the user to configure various features of the tile networking drivers on and off. There is no change to the default initialization state of either the tilegx or tilepro drivers. Neither driver needs the ndo_fix_features or ndo_set_features callbacks, since the generic code already handles the dependencies for fix_features, and there is no hardware state to tweak in set_features. Signed-off-by: Chris Metcalf Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index f3c2d03..6085571 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -1800,14 +1800,21 @@ static const struct net_device_ops tile_net_ops = { */ static void tile_net_setup(struct net_device *dev) { + netdev_features_t features = 0; + ether_setup(dev); dev->netdev_ops = &tile_net_ops; dev->watchdog_timeo = TILE_NET_TIMEOUT; - dev->features |= NETIF_F_LLTX; - dev->features |= NETIF_F_HW_CSUM; - dev->features |= NETIF_F_SG; - dev->features |= NETIF_F_TSO; dev->mtu = 1500; + + features |= NETIF_F_LLTX; + features |= NETIF_F_HW_CSUM; + features |= NETIF_F_SG; + features |= NETIF_F_TSO; + + dev->hw_features |= features; + dev->vlan_features |= features; + dev->features |= features; } /* Allocate the device structure, register the device, and obtain the diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c index f66ac20..34b43b4 100644 --- a/drivers/net/ethernet/tile/tilepro.c +++ b/drivers/net/ethernet/tile/tilepro.c @@ -89,10 +89,6 @@ /* ISSUE: This has not been thoroughly tested (except at 1500). */ #define TILE_NET_MTU 1500 -/* HACK: Define to support GSO. */ -/* ISSUE: This may actually hurt performance of the TCP blaster. */ -/* #define TILE_NET_GSO */ - /* Define this to collapse "duplicate" acks. 
*/ /* #define IGNORE_DUP_ACKS */ @@ -2336,39 +2332,28 @@ static const struct net_device_ops tile_net_ops = { */ static void tile_net_setup(struct net_device *dev) { - PDEBUG("tile_net_setup()\n"); + netdev_features_t features = 0; ether_setup(dev); - dev->netdev_ops = &tile_net_ops; - dev->watchdog_timeo = TILE_NET_TIMEOUT; + dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN; + dev->mtu = TILE_NET_MTU; - /* We want lockless xmit. */ - dev->features |= NETIF_F_LLTX; - - /* We support hardware tx checksums. */ - dev->features |= NETIF_F_HW_CSUM; - - /* We support scatter/gather. */ - dev->features |= NETIF_F_SG; - - /* We support TSO. */ - dev->features |= NETIF_F_TSO; - -#ifdef TILE_NET_GSO - /* We support GSO. */ - dev->features |= NETIF_F_GSO; -#endif + features |= NETIF_F_LLTX; + features |= NETIF_F_HW_CSUM; + features |= NETIF_F_SG; + features |= NETIF_F_TSO; + /* We can't support HIGHDMA without hash_default, since we need + * to be able to finv() with a VA if we don't have hash_default. + */ if (hash_default) - dev->features |= NETIF_F_HIGHDMA; - - /* ISSUE: We should support NETIF_F_UFO. */ + features |= NETIF_F_HIGHDMA; - dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN; - - dev->mtu = TILE_NET_MTU; + dev->hw_features |= features; + dev->vlan_features |= features; + dev->features |= features; } -- cgit v0.10.2 From 439a93a08407c244e9a5a5e4432ea85a2d06ec47 Mon Sep 17 00:00:00 2001 From: Chris Metcalf Date: Thu, 1 Aug 2013 11:36:42 -0400 Subject: tile: support rx_dropped/rx_errors in tilepro net driver Signed-off-by: Chris Metcalf Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c index 34b43b4..327ff7b 100644 --- a/drivers/net/ethernet/tile/tilepro.c +++ b/drivers/net/ethernet/tile/tilepro.c @@ -772,6 +772,7 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index) netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index); netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt); + netio_pkt_status_t pkt_status = NETIO_PKT_STATUS_M(metadata, pkt); /* Extract the packet size. FIXME: Shouldn't the second line */ /* get subtracted? Mostly moot, since it should be "zero". */ @@ -804,40 +805,25 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index) #endif /* TILE_NET_DUMP_PACKETS */ #ifdef TILE_NET_VERIFY_INGRESS - if (!NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt) && - NETIO_PKT_L4_CSUM_CALCULATED_M(metadata, pkt)) { - /* Bug 6624: Includes UDP packets with a "zero" checksum. */ - pr_warning("Bad L4 checksum on %d byte packet.\n", len); - } - if (!NETIO_PKT_L3_CSUM_CORRECT_M(metadata, pkt) && - NETIO_PKT_L3_CSUM_CALCULATED_M(metadata, pkt)) { + if (pkt_status == NETIO_PKT_STATUS_OVERSIZE && len >= 64) { dump_packet(buf, len, "rx"); - panic("Bad L3 checksum."); - } - switch (NETIO_PKT_STATUS_M(metadata, pkt)) { - case NETIO_PKT_STATUS_OVERSIZE: - if (len >= 64) { - dump_packet(buf, len, "rx"); - panic("Unexpected OVERSIZE."); - } - break; - case NETIO_PKT_STATUS_BAD: - pr_warning("Unexpected BAD %ld byte packet.\n", len); + panic("Unexpected OVERSIZE."); } #endif filter = 0; - /* ISSUE: Filter TCP packets with "bad" checksums? */ - - if (!(dev->flags & IFF_UP)) { + if (pkt_status == NETIO_PKT_STATUS_BAD) { + /* Handle CRC error and hardware truncation. */ + filter = 2; + } else if (!(dev->flags & IFF_UP)) { /* Filter packets received before we're up. 
*/ filter = 1; - } else if (NETIO_PKT_STATUS_M(metadata, pkt) == NETIO_PKT_STATUS_BAD) { + } else if (NETIO_PKT_ETHERTYPE_RECOGNIZED_M(metadata, pkt) && + pkt_status == NETIO_PKT_STATUS_UNDERSIZE) { /* Filter "truncated" packets. */ - filter = 1; + filter = 2; } else if (!(dev->flags & IFF_PROMISC)) { - /* FIXME: Implement HW multicast filter. */ if (!is_multicast_ether_addr(buf)) { /* Filter packets not for our address. */ const u8 *mine = dev->dev_addr; @@ -847,9 +833,12 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index) u64_stats_update_begin(&stats->syncp); - if (filter) { + if (filter != 0) { - /* ISSUE: Update "drop" statistics? */ + if (filter == 1) + stats->rx_dropped++; + else + stats->rx_errors++; tile_net_provide_linux_buffer(info, va, small); -- cgit v0.10.2 From 815d3baeaefa027eb5c3c715575ab71d4eed71ec Mon Sep 17 00:00:00 2001 From: Chris Metcalf Date: Thu, 1 Aug 2013 11:36:42 -0400 Subject: tile: avoid bug in tilepro net driver built with old hypervisor Building against headers from an older Tilera hypervisor can cause the frags[] array to be overrun. Don't enable TSO in that case. Signed-off-by: Chris Metcalf Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c index 327ff7b..2f4b7b9 100644 --- a/drivers/net/ethernet/tile/tilepro.c +++ b/drivers/net/ethernet/tile/tilepro.c @@ -1929,7 +1929,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) unsigned int csum_start = skb_checksum_start_offset(skb); - lepp_frag_t frags[LEPP_MAX_FRAGS]; + lepp_frag_t frags[1 + MAX_SKB_FRAGS]; unsigned int num_frags; @@ -1944,7 +1944,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) unsigned int cmd_head, cmd_tail, cmd_next; unsigned int comp_tail; - lepp_cmd_t cmds[LEPP_MAX_FRAGS]; + lepp_cmd_t cmds[1 + MAX_SKB_FRAGS]; /* @@ -2332,7 +2332,10 @@ static void tile_net_setup(struct net_device *dev) features |= NETIF_F_LLTX; features |= NETIF_F_HW_CSUM; features |= NETIF_F_SG; - features |= NETIF_F_TSO; + + /* We support TSO iff the HV supports sufficient frags. */ + if (LEPP_MAX_FRAGS >= 1 + MAX_SKB_FRAGS) + features |= NETIF_F_TSO; /* We can't support HIGHDMA without hash_default, since we need * to be able to finv() with a VA if we don't have hash_default. -- cgit v0.10.2 From 48f2a4e1e83992af6c721c6c93a6b012910e255f Mon Sep 17 00:00:00 2001 From: Chris Metcalf Date: Thu, 1 Aug 2013 11:36:42 -0400 Subject: tile: remove dead is_dup_ack() function from tilepro net driver Signed-off-by: Chris Metcalf Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c index 2f4b7b9..fd17af5 100644 --- a/drivers/net/ethernet/tile/tilepro.c +++ b/drivers/net/ethernet/tile/tilepro.c @@ -89,9 +89,6 @@ /* ISSUE: This has not been thoroughly tested (except at 1500). */ #define TILE_NET_MTU 1500 -/* Define this to collapse "duplicate" acks. */ -/* #define IGNORE_DUP_ACKS */ - /* HACK: Define this to verify incoming packets. */ /* #define TILE_NET_VERIFY_INGRESS */ @@ -625,79 +622,6 @@ static void tile_net_handle_egress_timer(unsigned long arg) } -#ifdef IGNORE_DUP_ACKS - -/* - * Help detect "duplicate" ACKs. These are sequential packets (for a - * given flow) which are exactly 66 bytes long, sharing everything but - * ID=2@0x12, Hsum=2@0x18, Ack=4@0x2a, WinSize=2@0x30, Csum=2@0x32, - * Tstamps=10@0x38. The ID's are +1, the Hsum's are -1, the Ack's are - * +N, and the Tstamps are usually identical. 
- * - * NOTE: Apparently truly duplicate acks (with identical "ack" values), - * should not be collapsed, as they are used for some kind of flow control. - */ -static bool is_dup_ack(char *s1, char *s2, unsigned int len) -{ - int i; - - unsigned long long ignorable = 0; - - /* Identification. */ - ignorable |= (1ULL << 0x12); - ignorable |= (1ULL << 0x13); - - /* Header checksum. */ - ignorable |= (1ULL << 0x18); - ignorable |= (1ULL << 0x19); - - /* ACK. */ - ignorable |= (1ULL << 0x2a); - ignorable |= (1ULL << 0x2b); - ignorable |= (1ULL << 0x2c); - ignorable |= (1ULL << 0x2d); - - /* WinSize. */ - ignorable |= (1ULL << 0x30); - ignorable |= (1ULL << 0x31); - - /* Checksum. */ - ignorable |= (1ULL << 0x32); - ignorable |= (1ULL << 0x33); - - for (i = 0; i < len; i++, ignorable >>= 1) { - - if ((ignorable & 1) || (s1[i] == s2[i])) - continue; - -#ifdef TILE_NET_DEBUG - /* HACK: Mention non-timestamp diffs. */ - if (i < 0x38 && i != 0x2f && - net_ratelimit()) - pr_info("Diff at 0x%x\n", i); -#endif - - return false; - } - -#ifdef TILE_NET_NO_SUPPRESS_DUP_ACKS - /* HACK: Do not suppress truly duplicate ACKs. */ - /* ISSUE: Is this actually necessary or helpful? */ - if (s1[0x2a] == s2[0x2a] && - s1[0x2b] == s2[0x2b] && - s1[0x2c] == s2[0x2c] && - s1[0x2d] == s2[0x2d]) { - return false; - } -#endif - - return true; -} - -#endif - - - static void tile_net_discard_aux(struct tile_net_cpu *info, int index) { struct tile_netio_queue *queue = &info->queue; -- cgit v0.10.2 From 2628e8af31a0ee4d28304d96a72fdf4d7822508c Mon Sep 17 00:00:00 2001 From: Chris Metcalf Date: Thu, 1 Aug 2013 11:36:42 -0400 Subject: tile: support jumbo frames in the tilegx network driver Signed-off-by: Chris Metcalf Signed-off-by: David S. Miller diff --git a/arch/tile/gxio/iorpc_mpipe.c b/arch/tile/gxio/iorpc_mpipe.c index 31b87bf..c2fb1516 100644 --- a/arch/tile/gxio/iorpc_mpipe.c +++ b/arch/tile/gxio/iorpc_mpipe.c @@ -387,6 +387,27 @@ int gxio_mpipe_link_close_aux(gxio_mpipe_context_t * context, int mac) EXPORT_SYMBOL(gxio_mpipe_link_close_aux); +struct link_set_attr_aux_param { + int mac; + uint32_t attr; + int64_t val; +}; + +int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t * context, int mac, + uint32_t attr, int64_t val) +{ + struct link_set_attr_aux_param temp; + struct link_set_attr_aux_param *params = &temp; + + params->mac = mac; + params->attr = attr; + params->val = val; + + return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, + sizeof(*params), GXIO_MPIPE_OP_LINK_SET_ATTR_AUX); +} + +EXPORT_SYMBOL(gxio_mpipe_link_set_attr_aux); struct get_timestamp_aux_param { uint64_t sec; @@ -454,6 +475,32 @@ int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context, EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_aux); +struct config_edma_ring_blks_param { + unsigned int ering; + unsigned int max_blks; + unsigned int min_snf_blks; + unsigned int db; +}; + +int gxio_mpipe_config_edma_ring_blks(gxio_mpipe_context_t * context, + unsigned int ering, unsigned int max_blks, + unsigned int min_snf_blks, unsigned int db) +{ + struct config_edma_ring_blks_param temp; + struct config_edma_ring_blks_param *params = &temp; + + params->ering = ering; + params->max_blks = max_blks; + params->min_snf_blks = min_snf_blks; + params->db = db; + + return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, + sizeof(*params), + GXIO_MPIPE_OP_CONFIG_EDMA_RING_BLKS); +} + +EXPORT_SYMBOL(gxio_mpipe_config_edma_ring_blks); + struct arm_pollfd_param { union iorpc_pollfd pollfd; }; diff --git a/arch/tile/gxio/mpipe.c 
b/arch/tile/gxio/mpipe.c index e71c633..0567cf0 100644 --- a/arch/tile/gxio/mpipe.c +++ b/arch/tile/gxio/mpipe.c @@ -383,7 +383,7 @@ EXPORT_SYMBOL_GPL(gxio_mpipe_iqueue_init); int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue, gxio_mpipe_context_t *context, - unsigned int edma_ring_id, + unsigned int ering, unsigned int channel, void *mem, unsigned int mem_size, unsigned int mem_flags) @@ -394,7 +394,7 @@ int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue, /* Offset used to read number of completed commands. */ MPIPE_EDMA_POST_REGION_ADDR_t offset; - int result = gxio_mpipe_init_edma_ring(context, edma_ring_id, channel, + int result = gxio_mpipe_init_edma_ring(context, ering, channel, mem, mem_size, mem_flags); if (result < 0) return result; @@ -405,7 +405,7 @@ int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue, offset.region = MPIPE_MMIO_ADDR__REGION_VAL_EDMA - MPIPE_MMIO_ADDR__REGION_VAL_IDMA; - offset.ring = edma_ring_id; + offset.ring = ering; __gxio_dma_queue_init(&equeue->dma_queue, context->mmio_fast_base + offset.word, @@ -413,6 +413,9 @@ int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue, equeue->edescs = mem; equeue->mask_num_entries = num_entries - 1; equeue->log2_num_entries = __builtin_ctz(num_entries); + equeue->context = context; + equeue->ering = ering; + equeue->channel = channel; return 0; } @@ -543,3 +546,12 @@ int gxio_mpipe_link_close(gxio_mpipe_link_t *link) } EXPORT_SYMBOL_GPL(gxio_mpipe_link_close); + +int gxio_mpipe_link_set_attr(gxio_mpipe_link_t *link, uint32_t attr, + int64_t val) +{ + return gxio_mpipe_link_set_attr_aux(link->context, link->mac, attr, + val); +} + +EXPORT_SYMBOL_GPL(gxio_mpipe_link_set_attr); diff --git a/arch/tile/include/gxio/iorpc_mpipe.h b/arch/tile/include/gxio/iorpc_mpipe.h index 9d50fce..eef60fd 100644 --- a/arch/tile/include/gxio/iorpc_mpipe.h +++ b/arch/tile/include/gxio/iorpc_mpipe.h @@ -44,10 +44,12 @@ #define GXIO_MPIPE_OP_REGISTER_CLIENT_MEMORY IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1210) #define GXIO_MPIPE_OP_LINK_OPEN_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1211) #define GXIO_MPIPE_OP_LINK_CLOSE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1212) +#define GXIO_MPIPE_OP_LINK_SET_ATTR_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1213) #define GXIO_MPIPE_OP_GET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x121e) #define GXIO_MPIPE_OP_SET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x121f) #define GXIO_MPIPE_OP_ADJUST_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1220) +#define GXIO_MPIPE_OP_CONFIG_EDMA_RING_BLKS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1221) #define GXIO_MPIPE_OP_ARM_POLLFD IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9000) #define GXIO_MPIPE_OP_CLOSE_POLLFD IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9001) #define GXIO_MPIPE_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000) @@ -114,6 +116,8 @@ int gxio_mpipe_link_open_aux(gxio_mpipe_context_t * context, int gxio_mpipe_link_close_aux(gxio_mpipe_context_t * context, int mac); +int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t * context, int mac, + uint32_t attr, int64_t val); int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t * context, uint64_t * sec, uint64_t * nsec, uint64_t * cycles); diff --git a/arch/tile/include/gxio/mpipe.h b/arch/tile/include/gxio/mpipe.h index b74f470..ed742e3 100644 --- a/arch/tile/include/gxio/mpipe.h +++ b/arch/tile/include/gxio/mpipe.h @@ -810,7 +810,7 @@ extern int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context, /* Initialize an eDMA ring, using the given memory and size. 
* * @param context An initialized mPIPE context. - * @param ring The eDMA ring index. + * @param ering The eDMA ring index. * @param channel The channel to use. This must be one of the channels * associated with the context's set of open links. * @param mem A physically contiguous region of memory to be filled @@ -823,10 +823,37 @@ extern int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context, * ::GXIO_ERR_INVAL_MEMORY_SIZE on failure. */ extern int gxio_mpipe_init_edma_ring(gxio_mpipe_context_t *context, - unsigned int ring, unsigned int channel, + unsigned int ering, unsigned int channel, void *mem, size_t mem_size, unsigned int mem_flags); +/* Set the "max_blks", "min_snf_blks", and "db" fields of + * ::MPIPE_EDMA_RG_INIT_DAT_THRESH_t for a given edma ring. + * + * The global pool of dynamic blocks will be automatically adjusted. + * + * This function should not be called after any egress has been done + * on the edma ring. + * + * Most applications should just use gxio_mpipe_equeue_set_snf_size(). + * + * @param context An initialized mPIPE context. + * @param ering The eDMA ring index. + * @param max_blks The number of blocks to dedicate to the ring + * (normally min_snf_blks + 1). Must be greater than min_snf_blocks. + * @param min_snf_blks The number of blocks which must be stored + * prior to starting to send the packet (normally 12). + * @param db Whether to allow use of dynamic blocks by the ring + * (normally 1). + * + * @return 0 on success, negative on error. + */ +extern int gxio_mpipe_config_edma_ring_blks(gxio_mpipe_context_t *context, + unsigned int ering, + unsigned int max_blks, + unsigned int min_snf_blks, + unsigned int db); + /***************************************************************** * Classifier Program * ******************************************************************/ @@ -1288,15 +1315,39 @@ typedef struct { /* The log2() of the number of entries. */ unsigned long log2_num_entries; + /* The context. */ + gxio_mpipe_context_t *context; + + /* The ering. */ + unsigned int ering; + + /* The channel. */ + unsigned int channel; + } gxio_mpipe_equeue_t; /* Initialize an "equeue". * - * Takes the equeue plus the same args as gxio_mpipe_init_edma_ring(). + * This function uses gxio_mpipe_init_edma_ring() to initialize the + * underlying edma_ring using the provided arguments. + * + * @param equeue An egress queue to be initialized. + * @param context An initialized mPIPE context. + * @param ering The eDMA ring index. + * @param channel The channel to use. This must be one of the channels + * associated with the context's set of open links. + * @param mem A physically contiguous region of memory to be filled + * with a ring of ::gxio_mpipe_edesc_t structures. + * @param mem_size Number of bytes in the ring. Must be 512, 2048, + * 8192 or 65536, times 16 (i.e. sizeof(gxio_mpipe_edesc_t)). + * @param mem_flags ::gxio_mpipe_mem_flags_e memory flags. + * + * @return 0 on success, ::GXIO_MPIPE_ERR_BAD_EDMA_RING or + * ::GXIO_ERR_INVAL_MEMORY_SIZE on failure. */ extern int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue, gxio_mpipe_context_t *context, - unsigned int edma_ring_id, + unsigned int ering, unsigned int channel, void *mem, unsigned int mem_size, unsigned int mem_flags); @@ -1494,6 +1545,37 @@ static inline int gxio_mpipe_equeue_is_complete(gxio_mpipe_equeue_t *equeue, completion_slot, update); } +/* Set the snf (store and forward) size for an equeue. 
+ * + * The snf size for an equeue defaults to 1536, and encodes the size + * of the largest packet for which egress is guaranteed to avoid + * transmission underruns and/or corrupt checksums under heavy load. + * + * The snf size affects a global resource pool which cannot support, + * for example, all 24 equeues each requesting an snf size of 8K. + * + * To ensure that jumbo packets can be egressed properly, the snf size + * should be set to the size of the largest possible packet, which + * will usually be limited by the size of the app's largest buffer. + * + * This is a convenience wrapper around + * gxio_mpipe_config_edma_ring_blks(). + * + * This function should not be called after any egress has been done + * on the equeue. + * + * @param equeue An egress queue initialized via gxio_mpipe_equeue_init(). + * @param size The snf size, in bytes. + * @return Zero on success, negative error otherwise. + */ +static inline int gxio_mpipe_equeue_set_snf_size(gxio_mpipe_equeue_t *equeue, + size_t size) +{ + int blks = (size + 127) / 128; + return gxio_mpipe_config_edma_ring_blks(equeue->context, equeue->ering, + blks + 1, blks, 1); +} + /***************************************************************** * Link Management * ******************************************************************/ @@ -1697,6 +1779,17 @@ static inline int gxio_mpipe_link_channel(gxio_mpipe_link_t *link) return link->channel; } +/* Set a link attribute. + * + * @param link A properly initialized link state object. + * @param attr An attribute from the set of @ref gxio_mpipe_link_attrs. + * @param val New value of the attribute. + * @return 0 if the attribute was successfully set, or a negative error + * code. + */ +extern int gxio_mpipe_link_set_attr(gxio_mpipe_link_t *link, uint32_t attr, + int64_t val); + /////////////////////////////////////////////////////////////////// // Timestamp // /////////////////////////////////////////////////////////////////// diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 6085571..39c1e9e 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -76,6 +76,9 @@ #define MAX_FRAGS (MAX_SKB_FRAGS + 1) +/* The "kinds" of buffer stacks (small/large/jumbo). */ +#define MAX_KINDS 3 + /* Size of completions data to allocate. * ISSUE: Probably more than needed since we don't use all the channels. */ @@ -141,10 +144,8 @@ struct tile_net_info { /* NAPI flags. */ bool napi_added; bool napi_enabled; - /* Number of small sk_buffs which must still be provided. */ - unsigned int num_needed_small_buffers; - /* Number of large sk_buffs which must still be provided. */ - unsigned int num_needed_large_buffers; + /* Number of buffers (by kind) which must still be provided. */ + unsigned int num_needed_buffers[MAX_KINDS]; /* A timer for handling egress completions. */ struct hrtimer egress_timer; /* True if "egress_timer" is scheduled. */ @@ -200,24 +201,25 @@ static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info); /* The "context" for all devices. */ static gxio_mpipe_context_t context; -/* Buffer sizes and mpipe enum codes for buffer stacks. +/* The buffer size enums for each buffer stack. * See arch/tile/include/gxio/mpipe.h for the set of possible values. + * We avoid the "10384" size because it can induce "false chaining" + * on "cut-through" jumbo packets. 
*/ -#define BUFFER_SIZE_SMALL_ENUM GXIO_MPIPE_BUFFER_SIZE_128 -#define BUFFER_SIZE_SMALL 128 -#define BUFFER_SIZE_LARGE_ENUM GXIO_MPIPE_BUFFER_SIZE_1664 -#define BUFFER_SIZE_LARGE 1664 +static gxio_mpipe_buffer_size_enum_t buffer_size_enums[MAX_KINDS] = { + GXIO_MPIPE_BUFFER_SIZE_128, + GXIO_MPIPE_BUFFER_SIZE_1664, + GXIO_MPIPE_BUFFER_SIZE_16384 +}; -/* The small/large "buffer stacks". */ -static int small_buffer_stack = -1; -static int large_buffer_stack = -1; +/* The actual memory allocated for the buffer stacks. */ +static void *buffer_stack_vas[MAX_KINDS]; -/* Amount of memory allocated for each buffer stack. */ -static size_t buffer_stack_size; +/* The amount of memory allocated for each buffer stack. */ +static size_t buffer_stack_bytes[MAX_KINDS]; -/* The actual memory allocated for the buffer stacks. */ -static void *small_buffer_stack_va; -static void *large_buffer_stack_va; +/* The first buffer stack index (small = +0, large = +1, jumbo = +2). */ +static int first_buffer_stack = -1; /* The buckets. */ static int first_bucket = -1; @@ -238,6 +240,9 @@ static char *loopify_link_name; /* If "tile_net.custom" was specified, this is non-NULL. */ static char *custom_str; +/* If "tile_net.jumbo=NUM" was specified, this is "NUM". */ +static uint jumbo_num; + /* The "tile_net.cpus" argument specifies the cpus that are dedicated * to handle ingress packets. * @@ -292,6 +297,12 @@ MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress"); module_param_named(custom, custom_str, charp, 0444); MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier"); +/* The "tile_net.jumbo" argument causes us to support "jumbo" packets, + * and to allocate the given number of "jumbo" buffers. + */ +module_param_named(jumbo, jumbo_num, uint, 0444); +MODULE_PARM_DESC(jumbo, "the number of buffers to support jumbo packets"); + /* Atomically update a statistics field. * Note that on TILE-Gx, this operation is fire-and-forget on the * issuing core (single-cycle dispatch) and takes only a few cycles @@ -305,15 +316,15 @@ static void tile_net_stats_add(unsigned long value, unsigned long *field) } /* Allocate and push a buffer. */ -static bool tile_net_provide_buffer(bool small) +static bool tile_net_provide_buffer(int kind) { - int stack = small ? small_buffer_stack : large_buffer_stack; + gxio_mpipe_buffer_size_enum_t bse = buffer_size_enums[kind]; + size_t bs = gxio_mpipe_buffer_size_enum_to_buffer_size(bse); const unsigned long buffer_alignment = 128; struct sk_buff *skb; int len; - len = sizeof(struct sk_buff **) + buffer_alignment; - len += (small ? BUFFER_SIZE_SMALL : BUFFER_SIZE_LARGE); + len = sizeof(struct sk_buff **) + buffer_alignment + bs; skb = dev_alloc_skb(len); if (skb == NULL) return false; @@ -328,7 +339,7 @@ static bool tile_net_provide_buffer(bool small) /* Make sure "skb" and the back-pointer have been flushed. 
*/ wmb(); - gxio_mpipe_push_buffer(&context, stack, + gxio_mpipe_push_buffer(&context, first_buffer_stack + kind, (void *)va_to_tile_io_addr(skb->data)); return true; @@ -369,24 +380,19 @@ static void tile_net_pop_all_buffers(int stack) static void tile_net_provide_needed_buffers(void) { struct tile_net_info *info = &__get_cpu_var(per_cpu_info); - - while (info->num_needed_small_buffers != 0) { - if (!tile_net_provide_buffer(true)) - goto oops; - info->num_needed_small_buffers--; - } - - while (info->num_needed_large_buffers != 0) { - if (!tile_net_provide_buffer(false)) - goto oops; - info->num_needed_large_buffers--; + int kind; + + for (kind = 0; kind < MAX_KINDS; kind++) { + while (info->num_needed_buffers[kind] != 0) { + if (!tile_net_provide_buffer(kind)) { + /* Add info to the allocation failure dump. */ + pr_notice("Tile %d still needs some buffers\n", + info->my_cpu); + return; + } + info->num_needed_buffers[kind]--; + } } - - return; - -oops: - /* Add a description to the page allocation failure dump. */ - pr_notice("Tile %d still needs some buffers\n", info->my_cpu); } static inline bool filter_packet(struct net_device *dev, void *buf) @@ -426,10 +432,12 @@ static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb, tile_net_stats_add(len, &priv->stats.rx_bytes); /* Need a new buffer. */ - if (idesc->size == BUFFER_SIZE_SMALL_ENUM) - info->num_needed_small_buffers++; + if (idesc->size == buffer_size_enums[0]) + info->num_needed_buffers[0]++; + else if (idesc->size == buffer_size_enums[1]) + info->num_needed_buffers[1]++; else - info->num_needed_large_buffers++; + info->num_needed_buffers[2]++; } /* Handle a packet. Return true if "processed", false if "filtered". */ @@ -437,29 +445,29 @@ static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc) { struct tile_net_info *info = &__get_cpu_var(per_cpu_info); struct net_device *dev = tile_net_devs_for_channel[idesc->channel]; + struct tile_net_priv *priv = netdev_priv(dev); uint8_t l2_offset; void *va; void *buf; unsigned long len; bool filter; - /* Drop packets for which no buffer was available. - * NOTE: This happens under heavy load. + /* Drop packets for which no buffer was available (which can + * happen under heavy load), or for which the me/tr/ce flags + * are set (which can happen for jumbo cut-through packets, + * or with a customized classifier). */ - if (idesc->be) { - struct tile_net_priv *priv = netdev_priv(dev); - tile_net_stats_add(1, &priv->stats.rx_dropped); - gxio_mpipe_iqueue_consume(&info->iqueue, idesc); - if (net_ratelimit()) - pr_info("Dropping packet (insufficient buffers).\n"); - return false; + if (idesc->be || idesc->me || idesc->tr || idesc->ce) { + if (dev) + tile_net_stats_add(1, &priv->stats.rx_errors); + goto drop; } /* Get the "l2_offset", if allowed. */ l2_offset = custom_str ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc); - /* Get the raw buffer VA (includes "headroom"). */ - va = tile_io_addr_to_va((unsigned long)(long)idesc->va); + /* Get the VA (including NET_IP_ALIGN bytes of "headroom"). */ + va = tile_io_addr_to_va((unsigned long)idesc->va); /* Get the actual packet start/length. 
*/ buf = va + l2_offset; @@ -470,6 +478,9 @@ static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc) filter = filter_packet(dev, buf); if (filter) { + if (dev) + tile_net_stats_add(1, &priv->stats.rx_dropped); +drop: gxio_mpipe_iqueue_drop(&info->iqueue, idesc); } else { struct sk_buff *skb = mpipe_buf_to_skb(va); @@ -722,86 +733,95 @@ static int tile_net_update(struct net_device *dev) return 0; } -/* Allocate and initialize mpipe buffer stacks, and register them in - * the mPIPE TLBs, for both small and large packet sizes. - * This routine supports tile_net_init_mpipe(), below. - */ -static int init_buffer_stacks(struct net_device *dev, int num_buffers) +/* Initialize a buffer stack. */ +static int create_buffer_stack(struct net_device *dev, + int kind, size_t num_buffers) { pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH); - int rc; + size_t needed = gxio_mpipe_calc_buffer_stack_bytes(num_buffers); + int stack_idx = first_buffer_stack + kind; + void *va; + int i, rc; - /* Compute stack bytes; we round up to 64KB and then use - * alloc_pages() so we get the required 64KB alignment as well. + /* Round up to 64KB and then use alloc_pages() so we get the + * required 64KB alignment. */ - buffer_stack_size = - ALIGN(gxio_mpipe_calc_buffer_stack_bytes(num_buffers), - 64 * 1024); - - /* Allocate two buffer stack indices. */ - rc = gxio_mpipe_alloc_buffer_stacks(&context, 2, 0, 0); - if (rc < 0) { - netdev_err(dev, "gxio_mpipe_alloc_buffer_stacks failed: %d\n", - rc); - return rc; - } - small_buffer_stack = rc; - large_buffer_stack = rc + 1; + buffer_stack_bytes[kind] = ALIGN(needed, 64 * 1024); - /* Allocate the small memory stack. */ - small_buffer_stack_va = - alloc_pages_exact(buffer_stack_size, GFP_KERNEL); - if (small_buffer_stack_va == NULL) { + va = alloc_pages_exact(buffer_stack_bytes[kind], GFP_KERNEL); + if (va == NULL) { netdev_err(dev, - "Could not alloc %zd bytes for buffer stacks\n", - buffer_stack_size); + "Could not alloc %zd bytes for buffer stack %d\n", + buffer_stack_bytes[kind], kind); return -ENOMEM; } - rc = gxio_mpipe_init_buffer_stack(&context, small_buffer_stack, - BUFFER_SIZE_SMALL_ENUM, - small_buffer_stack_va, - buffer_stack_size, 0); + + /* Initialize the buffer stack. */ + rc = gxio_mpipe_init_buffer_stack(&context, stack_idx, + buffer_size_enums[kind], + va, buffer_stack_bytes[kind], 0); if (rc != 0) { netdev_err(dev, "gxio_mpipe_init_buffer_stack: %d\n", rc); + free_pages_exact(va, buffer_stack_bytes[kind]); return rc; } - rc = gxio_mpipe_register_client_memory(&context, small_buffer_stack, + + buffer_stack_vas[kind] = va; + + rc = gxio_mpipe_register_client_memory(&context, stack_idx, hash_pte, 0); if (rc != 0) { - netdev_err(dev, - "gxio_mpipe_register_buffer_memory failed: %d\n", - rc); + netdev_err(dev, "gxio_mpipe_register_client_memory: %d\n", rc); return rc; } - /* Allocate the large buffer stack. */ - large_buffer_stack_va = - alloc_pages_exact(buffer_stack_size, GFP_KERNEL); - if (large_buffer_stack_va == NULL) { - netdev_err(dev, - "Could not alloc %zd bytes for buffer stacks\n", - buffer_stack_size); - return -ENOMEM; - } - rc = gxio_mpipe_init_buffer_stack(&context, large_buffer_stack, - BUFFER_SIZE_LARGE_ENUM, - large_buffer_stack_va, - buffer_stack_size, 0); - if (rc != 0) { - netdev_err(dev, "gxio_mpipe_init_buffer_stack failed: %d\n", - rc); - return rc; + /* Provide initial buffers. 
*/ + for (i = 0; i < num_buffers; i++) { + if (!tile_net_provide_buffer(kind)) { + netdev_err(dev, "Cannot allocate initial sk_bufs!\n"); + return -ENOMEM; + } } - rc = gxio_mpipe_register_client_memory(&context, large_buffer_stack, - hash_pte, 0); - if (rc != 0) { - netdev_err(dev, - "gxio_mpipe_register_buffer_memory failed: %d\n", - rc); + + return 0; +} + +/* Allocate and initialize mpipe buffer stacks, and register them in + * the mPIPE TLBs, for small, large, and (possibly) jumbo packet sizes. + * This routine supports tile_net_init_mpipe(), below. + */ +static int init_buffer_stacks(struct net_device *dev, + int network_cpus_count) +{ + int num_kinds = MAX_KINDS - (jumbo_num == 0); + size_t num_buffers; + int rc; + + /* Allocate the buffer stacks. */ + rc = gxio_mpipe_alloc_buffer_stacks(&context, num_kinds, 0, 0); + if (rc < 0) { + netdev_err(dev, "gxio_mpipe_alloc_buffer_stacks: %d\n", rc); return rc; } + first_buffer_stack = rc; - return 0; + /* Enough small/large buffers to (normally) avoid buffer errors. */ + num_buffers = + network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH); + + /* Allocate the small memory stack. */ + if (rc >= 0) + rc = create_buffer_stack(dev, 0, num_buffers); + + /* Allocate the large buffer stack. */ + if (rc >= 0) + rc = create_buffer_stack(dev, 1, num_buffers); + + /* Allocate the jumbo buffer stack if needed. */ + if (rc >= 0 && jumbo_num != 0) + rc = create_buffer_stack(dev, 2, jumbo_num); + + return rc; } /* Allocate per-cpu resources (memory for completions and idescs). @@ -940,13 +960,14 @@ static int tile_net_setup_interrupts(struct net_device *dev) /* Undo any state set up partially by a failed call to tile_net_init_mpipe. */ static void tile_net_init_mpipe_fail(void) { - int cpu; + int kind, cpu; /* Do cleanups that require the mpipe context first. */ - if (small_buffer_stack >= 0) - tile_net_pop_all_buffers(small_buffer_stack); - if (large_buffer_stack >= 0) - tile_net_pop_all_buffers(large_buffer_stack); + for (kind = 0; kind < MAX_KINDS; kind++) { + if (buffer_stack_vas[kind] != NULL) { + tile_net_pop_all_buffers(first_buffer_stack + kind); + } + } /* Destroy mpipe context so the hardware no longer owns any memory. */ gxio_mpipe_destroy(&context); @@ -961,15 +982,15 @@ static void tile_net_init_mpipe_fail(void) info->iqueue.idescs = NULL; } - if (small_buffer_stack_va) - free_pages_exact(small_buffer_stack_va, buffer_stack_size); - if (large_buffer_stack_va) - free_pages_exact(large_buffer_stack_va, buffer_stack_size); + for (kind = 0; kind < MAX_KINDS; kind++) { + if (buffer_stack_vas[kind] != NULL) { + free_pages_exact(buffer_stack_vas[kind], + buffer_stack_bytes[kind]); + buffer_stack_vas[kind] = NULL; + } + } - small_buffer_stack_va = NULL; - large_buffer_stack_va = NULL; - large_buffer_stack = -1; - small_buffer_stack = -1; + first_buffer_stack = -1; first_bucket = -1; } @@ -984,7 +1005,7 @@ static void tile_net_init_mpipe_fail(void) */ static int tile_net_init_mpipe(struct net_device *dev) { - int i, num_buffers, rc; + int rc; int cpu; int first_ring, ring; int network_cpus_count = cpus_weight(network_cpus_map); @@ -1001,27 +1022,10 @@ static int tile_net_init_mpipe(struct net_device *dev) } /* Set up the buffer stacks. */ - num_buffers = - network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH); - rc = init_buffer_stacks(dev, num_buffers); + rc = init_buffer_stacks(dev, network_cpus_count); if (rc != 0) goto fail; - /* Provide initial buffers. 
*/ - rc = -ENOMEM; - for (i = 0; i < num_buffers; i++) { - if (!tile_net_provide_buffer(true)) { - netdev_err(dev, "Cannot allocate initial sk_bufs!\n"); - goto fail; - } - } - for (i = 0; i < num_buffers; i++) { - if (!tile_net_provide_buffer(false)) { - netdev_err(dev, "Cannot allocate initial sk_bufs!\n"); - goto fail; - } - } - /* Allocate one NotifRing for each network cpu. */ rc = gxio_mpipe_alloc_notif_rings(&context, network_cpus_count, 0, 0); if (rc < 0) { @@ -1063,13 +1067,13 @@ fail: */ static int tile_net_init_egress(struct net_device *dev, int echannel) { + static int ering = -1; struct page *headers_page, *edescs_page, *equeue_page; gxio_mpipe_edesc_t *edescs; gxio_mpipe_equeue_t *equeue; unsigned char *headers; int headers_order, edescs_order, equeue_order; size_t edescs_size; - int edma; int rc = -ENOMEM; /* Only initialize once. */ @@ -1110,25 +1114,37 @@ static int tile_net_init_egress(struct net_device *dev, int echannel) } equeue = pfn_to_kaddr(page_to_pfn(equeue_page)); - /* Allocate an edma ring. Note that in practice this can't - * fail, which is good, because we will leak an edma ring if so. - */ - rc = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0); - if (rc < 0) { - netdev_warn(dev, "gxio_mpipe_alloc_edma_rings failed: %d\n", - rc); - goto fail_equeue; + /* Allocate an edma ring (using a one entry "free list"). */ + if (ering < 0) { + rc = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0); + if (rc < 0) { + netdev_warn(dev, "gxio_mpipe_alloc_edma_rings: %d\n", + rc); + goto fail_equeue; + } + ering = rc; } - edma = rc; /* Initialize the equeue. */ - rc = gxio_mpipe_equeue_init(equeue, &context, edma, echannel, + rc = gxio_mpipe_equeue_init(equeue, &context, ering, echannel, edescs, edescs_size, 0); if (rc != 0) { netdev_err(dev, "gxio_mpipe_equeue_init failed: %d\n", rc); goto fail_equeue; } + /* Don't reuse the ering later. */ + ering = -1; + + if (jumbo_num != 0) { + /* Make sure "jumbo" packets can be egressed safely. */ + if (gxio_mpipe_equeue_set_snf_size(equeue, 10368) < 0) { + /* ISSUE: There is no "gxio_mpipe_equeue_destroy()". */ + netdev_warn(dev, "Jumbo packets may not be egressed" + " properly on channel %d\n", echannel); + } + } + /* Done. */ egress_for_echannel[echannel].equeue = equeue; egress_for_echannel[echannel].headers = headers; @@ -1156,6 +1172,17 @@ static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link, netdev_err(dev, "Failed to open '%s'\n", link_name); return rc; } + if (jumbo_num != 0) { + u32 attr = GXIO_MPIPE_LINK_RECEIVE_JUMBO; + rc = gxio_mpipe_link_set_attr(link, attr, 1); + if (rc != 0) { + netdev_err(dev, + "Cannot receive jumbo packets on '%s'\n", + link_name); + gxio_mpipe_link_close(link); + return rc; + } + } rc = gxio_mpipe_link_channel(link); if (rc < 0 || rc >= TILE_NET_CHANNELS) { netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc); @@ -1499,8 +1526,8 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue, edesc_head.xfer_size = sh_len; /* This is only used to specify the TLB. */ - edesc_head.stack_idx = large_buffer_stack; - edesc_body.stack_idx = large_buffer_stack; + edesc_head.stack_idx = first_buffer_stack; + edesc_body.stack_idx = first_buffer_stack; /* Egress all the edescs. */ for (segment = 0; segment < sh->gso_segs; segment++) { @@ -1660,7 +1687,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb)); /* This is only used to specify the TLB. 
*/ - edesc.stack_idx = large_buffer_stack; + edesc.stack_idx = first_buffer_stack; /* Prepare the edescs. */ for (i = 0; i < num_edescs; i++) { @@ -1740,7 +1767,9 @@ static struct net_device_stats *tile_net_get_stats(struct net_device *dev) /* Change the MTU. */ static int tile_net_change_mtu(struct net_device *dev, int new_mtu) { - if ((new_mtu < 68) || (new_mtu > 1500)) + if (new_mtu < 68) + return -EINVAL; + if (new_mtu > ((jumbo_num != 0) ? 9000 : 1500)) return -EINVAL; dev->mtu = new_mtu; return 0; -- cgit v0.10.2 From ad0181855a48641e812454e64c62c07e015b6a97 Mon Sep 17 00:00:00 2001 From: Chris Metcalf Date: Thu, 1 Aug 2013 11:36:42 -0400 Subject: tile: update dev->stats directly in tilegx network driver Signed-off-by: Chris Metcalf Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 39c1e9e..e9eba2b 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -178,8 +178,6 @@ struct tile_net_priv { int loopify_channel; /* The egress channel (channel or loopify_channel). */ int echannel; - /* Total stats. */ - struct net_device_stats stats; }; /* Egress info, indexed by "priv->echannel" (lazily created as needed). */ @@ -414,7 +412,6 @@ static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb, gxio_mpipe_idesc_t *idesc, unsigned long len) { struct tile_net_info *info = &__get_cpu_var(per_cpu_info); - struct tile_net_priv *priv = netdev_priv(dev); /* Encode the actual packet length. */ skb_put(skb, len); @@ -428,8 +425,8 @@ static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb, netif_receive_skb(skb); /* Update stats. */ - tile_net_stats_add(1, &priv->stats.rx_packets); - tile_net_stats_add(len, &priv->stats.rx_bytes); + tile_net_stats_add(1, &dev->stats.rx_packets); + tile_net_stats_add(len, &dev->stats.rx_bytes); /* Need a new buffer. */ if (idesc->size == buffer_size_enums[0]) @@ -445,7 +442,6 @@ static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc) { struct tile_net_info *info = &__get_cpu_var(per_cpu_info); struct net_device *dev = tile_net_devs_for_channel[idesc->channel]; - struct tile_net_priv *priv = netdev_priv(dev); uint8_t l2_offset; void *va; void *buf; @@ -459,7 +455,7 @@ static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc) */ if (idesc->be || idesc->me || idesc->tr || idesc->ce) { if (dev) - tile_net_stats_add(1, &priv->stats.rx_errors); + tile_net_stats_add(1, &dev->stats.rx_errors); goto drop; } @@ -479,7 +475,7 @@ static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc) filter = filter_packet(dev, buf); if (filter) { if (dev) - tile_net_stats_add(1, &priv->stats.rx_dropped); + tile_net_stats_add(1, &dev->stats.rx_dropped); drop: gxio_mpipe_iqueue_drop(&info->iqueue, idesc); } else { @@ -1502,7 +1498,6 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers, static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue, struct sk_buff *skb, unsigned char *headers, s64 slot) { - struct tile_net_priv *priv = netdev_priv(dev); struct skb_shared_info *sh = skb_shinfo(skb); unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); unsigned int data_len = skb->len - sh_len; @@ -1580,8 +1575,8 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue, } /* Update stats. 
*/ - tile_net_stats_add(tx_packets, &priv->stats.tx_packets); - tile_net_stats_add(tx_bytes, &priv->stats.tx_bytes); + tile_net_stats_add(tx_packets, &dev->stats.tx_packets); + tile_net_stats_add(tx_bytes, &dev->stats.tx_bytes); } /* Do "TSO" handling for egress. @@ -1724,9 +1719,9 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) add_comp(equeue, comps, slot - 1, skb); /* NOTE: Use ETH_ZLEN for short packets (e.g. 42 < 60). */ - tile_net_stats_add(1, &priv->stats.tx_packets); + tile_net_stats_add(1, &dev->stats.tx_packets); tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN), - &priv->stats.tx_bytes); + &dev->stats.tx_bytes); local_irq_restore(irqflags); @@ -1757,13 +1752,6 @@ static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return -EOPNOTSUPP; } -/* Get system network statistics for device. */ -static struct net_device_stats *tile_net_get_stats(struct net_device *dev) -{ - struct tile_net_priv *priv = netdev_priv(dev); - return &priv->stats; -} - /* Change the MTU. */ static int tile_net_change_mtu(struct net_device *dev, int new_mtu) { @@ -1813,7 +1801,6 @@ static const struct net_device_ops tile_net_ops = { .ndo_start_xmit = tile_net_tx, .ndo_select_queue = tile_net_select_queue, .ndo_do_ioctl = tile_net_ioctl, - .ndo_get_stats = tile_net_get_stats, .ndo_change_mtu = tile_net_change_mtu, .ndo_tx_timeout = tile_net_tx_timeout, .ndo_set_mac_address = tile_net_set_mac_address, -- cgit v0.10.2 From 5e7a54a2a78d59d8c6e0f71711a3eafa4ab969af Mon Sep 17 00:00:00 2001 From: Chris Metcalf Date: Thu, 1 Aug 2013 11:36:42 -0400 Subject: tile: fix panic bug in napi support for tilegx network driver The code used to call napi_disable() in an interrupt handler (from smp_call_function), which in turn could call msleep(). Unfortunately you can't sleep in an interrupt context. Luckily it turns out all the NAPI support functions are just operating on data structures and not on any deeply per-cpu data, so we can arrange to set up and tear down all the NAPI state on the core driving the process, and just do the IRQ enable/disable as a smp_call_function thing. Signed-off-by: Chris Metcalf Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index e9eba2b..17bcf33 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -650,37 +650,13 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t) return HRTIMER_NORESTART; } -/* Helper function for "tile_net_update()". - * "dev" (i.e. arg) is the device being brought up or down, - * or NULL if all devices are now down. - */ -static void tile_net_update_cpu(void *arg) +/* Helper function for "tile_net_update()". */ +static void manage_ingress_irq(void *enable) { - struct tile_net_info *info = &__get_cpu_var(per_cpu_info); - struct net_device *dev = arg; - - if (!info->has_iqueue) - return; - - if (dev != NULL) { - if (!info->napi_added) { - netif_napi_add(dev, &info->napi, - tile_net_poll, TILE_NET_WEIGHT); - info->napi_added = true; - } - if (!info->napi_enabled) { - napi_enable(&info->napi); - info->napi_enabled = true; - } + if (enable) enable_percpu_irq(ingress_irq, 0); - } else { + else disable_percpu_irq(ingress_irq); - if (info->napi_enabled) { - napi_disable(&info->napi); - info->napi_enabled = false; - } - /* FIXME: Drain the iqueue. */ - } } /* Helper function for tile_net_open() and tile_net_stop(). 
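The rework that follows is the heart of the fix described in the changelog: napi_enable()/napi_disable() may sleep, so they move to the process context driving the open/stop path, and only the per-cpu IRQ mask/unmask is still done cross-cpu. A stripped-down sketch of the shape, with the per-cpu structure and names purely illustrative:

struct my_pcpu {
        struct napi_struct napi;
        bool napi_enabled;
};
static DEFINE_PER_CPU(struct my_pcpu, my_state);
static int my_irq;                              /* per-cpu ingress IRQ, set at probe */

static void irq_ctl(void *enable)               /* runs via on_each_cpu(): must not sleep */
{
        if (enable)
                enable_percpu_irq(my_irq, 0);
        else
                disable_percpu_irq(my_irq);
}

static void update_napi(bool up)
{
        int cpu;

        if (!up)
                on_each_cpu(irq_ctl, (void *)0, 1);     /* quiesce ingress first */

        for_each_online_cpu(cpu) {                      /* process context: may sleep */
                struct my_pcpu *s = &per_cpu(my_state, cpu);

                if (up && !s->napi_enabled) {
                        napi_enable(&s->napi);
                        s->napi_enabled = true;
                } else if (!up && s->napi_enabled) {
                        napi_disable(&s->napi);
                        s->napi_enabled = false;
                }
        }

        if (up)
                on_each_cpu(irq_ctl, (void *)1, 1);
}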
@@ -717,10 +693,35 @@ static int tile_net_update(struct net_device *dev) return -EIO; } - /* Update all cpus, sequentially (to protect "netif_napi_add()"). */ - for_each_online_cpu(cpu) - smp_call_function_single(cpu, tile_net_update_cpu, - (saw_channel ? dev : NULL), 1); + /* Update all cpus, sequentially (to protect "netif_napi_add()"). + * We use on_each_cpu to handle the IPI mask or unmask. + */ + if (!saw_channel) + on_each_cpu(manage_ingress_irq, (void *)0, 1); + for_each_online_cpu(cpu) { + struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); + if (!info->has_iqueue) + continue; + if (saw_channel) { + if (!info->napi_added) { + netif_napi_add(dev, &info->napi, + tile_net_poll, TILE_NET_WEIGHT); + info->napi_added = true; + } + if (!info->napi_enabled) { + napi_enable(&info->napi); + info->napi_enabled = true; + } + } else { + if (info->napi_enabled) { + napi_disable(&info->napi); + info->napi_enabled = false; + } + /* FIXME: Drain the iqueue. */ + } + } + if (saw_channel) + on_each_cpu(manage_ingress_irq, (void *)1, 1); /* HACK: Allow packets to flow in the simulator. */ if (saw_channel) -- cgit v0.10.2 From 6ab4ae9aadef65e2f7aca44fd963c302dcb5849e Mon Sep 17 00:00:00 2001 From: Chris Metcalf Date: Thu, 1 Aug 2013 11:36:42 -0400 Subject: tile: enable GRO in the tilegx network driver Signed-off-by: Chris Metcalf Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 17bcf33..2b1c31f 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -422,7 +422,7 @@ static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb, if (idesc->cs && idesc->csum_seed_val == 0xFFFF) skb->ip_summed = CHECKSUM_UNNECESSARY; - netif_receive_skb(skb); + napi_gro_receive(&info->napi, skb); /* Update stats. */ tile_net_stats_add(1, &dev->stats.rx_packets); -- cgit v0.10.2 From f3286a3af89d6db7a488f3e8f02b98d67d50f00c Mon Sep 17 00:00:00 2001 From: Chris Metcalf Date: Thu, 1 Aug 2013 11:36:42 -0400 Subject: tile: support multiple mPIPE shims in tilegx network driver The initial driver support was for a single mPIPE shim on the chip (as is the case for the Gx36 hardware). The Gx72 chip has two mPIPE shims, so we extend the driver to handle that case. Signed-off-by: Chris Metcalf Signed-off-by: David S. 
Miller diff --git a/arch/tile/gxio/iorpc_mpipe_info.c b/arch/tile/gxio/iorpc_mpipe_info.c index d0254aa..64883aa 100644 --- a/arch/tile/gxio/iorpc_mpipe_info.c +++ b/arch/tile/gxio/iorpc_mpipe_info.c @@ -16,6 +16,24 @@ #include "gxio/iorpc_mpipe_info.h" +struct instance_aux_param { + _gxio_mpipe_link_name_t name; +}; + +int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t * context, + _gxio_mpipe_link_name_t name) +{ + struct instance_aux_param temp; + struct instance_aux_param *params = &temp; + + params->name = name; + + return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, + sizeof(*params), GXIO_MPIPE_INFO_OP_INSTANCE_AUX); +} + +EXPORT_SYMBOL(gxio_mpipe_info_instance_aux); + struct enumerate_aux_param { _gxio_mpipe_link_name_t name; _gxio_mpipe_link_mac_t mac; diff --git a/arch/tile/gxio/mpipe.c b/arch/tile/gxio/mpipe.c index 0567cf0..5301a9f 100644 --- a/arch/tile/gxio/mpipe.c +++ b/arch/tile/gxio/mpipe.c @@ -36,8 +36,14 @@ int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index) int fd; int i; + if (mpipe_index >= GXIO_MPIPE_INSTANCE_MAX) + return -EINVAL; + snprintf(file, sizeof(file), "mpipe/%d/iorpc", mpipe_index); fd = hv_dev_open((HV_VirtAddr) file, 0); + + context->fd = fd; + if (fd < 0) { if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX) return fd; @@ -45,8 +51,6 @@ int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index) return -ENODEV; } - context->fd = fd; - /* Map in the MMIO space. */ context->mmio_cfg_base = (void __force *) iorpc_ioremap(fd, HV_MPIPE_CONFIG_MMIO_OFFSET, @@ -64,12 +68,15 @@ int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index) for (i = 0; i < 8; i++) context->__stacks.stacks[i] = 255; + context->instance = mpipe_index; + return 0; fast_failed: iounmap((void __force __iomem *)(context->mmio_cfg_base)); cfg_failed: hv_dev_close(context->fd); + context->fd = -1; return -ENODEV; } @@ -496,6 +503,20 @@ static gxio_mpipe_context_t *_gxio_get_link_context(void) return contextp; } +int gxio_mpipe_link_instance(const char *link_name) +{ + _gxio_mpipe_link_name_t name; + gxio_mpipe_context_t *context = _gxio_get_link_context(); + + if (!context) + return GXIO_ERR_NO_DEVICE; + + strncpy(name.name, link_name, sizeof(name.name)); + name.name[GXIO_MPIPE_LINK_NAME_LEN - 1] = '\0'; + + return gxio_mpipe_info_instance_aux(context, name); +} + int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac) { int rv; diff --git a/arch/tile/include/gxio/iorpc_mpipe_info.h b/arch/tile/include/gxio/iorpc_mpipe_info.h index 0bcf3f7..476c5e5 100644 --- a/arch/tile/include/gxio/iorpc_mpipe_info.h +++ b/arch/tile/include/gxio/iorpc_mpipe_info.h @@ -27,11 +27,15 @@ #include +#define GXIO_MPIPE_INFO_OP_INSTANCE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1250) #define GXIO_MPIPE_INFO_OP_ENUMERATE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1251) #define GXIO_MPIPE_INFO_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000) #define GXIO_MPIPE_INFO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001) +int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t * context, + _gxio_mpipe_link_name_t name); + int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t * context, unsigned int idx, _gxio_mpipe_link_name_t * name, diff --git a/arch/tile/include/gxio/mpipe.h b/arch/tile/include/gxio/mpipe.h index ed742e3..eb7fee4 100644 --- a/arch/tile/include/gxio/mpipe.h +++ b/arch/tile/include/gxio/mpipe.h @@ -220,6 +220,13 @@ typedef MPIPE_PDESC_t gxio_mpipe_idesc_t; */ typedef 
MPIPE_EDMA_DESC_t gxio_mpipe_edesc_t; +/* + * Max # of mpipe instances. 2 currently. + */ +#define GXIO_MPIPE_INSTANCE_MAX HV_MPIPE_INSTANCE_MAX + +#define NR_MPIPE_MAX GXIO_MPIPE_INSTANCE_MAX + /* Get the "va" field from an "idesc". * * This is the address at which the ingress hardware copied the first @@ -311,6 +318,9 @@ typedef struct { /* File descriptor for calling up to Linux (and thus the HV). */ int fd; + /* Corresponding mpipe instance #. */ + int instance; + /* The VA at which configuration registers are mapped. */ char *mmio_cfg_base; @@ -1716,6 +1726,24 @@ typedef struct { uint8_t mac; } gxio_mpipe_link_t; +/* Translate a link name to the instance number of the mPIPE shim which is + * connected to that link. This call does not verify whether the link is + * currently available, and does not reserve any link resources; + * gxio_mpipe_link_open() must be called to perform those functions. + * + * Typically applications will call this function to translate a link name + * to an mPIPE instance number; call gxio_mpipe_init(), passing it that + * instance number, to initialize the mPIPE shim; and then call + * gxio_mpipe_link_open(), passing it the same link name plus the mPIPE + * context, to configure the link. + * + * @param link_name Name of the link; see @ref gxio_mpipe_link_names. + * @return The mPIPE instance number which is associated with the named + * link, or a negative error code (::GXIO_ERR_NO_DEVICE) if the link does + * not exist. + */ +extern int gxio_mpipe_link_instance(const char *link_name); + /* Retrieve one of this system's legal link names, and its MAC address. * * @param index Link name index. If a system supports N legal link names, diff --git a/arch/tile/include/hv/drv_mpipe_intf.h b/arch/tile/include/hv/drv_mpipe_intf.h index 6cdae3b..c97e416 100644 --- a/arch/tile/include/hv/drv_mpipe_intf.h +++ b/arch/tile/include/hv/drv_mpipe_intf.h @@ -23,6 +23,9 @@ #include +/** Number of mPIPE instances supported */ +#define HV_MPIPE_INSTANCE_MAX (2) + /** Number of buffer stacks (32). */ #define HV_MPIPE_NUM_BUFFER_STACKS \ (MPIPE_MMIO_INIT_DAT_GX36_1__BUFFER_STACK_MASK_WIDTH) diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 2b1c31f..b80a91f 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -133,27 +133,31 @@ struct tile_net_tx_wake { /* Info for a specific cpu. */ struct tile_net_info { - /* The NAPI struct. */ - struct napi_struct napi; - /* Packet queue. */ - gxio_mpipe_iqueue_t iqueue; /* Our cpu. */ int my_cpu; - /* True if iqueue is valid. */ - bool has_iqueue; - /* NAPI flags. */ - bool napi_added; - bool napi_enabled; - /* Number of buffers (by kind) which must still be provided. */ - unsigned int num_needed_buffers[MAX_KINDS]; /* A timer for handling egress completions. */ struct hrtimer egress_timer; /* True if "egress_timer" is scheduled. */ bool egress_timer_scheduled; - /* Comps for each egress channel. */ - struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS]; - /* Transmit wake timer for each egress channel. */ - struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS]; + struct info_mpipe { + /* Packet queue. */ + gxio_mpipe_iqueue_t iqueue; + /* The NAPI struct. */ + struct napi_struct napi; + /* Number of buffers (by kind) which must still be provided. */ + unsigned int num_needed_buffers[MAX_KINDS]; + /* instance id. */ + int instance; + /* True if iqueue is valid. */ + bool has_iqueue; + /* NAPI flags. 
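The comment above describes a three-step flow: translate the link name to a shim number, initialize that shim, then open the link against it. Sketched as code, with error handling trimmed and the link name only an example (a real driver keeps the context and link in long-lived state):

static int demo_bringup(void)
{
        static gxio_mpipe_context_t ctx;        /* per-instance context */
        gxio_mpipe_link_t link;
        int instance, rc;

        instance = gxio_mpipe_link_instance("xgbe0");   /* name -> shim # */
        if (instance < 0)
                return instance;                /* e.g. GXIO_ERR_NO_DEVICE */

        rc = gxio_mpipe_init(&ctx, instance);   /* bring up that shim */
        if (rc != 0)
                return rc;

        return gxio_mpipe_link_open(&link, &ctx, "xgbe0", 0);
}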
*/ + bool napi_added; + bool napi_enabled; + /* Comps for each egress channel. */ + struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS]; + /* Transmit wake timer for each egress channel. */ + struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS]; + } mpipe[NR_MPIPE_MAX]; }; /* Info for egress on a particular egress channel. */ @@ -178,17 +182,54 @@ struct tile_net_priv { int loopify_channel; /* The egress channel (channel or loopify_channel). */ int echannel; + /* mPIPE instance, 0 or 1. */ + int instance; }; -/* Egress info, indexed by "priv->echannel" (lazily created as needed). */ -static struct tile_net_egress egress_for_echannel[TILE_NET_CHANNELS]; +static struct mpipe_data { + /* The ingress irq. */ + int ingress_irq; -/* Devices currently associated with each channel. - * NOTE: The array entry can become NULL after ifconfig down, but - * we do not free the underlying net_device structures, so it is - * safe to use a pointer after reading it from this array. - */ -static struct net_device *tile_net_devs_for_channel[TILE_NET_CHANNELS]; + /* The "context" for all devices. */ + gxio_mpipe_context_t context; + + /* Egress info, indexed by "priv->echannel" + * (lazily created as needed). + */ + struct tile_net_egress + egress_for_echannel[TILE_NET_CHANNELS]; + + /* Devices currently associated with each channel. + * NOTE: The array entry can become NULL after ifconfig down, but + * we do not free the underlying net_device structures, so it is + * safe to use a pointer after reading it from this array. + */ + struct net_device + *tile_net_devs_for_channel[TILE_NET_CHANNELS]; + + /* The actual memory allocated for the buffer stacks. */ + void *buffer_stack_vas[MAX_KINDS]; + + /* The amount of memory allocated for each buffer stack. */ + size_t buffer_stack_bytes[MAX_KINDS]; + + /* The first buffer stack index + * (small = +0, large = +1, jumbo = +2). + */ + int first_buffer_stack; + + /* The buckets. */ + int first_bucket; + int num_buckets; + +} mpipe_data[NR_MPIPE_MAX] = { + [0 ... (NR_MPIPE_MAX - 1)] { + .ingress_irq = -1, + .first_buffer_stack = -1, + .first_bucket = -1, + .num_buckets = 1 + } +}; /* A mutex for "tile_net_devs_for_channel". */ static DEFINE_MUTEX(tile_net_devs_for_channel_mutex); @@ -196,8 +237,6 @@ static DEFINE_MUTEX(tile_net_devs_for_channel_mutex); /* The per-cpu info. */ static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info); -/* The "context" for all devices. */ -static gxio_mpipe_context_t context; /* The buffer size enums for each buffer stack. * See arch/tile/include/gxio/mpipe.h for the set of possible values. @@ -210,22 +249,6 @@ static gxio_mpipe_buffer_size_enum_t buffer_size_enums[MAX_KINDS] = { GXIO_MPIPE_BUFFER_SIZE_16384 }; -/* The actual memory allocated for the buffer stacks. */ -static void *buffer_stack_vas[MAX_KINDS]; - -/* The amount of memory allocated for each buffer stack. */ -static size_t buffer_stack_bytes[MAX_KINDS]; - -/* The first buffer stack index (small = +0, large = +1, jumbo = +2). */ -static int first_buffer_stack = -1; - -/* The buckets. */ -static int first_bucket = -1; -static int num_buckets = 1; - -/* The ingress irq. */ -static int ingress_irq = -1; - /* Text value of tile_net.cpus if passed as a module parameter. */ static char *network_cpus_string; @@ -241,6 +264,13 @@ static char *custom_str; /* If "tile_net.jumbo=NUM" was specified, this is "NUM". */ static uint jumbo_num; +/* Obtain mpipe instance from struct tile_net_priv given struct net_device. 
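The [0 ... (NR_MPIPE_MAX - 1)] initializer above is GCC's range-designator extension: it stamps the same "nothing allocated yet" defaults into every array element so each shim starts in a known state. A stand-alone illustration with a hypothetical struct:

struct shim_state {
        int ingress_irq;
        int first_buffer_stack;
        int first_bucket;
        int num_buckets;
};

static struct shim_state shims[2] = {
        [0 ... 1] = {
                .ingress_irq = -1,
                .first_buffer_stack = -1,
                .first_bucket = -1,
                .num_buckets = 1,
        },
};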
*/ +static inline int mpipe_instance(struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + return priv->instance; +} + /* The "tile_net.cpus" argument specifies the cpus that are dedicated * to handle ingress packets. * @@ -314,8 +344,9 @@ static void tile_net_stats_add(unsigned long value, unsigned long *field) } /* Allocate and push a buffer. */ -static bool tile_net_provide_buffer(int kind) +static bool tile_net_provide_buffer(int instance, int kind) { + struct mpipe_data *md = &mpipe_data[instance]; gxio_mpipe_buffer_size_enum_t bse = buffer_size_enums[kind]; size_t bs = gxio_mpipe_buffer_size_enum_to_buffer_size(bse); const unsigned long buffer_alignment = 128; @@ -337,7 +368,7 @@ static bool tile_net_provide_buffer(int kind) /* Make sure "skb" and the back-pointer have been flushed. */ wmb(); - gxio_mpipe_push_buffer(&context, first_buffer_stack + kind, + gxio_mpipe_push_buffer(&md->context, md->first_buffer_stack + kind, (void *)va_to_tile_io_addr(skb->data)); return true; @@ -363,11 +394,14 @@ static struct sk_buff *mpipe_buf_to_skb(void *va) return skb; } -static void tile_net_pop_all_buffers(int stack) +static void tile_net_pop_all_buffers(int instance, int stack) { + struct mpipe_data *md = &mpipe_data[instance]; + for (;;) { tile_io_addr_t addr = - (tile_io_addr_t)gxio_mpipe_pop_buffer(&context, stack); + (tile_io_addr_t)gxio_mpipe_pop_buffer(&md->context, + stack); if (addr == 0) break; dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr))); @@ -378,17 +412,21 @@ static void tile_net_pop_all_buffers(int stack) static void tile_net_provide_needed_buffers(void) { struct tile_net_info *info = &__get_cpu_var(per_cpu_info); - int kind; - - for (kind = 0; kind < MAX_KINDS; kind++) { - while (info->num_needed_buffers[kind] != 0) { - if (!tile_net_provide_buffer(kind)) { - /* Add info to the allocation failure dump. */ - pr_notice("Tile %d still needs some buffers\n", - info->my_cpu); - return; + int instance, kind; + for (instance = 0; instance < NR_MPIPE_MAX && + info->mpipe[instance].has_iqueue; instance++) { + for (kind = 0; kind < MAX_KINDS; kind++) { + while (info->mpipe[instance].num_needed_buffers[kind] + != 0) { + if (!tile_net_provide_buffer(instance, kind)) { + pr_notice("Tile %d still needs" + " some buffers\n", + info->my_cpu); + return; + } + info->mpipe[instance]. + num_needed_buffers[kind]--; } - info->num_needed_buffers[kind]--; } } } @@ -412,6 +450,7 @@ static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb, gxio_mpipe_idesc_t *idesc, unsigned long len) { struct tile_net_info *info = &__get_cpu_var(per_cpu_info); + int instance = mpipe_instance(dev); /* Encode the actual packet length. */ skb_put(skb, len); @@ -422,7 +461,7 @@ static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb, if (idesc->cs && idesc->csum_seed_val == 0xFFFF) skb->ip_summed = CHECKSUM_UNNECESSARY; - napi_gro_receive(&info->napi, skb); + napi_gro_receive(&info->mpipe[instance].napi, skb); /* Update stats. */ tile_net_stats_add(1, &dev->stats.rx_packets); @@ -430,18 +469,19 @@ static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb, /* Need a new buffer. 
*/ if (idesc->size == buffer_size_enums[0]) - info->num_needed_buffers[0]++; + info->mpipe[instance].num_needed_buffers[0]++; else if (idesc->size == buffer_size_enums[1]) - info->num_needed_buffers[1]++; + info->mpipe[instance].num_needed_buffers[1]++; else - info->num_needed_buffers[2]++; + info->mpipe[instance].num_needed_buffers[2]++; } /* Handle a packet. Return true if "processed", false if "filtered". */ -static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc) +static bool tile_net_handle_packet(int instance, gxio_mpipe_idesc_t *idesc) { struct tile_net_info *info = &__get_cpu_var(per_cpu_info); - struct net_device *dev = tile_net_devs_for_channel[idesc->channel]; + struct mpipe_data *md = &mpipe_data[instance]; + struct net_device *dev = md->tile_net_devs_for_channel[idesc->channel]; uint8_t l2_offset; void *va; void *buf; @@ -477,7 +517,7 @@ static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc) if (dev) tile_net_stats_add(1, &dev->stats.rx_dropped); drop: - gxio_mpipe_iqueue_drop(&info->iqueue, idesc); + gxio_mpipe_iqueue_drop(&info->mpipe[instance].iqueue, idesc); } else { struct sk_buff *skb = mpipe_buf_to_skb(va); @@ -487,7 +527,7 @@ drop: tile_net_receive_skb(dev, skb, idesc, len); } - gxio_mpipe_iqueue_consume(&info->iqueue, idesc); + gxio_mpipe_iqueue_consume(&info->mpipe[instance].iqueue, idesc); return !filter; } @@ -508,14 +548,20 @@ static int tile_net_poll(struct napi_struct *napi, int budget) struct tile_net_info *info = &__get_cpu_var(per_cpu_info); unsigned int work = 0; gxio_mpipe_idesc_t *idesc; - int i, n; - - /* Process packets. */ - while ((n = gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc)) > 0) { + int instance, i, n; + struct mpipe_data *md; + struct info_mpipe *info_mpipe = + container_of(napi, struct info_mpipe, napi); + + instance = info_mpipe->instance; + while ((n = gxio_mpipe_iqueue_try_peek( + &info_mpipe->iqueue, + &idesc)) > 0) { for (i = 0; i < n; i++) { if (i == TILE_NET_BATCH) goto done; - if (tile_net_handle_packet(idesc + i)) { + if (tile_net_handle_packet(instance, + idesc + i)) { if (++work >= budget) goto done; } @@ -523,14 +569,16 @@ static int tile_net_poll(struct napi_struct *napi, int budget) } /* There are no packets left. */ - napi_complete(&info->napi); + napi_complete(&info_mpipe->napi); + md = &mpipe_data[instance]; /* Re-enable hypervisor interrupts. */ - gxio_mpipe_enable_notif_ring_interrupt(&context, info->iqueue.ring); + gxio_mpipe_enable_notif_ring_interrupt( + &md->context, info->mpipe[instance].iqueue.ring); /* HACK: Avoid the "rotting packet" problem. */ - if (gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc) > 0) - napi_schedule(&info->napi); + if (gxio_mpipe_iqueue_try_peek(&info_mpipe->iqueue, &idesc) > 0) + napi_schedule(&info_mpipe->napi); /* ISSUE: Handle completions? */ @@ -540,11 +588,11 @@ done: return work; } -/* Handle an ingress interrupt on the current cpu. */ -static irqreturn_t tile_net_handle_ingress_irq(int irq, void *unused) +/* Handle an ingress interrupt from an instance on the current cpu. 
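Note how the reworked tile_net_poll() above finds its per-instance queue: the napi struct now lives inside struct info_mpipe, so container_of() walks back from the member pointer the networking core hands in. Generic shape of the idiom, with a made-up queue structure:

struct demo_queue {
        gxio_mpipe_iqueue_t iqueue;     /* per-queue ingress state */
        struct napi_struct napi;
        int instance;
};

static int demo_poll(struct napi_struct *napi, int budget)
{
        /* back from &q->napi to q itself */
        struct demo_queue *q = container_of(napi, struct demo_queue, napi);

        /* ... drain q->iqueue, using q->instance to pick the hardware ... */
        return 0;
}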
*/ +static irqreturn_t tile_net_handle_ingress_irq(int irq, void *id) { struct tile_net_info *info = &__get_cpu_var(per_cpu_info); - napi_schedule(&info->napi); + napi_schedule(&info->mpipe[(uint64_t)id].napi); return IRQ_HANDLED; } @@ -586,7 +634,9 @@ static void tile_net_schedule_tx_wake_timer(struct net_device *dev, { struct tile_net_info *info = &per_cpu(per_cpu_info, tx_queue_idx); struct tile_net_priv *priv = netdev_priv(dev); - struct tile_net_tx_wake *tx_wake = &info->tx_wake[priv->echannel]; + int instance = priv->instance; + struct tile_net_tx_wake *tx_wake = + &info->mpipe[instance].tx_wake[priv->echannel]; hrtimer_start(&tx_wake->timer, ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL), @@ -624,7 +674,7 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t) struct tile_net_info *info = &__get_cpu_var(per_cpu_info); unsigned long irqflags; bool pending = false; - int i; + int i, instance; local_irq_save(irqflags); @@ -632,13 +682,19 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t) info->egress_timer_scheduled = false; /* Free all possible comps for this tile. */ - for (i = 0; i < TILE_NET_CHANNELS; i++) { - struct tile_net_egress *egress = &egress_for_echannel[i]; - struct tile_net_comps *comps = info->comps_for_echannel[i]; - if (comps->comp_last >= comps->comp_next) - continue; - tile_net_free_comps(egress->equeue, comps, -1, true); - pending = pending || (comps->comp_last < comps->comp_next); + for (instance = 0; instance < NR_MPIPE_MAX && + info->mpipe[instance].has_iqueue; instance++) { + for (i = 0; i < TILE_NET_CHANNELS; i++) { + struct tile_net_egress *egress = + &mpipe_data[instance].egress_for_echannel[i]; + struct tile_net_comps *comps = + info->mpipe[instance].comps_for_echannel[i]; + if (!egress || comps->comp_last >= comps->comp_next) + continue; + tile_net_free_comps(egress->equeue, comps, -1, true); + pending = pending || + (comps->comp_last < comps->comp_next); + } } /* Reschedule timer if needed. */ @@ -650,13 +706,15 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t) return HRTIMER_NORESTART; } -/* Helper function for "tile_net_update()". */ -static void manage_ingress_irq(void *enable) +/* Helper functions for "tile_net_update()". */ +static void enable_ingress_irq(void *irq) { - if (enable) - enable_percpu_irq(ingress_irq, 0); - else - disable_percpu_irq(ingress_irq); + enable_percpu_irq((long)irq, 0); +} + +static void disable_ingress_irq(void *irq) +{ + disable_percpu_irq((long)irq); } /* Helper function for tile_net_open() and tile_net_stop(). 
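Both helpers just above lean on the same trick: rather than pointing the callback cookie at memory, a small integer (the mPIPE instance in the IRQ handler, the IRQ number in the on_each_cpu() helpers) is cast into the void * itself, which sidesteps any lifetime questions for cross-cpu calls. Illustrative sketch:

static irqreturn_t demo_isr(int irq, void *id)
{
        long instance = (long)id;               /* decode the cookie */

        /* ... schedule NAPI for mpipe[instance] ... */
        return IRQ_HANDLED;
}

/* encode at registration time (instance is a small non-negative int): */
/*      request_irq(irq, demo_isr, 0, "demo", (void *)(long)instance);  */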
@@ -666,19 +724,22 @@ static int tile_net_update(struct net_device *dev) { static gxio_mpipe_rules_t rules; /* too big to fit on the stack */ bool saw_channel = false; + int instance = mpipe_instance(dev); + struct mpipe_data *md = &mpipe_data[instance]; int channel; int rc; int cpu; - gxio_mpipe_rules_init(&rules, &context); + saw_channel = false; + gxio_mpipe_rules_init(&rules, &md->context); for (channel = 0; channel < TILE_NET_CHANNELS; channel++) { - if (tile_net_devs_for_channel[channel] == NULL) + if (md->tile_net_devs_for_channel[channel] == NULL) continue; if (!saw_channel) { saw_channel = true; - gxio_mpipe_rules_begin(&rules, first_bucket, - num_buckets, NULL); + gxio_mpipe_rules_begin(&rules, md->first_bucket, + md->num_buckets, NULL); gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN); } gxio_mpipe_rules_add_channel(&rules, channel); @@ -689,7 +750,8 @@ static int tile_net_update(struct net_device *dev) */ rc = gxio_mpipe_rules_commit(&rules); if (rc != 0) { - netdev_warn(dev, "gxio_mpipe_rules_commit failed: %d\n", rc); + netdev_warn(dev, "gxio_mpipe_rules_commit: mpipe[%d] %d\n", + instance, rc); return -EIO; } @@ -697,35 +759,38 @@ static int tile_net_update(struct net_device *dev) * We use on_each_cpu to handle the IPI mask or unmask. */ if (!saw_channel) - on_each_cpu(manage_ingress_irq, (void *)0, 1); + on_each_cpu(disable_ingress_irq, + (void *)(long)(md->ingress_irq), 1); for_each_online_cpu(cpu) { struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); - if (!info->has_iqueue) + + if (!info->mpipe[instance].has_iqueue) continue; if (saw_channel) { - if (!info->napi_added) { - netif_napi_add(dev, &info->napi, + if (!info->mpipe[instance].napi_added) { + netif_napi_add(dev, &info->mpipe[instance].napi, tile_net_poll, TILE_NET_WEIGHT); - info->napi_added = true; + info->mpipe[instance].napi_added = true; } - if (!info->napi_enabled) { - napi_enable(&info->napi); - info->napi_enabled = true; + if (!info->mpipe[instance].napi_enabled) { + napi_enable(&info->mpipe[instance].napi); + info->mpipe[instance].napi_enabled = true; } } else { - if (info->napi_enabled) { - napi_disable(&info->napi); - info->napi_enabled = false; + if (info->mpipe[instance].napi_enabled) { + napi_disable(&info->mpipe[instance].napi); + info->mpipe[instance].napi_enabled = false; } /* FIXME: Drain the iqueue. */ } } if (saw_channel) - on_each_cpu(manage_ingress_irq, (void *)1, 1); + on_each_cpu(enable_ingress_irq, + (void *)(long)(md->ingress_irq), 1); /* HACK: Allow packets to flow in the simulator. */ if (saw_channel) - sim_enable_mpipe_links(0, -1); + sim_enable_mpipe_links(instance, -1); return 0; } @@ -735,46 +800,52 @@ static int create_buffer_stack(struct net_device *dev, int kind, size_t num_buffers) { pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH); + int instance = mpipe_instance(dev); + struct mpipe_data *md = &mpipe_data[instance]; size_t needed = gxio_mpipe_calc_buffer_stack_bytes(num_buffers); - int stack_idx = first_buffer_stack + kind; + int stack_idx = md->first_buffer_stack + kind; void *va; int i, rc; /* Round up to 64KB and then use alloc_pages() so we get the * required 64KB alignment. 
*/ - buffer_stack_bytes[kind] = ALIGN(needed, 64 * 1024); + md->buffer_stack_bytes[kind] = + ALIGN(needed, 64 * 1024); - va = alloc_pages_exact(buffer_stack_bytes[kind], GFP_KERNEL); + va = alloc_pages_exact(md->buffer_stack_bytes[kind], GFP_KERNEL); if (va == NULL) { netdev_err(dev, "Could not alloc %zd bytes for buffer stack %d\n", - buffer_stack_bytes[kind], kind); + md->buffer_stack_bytes[kind], kind); return -ENOMEM; } /* Initialize the buffer stack. */ - rc = gxio_mpipe_init_buffer_stack(&context, stack_idx, - buffer_size_enums[kind], - va, buffer_stack_bytes[kind], 0); + rc = gxio_mpipe_init_buffer_stack(&md->context, stack_idx, + buffer_size_enums[kind], va, + md->buffer_stack_bytes[kind], 0); if (rc != 0) { - netdev_err(dev, "gxio_mpipe_init_buffer_stack: %d\n", rc); - free_pages_exact(va, buffer_stack_bytes[kind]); + netdev_err(dev, "gxio_mpipe_init_buffer_stack: mpipe[%d] %d\n", + instance, rc); + free_pages_exact(va, md->buffer_stack_bytes[kind]); return rc; } - buffer_stack_vas[kind] = va; + md->buffer_stack_vas[kind] = va; - rc = gxio_mpipe_register_client_memory(&context, stack_idx, + rc = gxio_mpipe_register_client_memory(&md->context, stack_idx, hash_pte, 0); if (rc != 0) { - netdev_err(dev, "gxio_mpipe_register_client_memory: %d\n", rc); + netdev_err(dev, + "gxio_mpipe_register_client_memory: mpipe[%d] %d\n", + instance, rc); return rc; } /* Provide initial buffers. */ for (i = 0; i < num_buffers; i++) { - if (!tile_net_provide_buffer(kind)) { + if (!tile_net_provide_buffer(instance, kind)) { netdev_err(dev, "Cannot allocate initial sk_bufs!\n"); return -ENOMEM; } @@ -793,14 +864,18 @@ static int init_buffer_stacks(struct net_device *dev, int num_kinds = MAX_KINDS - (jumbo_num == 0); size_t num_buffers; int rc; + int instance = mpipe_instance(dev); + struct mpipe_data *md = &mpipe_data[instance]; /* Allocate the buffer stacks. */ - rc = gxio_mpipe_alloc_buffer_stacks(&context, num_kinds, 0, 0); + rc = gxio_mpipe_alloc_buffer_stacks(&md->context, num_kinds, 0, 0); if (rc < 0) { - netdev_err(dev, "gxio_mpipe_alloc_buffer_stacks: %d\n", rc); + netdev_err(dev, + "gxio_mpipe_alloc_buffer_stacks: mpipe[%d] %d\n", + instance, rc); return rc; } - first_buffer_stack = rc; + md->first_buffer_stack = rc; /* Enough small/large buffers to (normally) avoid buffer errors. */ num_buffers = @@ -829,6 +904,8 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev, { struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); int order, i, rc; + int instance = mpipe_instance(dev); + struct mpipe_data *md = &mpipe_data[instance]; struct page *page; void *addr; @@ -843,7 +920,7 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev, addr = pfn_to_kaddr(page_to_pfn(page)); memset(addr, 0, COMPS_SIZE); for (i = 0; i < TILE_NET_CHANNELS; i++) - info->comps_for_echannel[i] = + info->mpipe[instance].comps_for_echannel[i] = addr + i * sizeof(struct tile_net_comps); /* If this is a network cpu, create an iqueue. 
*/ @@ -857,14 +934,15 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev, return -ENOMEM; } addr = pfn_to_kaddr(page_to_pfn(page)); - rc = gxio_mpipe_iqueue_init(&info->iqueue, &context, ring++, - addr, NOTIF_RING_SIZE, 0); + rc = gxio_mpipe_iqueue_init(&info->mpipe[instance].iqueue, + &md->context, ring++, addr, + NOTIF_RING_SIZE, 0); if (rc < 0) { netdev_err(dev, "gxio_mpipe_iqueue_init failed: %d\n", rc); return rc; } - info->has_iqueue = true; + info->mpipe[instance].has_iqueue = true; } return ring; @@ -877,40 +955,41 @@ static int init_notif_group_and_buckets(struct net_device *dev, int ring, int network_cpus_count) { int group, rc; + int instance = mpipe_instance(dev); + struct mpipe_data *md = &mpipe_data[instance]; /* Allocate one NotifGroup. */ - rc = gxio_mpipe_alloc_notif_groups(&context, 1, 0, 0); + rc = gxio_mpipe_alloc_notif_groups(&md->context, 1, 0, 0); if (rc < 0) { - netdev_err(dev, "gxio_mpipe_alloc_notif_groups failed: %d\n", - rc); + netdev_err(dev, "gxio_mpipe_alloc_notif_groups: mpipe[%d] %d\n", + instance, rc); return rc; } group = rc; /* Initialize global num_buckets value. */ if (network_cpus_count > 4) - num_buckets = 256; + md->num_buckets = 256; else if (network_cpus_count > 1) - num_buckets = 16; + md->num_buckets = 16; /* Allocate some buckets, and set global first_bucket value. */ - rc = gxio_mpipe_alloc_buckets(&context, num_buckets, 0, 0); + rc = gxio_mpipe_alloc_buckets(&md->context, md->num_buckets, 0, 0); if (rc < 0) { - netdev_err(dev, "gxio_mpipe_alloc_buckets failed: %d\n", rc); + netdev_err(dev, "gxio_mpipe_alloc_buckets: mpipe[%d] %d\n", + instance, rc); return rc; } - first_bucket = rc; + md->first_bucket = rc; /* Init group and buckets. */ rc = gxio_mpipe_init_notif_group_and_buckets( - &context, group, ring, network_cpus_count, - first_bucket, num_buckets, + &md->context, group, ring, network_cpus_count, + md->first_bucket, md->num_buckets, GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY); if (rc != 0) { - netdev_err( - dev, - "gxio_mpipe_init_notif_group_and_buckets failed: %d\n", - rc); + netdev_err(dev, "gxio_mpipe_init_notif_group_and_buckets: " + "mpipe[%d] %d\n", instance, rc); return rc; } @@ -924,30 +1003,39 @@ static int init_notif_group_and_buckets(struct net_device *dev, */ static int tile_net_setup_interrupts(struct net_device *dev) { - int cpu, rc; + int cpu, rc, irq; + int instance = mpipe_instance(dev); + struct mpipe_data *md = &mpipe_data[instance]; + + irq = md->ingress_irq; + if (irq < 0) { + irq = create_irq(); + if (irq < 0) { + netdev_err(dev, + "create_irq failed: mpipe[%d] %d\n", + instance, irq); + return irq; + } + tile_irq_activate(irq, TILE_IRQ_PERCPU); - rc = create_irq(); - if (rc < 0) { - netdev_err(dev, "create_irq failed: %d\n", rc); - return rc; - } - ingress_irq = rc; - tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU); - rc = request_irq(ingress_irq, tile_net_handle_ingress_irq, - 0, "tile_net", NULL); - if (rc != 0) { - netdev_err(dev, "request_irq failed: %d\n", rc); - destroy_irq(ingress_irq); - ingress_irq = -1; - return rc; + rc = request_irq(irq, tile_net_handle_ingress_irq, + 0, "tile_net", (void *)((uint64_t)instance)); + + if (rc != 0) { + netdev_err(dev, "request_irq failed: mpipe[%d] %d\n", + instance, rc); + destroy_irq(irq); + return rc; + } + md->ingress_irq = irq; } for_each_online_cpu(cpu) { struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); - if (info->has_iqueue) { - gxio_mpipe_request_notif_ring_interrupt( - &context, cpu_x(cpu), cpu_y(cpu), - KERNEL_PL, ingress_irq, 
info->iqueue.ring); + if (info->mpipe[instance].has_iqueue) { + gxio_mpipe_request_notif_ring_interrupt(&md->context, + cpu_x(cpu), cpu_y(cpu), KERNEL_PL, irq, + info->mpipe[instance].iqueue.ring); } } @@ -955,40 +1043,45 @@ static int tile_net_setup_interrupts(struct net_device *dev) } /* Undo any state set up partially by a failed call to tile_net_init_mpipe. */ -static void tile_net_init_mpipe_fail(void) +static void tile_net_init_mpipe_fail(int instance) { int kind, cpu; + struct mpipe_data *md = &mpipe_data[instance]; /* Do cleanups that require the mpipe context first. */ for (kind = 0; kind < MAX_KINDS; kind++) { - if (buffer_stack_vas[kind] != NULL) { - tile_net_pop_all_buffers(first_buffer_stack + kind); + if (md->buffer_stack_vas[kind] != NULL) { + tile_net_pop_all_buffers(instance, + md->first_buffer_stack + + kind); } } /* Destroy mpipe context so the hardware no longer owns any memory. */ - gxio_mpipe_destroy(&context); + gxio_mpipe_destroy(&md->context); for_each_online_cpu(cpu) { struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); - free_pages((unsigned long)(info->comps_for_echannel[0]), - get_order(COMPS_SIZE)); - info->comps_for_echannel[0] = NULL; - free_pages((unsigned long)(info->iqueue.idescs), + free_pages( + (unsigned long)( + info->mpipe[instance].comps_for_echannel[0]), + get_order(COMPS_SIZE)); + info->mpipe[instance].comps_for_echannel[0] = NULL; + free_pages((unsigned long)(info->mpipe[instance].iqueue.idescs), get_order(NOTIF_RING_SIZE)); - info->iqueue.idescs = NULL; + info->mpipe[instance].iqueue.idescs = NULL; } for (kind = 0; kind < MAX_KINDS; kind++) { - if (buffer_stack_vas[kind] != NULL) { - free_pages_exact(buffer_stack_vas[kind], - buffer_stack_bytes[kind]); - buffer_stack_vas[kind] = NULL; + if (md->buffer_stack_vas[kind] != NULL) { + free_pages_exact(md->buffer_stack_vas[kind], + md->buffer_stack_bytes[kind]); + md->buffer_stack_vas[kind] = NULL; } } - first_buffer_stack = -1; - first_bucket = -1; + md->first_buffer_stack = -1; + md->first_bucket = -1; } /* The first time any tilegx network device is opened, we initialize @@ -1005,6 +1098,8 @@ static int tile_net_init_mpipe(struct net_device *dev) int rc; int cpu; int first_ring, ring; + int instance = mpipe_instance(dev); + struct mpipe_data *md = &mpipe_data[instance]; int network_cpus_count = cpus_weight(network_cpus_map); if (!hash_default) { @@ -1012,9 +1107,10 @@ static int tile_net_init_mpipe(struct net_device *dev) return -EIO; } - rc = gxio_mpipe_init(&context, 0); + rc = gxio_mpipe_init(&md->context, instance); if (rc != 0) { - netdev_err(dev, "gxio_mpipe_init failed: %d\n", rc); + netdev_err(dev, "gxio_mpipe_init: mpipe[%d] %d\n", + instance, rc); return -EIO; } @@ -1024,7 +1120,8 @@ static int tile_net_init_mpipe(struct net_device *dev) goto fail; /* Allocate one NotifRing for each network cpu. */ - rc = gxio_mpipe_alloc_notif_rings(&context, network_cpus_count, 0, 0); + rc = gxio_mpipe_alloc_notif_rings(&md->context, + network_cpus_count, 0, 0); if (rc < 0) { netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n", rc); @@ -1054,7 +1151,7 @@ static int tile_net_init_mpipe(struct net_device *dev) return 0; fail: - tile_net_init_mpipe_fail(); + tile_net_init_mpipe_fail(instance); return rc; } @@ -1072,9 +1169,11 @@ static int tile_net_init_egress(struct net_device *dev, int echannel) int headers_order, edescs_order, equeue_order; size_t edescs_size; int rc = -ENOMEM; + int instance = mpipe_instance(dev); + struct mpipe_data *md = &mpipe_data[instance]; /* Only initialize once. 
*/ - if (egress_for_echannel[echannel].equeue != NULL) + if (md->egress_for_echannel[echannel].equeue != NULL) return 0; /* Allocate memory for the "headers". */ @@ -1113,20 +1212,21 @@ static int tile_net_init_egress(struct net_device *dev, int echannel) /* Allocate an edma ring (using a one entry "free list"). */ if (ering < 0) { - rc = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0); + rc = gxio_mpipe_alloc_edma_rings(&md->context, 1, 0, 0); if (rc < 0) { - netdev_warn(dev, "gxio_mpipe_alloc_edma_rings: %d\n", - rc); + netdev_warn(dev, "gxio_mpipe_alloc_edma_rings: " + "mpipe[%d] %d\n", instance, rc); goto fail_equeue; } ering = rc; } /* Initialize the equeue. */ - rc = gxio_mpipe_equeue_init(equeue, &context, ering, echannel, + rc = gxio_mpipe_equeue_init(equeue, &md->context, ering, echannel, edescs, edescs_size, 0); if (rc != 0) { - netdev_err(dev, "gxio_mpipe_equeue_init failed: %d\n", rc); + netdev_err(dev, "gxio_mpipe_equeue_init: mpipe[%d] %d\n", + instance, rc); goto fail_equeue; } @@ -1143,8 +1243,8 @@ static int tile_net_init_egress(struct net_device *dev, int echannel) } /* Done. */ - egress_for_echannel[echannel].equeue = equeue; - egress_for_echannel[echannel].headers = headers; + md->egress_for_echannel[echannel].equeue = equeue; + md->egress_for_echannel[echannel].headers = headers; return 0; fail_equeue: @@ -1164,9 +1264,12 @@ fail: static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link, const char *link_name) { - int rc = gxio_mpipe_link_open(link, &context, link_name, 0); + int instance = mpipe_instance(dev); + struct mpipe_data *md = &mpipe_data[instance]; + int rc = gxio_mpipe_link_open(link, &md->context, link_name, 0); if (rc < 0) { - netdev_err(dev, "Failed to open '%s'\n", link_name); + netdev_err(dev, "Failed to open '%s', mpipe[%d], %d\n", + link_name, instance, rc); return rc; } if (jumbo_num != 0) { @@ -1193,12 +1296,21 @@ static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link, static int tile_net_open(struct net_device *dev) { struct tile_net_priv *priv = netdev_priv(dev); - int cpu, rc; + int cpu, rc, instance; mutex_lock(&tile_net_devs_for_channel_mutex); - /* Do one-time initialization the first time any device is opened. */ - if (ingress_irq < 0) { + /* Get the instance info. */ + rc = gxio_mpipe_link_instance(dev->name); + if (rc < 0 || rc >= NR_MPIPE_MAX) + return -EIO; + + priv->instance = rc; + instance = rc; + if (!mpipe_data[rc].context.mmio_fast_base) { + /* Do one-time initialization per instance the first time + * any device is opened. + */ rc = tile_net_init_mpipe(dev); if (rc != 0) goto fail; @@ -1229,7 +1341,7 @@ static int tile_net_open(struct net_device *dev) if (rc != 0) goto fail; - tile_net_devs_for_channel[priv->channel] = dev; + mpipe_data[instance].tile_net_devs_for_channel[priv->channel] = dev; rc = tile_net_update(dev); if (rc != 0) @@ -1241,7 +1353,7 @@ static int tile_net_open(struct net_device *dev) for_each_online_cpu(cpu) { struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); struct tile_net_tx_wake *tx_wake = - &info->tx_wake[priv->echannel]; + &info->mpipe[instance].tx_wake[priv->echannel]; hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); @@ -1267,7 +1379,7 @@ fail: priv->channel = -1; } priv->echannel = -1; - tile_net_devs_for_channel[priv->channel] = NULL; + mpipe_data[instance].tile_net_devs_for_channel[priv->channel] = NULL; mutex_unlock(&tile_net_devs_for_channel_mutex); /* Don't return raw gxio error codes to generic Linux. 
*/ @@ -1279,18 +1391,20 @@ static int tile_net_stop(struct net_device *dev) { struct tile_net_priv *priv = netdev_priv(dev); int cpu; + int instance = priv->instance; + struct mpipe_data *md = &mpipe_data[instance]; for_each_online_cpu(cpu) { struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); struct tile_net_tx_wake *tx_wake = - &info->tx_wake[priv->echannel]; + &info->mpipe[instance].tx_wake[priv->echannel]; hrtimer_cancel(&tx_wake->timer); netif_stop_subqueue(dev, cpu); } mutex_lock(&tile_net_devs_for_channel_mutex); - tile_net_devs_for_channel[priv->channel] = NULL; + md->tile_net_devs_for_channel[priv->channel] = NULL; (void)tile_net_update(dev); if (priv->loopify_channel >= 0) { if (gxio_mpipe_link_close(&priv->loopify_link) != 0) @@ -1500,6 +1614,8 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue, struct sk_buff *skb, unsigned char *headers, s64 slot) { struct skb_shared_info *sh = skb_shinfo(skb); + int instance = mpipe_instance(dev); + struct mpipe_data *md = &mpipe_data[instance]; unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); unsigned int data_len = skb->len - sh_len; unsigned int p_len = sh->gso_size; @@ -1522,8 +1638,8 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue, edesc_head.xfer_size = sh_len; /* This is only used to specify the TLB. */ - edesc_head.stack_idx = first_buffer_stack; - edesc_body.stack_idx = first_buffer_stack; + edesc_head.stack_idx = md->first_buffer_stack; + edesc_body.stack_idx = md->first_buffer_stack; /* Egress all the edescs. */ for (segment = 0; segment < sh->gso_segs; segment++) { @@ -1598,8 +1714,11 @@ static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev) struct tile_net_info *info = &__get_cpu_var(per_cpu_info); struct tile_net_priv *priv = netdev_priv(dev); int channel = priv->echannel; - struct tile_net_egress *egress = &egress_for_echannel[channel]; - struct tile_net_comps *comps = info->comps_for_echannel[channel]; + int instance = priv->instance; + struct mpipe_data *md = &mpipe_data[instance]; + struct tile_net_egress *egress = &md->egress_for_echannel[channel]; + struct tile_net_comps *comps = + info->mpipe[instance].comps_for_echannel[channel]; gxio_mpipe_equeue_t *equeue = egress->equeue; unsigned long irqflags; int num_edescs; @@ -1663,10 +1782,13 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) { struct tile_net_info *info = &__get_cpu_var(per_cpu_info); struct tile_net_priv *priv = netdev_priv(dev); - struct tile_net_egress *egress = &egress_for_echannel[priv->echannel]; + int instance = priv->instance; + struct mpipe_data *md = &mpipe_data[instance]; + struct tile_net_egress *egress = + &md->egress_for_echannel[priv->echannel]; gxio_mpipe_equeue_t *equeue = egress->equeue; struct tile_net_comps *comps = - info->comps_for_echannel[priv->echannel]; + info->mpipe[instance].comps_for_echannel[priv->echannel]; unsigned int len = skb->len; unsigned char *data = skb->data; unsigned int num_edescs; @@ -1683,7 +1805,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb)); /* This is only used to specify the TLB. */ - edesc.stack_idx = first_buffer_stack; + edesc.stack_idx = md->first_buffer_stack; /* Prepare the edescs. 
*/ for (i = 0; i < num_edescs; i++) { @@ -1790,9 +1912,13 @@ static int tile_net_set_mac_address(struct net_device *dev, void *p) */ static void tile_net_netpoll(struct net_device *dev) { - disable_percpu_irq(ingress_irq); - tile_net_handle_ingress_irq(ingress_irq, NULL); - enable_percpu_irq(ingress_irq, 0); + int instance = mpipe_instance(dev); + struct tile_net_info *info = &__get_cpu_var(per_cpu_info); + struct mpipe_data *md = &mpipe_data[instance]; + + disable_percpu_irq(md->ingress_irq); + napi_schedule(&info->mpipe[instance].napi); + enable_percpu_irq(md->ingress_irq, 0); } #endif @@ -1895,9 +2021,12 @@ static void tile_net_init_module_percpu(void *unused) { struct tile_net_info *info = &__get_cpu_var(per_cpu_info); int my_cpu = smp_processor_id(); + int instance; - info->has_iqueue = false; - + for (instance = 0; instance < NR_MPIPE_MAX; instance++) { + info->mpipe[instance].has_iqueue = false; + info->mpipe[instance].instance = instance; + } info->my_cpu = my_cpu; /* Initialize the egress timer. */ @@ -1914,6 +2043,8 @@ static int __init tile_net_init_module(void) pr_info("Tilera Network Driver\n"); + BUILD_BUG_ON(NR_MPIPE_MAX != 2); + mutex_init(&tile_net_devs_for_channel_mutex); /* Initialize each CPU. */ -- cgit v0.10.2 From 2c7d04a9c4c3cb7212bb7c66989f0f4a35ed6a26 Mon Sep 17 00:00:00 2001 From: Chris Metcalf Date: Thu, 1 Aug 2013 11:36:42 -0400 Subject: tile: support TSO for IPv6 in tilegx network driver Signed-off-by: Chris Metcalf Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index b80a91f..6d94d58 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include @@ -1512,20 +1513,20 @@ static int tso_count_edescs(struct sk_buff *skb) return num_edescs; } -/* Prepare modified copies of the skbuff headers. - * FIXME: add support for IPv6. - */ +/* Prepare modified copies of the skbuff headers. */ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers, s64 slot) { struct skb_shared_info *sh = skb_shinfo(skb); struct iphdr *ih; + struct ipv6hdr *ih6; struct tcphdr *th; unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); unsigned int data_len = skb->len - sh_len; unsigned char *data = skb->data; unsigned int ih_off, th_off, p_len; unsigned int isum_seed, tsum_seed, id, seq; + int is_ipv6; long f_id = -1; /* id of the current fragment */ long f_size = skb_headlen(skb) - sh_len; /* current fragment size */ long f_used = 0; /* bytes used from the current fragment */ @@ -1533,18 +1534,24 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers, int segment; /* Locate original headers and compute various lengths. */ - ih = ip_hdr(skb); + is_ipv6 = skb_is_gso_v6(skb); + if (is_ipv6) { + ih6 = ipv6_hdr(skb); + ih_off = skb_network_offset(skb); + } else { + ih = ip_hdr(skb); + ih_off = skb_network_offset(skb); + isum_seed = ((0xFFFF - ih->check) + + (0xFFFF - ih->tot_len) + + (0xFFFF - ih->id)); + id = ntohs(ih->id); + } + th = tcp_hdr(skb); - ih_off = skb_network_offset(skb); th_off = skb_transport_offset(skb); p_len = sh->gso_size; - /* Set up seed values for IP and TCP csum and initialize id and seq. */ - isum_seed = ((0xFFFF - ih->check) + - (0xFFFF - ih->tot_len) + - (0xFFFF - ih->id)); tsum_seed = th->check + (0xFFFF ^ htons(skb->len)); - id = ntohs(ih->id); seq = ntohl(th->seq); /* Prepare all the headers. 
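The per-segment header fixup that follows leans on one's-complement arithmetic: 0xFFFF - x negates x in that arithmetic, so the isum_seed computed above from the original check, tot_len and id amounts to the checksum sum over the IPv4 header fields that do not change between segments. Adding each segment's new tot_len and id, folding the carries and inverting then yields a valid header checksum without a full recompute; IPv6 skips all of this because its header carries no checksum, only a payload_len to rewrite. A stand-alone fold helper to make the arithmetic concrete (the driver uses its own csum_long() for the same job):

static u16 fold16(u64 sum)              /* one's-complement fold */
{
        while (sum >> 16)
                sum = (sum & 0xFFFF) + (sum >> 16);
        return (u16)sum;
}

/* per IPv4 segment:
 *      ih->tot_len = htons(new_len);
 *      ih->id      = htons(id++);
 *      ih->check   = fold16(isum_seed + ih->tot_len + ih->id) ^ 0xffff;
 */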
*/ @@ -1558,11 +1565,17 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers, memcpy(buf, data, sh_len); /* Update copied ip header. */ - ih = (struct iphdr *)(buf + ih_off); - ih->tot_len = htons(sh_len + p_len - ih_off); - ih->id = htons(id); - ih->check = csum_long(isum_seed + ih->tot_len + - ih->id) ^ 0xffff; + if (is_ipv6) { + ih6 = (struct ipv6hdr *)(buf + ih_off); + ih6->payload_len = htons(sh_len + p_len - ih_off - + sizeof(*ih6)); + } else { + ih = (struct iphdr *)(buf + ih_off); + ih->tot_len = htons(sh_len + p_len - ih_off); + ih->id = htons(id); + ih->check = csum_long(isum_seed + ih->tot_len + + ih->id) ^ 0xffff; + } /* Update copied tcp header. */ th = (struct tcphdr *)(buf + th_off); @@ -1954,6 +1967,7 @@ static void tile_net_setup(struct net_device *dev) features |= NETIF_F_HW_CSUM; features |= NETIF_F_SG; features |= NETIF_F_TSO; + features |= NETIF_F_TSO6; dev->hw_features |= features; dev->vlan_features |= features; -- cgit v0.10.2 From 4aa02644b9c4e2f43ba0c13fefa39e9fd3a71da3 Mon Sep 17 00:00:00 2001 From: Chris Metcalf Date: Thu, 1 Aug 2013 11:36:42 -0400 Subject: tile: make "tile_net.custom" a proper bool module parameter Signed-off-by: Chris Metcalf Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 6d94d58..7302e84 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -256,11 +256,11 @@ static char *network_cpus_string; /* The actual cpus in "network_cpus". */ static struct cpumask network_cpus_map; -/* If "loopify=LINK" was specified, this is "LINK". */ +/* If "tile_net.loopify=LINK" was specified, this is "LINK". */ static char *loopify_link_name; -/* If "tile_net.custom" was specified, this is non-NULL. */ -static char *custom_str; +/* If "tile_net.custom" was specified, this is true. */ +static bool custom_flag; /* If "tile_net.jumbo=NUM" was specified, this is "NUM". */ static uint jumbo_num; @@ -323,7 +323,7 @@ MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress"); /* The "tile_net.custom" argument causes us to ignore the "conventional" * classifier metadata, in particular, the "l2_offset". */ -module_param_named(custom, custom_str, charp, 0444); +module_param_named(custom, custom_flag, bool, 0444); MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier"); /* The "tile_net.jumbo" argument causes us to support "jumbo" packets, @@ -501,7 +501,7 @@ static bool tile_net_handle_packet(int instance, gxio_mpipe_idesc_t *idesc) } /* Get the "l2_offset", if allowed. */ - l2_offset = custom_str ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc); + l2_offset = custom_flag ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc); /* Get the VA (including NET_IP_ALIGN bytes of "headroom"). */ va = tile_io_addr_to_va((unsigned long)idesc->va); -- cgit v0.10.2 From 84e181ba3607a4e1175d929c346c72f9bfa1ab02 Mon Sep 17 00:00:00 2001 From: Chris Metcalf Date: Thu, 1 Aug 2013 11:36:42 -0400 Subject: tile: remove deprecated NETIF_F_LLTX flag from tile drivers Signed-off-by: Chris Metcalf Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 7302e84..0beed7e 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -1963,7 +1963,6 @@ static void tile_net_setup(struct net_device *dev) dev->watchdog_timeo = TILE_NET_TIMEOUT; dev->mtu = 1500; - features |= NETIF_F_LLTX; features |= NETIF_F_HW_CSUM; features |= NETIF_F_SG; features |= NETIF_F_TSO; diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c index fd17af5..106be47 100644 --- a/drivers/net/ethernet/tile/tilepro.c +++ b/drivers/net/ethernet/tile/tilepro.c @@ -2253,7 +2253,6 @@ static void tile_net_setup(struct net_device *dev) dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN; dev->mtu = TILE_NET_MTU; - features |= NETIF_F_LLTX; features |= NETIF_F_HW_CSUM; features |= NETIF_F_SG; -- cgit v0.10.2 From 9ab5ec59c8a9cc0e4b94252b48200b6023c716aa Mon Sep 17 00:00:00 2001 From: Chris Metcalf Date: Thu, 1 Aug 2013 11:36:42 -0400 Subject: tile: support PTP using the tilegx mPIPE (IEEE 1588) Signed-off-by: Chris Metcalf Acked-by: Richard Cochran Signed-off-by: David S. Miller diff --git a/arch/tile/gxio/iorpc_mpipe.c b/arch/tile/gxio/iorpc_mpipe.c index c2fb1516..4f8f3d6 100644 --- a/arch/tile/gxio/iorpc_mpipe.c +++ b/arch/tile/gxio/iorpc_mpipe.c @@ -475,6 +475,25 @@ int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context, EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_aux); +struct adjust_timestamp_freq_param { + int32_t ppb; +}; + +int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t * context, + int32_t ppb) +{ + struct adjust_timestamp_freq_param temp; + struct adjust_timestamp_freq_param *params = &temp; + + params->ppb = ppb; + + return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, + sizeof(*params), + GXIO_MPIPE_OP_ADJUST_TIMESTAMP_FREQ); +} + +EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_freq); + struct config_edma_ring_blks_param { unsigned int ering; unsigned int max_blks; diff --git a/arch/tile/include/gxio/iorpc_mpipe.h b/arch/tile/include/gxio/iorpc_mpipe.h index eef60fd..fdd07f8 100644 --- a/arch/tile/include/gxio/iorpc_mpipe.h +++ b/arch/tile/include/gxio/iorpc_mpipe.h @@ -46,10 +46,11 @@ #define GXIO_MPIPE_OP_LINK_CLOSE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1212) #define GXIO_MPIPE_OP_LINK_SET_ATTR_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1213) -#define GXIO_MPIPE_OP_GET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x121e) -#define GXIO_MPIPE_OP_SET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x121f) -#define GXIO_MPIPE_OP_ADJUST_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1220) +#define GXIO_MPIPE_OP_GET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x121e) +#define GXIO_MPIPE_OP_SET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x121f) +#define GXIO_MPIPE_OP_ADJUST_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1220) #define GXIO_MPIPE_OP_CONFIG_EDMA_RING_BLKS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1221) +#define GXIO_MPIPE_OP_ADJUST_TIMESTAMP_FREQ IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1222) #define GXIO_MPIPE_OP_ARM_POLLFD IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9000) #define GXIO_MPIPE_OP_CLOSE_POLLFD IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9001) #define GXIO_MPIPE_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000) @@ -128,6 +129,9 @@ int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t * context, uint64_t sec, int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context, int64_t nsec); +int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t * 
context, + int32_t ppb); + int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie); int gxio_mpipe_close_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie); diff --git a/arch/tile/include/gxio/mpipe.h b/arch/tile/include/gxio/mpipe.h index eb7fee4..e37cf4f 100644 --- a/arch/tile/include/gxio/mpipe.h +++ b/arch/tile/include/gxio/mpipe.h @@ -1854,4 +1854,18 @@ extern int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context, extern int gxio_mpipe_adjust_timestamp(gxio_mpipe_context_t *context, int64_t delta); +/** Adjust the mPIPE timestamp clock frequency. + * + * @param context An initialized mPIPE context. + * @param ppb A 32-bit signed PPB (Parts Per Billion) value to adjust. + * The absolute value of ppb must be less than or equal to 1000000000. + * Values less than about 30000 will generally cause a GXIO_ERR_INVAL + * return due to the granularity of the hardware that converts reference + * clock cycles into seconds and nanoseconds. + * @return If the call was successful, zero; otherwise, a negative error + * code. + */ +extern int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t* context, + int32_t ppb); + #endif /* !_GXIO_MPIPE_H_ */ diff --git a/drivers/net/ethernet/tile/Kconfig b/drivers/net/ethernet/tile/Kconfig index 098b1c4..4083ba8 100644 --- a/drivers/net/ethernet/tile/Kconfig +++ b/drivers/net/ethernet/tile/Kconfig @@ -15,3 +15,14 @@ config TILE_NET To compile this driver as a module, choose M here: the module will be called tile_net. + +config PTP_1588_CLOCK_TILEGX + tristate "Tilera TILE-Gx mPIPE as PTP clock" + select PTP_1588_CLOCK + depends on TILE_NET + depends on TILEGX + ---help--- + This driver adds support for using the mPIPE as a PTP + clock. This clock is only useful if your PTP programs are + getting hardware time stamps on the PTP Ethernet packets + using the SO_TIMESTAMPING API. diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 0beed7e..907b577 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -38,6 +38,8 @@ #include #include #include +#include +#include #include #include @@ -185,6 +187,10 @@ struct tile_net_priv { int echannel; /* mPIPE instance, 0 or 1. */ int instance; +#ifdef CONFIG_PTP_1588_CLOCK_TILEGX + /* The timestamp config. */ + struct hwtstamp_config stamp_cfg; +#endif }; static struct mpipe_data { @@ -223,6 +229,15 @@ static struct mpipe_data { int first_bucket; int num_buckets; +#ifdef CONFIG_PTP_1588_CLOCK_TILEGX + /* PTP-specific data. */ + struct ptp_clock *ptp_clock; + struct ptp_clock_info caps; + + /* Lock for ptp accessors. */ + struct mutex ptp_lock; +#endif + } mpipe_data[NR_MPIPE_MAX] = { [0 ... (NR_MPIPE_MAX - 1)] { .ingress_irq = -1, @@ -432,6 +447,94 @@ static void tile_net_provide_needed_buffers(void) } } +/* Get RX timestamp, and store it in the skb. */ +static void tile_rx_timestamp(struct tile_net_priv *priv, struct sk_buff *skb, + gxio_mpipe_idesc_t *idesc) +{ +#ifdef CONFIG_PTP_1588_CLOCK_TILEGX + if (unlikely(priv->stamp_cfg.rx_filter != HWTSTAMP_FILTER_NONE)) { + struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + shhwtstamps->hwtstamp = ktime_set(idesc->time_stamp_sec, + idesc->time_stamp_ns); + } +#endif +} + +/* Get TX timestamp, and store it in the skb. 
*/ +static void tile_tx_timestamp(struct sk_buff *skb, int instance) +{ +#ifdef CONFIG_PTP_1588_CLOCK_TILEGX + struct skb_shared_info *shtx = skb_shinfo(skb); + if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) { + struct mpipe_data *md = &mpipe_data[instance]; + struct skb_shared_hwtstamps shhwtstamps; + struct timespec ts; + + shtx->tx_flags |= SKBTX_IN_PROGRESS; + gxio_mpipe_get_timestamp(&md->context, &ts); + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); + skb_tstamp_tx(skb, &shhwtstamps); + } +#endif +} + +/* Use ioctl() to enable or disable TX or RX timestamping. */ +static int tile_hwtstamp_ioctl(struct net_device *dev, struct ifreq *rq, + int cmd) +{ +#ifdef CONFIG_PTP_1588_CLOCK_TILEGX + struct hwtstamp_config config; + struct tile_net_priv *priv = netdev_priv(dev); + + if (copy_from_user(&config, rq->ifr_data, sizeof(config))) + return -EFAULT; + + if (config.flags) /* reserved for future extensions */ + return -EINVAL; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + if (copy_to_user(rq->ifr_data, &config, sizeof(config))) + return -EFAULT; + + priv->stamp_cfg = config; + return 0; +#else + return -EOPNOTSUPP; +#endif +} + static inline bool filter_packet(struct net_device *dev, void *buf) { /* Filter packets received before we're up. */ @@ -451,7 +554,8 @@ static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb, gxio_mpipe_idesc_t *idesc, unsigned long len) { struct tile_net_info *info = &__get_cpu_var(per_cpu_info); - int instance = mpipe_instance(dev); + struct tile_net_priv *priv = netdev_priv(dev); + int instance = priv->instance; /* Encode the actual packet length. */ skb_put(skb, len); @@ -462,6 +566,9 @@ static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb, if (idesc->cs && idesc->csum_seed_val == 0xFFFF) skb->ip_summed = CHECKSUM_UNNECESSARY; + /* Get RX timestamp from idesc. */ + tile_rx_timestamp(priv, skb, idesc); + napi_gro_receive(&info->mpipe[instance].napi, skb); /* Update stats. */ @@ -707,6 +814,103 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t) return HRTIMER_NORESTART; } +#ifdef CONFIG_PTP_1588_CLOCK_TILEGX + +/* PTP clock operations. 
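 *
 * (An illustrative aside rather than part of the patch: the callbacks below
 * are invoked by the PTP core on behalf of userspace. Once ptp_clock_register()
 * runs in register_ptp_clock() further down, the mPIPE clock shows up as a
 * character device /dev/ptpN and can be driven as a dynamic posix clock. A
 * rough userspace sketch, with the device index and the usual FD_TO_CLOCKID
 * helper taken as assumptions; headers assumed: fcntl.h, time.h.
 *
 *	#define CLOCKFD 3
 *	#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)
 *
 *	int fd = open("/dev/ptp0", O_RDWR);
 *	struct timespec ts;
 *
 *	if (fd >= 0)
 *		clock_gettime(FD_TO_CLOCKID(fd), &ts);
 *
 * which ends up in ptp_mpipe_gettime(); frequency and offset corrections made
 * by tools such as ptp4l/phc2sys reach ptp_mpipe_adjfreq() and
 * ptp_mpipe_adjtime() through clock_adjtime() on the same clockid.)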
*/ + +static int ptp_mpipe_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + int ret = 0; + struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps); + mutex_lock(&md->ptp_lock); + if (gxio_mpipe_adjust_timestamp_freq(&md->context, ppb)) + ret = -EINVAL; + mutex_unlock(&md->ptp_lock); + return ret; +} + +static int ptp_mpipe_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + int ret = 0; + struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps); + mutex_lock(&md->ptp_lock); + if (gxio_mpipe_adjust_timestamp(&md->context, delta)) + ret = -EBUSY; + mutex_unlock(&md->ptp_lock); + return ret; +} + +static int ptp_mpipe_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +{ + int ret = 0; + struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps); + mutex_lock(&md->ptp_lock); + if (gxio_mpipe_get_timestamp(&md->context, ts)) + ret = -EBUSY; + mutex_unlock(&md->ptp_lock); + return ret; +} + +static int ptp_mpipe_settime(struct ptp_clock_info *ptp, + const struct timespec *ts) +{ + int ret = 0; + struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps); + mutex_lock(&md->ptp_lock); + if (gxio_mpipe_set_timestamp(&md->context, ts)) + ret = -EBUSY; + mutex_unlock(&md->ptp_lock); + return ret; +} + +static int ptp_mpipe_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *request, int on) +{ + return -EOPNOTSUPP; +} + +static struct ptp_clock_info ptp_mpipe_caps = { + .owner = THIS_MODULE, + .name = "mPIPE clock", + .max_adj = 999999999, + .n_ext_ts = 0, + .pps = 0, + .adjfreq = ptp_mpipe_adjfreq, + .adjtime = ptp_mpipe_adjtime, + .gettime = ptp_mpipe_gettime, + .settime = ptp_mpipe_settime, + .enable = ptp_mpipe_enable, +}; + +#endif /* CONFIG_PTP_1588_CLOCK_TILEGX */ + +/* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */ +static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md) +{ +#ifdef CONFIG_PTP_1588_CLOCK_TILEGX + struct timespec ts; + + getnstimeofday(&ts); + gxio_mpipe_set_timestamp(&md->context, &ts); + + mutex_init(&md->ptp_lock); + md->caps = ptp_mpipe_caps; + md->ptp_clock = ptp_clock_register(&md->caps, NULL); + if (IS_ERR(md->ptp_clock)) + netdev_err(dev, "ptp_clock_register failed %ld\n", + PTR_ERR(md->ptp_clock)); +#endif +} + +/* Initialize PTP fields in a new device. */ +static void init_ptp_dev(struct tile_net_priv *priv) +{ +#ifdef CONFIG_PTP_1588_CLOCK_TILEGX + priv->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE; + priv->stamp_cfg.tx_type = HWTSTAMP_TX_OFF; +#endif +} + /* Helper functions for "tile_net_update()". */ static void enable_ingress_irq(void *irq) { @@ -1149,6 +1353,9 @@ static int tile_net_init_mpipe(struct net_device *dev) if (rc != 0) goto fail; + /* Register PTP clock and set mPIPE timestamp, if configured. */ + register_ptp_clock(dev, md); + return 0; fail: @@ -1851,6 +2058,9 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) for (i = 0; i < num_edescs; i++) gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++); + /* Store TX timestamp if needed. */ + tile_tx_timestamp(skb, instance); + /* Add a completion record. */ add_comp(equeue, comps, slot - 1, skb); @@ -1885,6 +2095,9 @@ static void tile_net_tx_timeout(struct net_device *dev) /* Ioctl commands. 
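 *
 * (An illustrative aside rather than part of the patch: hardware timestamping
 * is normally switched on from userspace with a SIOCSHWTSTAMP ioctl plus an
 * SO_TIMESTAMPING socket option, roughly as below. The interface name "gbe0"
 * and the socket "sock" are assumptions made for the sketch; the ioctl request
 * is what lands in tile_hwtstamp_ioctl() above via tile_net_ioctl(). Headers
 * assumed: linux/net_tstamp.h, linux/sockios.h, net/if.h, sys/ioctl.h,
 * sys/socket.h.
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *	int flags = SOF_TIMESTAMPING_TX_HARDWARE |
 *		    SOF_TIMESTAMPING_RX_HARDWARE |
 *		    SOF_TIMESTAMPING_RAW_HARDWARE;
 *
 *	strncpy(ifr.ifr_name, "gbe0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock, SIOCSHWTSTAMP, &ifr);
 *	setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags));
 *
 * Note that the handler above accepts the PTP rx_filter values but upgrades
 * them to HWTSTAMP_FILTER_ALL, and reports that choice back in the config it
 * copies out to the caller.)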
*/ static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { + if (cmd == SIOCSHWTSTAMP) + return tile_hwtstamp_ioctl(dev, rq, cmd); + return -EOPNOTSUPP; } @@ -2005,6 +2218,7 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac) priv->channel = -1; priv->loopify_channel = -1; priv->echannel = -1; + init_ptp_dev(priv); /* Get the MAC address and set it in the device struct; this must * be done before the device is opened. If the MAC is all zeroes, -- cgit v0.10.2 From 4beac0293fabb68125e1a9d2ce81d89343f8702d Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Thu, 1 Aug 2013 11:51:42 +0200 Subject: bonding: fix system hang due to fast igmp timer rescheduling After commit 4aa5dee4d9 ("net: convert resend IGMP to notifier event") we try to acquire rtnl in bond_resend_igmp_join_requests but it can be scheduled with rtnl already held (e.g. when bond_change_active_slave is called with rtnl) causing a loop of immediate reschedules + calls because rtnl_trylock fails each time since it's already being held. For me this issue leads to system hangs very easily: modprobe bonding; ifconfig bond0 up; ifenslave bond0 eth0; rmmod bonding; The fix is to introduce a small (1 jiffy) delay which is enough for the sections holding rtnl to finish without putting any strain on the system. Also adjust the timer in bond_change_active_slave to be 1 jiffy, since most of the time it's called with rtnl already held. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index da3af63..bc3578e 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -723,7 +723,7 @@ static int bond_set_allmulti(struct bonding *bond, int inc) static void bond_resend_igmp_join_requests(struct bonding *bond) { if (!rtnl_trylock()) { - queue_delayed_work(bond->wq, &bond->mcast_work, 0); + queue_delayed_work(bond->wq, &bond->mcast_work, 1); return; } call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev); @@ -1084,7 +1084,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) ((USES_PRIMARY(bond->params.mode) && new_active) || bond->params.mode == BOND_MODE_ROUNDROBIN)) { bond->igmp_retrans = bond->params.resend_igmp; - queue_delayed_work(bond->wq, &bond->mcast_work, 0); + queue_delayed_work(bond->wq, &bond->mcast_work, 1); } } -- cgit v0.10.2 From 8b09be5f173759c87159b2f300c18f2ace9587de Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Thu, 1 Aug 2013 17:30:59 +0300 Subject: bnx2x: Revising locking scheme for MAC configuration On very rare occasions, repeated load/unload stress test in the presence of our storage driver (bnx2i/bnx2fc) causes a kernel panic in bnx2x code (NULL pointer dereference). Stack traces indicate the issue happens during MAC configuration; thorough code review showed that indeed several races exist in which one thread can iterate over the list of configured MACs while another deletes entries from the same list. This patch adds a variant on the single-writer/multiple-reader lock mechanism - it utilizes an already existing bottom-half lock, using it so that whenever a writer is unable to continue due to the existence of another writer/reader, it pends its request to be performed later. The writer / last readers will check for the existence of such requests and perform them instead of the original initiator.
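As a rough sketch of the idea only (the real implementation follows in the bnx2x_sp.c hunks below; the pend_lock structure and the work() callback here are made-up names for illustration), the pattern looks like this:

	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct pend_lock {
		spinlock_t lock;	/* the pre-existing bottom-half lock */
		int readers;		/* readers currently walking the list */
		bool exec_pending;	/* a writer had to defer its work */
	};

	/* Writer: run now if no readers are inside, otherwise leave a request. */
	static void writer_exec_or_pend(struct pend_lock *pl, void (*work)(void))
	{
		spin_lock_bh(&pl->lock);
		if (pl->readers)
			pl->exec_pending = true;	/* someone else will run it */
		else
			work();				/* safe: lock held, no readers */
		spin_unlock_bh(&pl->lock);
	}

	/* Last reader out performs whatever a writer had to defer. */
	static void reader_unlock(struct pend_lock *pl, void (*work)(void))
	{
		spin_lock_bh(&pl->lock);
		if (--pl->readers == 0 && pl->exec_pending) {
			pl->exec_pending = false;
			work();
		}
		spin_unlock_bh(&pl->lock);
	}

Taking the reader side is simply readers++ under the same lock. In the driver, the role of pend_lock is played by the execution queue's spinlock together with the new head_reader/head_exe_request/saved_ramrod_flags fields, and the deferred work is the saved ramrod execution.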
This prevents the writer from having to sleep while waiting for the lock to be accessible, which might cause deadlocks given the locks already held by the writer. Another result of this patch is that setting of Rx Mode is now made in sleepable context - Setting of Rx Mode is made under a bottom-half lock, which was always nontrivial for the bnx2x driver, as the HW/FW configuration requires wait for completions. Since sleep was impossible (due to the sleepless-context), various mechanisms were utilized to prevent the calling thread from sleep, but the truth was that when the caller thread (i.e, the one calling ndo_set_rx_mode()) returned, the Rx mode was still not set in HW/FW. bnx2x_set_rx_mode() will now overtly schedule for the Rx changes to be configured by the sp_rtnl_task which hold the RTNL lock and is sleepable context. Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: Eilon Greenstein Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index dedbd76..439387b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1331,7 +1331,7 @@ enum { BNX2X_SP_RTNL_ENABLE_SRIOV, BNX2X_SP_RTNL_VFPF_MCAST, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, - BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, + BNX2X_SP_RTNL_RX_MODE, BNX2X_SP_RTNL_HYPERVISOR_VLAN, }; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index ee350bd..11abdcc 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2060,7 +2060,11 @@ void bnx2x_squeeze_objects(struct bnx2x *bp) rparam.mcast_obj = &bp->mcast_obj; __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags); - /* Add a DEL command... */ + /* Add a DEL command... - Since we're doing a driver cleanup only, + * we take a lock surrounding both the initial send and the CONTs, + * as we don't want a true completion to disrupt us in the middle. + */ + netif_addr_lock_bh(bp->dev); rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); if (rc < 0) BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n", @@ -2072,11 +2076,13 @@ void bnx2x_squeeze_objects(struct bnx2x *bp) if (rc < 0) { BNX2X_ERR("Failed to clean multi-cast object: %d\n", rc); + netif_addr_unlock_bh(bp->dev); return; } rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); } + netif_addr_unlock_bh(bp->dev); } #ifndef BNX2X_STOP_ON_ERROR @@ -2432,9 +2438,7 @@ int bnx2x_load_cnic(struct bnx2x *bp) } /* Initialize Rx filter. */ - netif_addr_lock_bh(bp->dev); - bnx2x_set_rx_mode(bp->dev); - netif_addr_unlock_bh(bp->dev); + bnx2x_set_rx_mode_inner(bp); /* re-read iscsi info */ bnx2x_get_iscsi_info(bp); @@ -2704,9 +2708,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) /* Start fast path */ /* Initialize Rx filter. 
*/ - netif_addr_lock_bh(bp->dev); - bnx2x_set_rx_mode(bp->dev); - netif_addr_unlock_bh(bp->dev); + bnx2x_set_rx_mode_inner(bp); /* Start the Tx */ switch (load_mode) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index c07a6d0..38be494 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -418,6 +418,7 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set); * netif_addr_lock_bh() */ void bnx2x_set_rx_mode(struct net_device *dev); +void bnx2x_set_rx_mode_inner(struct bnx2x *bp); /** * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW. diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index e5da078..bd064c6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -9628,11 +9628,9 @@ sp_rtnl_not_reset: } } - if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, - &bp->sp_rtnl_state)) { - DP(BNX2X_MSG_SP, - "sending set storm rx mode vf pf channel message from rtnl sp-task\n"); - bnx2x_vfpf_storm_rx_mode(bp); + if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) { + DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n"); + bnx2x_set_rx_mode_inner(bp); } if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN, @@ -11849,34 +11847,48 @@ static int bnx2x_set_mc_list(struct bnx2x *bp) void bnx2x_set_rx_mode(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); - u32 rx_mode = BNX2X_RX_MODE_NORMAL; if (bp->state != BNX2X_STATE_OPEN) { DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); return; + } else { + /* Schedule an SP task to handle rest of change */ + DP(NETIF_MSG_IFUP, "Scheduling an Rx mode change\n"); + smp_mb__before_clear_bit(); + set_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state); + smp_mb__after_clear_bit(); + schedule_delayed_work(&bp->sp_rtnl_task, 0); } +} + +void bnx2x_set_rx_mode_inner(struct bnx2x *bp) +{ + u32 rx_mode = BNX2X_RX_MODE_NORMAL; DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags); - if (dev->flags & IFF_PROMISC) + netif_addr_lock_bh(bp->dev); + + if (bp->dev->flags & IFF_PROMISC) { rx_mode = BNX2X_RX_MODE_PROMISC; - else if ((dev->flags & IFF_ALLMULTI) || - ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) && - CHIP_IS_E1(bp))) + } else if ((bp->dev->flags & IFF_ALLMULTI) || + ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) && + CHIP_IS_E1(bp))) { rx_mode = BNX2X_RX_MODE_ALLMULTI; - else { + } else { if (IS_PF(bp)) { /* some multicasts */ if (bnx2x_set_mc_list(bp) < 0) rx_mode = BNX2X_RX_MODE_ALLMULTI; + /* release bh lock, as bnx2x_set_uc_list might sleep */ + netif_addr_unlock_bh(bp->dev); if (bnx2x_set_uc_list(bp) < 0) rx_mode = BNX2X_RX_MODE_PROMISC; + netif_addr_lock_bh(bp->dev); } else { /* configuring mcast to a vf involves sleeping (when we - * wait for the pf's response). Since this function is - * called from non sleepable context we must schedule - * a work item for this purpose + * wait for the pf's response). 
*/ smp_mb__before_clear_bit(); set_bit(BNX2X_SP_RTNL_VFPF_MCAST, @@ -11894,22 +11906,20 @@ void bnx2x_set_rx_mode(struct net_device *dev) /* Schedule the rx_mode command */ if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) { set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); + netif_addr_unlock_bh(bp->dev); return; } if (IS_PF(bp)) { bnx2x_set_storm_rx_mode(bp); + netif_addr_unlock_bh(bp->dev); } else { - /* configuring rx mode to storms in a vf involves sleeping (when - * we wait for the pf's response). Since this function is - * called from non sleepable context we must schedule - * a work item for this purpose + /* VF will need to request the PF to make this change, and so + * the VF needs to release the bottom-half lock prior to the + * request (as it will likely require sleep on the VF side) */ - smp_mb__before_clear_bit(); - set_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, - &bp->sp_rtnl_state); - smp_mb__after_clear_bit(); - schedule_delayed_work(&bp->sp_rtnl_task, 0); + netif_addr_unlock_bh(bp->dev); + bnx2x_vfpf_storm_rx_mode(bp); } } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 8f03c98..1d46b68 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -159,16 +159,6 @@ static inline void __bnx2x_exe_queue_reset_pending( } } -static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp, - struct bnx2x_exe_queue_obj *o) -{ - spin_lock_bh(&o->lock); - - __bnx2x_exe_queue_reset_pending(bp, o); - - spin_unlock_bh(&o->lock); -} - /** * bnx2x_exe_queue_step - execute one execution chunk atomically * @@ -176,7 +166,7 @@ static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp, * @o: queue * @ramrod_flags: flags * - * (Atomicity is ensured using the exe_queue->lock). + * (Should be called while holding the exe_queue->lock). */ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, struct bnx2x_exe_queue_obj *o, @@ -187,8 +177,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, memset(&spacer, 0, sizeof(spacer)); - spin_lock_bh(&o->lock); - /* Next step should not be performed until the current is finished, * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to * properly clear object internals without sending any command to the FW @@ -200,7 +188,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n"); __bnx2x_exe_queue_reset_pending(bp, o); } else { - spin_unlock_bh(&o->lock); return 1; } } @@ -228,10 +215,8 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, } /* Sanity check */ - if (!cur_len) { - spin_unlock_bh(&o->lock); + if (!cur_len) return 0; - } rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags); if (rc < 0) @@ -245,7 +230,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, */ __bnx2x_exe_queue_reset_pending(bp, o); - spin_unlock_bh(&o->lock); return rc; } @@ -432,12 +416,219 @@ static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o) return true; } +/** + * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details: Non-blocking implementation; should be called under execution + * queue lock. 
+ */ +static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + if (o->head_reader) { + DP(BNX2X_MSG_SP, "vlan_mac_lock writer - There are readers; Busy\n"); + return -EBUSY; + } + + DP(BNX2X_MSG_SP, "vlan_mac_lock writer - Taken\n"); + return 0; +} + +/** + * __bnx2x_vlan_mac_h_exec_pending - execute step instead of a previous step + * + * @bp: device handle + * @o: vlan_mac object + * + * @details Should be called under execution queue lock; notice it might release + * and reclaim it during its run. + */ +static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + int rc; + unsigned long ramrod_flags = o->saved_ramrod_flags; + + DP(BNX2X_MSG_SP, "vlan_mac_lock execute pending command with ramrod flags %lu\n", + ramrod_flags); + o->head_exe_request = false; + o->saved_ramrod_flags = 0; + rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags); + if (rc != 0) { + BNX2X_ERR("execution of pending commands failed with rc %d\n", + rc); +#ifdef BNX2X_STOP_ON_ERROR + bnx2x_panic(); +#endif + } +} + +/** + * __bnx2x_vlan_mac_h_pend - Pend an execution step which couldn't run + * + * @bp: device handle + * @o: vlan_mac object + * @ramrod_flags: ramrod flags of missed execution + * + * @details Should be called under execution queue lock. + */ +static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + unsigned long ramrod_flags) +{ + o->head_exe_request = true; + o->saved_ramrod_flags = ramrod_flags; + DP(BNX2X_MSG_SP, "Placing pending execution with ramrod flags %lu\n", + ramrod_flags); +} + +/** + * __bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details Should be called under execution queue lock. Notice if a pending + * execution exists, it would perform it - possibly releasing and + * reclaiming the execution queue lock. + */ +static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + /* It's possible a new pending execution was added since this writer + * executed. If so, execute again. [Ad infinitum] + */ + while (o->head_exe_request) { + DP(BNX2X_MSG_SP, "vlan_mac_lock - writer release encountered a pending request\n"); + __bnx2x_vlan_mac_h_exec_pending(bp, o); + } +} + +/** + * bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details Notice if a pending execution exists, it would perform it - + * possibly releasing and reclaiming the execution queue lock. + */ +void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + spin_lock_bh(&o->exe_queue.lock); + __bnx2x_vlan_mac_h_write_unlock(bp, o); + spin_unlock_bh(&o->exe_queue.lock); +} + +/** + * __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details Should be called under the execution queue lock. May sleep. May + * release and reclaim execution queue lock during its run. + */ +static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + /* If we got here, we're holding lock --> no WRITER exists */ + o->head_reader++; + DP(BNX2X_MSG_SP, "vlan_mac_lock - locked reader - number %d\n", + o->head_reader); + + return 0; +} + +/** + * bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details May sleep. 
Claims and releases execution queue lock during its run. + */ +int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + int rc; + + spin_lock_bh(&o->exe_queue.lock); + rc = __bnx2x_vlan_mac_h_read_lock(bp, o); + spin_unlock_bh(&o->exe_queue.lock); + + return rc; +} + +/** + * __bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details Should be called under execution queue lock. Notice if a pending + * execution exists, it would be performed if this was the last + * reader. possibly releasing and reclaiming the execution queue lock. + */ +static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + if (!o->head_reader) { + BNX2X_ERR("Need to release vlan mac reader lock, but lock isn't taken\n"); +#ifdef BNX2X_STOP_ON_ERROR + bnx2x_panic(); +#endif + } else { + o->head_reader--; + DP(BNX2X_MSG_SP, "vlan_mac_lock - decreased readers to %d\n", + o->head_reader); + } + + /* It's possible a new pending execution was added, and that this reader + * was last - if so we need to execute the command. + */ + if (!o->head_reader && o->head_exe_request) { + DP(BNX2X_MSG_SP, "vlan_mac_lock - reader release encountered a pending request\n"); + + /* Writer release will do the trick */ + __bnx2x_vlan_mac_h_write_unlock(bp, o); + } +} + +/** + * bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details Notice if a pending execution exists, it would be performed if this + * was the last reader. Claims and releases the execution queue lock + * during its run. + */ +void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + spin_lock_bh(&o->exe_queue.lock); + __bnx2x_vlan_mac_h_read_unlock(bp, o); + spin_unlock_bh(&o->exe_queue.lock); +} + static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, int n, u8 *base, u8 stride, u8 size) { struct bnx2x_vlan_mac_registry_elem *pos; u8 *next = base; int counter = 0; + int read_lock; + + DP(BNX2X_MSG_SP, "get_n_elements - taking vlan_mac_lock (reader)\n"); + read_lock = bnx2x_vlan_mac_h_read_lock(bp, o); + if (read_lock != 0) + BNX2X_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n"); /* traverse list */ list_for_each_entry(pos, &o->head, link) { @@ -449,6 +640,12 @@ static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, next += stride + size; } } + + if (read_lock == 0) { + DP(BNX2X_MSG_SP, "get_n_elements - releasing vlan_mac_lock (reader)\n"); + bnx2x_vlan_mac_h_read_unlock(bp, o); + } + return counter * ETH_ALEN; } @@ -1397,6 +1594,32 @@ static int bnx2x_wait_vlan_mac(struct bnx2x *bp, return -EBUSY; } +static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + unsigned long *ramrod_flags) +{ + int rc = 0; + + spin_lock_bh(&o->exe_queue.lock); + + DP(BNX2X_MSG_SP, "vlan_mac_execute_step - trying to take writer lock\n"); + rc = __bnx2x_vlan_mac_h_write_trylock(bp, o); + + if (rc != 0) { + __bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags); + + /* Calling function should not diffrentiate between this case + * and the case in which there is already a pending ramrod + */ + rc = 1; + } else { + rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags); + } + spin_unlock_bh(&o->exe_queue.lock); + + return rc; +} + /** * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod * @@ -1414,19 +1637,27 @@ static 
int bnx2x_complete_vlan_mac(struct bnx2x *bp, struct bnx2x_raw_obj *r = &o->raw; int rc; + /* Clearing the pending list & raw state should be made + * atomically (as execution flow assumes they represent the same). + */ + spin_lock_bh(&o->exe_queue.lock); + /* Reset pending list */ - bnx2x_exe_queue_reset_pending(bp, &o->exe_queue); + __bnx2x_exe_queue_reset_pending(bp, &o->exe_queue); /* Clear pending */ r->clear_pending(r); + spin_unlock_bh(&o->exe_queue.lock); + /* If ramrod failed this is most likely a SW bug */ if (cqe->message.error) return -EINVAL; /* Run the next bulk of pending commands if requested */ if (test_bit(RAMROD_CONT, ramrod_flags)) { - rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags); + rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags); + if (rc < 0) return rc; } @@ -1719,9 +1950,8 @@ static inline int bnx2x_vlan_mac_push_new_cmd( * @p: * */ -int bnx2x_config_vlan_mac( - struct bnx2x *bp, - struct bnx2x_vlan_mac_ramrod_params *p) +int bnx2x_config_vlan_mac(struct bnx2x *bp, + struct bnx2x_vlan_mac_ramrod_params *p) { int rc = 0; struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj; @@ -1752,7 +1982,8 @@ int bnx2x_config_vlan_mac( /* Execute commands if required */ if (cont || test_bit(RAMROD_EXEC, ramrod_flags) || test_bit(RAMROD_COMP_WAIT, ramrod_flags)) { - rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags); + rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj, + &p->ramrod_flags); if (rc < 0) return rc; } @@ -1775,8 +2006,9 @@ int bnx2x_config_vlan_mac( return rc; /* Make a next step */ - rc = bnx2x_exe_queue_step(bp, &o->exe_queue, - ramrod_flags); + rc = __bnx2x_vlan_mac_execute_step(bp, + p->vlan_mac_obj, + &p->ramrod_flags); if (rc < 0) return rc; } @@ -1806,10 +2038,11 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, unsigned long *ramrod_flags) { struct bnx2x_vlan_mac_registry_elem *pos = NULL; - int rc = 0; struct bnx2x_vlan_mac_ramrod_params p; struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n; + int read_lock; + int rc = 0; /* Clear pending commands first */ @@ -1844,6 +2077,11 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, __clear_bit(RAMROD_EXEC, &p.ramrod_flags); __clear_bit(RAMROD_CONT, &p.ramrod_flags); + DP(BNX2X_MSG_SP, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n"); + read_lock = bnx2x_vlan_mac_h_read_lock(bp, o); + if (read_lock != 0) + return read_lock; + list_for_each_entry(pos, &o->head, link) { if (pos->vlan_mac_flags == *vlan_mac_flags) { p.user_req.vlan_mac_flags = pos->vlan_mac_flags; @@ -1851,11 +2089,15 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, rc = bnx2x_config_vlan_mac(bp, &p); if (rc < 0) { BNX2X_ERR("Failed to add a new DEL command\n"); + bnx2x_vlan_mac_h_read_unlock(bp, o); return rc; } } } + DP(BNX2X_MSG_SP, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n"); + bnx2x_vlan_mac_h_read_unlock(bp, o); + p.ramrod_flags = *ramrod_flags; __set_bit(RAMROD_CONT, &p.ramrod_flags); @@ -1887,6 +2129,9 @@ static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o, struct bnx2x_credit_pool_obj *vlans_pool) { INIT_LIST_HEAD(&o->head); + o->head_reader = 0; + o->head_exe_request = false; + o->saved_ramrod_flags = 0; o->macs_pool = macs_pool; o->vlans_pool = vlans_pool; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 798dfe9..533a3ab 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -285,6 +285,12 
@@ struct bnx2x_vlan_mac_obj { * entries. */ struct list_head head; + /* Implement a simple reader/writer lock on the head list. + * all these fields should only be accessed under the exe_queue lock + */ + u8 head_reader; /* Num. of readers accessing head list */ + bool head_exe_request; /* Pending execution request. */ + unsigned long saved_ramrod_flags; /* Ramrods of pending execution */ /* TODO: Add it's initialization in the init functions */ struct bnx2x_exe_queue_obj exe_queue; @@ -1302,8 +1308,16 @@ void bnx2x_init_vlan_mac_obj(struct bnx2x *bp, struct bnx2x_credit_pool_obj *macs_pool, struct bnx2x_credit_pool_obj *vlans_pool); +int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o); +void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o); +int bnx2x_vlan_mac_h_write_lock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o); +void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o); int bnx2x_config_vlan_mac(struct bnx2x *bp, - struct bnx2x_vlan_mac_ramrod_params *p); + struct bnx2x_vlan_mac_ramrod_params *p); int bnx2x_vlan_mac_move(struct bnx2x *bp, struct bnx2x_vlan_mac_ramrod_params *p, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 95861ef..6291324 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -491,12 +491,20 @@ static inline void bnx2x_vfop_credit(struct bnx2x *bp, * and a valid credit counter */ if (!vfop->rc && args->credit) { - int cnt = 0; struct list_head *pos; + int read_lock; + int cnt = 0; + + read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj); + if (read_lock) + DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n"); list_for_each(pos, &obj->head) cnt++; + if (!read_lock) + bnx2x_vlan_mac_h_read_unlock(bp, obj); + atomic_set(args->credit, cnt); } } -- cgit v0.10.2 From 439677d766ba9095e5afc4a30147f65bc363b6e7 Mon Sep 17 00:00:00 2001 From: "fan.du" Date: Thu, 1 Aug 2013 17:44:44 +0800 Subject: ipv6: bump genid when delete/add address Server Client 2001:1::803/64 <-> 2001:1::805/64 2001:2::804/64 <-> 2001:2::806/64 Server side fib binary tree looks like this: (2001:/64) / / ffff88002103c380 / \ (2) / \ (2001::803/128) ffff880037ac07c0 / \ / \ (3) ffff880037ac0640 (2001::806/128) / \ (1) / \ (2001::804/128) (2001::805/128) Delete 2001::804/64 won't cause prefix route deleted as well as rt in (3) destinate to 2001::806 with source address as 2001::804/64. That's because 2001::803/64 is still alive, which make onlink=1 in ipv6_del_addr, this is where the substantial difference between same prefix configuration and different prefix configuration :) So packet are still transmitted out to 2001::806 with source address as 2001::804/64. So bump genid will clear rt in (3), and up layer protocol will eventually find the right one for themselves. This problem arised from the discussion in here: http://marc.info/?l=linux-netdev&m=137404469219410&w=4 Signed-off-by: Fan Du Signed-off-by: David S. 
Miller diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index cfdcf7b..e7780d7 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -4653,6 +4653,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) break; } atomic_inc(&net->ipv6.dev_addr_genid); + rt_genid_bump_ipv6(net); } static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) -- cgit v0.10.2 From dec1e90e8c7157a527faad95023d96dbc114fbac Mon Sep 17 00:00:00 2001 From: "nikolay@redhat.com" Date: Thu, 1 Aug 2013 16:54:47 +0200 Subject: bonding: convert to list API and replace bond's custom list This patch aims to remove struct bonding's first_slave and struct slave's next and prev pointers, and replace them with the standard Linux list API. The old macros are converted to list API as well and some new primitives are available now. The checks if there're slaves that used slave_cnt have been replaced by the list_empty macro. Also a few small style fixes, changing longest -> shortest line in local variable declarations, leaving an empty line before return and removing unnecessary brackets. This is the first step to gradual RCU conversion. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 390061d..7d46fa8 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -143,10 +143,9 @@ static inline struct bonding *__get_bond_by_port(struct port *port) */ static inline struct port *__get_first_port(struct bonding *bond) { - if (bond->slave_cnt == 0) - return NULL; + struct slave *first_slave = bond_first_slave(bond); - return &(SLAVE_AD_INFO(bond->first_slave).port); + return first_slave ? &(SLAVE_AD_INFO(first_slave).port) : NULL; } /** @@ -159,13 +158,16 @@ static inline struct port *__get_first_port(struct bonding *bond) static inline struct port *__get_next_port(struct port *port) { struct bonding *bond = __get_bond_by_port(port); - struct slave *slave = port->slave; + struct slave *slave = port->slave, *slave_next; // If there's no bond for this port, or this is the last slave - if ((bond == NULL) || (slave->next == bond->first_slave)) + if (bond == NULL) + return NULL; + slave_next = bond_next_slave(bond, slave); + if (!slave_next || bond_is_first_slave(bond, slave_next)) return NULL; - return &(SLAVE_AD_INFO(slave->next).port); + return &(SLAVE_AD_INFO(slave_next).port); } /** @@ -178,12 +180,14 @@ static inline struct port *__get_next_port(struct port *port) static inline struct aggregator *__get_first_agg(struct port *port) { struct bonding *bond = __get_bond_by_port(port); + struct slave *first_slave; // If there's no bond for this port, or bond has no slaves - if ((bond == NULL) || (bond->slave_cnt == 0)) + if (bond == NULL) return NULL; + first_slave = bond_first_slave(bond); - return &(SLAVE_AD_INFO(bond->first_slave).aggregator); + return first_slave ? 
&(SLAVE_AD_INFO(first_slave).aggregator) : NULL; } /** @@ -195,14 +199,17 @@ static inline struct aggregator *__get_first_agg(struct port *port) */ static inline struct aggregator *__get_next_agg(struct aggregator *aggregator) { - struct slave *slave = aggregator->slave; + struct slave *slave = aggregator->slave, *slave_next; struct bonding *bond = bond_get_bond_by_slave(slave); // If there's no bond for this aggregator, or this is the last slave - if ((bond == NULL) || (slave->next == bond->first_slave)) + if (bond == NULL) + return NULL; + slave_next = bond_next_slave(bond, slave); + if (!slave_next || bond_is_first_slave(bond, slave_next)) return NULL; - return &(SLAVE_AD_INFO(slave->next).aggregator); + return &(SLAVE_AD_INFO(slave_next).aggregator); } /* @@ -2110,7 +2117,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work) read_lock(&bond->lock); //check if there are any slaves - if (bond->slave_cnt == 0) + if (list_empty(&bond->slave_list)) goto re_arm; // check if agg_select_timer timer after initialize is timed out @@ -2336,8 +2343,12 @@ void bond_3ad_handle_link_change(struct slave *slave, char link) int bond_3ad_set_carrier(struct bonding *bond) { struct aggregator *active; + struct slave *first_slave; - active = __get_active_agg(&(SLAVE_AD_INFO(bond->first_slave).aggregator)); + first_slave = bond_first_slave(bond); + if (!first_slave) + return 0; + active = __get_active_agg(&(SLAVE_AD_INFO(first_slave).aggregator)); if (active) { /* are enough slaves available to consider link up? */ if (active->num_of_ports < bond->params.min_links) { @@ -2432,7 +2443,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev) slave_agg_no = bond->xmit_hash_policy(skb, slaves_in_agg); - bond_for_each_slave(bond, slave, i) { + bond_for_each_slave(bond, slave) { struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator; if (agg && (agg->aggregator_identifier == agg_id)) { @@ -2501,7 +2512,6 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond, */ void bond_3ad_update_lacp_rate(struct bonding *bond) { - int i; struct slave *slave; struct port *port = NULL; int lacp_fast; @@ -2509,7 +2519,7 @@ void bond_3ad_update_lacp_rate(struct bonding *bond) write_lock_bh(&bond->lock); lacp_fast = bond->params.lacp_fast; - bond_for_each_slave(bond, slave, i) { + bond_for_each_slave(bond, slave) { port = &(SLAVE_AD_INFO(slave).port); if (port->slave == NULL) continue; diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 4ea8ed1..4d35196 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -224,13 +224,12 @@ static struct slave *tlb_get_least_loaded_slave(struct bonding *bond) { struct slave *slave, *least_loaded; long long max_gap; - int i; least_loaded = NULL; max_gap = LLONG_MIN; /* Find the slave with the largest gap */ - bond_for_each_slave(bond, slave, i) { + bond_for_each_slave(bond, slave) { if (SLAVE_IS_OK(slave)) { long long gap = compute_gap(slave); @@ -386,11 +385,10 @@ static struct slave *rlb_next_rx_slave(struct bonding *bond) struct slave *rx_slave, *slave, *start_at; int i = 0; - if (bond_info->next_rx_slave) { + if (bond_info->next_rx_slave) start_at = bond_info->next_rx_slave; - } else { - start_at = bond->first_slave; - } + else + start_at = bond_first_slave(bond); rx_slave = NULL; @@ -405,7 +403,8 @@ static struct slave *rlb_next_rx_slave(struct bonding *bond) } if (rx_slave) { - bond_info->next_rx_slave = rx_slave->next; + slave = bond_next_slave(bond, rx_slave); + 
bond_info->next_rx_slave = slave; } return rx_slave; @@ -1173,9 +1172,8 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav { struct slave *tmp_slave1, *free_mac_slave = NULL; struct slave *has_bond_addr = bond->curr_active_slave; - int i; - if (bond->slave_cnt == 0) { + if (list_empty(&bond->slave_list)) { /* this is the first slave */ return 0; } @@ -1196,7 +1194,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav /* The slave's address is equal to the address of the bond. * Search for a spare address in the bond for this slave. */ - bond_for_each_slave(bond, tmp_slave1, i) { + bond_for_each_slave(bond, tmp_slave1) { if (!bond_slave_has_mac(bond, tmp_slave1->perm_hwaddr)) { /* no slave has tmp_slave1's perm addr * as its curr addr @@ -1246,17 +1244,15 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav */ static int alb_set_mac_address(struct bonding *bond, void *addr) { - struct sockaddr sa; - struct slave *slave, *stop_at; char tmp_addr[ETH_ALEN]; + struct slave *slave; + struct sockaddr sa; int res; - int i; - if (bond->alb_info.rlb_enabled) { + if (bond->alb_info.rlb_enabled) return 0; - } - bond_for_each_slave(bond, slave, i) { + bond_for_each_slave(bond, slave) { /* save net_device's current hw address */ memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN); @@ -1276,8 +1272,7 @@ unwind: sa.sa_family = bond->dev->type; /* unwind from head to the slave that failed */ - stop_at = slave; - bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at) { + bond_for_each_slave_continue_reverse(bond, slave) { memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN); dev_set_mac_address(slave->dev, &sa); memcpy(slave->dev->dev_addr, tmp_addr, ETH_ALEN); @@ -1460,11 +1455,10 @@ void bond_alb_monitor(struct work_struct *work) alb_work.work); struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); struct slave *slave; - int i; read_lock(&bond->lock); - if (bond->slave_cnt == 0) { + if (list_empty(&bond->slave_list)) { bond_info->tx_rebalance_counter = 0; bond_info->lp_counter = 0; goto re_arm; @@ -1482,9 +1476,8 @@ void bond_alb_monitor(struct work_struct *work) */ read_lock(&bond->curr_slave_lock); - bond_for_each_slave(bond, slave, i) { + bond_for_each_slave(bond, slave) alb_send_learning_packets(slave, slave->dev->dev_addr); - } read_unlock(&bond->curr_slave_lock); @@ -1496,7 +1489,7 @@ void bond_alb_monitor(struct work_struct *work) read_lock(&bond->curr_slave_lock); - bond_for_each_slave(bond, slave, i) { + bond_for_each_slave(bond, slave) { tlb_clear_slave(bond, slave, 1); if (slave == bond->curr_active_slave) { SLAVE_TLB_INFO(slave).load = @@ -1602,9 +1595,8 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave) */ void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave) { - if (bond->slave_cnt > 1) { + if (!list_empty(&bond->slave_list)) alb_change_hw_addr_on_detach(bond, slave); - } tlb_clear_slave(bond, slave, 0); @@ -1661,9 +1653,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave { struct slave *swap_slave; - if (bond->curr_active_slave == new_slave) { + if (bond->curr_active_slave == new_slave) return; - } if (bond->curr_active_slave && bond->alb_info.primary_is_promisc) { dev_set_promiscuity(bond->curr_active_slave->dev, -1); @@ -1674,9 +1665,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave swap_slave = bond->curr_active_slave; bond->curr_active_slave = new_slave; - if (!new_slave || 
(bond->slave_cnt == 0)) { + if (!new_slave || list_empty(&bond->slave_list)) return; - } /* set the new curr_active_slave to the bonds mac address * i.e. swap mac addresses of old curr_active_slave and new curr_active_slave @@ -1689,9 +1679,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave * ignored so we can mess with their MAC addresses without * fear of interference from transmit activity. */ - if (swap_slave) { + if (swap_slave) tlb_clear_slave(bond, swap_slave, 1); - } tlb_clear_slave(bond, new_slave, 1); write_unlock_bh(&bond->curr_slave_lock); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index bc3578e..2e8ec8b 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -441,10 +441,10 @@ static int bond_vlan_rx_add_vid(struct net_device *bond_dev, __be16 proto, u16 vid) { struct bonding *bond = netdev_priv(bond_dev); - struct slave *slave, *stop_at; - int i, res; + struct slave *slave; + int res; - bond_for_each_slave(bond, slave, i) { + bond_for_each_slave(bond, slave) { res = vlan_vid_add(slave->dev, proto, vid); if (res) goto unwind; @@ -461,8 +461,7 @@ static int bond_vlan_rx_add_vid(struct net_device *bond_dev, unwind: /* unwind from head to the slave that failed */ - stop_at = slave; - bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at) + bond_for_each_slave_continue_reverse(bond, slave) vlan_vid_del(slave->dev, proto, vid); return res; @@ -478,9 +477,9 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, { struct bonding *bond = netdev_priv(bond_dev); struct slave *slave; - int i, res; + int res; - bond_for_each_slave(bond, slave, i) + bond_for_each_slave(bond, slave) vlan_vid_del(slave->dev, proto, vid); res = bond_del_vlan(bond, vid); @@ -532,15 +531,14 @@ static void bond_del_vlans_from_slave(struct bonding *bond, static int bond_set_carrier(struct bonding *bond) { struct slave *slave; - int i; - if (bond->slave_cnt == 0) + if (list_empty(&bond->slave_list)) goto down; if (bond->params.mode == BOND_MODE_8023AD) return bond_3ad_set_carrier(bond); - bond_for_each_slave(bond, slave, i) { + bond_for_each_slave(bond, slave) { if (slave->link == BOND_LINK_UP) { if (!netif_carrier_ok(bond->dev)) { netif_carrier_on(bond->dev); @@ -681,8 +679,8 @@ static int bond_set_promiscuity(struct bonding *bond, int inc) } } else { struct slave *slave; - int i; - bond_for_each_slave(bond, slave, i) { + + bond_for_each_slave(bond, slave) { err = dev_set_promiscuity(slave->dev, inc); if (err) return err; @@ -705,8 +703,8 @@ static int bond_set_allmulti(struct bonding *bond, int inc) } } else { struct slave *slave; - int i; - bond_for_each_slave(bond, slave, i) { + + bond_for_each_slave(bond, slave) { err = dev_set_allmulti(slave->dev, inc); if (err) return err; @@ -935,9 +933,8 @@ static struct slave *bond_find_best_slave(struct bonding *bond) new_active = bond->curr_active_slave; if (!new_active) { /* there were no active slaves left */ - if (bond->slave_cnt > 0) /* found one slave */ - new_active = bond->first_slave; - else + new_active = bond_first_slave(bond); + if (!new_active) return NULL; /* still no slave, return NULL */ } @@ -1130,17 +1127,7 @@ void bond_select_active_slave(struct bonding *bond) */ static void bond_attach_slave(struct bonding *bond, struct slave *new_slave) { - if (bond->first_slave == NULL) { /* attaching the first slave */ - new_slave->next = new_slave; - new_slave->prev = new_slave; - bond->first_slave = new_slave; - } else { - 
new_slave->next = bond->first_slave; - new_slave->prev = bond->first_slave->prev; - new_slave->next->prev = new_slave; - new_slave->prev->next = new_slave; - } - + list_add_tail(&new_slave->list, &bond->slave_list); bond->slave_cnt++; } @@ -1156,22 +1143,7 @@ static void bond_attach_slave(struct bonding *bond, struct slave *new_slave) */ static void bond_detach_slave(struct bonding *bond, struct slave *slave) { - if (slave->next) - slave->next->prev = slave->prev; - - if (slave->prev) - slave->prev->next = slave->next; - - if (bond->first_slave == slave) { /* slave is the first slave */ - if (bond->slave_cnt > 1) { /* there are more slave */ - bond->first_slave = slave->next; - } else { - bond->first_slave = NULL; /* slave was the last one */ - } - } - - slave->next = NULL; - slave->prev = NULL; + list_del(&slave->list); bond->slave_cnt--; } @@ -1222,9 +1194,8 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); struct slave *slave; - int i; - bond_for_each_slave(bond, slave, i) + bond_for_each_slave(bond, slave) if (IS_UP(slave->dev)) slave_disable_netpoll(slave); } @@ -1233,9 +1204,9 @@ static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, g { struct bonding *bond = netdev_priv(dev); struct slave *slave; - int i, err = 0; + int err = 0; - bond_for_each_slave(bond, slave, i) { + bond_for_each_slave(bond, slave) { err = slave_enable_netpoll(slave); if (err) { bond_netpoll_cleanup(dev); @@ -1265,11 +1236,10 @@ static netdev_features_t bond_fix_features(struct net_device *dev, struct slave *slave; struct bonding *bond = netdev_priv(dev); netdev_features_t mask; - int i; read_lock(&bond->lock); - if (!bond->first_slave) { + if (list_empty(&bond->slave_list)) { /* Disable adding VLANs to empty bond. But why? --mq */ features |= NETIF_F_VLAN_CHALLENGED; goto out; @@ -1279,7 +1249,7 @@ static netdev_features_t bond_fix_features(struct net_device *dev, features &= ~NETIF_F_ONE_FOR_ALL; features |= NETIF_F_ALL_FOR_ALL; - bond_for_each_slave(bond, slave, i) { + bond_for_each_slave(bond, slave) { features = netdev_increment_features(features, slave->dev->features, mask); @@ -1303,15 +1273,14 @@ static void bond_compute_features(struct bonding *bond) unsigned short max_hard_header_len = ETH_HLEN; unsigned int gso_max_size = GSO_MAX_SIZE; u16 gso_max_segs = GSO_MAX_SEGS; - int i; unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE; read_lock(&bond->lock); - if (!bond->first_slave) + if (list_empty(&bond->slave_list)) goto done; - bond_for_each_slave(bond, slave, i) { + bond_for_each_slave(bond, slave) { vlan_features = netdev_increment_features(vlan_features, slave->dev->vlan_features, BOND_VLAN_FEATURES); @@ -1499,7 +1468,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) * bond ether type mutual exclusion - don't allow slaves of dissimilar * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond */ - if (bond->slave_cnt == 0) { + if (list_empty(&bond->slave_list)) { if (bond_dev->type != slave_dev->type) { pr_debug("%s: change device type from %d to %d\n", bond_dev->name, @@ -1538,7 +1507,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) } if (slave_ops->ndo_set_mac_address == NULL) { - if (bond->slave_cnt == 0) { + if (list_empty(&bond->slave_list)) { pr_warning("%s: Warning: The first slave device specified does not support setting the MAC address. 
Setting fail_over_mac to active.", bond_dev->name); bond->params.fail_over_mac = BOND_FOM_ACTIVE; @@ -1554,7 +1523,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) /* If this is the first slave, then we need to set the master's hardware * address to be the same as the slave's. */ - if (!bond->slave_cnt && bond->dev->addr_assign_type == NET_ADDR_RANDOM) + if (list_empty(&bond->slave_list) && + bond->dev->addr_assign_type == NET_ADDR_RANDOM) bond_set_dev_addr(bond->dev, slave_dev); new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL); @@ -1562,7 +1532,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) res = -ENOMEM; goto err_undo_flags; } - + INIT_LIST_HEAD(&new_slave->list); /* * Set the new_slave's queue_id to be zero. Queue ID mapping * is set via sysfs or module option if desired. @@ -1748,15 +1718,18 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) */ bond_set_slave_inactive_flags(new_slave); /* if this is the first slave */ - if (bond->slave_cnt == 1) { + if (bond_first_slave(bond) == new_slave) { SLAVE_AD_INFO(new_slave).id = 1; /* Initialize AD with the number of times that the AD timer is called in 1 second * can be called only after the mac address of the bond is set */ bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL); } else { + struct slave *prev_slave; + + prev_slave = bond_prev_slave(bond, new_slave); SLAVE_AD_INFO(new_slave).id = - SLAVE_AD_INFO(new_slave->prev).id + 1; + SLAVE_AD_INFO(prev_slave).id + 1; } bond_3ad_bind_slave(new_slave); @@ -1875,7 +1848,7 @@ err_free: err_undo_flags: bond_compute_features(bond); /* Enslave of first slave has failed and we need to fix master's mac */ - if (bond->slave_cnt == 0 && + if (list_empty(&bond->slave_list) && ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr)) eth_hw_addr_random(bond_dev); @@ -1931,15 +1904,6 @@ static int __bond_release_one(struct net_device *bond_dev, netdev_rx_handler_unregister(slave_dev); write_lock_bh(&bond->lock); - if (!all && !bond->params.fail_over_mac) { - if (ether_addr_equal(bond_dev->dev_addr, slave->perm_hwaddr) && - bond->slave_cnt > 1) - pr_warning("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n", - bond_dev->name, slave_dev->name, - slave->perm_hwaddr, - bond_dev->name, slave_dev->name); - } - /* Inform AD package of unbinding of slave. */ if (bond->params.mode == BOND_MODE_8023AD) { /* must be called before the slave is @@ -1960,6 +1924,15 @@ static int __bond_release_one(struct net_device *bond_dev, /* release the slave from its bond */ bond_detach_slave(bond, slave); + if (!all && !bond->params.fail_over_mac) { + if (ether_addr_equal(bond_dev->dev_addr, slave->perm_hwaddr) && + !list_empty(&bond->slave_list)) + pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. 
Set the HWaddr of %s to a different address to avoid conflicts.\n", + bond_dev->name, slave_dev->name, + slave->perm_hwaddr, + bond_dev->name, slave_dev->name); + } + if (bond->primary_slave == slave) bond->primary_slave = NULL; @@ -1996,7 +1969,7 @@ static int __bond_release_one(struct net_device *bond_dev, write_lock_bh(&bond->lock); } - if (bond->slave_cnt == 0) { + if (list_empty(&bond->slave_list)) { bond_set_carrier(bond); eth_hw_addr_random(bond_dev); @@ -2011,7 +1984,7 @@ static int __bond_release_one(struct net_device *bond_dev, write_unlock_bh(&bond->lock); unblock_netpoll_tx(); - if (bond->slave_cnt == 0) { + if (list_empty(&bond->slave_list)) { call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev); call_netdevice_notifiers(NETDEV_RELEASE, bond->dev); } @@ -2082,7 +2055,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev, int ret; ret = bond_release(bond_dev, slave_dev); - if ((ret == 0) && (bond->slave_cnt == 0)) { + if (ret == 0 && list_empty(&bond->slave_list)) { bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; pr_info("%s: destroying bond %s.\n", bond_dev->name, bond_dev->name); @@ -2167,13 +2140,12 @@ static int bond_info_query(struct net_device *bond_dev, struct ifbond *info) static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info) { struct bonding *bond = netdev_priv(bond_dev); + int i = 0, res = -ENODEV; struct slave *slave; - int i, res = -ENODEV; read_lock(&bond->lock); - - bond_for_each_slave(bond, slave, i) { - if (i == (int)info->slave_id) { + bond_for_each_slave(bond, slave) { + if (i++ == (int)info->slave_id) { res = 0; strcpy(info->slave_name, slave->dev->name); info->link = slave->link; @@ -2182,7 +2154,6 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in break; } } - read_unlock(&bond->lock); return res; @@ -2193,13 +2164,13 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in static int bond_miimon_inspect(struct bonding *bond) { + int link_state, commit = 0; struct slave *slave; - int i, link_state, commit = 0; bool ignore_updelay; ignore_updelay = !bond->curr_active_slave ? true : false; - bond_for_each_slave(bond, slave, i) { + bond_for_each_slave(bond, slave) { slave->new_link = BOND_LINK_NOCHANGE; link_state = bond_check_dev_link(bond, slave->dev, 0); @@ -2294,9 +2265,8 @@ static int bond_miimon_inspect(struct bonding *bond) static void bond_miimon_commit(struct bonding *bond) { struct slave *slave; - int i; - bond_for_each_slave(bond, slave, i) { + bond_for_each_slave(bond, slave) { switch (slave->new_link) { case BOND_LINK_NOCHANGE: continue; @@ -2401,7 +2371,7 @@ void bond_mii_monitor(struct work_struct *work) delay = msecs_to_jiffies(bond->params.miimon); - if (bond->slave_cnt == 0) + if (list_empty(&bond->slave_list)) goto re_arm; should_notify_peers = bond_should_notify_peers(bond); @@ -2681,14 +2651,13 @@ void bond_loadbalance_arp_mon(struct work_struct *work) struct slave *slave, *oldcurrent; int do_failover = 0; int delta_in_ticks, extra_ticks; - int i; read_lock(&bond->lock); delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval); extra_ticks = delta_in_ticks / 2; - if (bond->slave_cnt == 0) + if (list_empty(&bond->slave_list)) goto re_arm; read_lock(&bond->curr_slave_lock); @@ -2703,7 +2672,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work) * TODO: what about up/down delay in arp mode? 
it wasn't here before * so it can wait */ - bond_for_each_slave(bond, slave, i) { + bond_for_each_slave(bond, slave) { unsigned long trans_start = dev_trans_start(slave->dev); if (slave->link != BOND_LINK_UP) { @@ -2800,10 +2769,10 @@ re_arm: */ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) { - struct slave *slave; - int i, commit = 0; unsigned long trans_start; + struct slave *slave; int extra_ticks; + int commit = 0; /* All the time comparisons below need some extra time. Otherwise, on * fast networks the ARP probe/reply may arrive within the same jiffy @@ -2812,7 +2781,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) */ extra_ticks = delta_in_ticks / 2; - bond_for_each_slave(bond, slave, i) { + bond_for_each_slave(bond, slave) { slave->new_link = BOND_LINK_NOCHANGE; if (slave->link != BOND_LINK_UP) { @@ -2891,11 +2860,10 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) */ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks) { - struct slave *slave; - int i; unsigned long trans_start; + struct slave *slave; - bond_for_each_slave(bond, slave, i) { + bond_for_each_slave(bond, slave) { switch (slave->new_link) { case BOND_LINK_NOCHANGE: continue; @@ -2968,7 +2936,7 @@ do_failover: */ static void bond_ab_arp_probe(struct bonding *bond) { - struct slave *slave; + struct slave *slave, *next_slave; int i; read_lock(&bond->curr_slave_lock); @@ -2992,7 +2960,7 @@ static void bond_ab_arp_probe(struct bonding *bond) */ if (!bond->current_arp_slave) { - bond->current_arp_slave = bond->first_slave; + bond->current_arp_slave = bond_first_slave(bond); if (!bond->current_arp_slave) return; } @@ -3000,7 +2968,8 @@ static void bond_ab_arp_probe(struct bonding *bond) bond_set_slave_inactive_flags(bond->current_arp_slave); /* search for next candidate */ - bond_for_each_slave_from(bond, slave, i, bond->current_arp_slave->next) { + next_slave = bond_next_slave(bond, bond->current_arp_slave); + bond_for_each_slave_from(bond, slave, i, next_slave) { if (IS_UP(slave->dev)) { slave->link = BOND_LINK_BACK; bond_set_slave_active_flags(slave); @@ -3041,7 +3010,7 @@ void bond_activebackup_arp_mon(struct work_struct *work) delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval); - if (bond->slave_cnt == 0) + if (list_empty(&bond->slave_list)) goto re_arm; should_notify_peers = bond_should_notify_peers(bond); @@ -3361,13 +3330,12 @@ static int bond_open(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); struct slave *slave; - int i; /* reset slave->backup and slave->inactive */ read_lock(&bond->lock); - if (bond->slave_cnt > 0) { + if (!list_empty(&bond->slave_list)) { read_lock(&bond->curr_slave_lock); - bond_for_each_slave(bond, slave, i) { + bond_for_each_slave(bond, slave) { if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP) && (slave != bond->curr_active_slave)) { bond_set_slave_inactive_flags(slave); @@ -3435,13 +3403,11 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev, struct bonding *bond = netdev_priv(bond_dev); struct rtnl_link_stats64 temp; struct slave *slave; - int i; memset(stats, 0, sizeof(*stats)); read_lock_bh(&bond->lock); - - bond_for_each_slave(bond, slave, i) { + bond_for_each_slave(bond, slave) { const struct rtnl_link_stats64 *sstats = dev_get_stats(slave->dev, &temp); @@ -3471,7 +3437,6 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev, stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors; 
stats->tx_window_errors += sstats->tx_window_errors; } - read_unlock_bh(&bond->lock); return stats; @@ -3610,7 +3575,6 @@ static void bond_set_rx_mode(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); struct slave *slave; - int i; read_lock(&bond->lock); @@ -3623,7 +3587,7 @@ static void bond_set_rx_mode(struct net_device *bond_dev) } read_unlock(&bond->curr_slave_lock); } else { - bond_for_each_slave(bond, slave, i) { + bond_for_each_slave(bond, slave) { dev_uc_sync_multiple(slave->dev, bond_dev); dev_mc_sync_multiple(slave->dev, bond_dev); } @@ -3635,16 +3599,15 @@ static void bond_set_rx_mode(struct net_device *bond_dev) static int bond_neigh_init(struct neighbour *n) { struct bonding *bond = netdev_priv(n->dev); - struct slave *slave = bond->first_slave; const struct net_device_ops *slave_ops; struct neigh_parms parms; + struct slave *slave; int ret; + slave = bond_first_slave(bond); if (!slave) return 0; - slave_ops = slave->dev->netdev_ops; - if (!slave_ops->ndo_neigh_setup) return 0; @@ -3687,9 +3650,8 @@ static int bond_neigh_setup(struct net_device *dev, static int bond_change_mtu(struct net_device *bond_dev, int new_mtu) { struct bonding *bond = netdev_priv(bond_dev); - struct slave *slave, *stop_at; + struct slave *slave; int res = 0; - int i; pr_debug("bond=%p, name=%s, new_mtu=%d\n", bond, (bond_dev ? bond_dev->name : "None"), new_mtu); @@ -3709,10 +3671,10 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu) * call to the base driver. */ - bond_for_each_slave(bond, slave, i) { + bond_for_each_slave(bond, slave) { pr_debug("s %p s->p %p c_m %p\n", slave, - slave->prev, + bond_prev_slave(bond, slave), slave->dev->netdev_ops->ndo_change_mtu); res = dev_set_mtu(slave->dev, new_mtu); @@ -3737,8 +3699,7 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu) unwind: /* unwind from head to the slave that failed */ - stop_at = slave; - bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at) { + bond_for_each_slave_continue_reverse(bond, slave) { int tmp_res; tmp_res = dev_set_mtu(slave->dev, bond_dev->mtu); @@ -3762,9 +3723,8 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr) { struct bonding *bond = netdev_priv(bond_dev); struct sockaddr *sa = addr, tmp_sa; - struct slave *slave, *stop_at; + struct slave *slave; int res = 0; - int i; if (bond->params.mode == BOND_MODE_ALB) return bond_alb_set_mac_address(bond_dev, addr); @@ -3797,7 +3757,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr) * call to the base driver. 
*/ - bond_for_each_slave(bond, slave, i) { + bond_for_each_slave(bond, slave) { const struct net_device_ops *slave_ops = slave->dev->netdev_ops; pr_debug("slave %p %s\n", slave, slave->dev->name); @@ -3829,8 +3789,7 @@ unwind: tmp_sa.sa_family = bond_dev->type; /* unwind from head to the slave that failed */ - stop_at = slave; - bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at) { + bond_for_each_slave_continue_reverse(bond, slave) { int tmp_res; tmp_res = dev_set_mac_address(slave->dev, &tmp_sa); @@ -3874,7 +3833,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev */ slave_no = bond->rr_tx_counter++ % bond->slave_cnt; - bond_for_each_slave(bond, slave, i) { + bond_for_each_slave(bond, slave) { slave_no--; if (slave_no < 0) break; @@ -3940,7 +3899,7 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev) slave_no = bond->xmit_hash_policy(skb, bond->slave_cnt); - bond_for_each_slave(bond, slave, i) { + bond_for_each_slave(bond, slave) { slave_no--; if (slave_no < 0) break; @@ -4041,15 +4000,15 @@ static void bond_set_xmit_hash_policy(struct bonding *bond) static inline int bond_slave_override(struct bonding *bond, struct sk_buff *skb) { - int i, res = 1; struct slave *slave = NULL; struct slave *check_slave; + int res = 1; if (!skb->queue_mapping) return 1; /* Find out if any slaves have the same mapping as this skb. */ - bond_for_each_slave(bond, check_slave, i) { + bond_for_each_slave(bond, check_slave) { if (check_slave->queue_id == skb->queue_mapping) { slave = check_slave; break; @@ -4136,7 +4095,7 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) read_lock(&bond->lock); - if (bond->slave_cnt) + if (!list_empty(&bond->slave_list)) ret = __bond_start_xmit(skb, dev); else kfree_skb(skb); @@ -4182,9 +4141,8 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev, struct ethtool_cmd *ecmd) { struct bonding *bond = netdev_priv(bond_dev); - struct slave *slave; - int i; unsigned long speed = 0; + struct slave *slave; ecmd->duplex = DUPLEX_UNKNOWN; ecmd->port = PORT_OTHER; @@ -4195,7 +4153,7 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev, * this is an accurate maximum. */ read_lock(&bond->lock); - bond_for_each_slave(bond, slave, i) { + bond_for_each_slave(bond, slave) { if (SLAVE_IS_OK(slave)) { if (slave->speed != SPEED_UNKNOWN) speed += slave->speed; @@ -4206,6 +4164,7 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev, } ethtool_cmd_speed_set(ecmd, speed ? 
: SPEED_UNKNOWN); read_unlock(&bond->lock); + return 0; } @@ -4269,7 +4228,7 @@ static void bond_setup(struct net_device *bond_dev) /* initialize rwlocks */ rwlock_init(&bond->lock); rwlock_init(&bond->curr_slave_lock); - + INIT_LIST_HEAD(&bond->slave_list); bond->params = bonding_defaults; /* Initialize pointers */ @@ -4326,13 +4285,14 @@ static void bond_setup(struct net_device *bond_dev) static void bond_uninit(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); + struct slave *slave, *tmp_slave; struct vlan_entry *vlan, *tmp; bond_netpoll_cleanup(bond_dev); /* Release the bonded slaves */ - while (bond->first_slave != NULL) - __bond_release_one(bond_dev, bond->first_slave->dev, true); + list_for_each_entry_safe(slave, tmp_slave, &bond->slave_list, list) + __bond_release_one(bond_dev, slave->dev, true); pr_info("%s: released all slaves\n", bond_dev->name); list_del(&bond->bond_list); diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c index 4060d41..20a6ee2 100644 --- a/drivers/net/bonding/bond_procfs.c +++ b/drivers/net/bonding/bond_procfs.c @@ -12,7 +12,6 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos) struct bonding *bond = seq->private; loff_t off = 0; struct slave *slave; - int i; /* make sure the bond won't be taken away */ rcu_read_lock(); @@ -21,10 +20,9 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos) if (*pos == 0) return SEQ_START_TOKEN; - bond_for_each_slave(bond, slave, i) { + bond_for_each_slave(bond, slave) if (++off == *pos) return slave; - } return NULL; } @@ -36,11 +34,13 @@ static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos) ++*pos; if (v == SEQ_START_TOKEN) - return bond->first_slave; + return bond_first_slave(bond); - slave = slave->next; + if (bond_is_last_slave(bond, slave)) + return NULL; + slave = bond_next_slave(bond, slave); - return (slave == bond->first_slave) ? 
NULL : slave; + return slave; } static void bond_info_seq_stop(struct seq_file *seq, void *v) diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index ae02c19..0702e91 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -209,12 +209,12 @@ void bond_destroy_slave_symlinks(struct net_device *master, static ssize_t bonding_show_slaves(struct device *d, struct device_attribute *attr, char *buf) { - struct slave *slave; - int i, res = 0; struct bonding *bond = to_bond(d); + struct slave *slave; + int res = 0; read_lock(&bond->lock); - bond_for_each_slave(bond, slave, i) { + bond_for_each_slave(bond, slave) { if (res > (PAGE_SIZE - IFNAMSIZ)) { /* not enough space for another interface name */ if ((PAGE_SIZE - res) > 10) @@ -227,6 +227,7 @@ static ssize_t bonding_show_slaves(struct device *d, read_unlock(&bond->lock); if (res) buf[res-1] = '\n'; /* eat the leftover space */ + return res; } @@ -325,7 +326,7 @@ static ssize_t bonding_store_mode(struct device *d, goto out; } - if (bond->slave_cnt > 0) { + if (!list_empty(&bond->slave_list)) { pr_err("unable to update mode of %s because it has slaves.\n", bond->dev->name); ret = -EPERM; @@ -507,7 +508,7 @@ static ssize_t bonding_store_fail_over_mac(struct device *d, if (!rtnl_trylock()) return restart_syscall(); - if (bond->slave_cnt != 0) { + if (!list_empty(&bond->slave_list)) { pr_err("%s: Can't alter fail_over_mac with slaves in bond.\n", bond->dev->name); ret = -EPERM; @@ -668,7 +669,7 @@ static ssize_t bonding_store_arp_targets(struct device *d, &newtarget); /* not to race with bond_arp_rcv */ write_lock_bh(&bond->lock); - bond_for_each_slave(bond, slave, i) + bond_for_each_slave(bond, slave) slave->target_last_arp_rx[ind] = jiffies; targets[ind] = newtarget; write_unlock_bh(&bond->lock); @@ -694,7 +695,7 @@ static ssize_t bonding_store_arp_targets(struct device *d, &newtarget); write_lock_bh(&bond->lock); - bond_for_each_slave(bond, slave, i) { + bond_for_each_slave(bond, slave) { targets_rx = slave->target_last_arp_rx; j = ind; for (; (j < BOND_MAX_ARP_TARGETS-1) && targets[j+1]; j++) @@ -1085,10 +1086,9 @@ static ssize_t bonding_store_primary(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { - int i; - struct slave *slave; struct bonding *bond = to_bond(d); char ifname[IFNAMSIZ]; + struct slave *slave; if (!rtnl_trylock()) return restart_syscall(); @@ -1114,7 +1114,7 @@ static ssize_t bonding_store_primary(struct device *d, goto out; } - bond_for_each_slave(bond, slave, i) { + bond_for_each_slave(bond, slave) { if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) { pr_info("%s: Setting %s as primary slave.\n", bond->dev->name, slave->dev->name); @@ -1260,16 +1260,14 @@ static ssize_t bonding_store_active_slave(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { - int i; - struct slave *slave; - struct slave *old_active = NULL; - struct slave *new_active = NULL; + struct slave *slave, *old_active, *new_active; struct bonding *bond = to_bond(d); char ifname[IFNAMSIZ]; if (!rtnl_trylock()) return restart_syscall(); + old_active = new_active = NULL; block_netpoll_tx(); read_lock(&bond->lock); write_lock_bh(&bond->curr_slave_lock); @@ -1291,7 +1289,7 @@ static ssize_t bonding_store_active_slave(struct device *d, goto out; } - bond_for_each_slave(bond, slave, i) { + bond_for_each_slave(bond, slave) { if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) { old_active = bond->curr_active_slave; new_active = slave; @@ 
-1475,15 +1473,15 @@ static ssize_t bonding_show_queue_id(struct device *d, struct device_attribute *attr, char *buf) { - struct slave *slave; - int i, res = 0; struct bonding *bond = to_bond(d); + struct slave *slave; + int res = 0; if (!rtnl_trylock()) return restart_syscall(); read_lock(&bond->lock); - bond_for_each_slave(bond, slave, i) { + bond_for_each_slave(bond, slave) { if (res > (PAGE_SIZE - IFNAMSIZ - 6)) { /* not enough space for another interface_name:queue_id pair */ if ((PAGE_SIZE - res) > 10) @@ -1498,6 +1496,7 @@ static ssize_t bonding_show_queue_id(struct device *d, if (res) buf[res-1] = '\n'; /* eat the leftover space */ rtnl_unlock(); + return res; } @@ -1512,7 +1511,7 @@ static ssize_t bonding_store_queue_id(struct device *d, struct slave *slave, *update_slave; struct bonding *bond = to_bond(d); u16 qid; - int i, ret = count; + int ret = count; char *delim; struct net_device *sdev = NULL; @@ -1547,7 +1546,7 @@ static ssize_t bonding_store_queue_id(struct device *d, /* Search for thes slave and check for duplicate qids */ update_slave = NULL; - bond_for_each_slave(bond, slave, i) { + bond_for_each_slave(bond, slave) { if (sdev == slave->dev) /* * We don't need to check the matching @@ -1599,8 +1598,8 @@ static ssize_t bonding_store_slaves_active(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { - int i, new_value, ret = count; struct bonding *bond = to_bond(d); + int new_value, ret = count; struct slave *slave; if (sscanf(buf, "%d", &new_value) != 1) { @@ -1623,7 +1622,7 @@ static ssize_t bonding_store_slaves_active(struct device *d, } read_lock(&bond->lock); - bond_for_each_slave(bond, slave, i) { + bond_for_each_slave(bond, slave) { if (!bond_is_active_slave(slave)) { if (new_value) slave->inactive = 0; diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 42d1c65..cfd9b49 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@ -71,6 +71,28 @@ set_fs(fs); \ res; }) +/* slave list primitives */ +#define bond_to_slave(ptr) list_entry(ptr, struct slave, list) + +/* IMPORTANT: bond_first/last_slave can return NULL in case of an empty list */ +#define bond_first_slave(bond) \ + list_first_entry_or_null(&(bond)->slave_list, struct slave, list) +#define bond_last_slave(bond) \ + (list_empty(&(bond)->slave_list) ? NULL : \ + bond_to_slave((bond)->slave_list.prev)) + +#define bond_is_first_slave(bond, pos) ((pos)->list.prev == &(bond)->slave_list) +#define bond_is_last_slave(bond, pos) ((pos)->list.next == &(bond)->slave_list) + +/* Since bond_first/last_slave can return NULL, these can return NULL too */ +#define bond_next_slave(bond, pos) \ + (bond_is_last_slave(bond, pos) ? bond_first_slave(bond) : \ + bond_to_slave((pos)->list.next)) + +#define bond_prev_slave(bond, pos) \ + (bond_is_first_slave(bond, pos) ? bond_last_slave(bond) : \ + bond_to_slave((pos)->list.prev)) + /** * bond_for_each_slave_from - iterate the slaves list from a starting point * @bond: the bond holding this list. @@ -80,37 +102,29 @@ * * Caller must hold bond->lock */ -#define bond_for_each_slave_from(bond, pos, cnt, start) \ - for (cnt = 0, pos = start; \ - cnt < (bond)->slave_cnt; \ - cnt++, pos = (pos)->next) +#define bond_for_each_slave_from(bond, pos, cnt, start) \ + for (cnt = 0, pos = start; pos && cnt < (bond)->slave_cnt; \ + cnt++, pos = bond_next_slave(bond, pos)) /** - * bond_for_each_slave_from_to - iterate the slaves list from start point to stop point - * @bond: the bond holding this list. 
- * @pos: current slave. - * @cnt: counter for number max of moves - * @start: start point. - * @stop: stop point. + * bond_for_each_slave - iterate over all slaves + * @bond: the bond holding this list + * @pos: current slave * * Caller must hold bond->lock */ -#define bond_for_each_slave_from_to(bond, pos, cnt, start, stop) \ - for (cnt = 0, pos = start; \ - ((cnt < (bond)->slave_cnt) && (pos != (stop)->next)); \ - cnt++, pos = (pos)->next) +#define bond_for_each_slave(bond, pos) \ + list_for_each_entry(pos, &(bond)->slave_list, list) /** - * bond_for_each_slave - iterate the slaves list from head - * @bond: the bond holding this list. - * @pos: current slave. - * @cnt: counter for max number of moves + * bond_for_each_slave_reverse - iterate in reverse from a given position + * @bond: the bond holding this list + * @pos: slave to continue from * * Caller must hold bond->lock */ -#define bond_for_each_slave(bond, pos, cnt) \ - bond_for_each_slave_from(bond, pos, cnt, (bond)->first_slave) - +#define bond_for_each_slave_continue_reverse(bond, pos) \ + list_for_each_entry_continue_reverse(pos, &(bond)->slave_list, list) #ifdef CONFIG_NET_POLL_CONTROLLER extern atomic_t netpoll_block_tx; @@ -174,8 +188,7 @@ struct vlan_entry { struct slave { struct net_device *dev; /* first - useful for panic debug */ - struct slave *next; - struct slave *prev; + struct list_head list; struct bonding *bond; /* our master */ int delay; unsigned long jiffies; @@ -215,7 +228,7 @@ struct slave { */ struct bonding { struct net_device *dev; /* first - useful for panic debug */ - struct slave *first_slave; + struct list_head slave_list; struct slave *curr_active_slave; struct slave *current_arp_slave; struct slave *primary_slave; @@ -270,13 +283,10 @@ static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct net_device *slave_dev) { struct slave *slave = NULL; - int i; - bond_for_each_slave(bond, slave, i) { - if (slave->dev == slave_dev) { + bond_for_each_slave(bond, slave) + if (slave->dev == slave_dev) return slave; - } - } return NULL; } @@ -477,10 +487,9 @@ static inline void bond_destroy_proc_dir(struct bond_net *bn) static inline struct slave *bond_slave_has_mac(struct bonding *bond, const u8 *mac) { - int i = 0; struct slave *tmp; - bond_for_each_slave(bond, tmp, i) + bond_for_each_slave(bond, tmp) if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr)) return tmp; -- cgit v0.10.2 From 71bc3b2dc5d02566afe1b23a7b72bfc8acdd04e1 Mon Sep 17 00:00:00 2001 From: "nikolay@redhat.com" Date: Thu, 1 Aug 2013 16:54:48 +0200 Subject: bonding: remove unnecessary read_locks of curr_slave_lock In all the cases we already hold bond->lock for reading, so the slave can't get away and the check != NULL is sufficient. curr_active_slave can still change after the read_lock is unlocked prior to use of the dereferenced value, so there's no need for it. It either contains a valid slave which we use (and can't get away), or it is NULL which is checked. In some places the read_lock of curr_slave_lock was left because we need it not to change while performing some action (e.g. syncing current active slave's addresses, sending ARP requests through the active slave) such cases will be dealt with individually while converting to RCU. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 2e8ec8b..ac60b69 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2092,23 +2092,19 @@ static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_devi read_lock(&bond->lock); - read_lock(&bond->curr_slave_lock); old_active = bond->curr_active_slave; - read_unlock(&bond->curr_slave_lock); - new_active = bond_get_slave_by_dev(bond, slave_dev); - /* * Changing to the current active: do nothing; return success. */ - if (new_active && (new_active == old_active)) { + if (new_active && new_active == old_active) { read_unlock(&bond->lock); return 0; } - if ((new_active) && - (old_active) && - (new_active->link == BOND_LINK_UP) && + if (new_active && + old_active && + new_active->link == BOND_LINK_UP && IS_UP(new_active->dev)) { block_netpoll_tx(); write_lock_bh(&bond->curr_slave_lock); @@ -2660,10 +2656,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work) if (list_empty(&bond->slave_list)) goto re_arm; - read_lock(&bond->curr_slave_lock); oldcurrent = bond->curr_active_slave; - read_unlock(&bond->curr_slave_lock); - /* see if any of the previous devices are up now (i.e. they have * xmt and rcv traffic). the curr_active_slave does not come into * the picture unless it is null. also, slave->jiffies is not needed @@ -3818,11 +3811,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev */ if ((iph->protocol == IPPROTO_IGMP) && (skb->protocol == htons(ETH_P_IP))) { - - read_lock(&bond->curr_slave_lock); slave = bond->curr_active_slave; - read_unlock(&bond->curr_slave_lock); - if (!slave) goto out; } else { @@ -3867,15 +3856,12 @@ out: static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); + struct slave *slave; int res = 1; - read_lock(&bond->curr_slave_lock); - - if (bond->curr_active_slave) - res = bond_dev_queue_xmit(bond, skb, - bond->curr_active_slave->dev); - - read_unlock(&bond->curr_slave_lock); + slave = bond->curr_active_slave; + if (slave) + res = bond_dev_queue_xmit(bond, skb, slave->dev); if (res) /* no suitable interface, frame not sent */ @@ -3935,10 +3921,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev) int i; int res = 1; - read_lock(&bond->curr_slave_lock); start_at = bond->curr_active_slave; - read_unlock(&bond->curr_slave_lock); - if (!start_at) goto out; -- cgit v0.10.2 From 78a646ced88450754613573f7d1fa7cb0de14bb3 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Thu, 1 Aug 2013 16:54:49 +0200 Subject: bonding: simplify broadcast_xmit function We don't need to start from the curr_active_slave as the frame will be sent to all eligible slaves anyway, so we remove the unnecessary local variables, checks and comments, and make it use the standard list API. This has the nice side-effect that later when it's converted to RCU a race condition will be avoided which could lead to double packet tx. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index ac60b69..aa9da00 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3910,52 +3910,32 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev) return NETDEV_TX_OK; } -/* - * in broadcast mode, we send everything to all usable interfaces. 
- */ +/* in broadcast mode, we send everything to all usable interfaces. */ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); - struct slave *slave, *start_at; - struct net_device *tx_dev = NULL; - int i; - int res = 1; - - start_at = bond->curr_active_slave; - if (!start_at) - goto out; + struct slave *slave = NULL; - bond_for_each_slave_from(bond, slave, i, start_at) { - if (IS_UP(slave->dev) && - (slave->link == BOND_LINK_UP) && - bond_is_active_slave(slave)) { - if (tx_dev) { - struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); - if (!skb2) { - pr_err("%s: Error: bond_xmit_broadcast(): skb_clone() failed\n", - bond_dev->name); - continue; - } + bond_for_each_slave(bond, slave) { + if (bond_is_last_slave(bond, slave)) + break; + if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP) { + struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); - res = bond_dev_queue_xmit(bond, skb2, tx_dev); - if (res) { - kfree_skb(skb2); - continue; - } + if (!skb2) { + pr_err("%s: Error: bond_xmit_broadcast(): skb_clone() failed\n", + bond_dev->name); + continue; } - tx_dev = slave->dev; + /* bond_dev_queue_xmit always returns 0 */ + bond_dev_queue_xmit(bond, skb2, slave->dev); } } - - if (tx_dev) - res = bond_dev_queue_xmit(bond, skb, tx_dev); - -out: - if (res) - /* no suitable interface, frame not sent */ + if (slave && IS_UP(slave->dev) && slave->link == BOND_LINK_UP) + bond_dev_queue_xmit(bond, skb, slave->dev); + else kfree_skb(skb); - /* frame sent to all suitable interfaces */ return NETDEV_TX_OK; } -- cgit v0.10.2 From 15077228cab68e5e8c3cbf26a7f6ebacfac4c829 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Thu, 1 Aug 2013 16:54:50 +0200 Subject: bonding: factor out slave id tx code and simplify xmit paths I factored out the tx xmit code which relies on slave id in bond_xmit_slave_id. It is global because later it can be used also in 3ad mode xmit. Unnecessary obvious comments are removed. Active-backup mode is simplified because bond_dev_queue_xmit always consumes the skb. bond_xmit_xor becomes one line because of bond_xmit_slave_id. bond_for_each_slave_from is not used in bond_xmit_slave_id because later when RCU is used we can avoid important race condition by using standard rculist routines. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index aa9da00..5eee95c 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3795,12 +3795,50 @@ unwind: return res; } +/** + * bond_xmit_slave_id - transmit skb through slave with slave_id + * @bond: bonding device that is transmitting + * @skb: buffer to transmit + * @slave_id: slave id up to slave_cnt-1 through which to transmit + * + * This function tries to transmit through slave with slave_id but in case + * it fails, it tries to find the first available slave for transmission. + * The skb is consumed in all cases, thus the function is void. 
+ */ +void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id) +{ + struct slave *slave; + int i = slave_id; + + /* Here we start from the slave with slave_id */ + bond_for_each_slave(bond, slave) { + if (--i < 0) { + if (slave_can_tx(slave)) { + bond_dev_queue_xmit(bond, skb, slave->dev); + return; + } + } + } + + /* Here we start from the first slave up to slave_id */ + i = slave_id; + bond_for_each_slave(bond, slave) { + if (--i < 0) + break; + if (slave_can_tx(slave)) { + bond_dev_queue_xmit(bond, skb, slave->dev); + return; + } + } + /* no slave that can tx has been found */ + kfree_skb(skb); +} + static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); - struct slave *slave, *start_at; - int i, slave_no, res = 1; struct iphdr *iph = ip_hdr(skb); + struct slave *slave; /* * Start with the curr_active_slave that joined the bond as the @@ -3809,46 +3847,20 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev * send the join/membership reports. The curr_active_slave found * will send all of this type of traffic. */ - if ((iph->protocol == IPPROTO_IGMP) && - (skb->protocol == htons(ETH_P_IP))) { + if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) { slave = bond->curr_active_slave; - if (!slave) - goto out; + if (slave && slave_can_tx(slave)) + bond_dev_queue_xmit(bond, skb, slave->dev); + else + bond_xmit_slave_id(bond, skb, 0); } else { - /* - * Concurrent TX may collide on rr_tx_counter; we accept - * that as being rare enough not to justify using an - * atomic op here. - */ - slave_no = bond->rr_tx_counter++ % bond->slave_cnt; - - bond_for_each_slave(bond, slave) { - slave_no--; - if (slave_no < 0) - break; - } - } - - start_at = slave; - bond_for_each_slave_from(bond, slave, i, start_at) { - if (IS_UP(slave->dev) && - (slave->link == BOND_LINK_UP) && - bond_is_active_slave(slave)) { - res = bond_dev_queue_xmit(bond, skb, slave->dev); - break; - } - } - -out: - if (res) { - /* no suitable interface, frame not sent */ - kfree_skb(skb); + bond_xmit_slave_id(bond, skb, + bond->rr_tx_counter++ % bond->slave_cnt); } return NETDEV_TX_OK; } - /* * in active-backup mode, we know that bond->curr_active_slave is always valid if * the bond has a usable interface. 
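For reference, a minimal sketch of how the converted transmit paths end up calling the new helper; both call sites come straight from this patch (round-robin in the hunk above, xor in a later hunk), so only the framing here is illustrative:

	/* round-robin: pass the next counter value; the helper starts at
	 * that index, skips slaves that cannot transmit and wraps around */
	bond_xmit_slave_id(bond, skb,
			   bond->rr_tx_counter++ % bond->slave_cnt);

	/* xor: the hash policy already returns an index in [0, slave_cnt) */
	bond_xmit_slave_id(bond, skb,
			   bond->xmit_hash_policy(skb, bond->slave_cnt));

In both cases the skb is consumed by bond_xmit_slave_id(), so the callers no longer need to track a res value or free the skb themselves.
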
@@ -3857,14 +3869,11 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d { struct bonding *bond = netdev_priv(bond_dev); struct slave *slave; - int res = 1; slave = bond->curr_active_slave; if (slave) - res = bond_dev_queue_xmit(bond, skb, slave->dev); - - if (res) - /* no suitable interface, frame not sent */ + bond_dev_queue_xmit(bond, skb, slave->dev); + else kfree_skb(skb); return NETDEV_TX_OK; @@ -3878,34 +3887,9 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); - struct slave *slave, *start_at; - int slave_no; - int i; - int res = 1; - - slave_no = bond->xmit_hash_policy(skb, bond->slave_cnt); - - bond_for_each_slave(bond, slave) { - slave_no--; - if (slave_no < 0) - break; - } - - start_at = slave; - bond_for_each_slave_from(bond, slave, i, start_at) { - if (IS_UP(slave->dev) && - (slave->link == BOND_LINK_UP) && - bond_is_active_slave(slave)) { - res = bond_dev_queue_xmit(bond, skb, slave->dev); - break; - } - } - - if (res) { - /* no suitable interface, frame not sent */ - kfree_skb(skb); - } + bond_xmit_slave_id(bond, skb, + bond->xmit_hash_policy(skb, bond->slave_cnt)); return NETDEV_TX_OK; } diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index cfd9b49..0995974 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@ -426,10 +426,20 @@ static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be3 return addr; } +static inline bool slave_can_tx(struct slave *slave) +{ + if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP && + bond_is_active_slave(slave)) + return true; + else + return false; +} + struct bond_net; struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr); int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev); +void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id); int bond_create(struct net *net, const char *name); int bond_create_sysfs(struct bond_net *net); void bond_destroy_sysfs(struct bond_net *net); -- cgit v0.10.2 From 278b20837511776dc9d5f6ee1c7fabd5479838bb Mon Sep 17 00:00:00 2001 From: "nikolay@redhat.com" Date: Thu, 1 Aug 2013 16:54:51 +0200 Subject: bonding: initial RCU conversion This patch does the initial bonding conversion to RCU. After it the following modes are protected by RCU alone: roundrobin, active-backup, broadcast and xor. Modes ALB/TLB and 3ad still acquire bond->lock for reading, and will be dealt with later. curr_active_slave needs to be dereferenced via rcu in the converted modes because the only thing protecting the slave after this patch is rcu_read_lock, so we need the proper barrier for weakly ordered archs and to make sure we don't have stale pointer. It's not tagged with __rcu yet because there's still work to be done to remove the curr_slave_lock, so sparse will complain when rcu_assign_pointer and rcu_dereference are used, but the alternative to use rcu_dereference_protected would've created much bigger code churn which is more difficult to test and review. That will be converted in time. 1. Active-backup mode 1.1 Perf recording while doing iperf -P 4 - old bonding: iperf spent 0.55% in bonding, system spent 0.29% CPU in bonding - new bonding: iperf spent 0.29% in bonding, system spent 0.15% CPU in bonding 1.2. 
Bandwidth measurements - old bonding: 16.1 gbps consistently - new bonding: 17.5 gbps consistently 2. Round-robin mode 2.1 Perf recording while doing iperf -P 4 - old bonding: iperf spent 0.51% in bonding, system spent 0.24% CPU in bonding - new bonding: iperf spent 0.16% in bonding, system spent 0.11% CPU in bonding 2.2 Bandwidth measurements - old bonding: 8 gbps (variable due to packet reorderings) - new bonding: 10 gbps (variable due to packet reorderings) Of course the latency has improved in all converted modes, and moreover while doing enslave/release (since it doesn't affect tx anymore). Also I've stress tested all modes doing enslave/release in a loop while transmitting traffic. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 7d46fa8..9010265 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -2426,6 +2426,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev) struct ad_info ad_info; int res = 1; + read_lock(&bond->lock); if (__bond_3ad_get_active_agg_info(bond, &ad_info)) { pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n", dev->name); @@ -2475,6 +2476,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev) } out: + read_unlock(&bond->lock); if (res) { /* no suitable interface, frame not sent */ kfree_skb(skb); diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 4d35196..3a5db7b 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -1337,6 +1337,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) /* make sure that the curr_active_slave do not change during tx */ + read_lock(&bond->lock); read_lock(&bond->curr_slave_lock); switch (ntohs(skb->protocol)) { @@ -1441,11 +1442,12 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) } read_unlock(&bond->curr_slave_lock); - + read_unlock(&bond->lock); if (res) { /* no suitable interface, frame not sent */ kfree_skb(skb); } + return NETDEV_TX_OK; } @@ -1663,7 +1665,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave } swap_slave = bond->curr_active_slave; - bond->curr_active_slave = new_slave; + rcu_assign_pointer(bond->curr_active_slave, new_slave); if (!new_slave || list_empty(&bond->slave_list)) return; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 5eee95c..1d37a96 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -77,6 +77,7 @@ #include #include #include +#include #include "bonding.h" #include "bond_3ad.h" #include "bond_alb.h" @@ -1037,7 +1038,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) if (new_active) bond_set_slave_active_flags(new_active); } else { - bond->curr_active_slave = new_active; + rcu_assign_pointer(bond->curr_active_slave, new_active); } if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) { @@ -1127,7 +1128,7 @@ void bond_select_active_slave(struct bonding *bond) */ static void bond_attach_slave(struct bonding *bond, struct slave *new_slave) { - list_add_tail(&new_slave->list, &bond->slave_list); + list_add_tail_rcu(&new_slave->list, &bond->slave_list); bond->slave_cnt++; } @@ -1143,7 +1144,7 @@ static void bond_attach_slave(struct bonding *bond, struct slave *new_slave) */ static void bond_detach_slave(struct bonding *bond, struct slave *slave) { - list_del(&slave->list); + list_del_rcu(&slave->list); 
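+	/* Readers walking the slave list under rcu_read_lock() may still
+	 * see this slave; on the release path __bond_release_one() calls
+	 * synchronize_rcu() before the slave is freed. */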
bond->slave_cnt--; } @@ -1751,7 +1752,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) * so we can change it without calling change_active_interface() */ if (!bond->curr_active_slave && new_slave->link == BOND_LINK_UP) - bond->curr_active_slave = new_slave; + rcu_assign_pointer(bond->curr_active_slave, new_slave); break; } /* switch(bond_mode) */ @@ -1951,7 +1952,7 @@ static int __bond_release_one(struct net_device *bond_dev, } if (all) { - bond->curr_active_slave = NULL; + rcu_assign_pointer(bond->curr_active_slave, NULL); } else if (oldcurrent == slave) { /* * Note that we hold RTNL over this sequence, so there @@ -1983,6 +1984,7 @@ static int __bond_release_one(struct net_device *bond_dev, write_unlock_bh(&bond->lock); unblock_netpoll_tx(); + synchronize_rcu(); if (list_empty(&bond->slave_list)) { call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev); @@ -3811,7 +3813,7 @@ void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id) int i = slave_id; /* Here we start from the slave with slave_id */ - bond_for_each_slave(bond, slave) { + bond_for_each_slave_rcu(bond, slave) { if (--i < 0) { if (slave_can_tx(slave)) { bond_dev_queue_xmit(bond, skb, slave->dev); @@ -3822,7 +3824,7 @@ void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id) /* Here we start from the first slave up to slave_id */ i = slave_id; - bond_for_each_slave(bond, slave) { + bond_for_each_slave_rcu(bond, slave) { if (--i < 0) break; if (slave_can_tx(slave)) { @@ -3848,7 +3850,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev * will send all of this type of traffic. */ if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) { - slave = bond->curr_active_slave; + slave = rcu_dereference(bond->curr_active_slave); if (slave && slave_can_tx(slave)) bond_dev_queue_xmit(bond, skb, slave->dev); else @@ -3870,7 +3872,7 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d struct bonding *bond = netdev_priv(bond_dev); struct slave *slave; - slave = bond->curr_active_slave; + slave = rcu_dereference(bond->curr_active_slave); if (slave) bond_dev_queue_xmit(bond, skb, slave->dev); else @@ -3900,7 +3902,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev) struct bonding *bond = netdev_priv(bond_dev); struct slave *slave = NULL; - bond_for_each_slave(bond, slave) { + bond_for_each_slave_rcu(bond, slave) { if (bond_is_last_slave(bond, slave)) break; if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP) { @@ -3955,7 +3957,7 @@ static inline int bond_slave_override(struct bonding *bond, return 1; /* Find out if any slaves have the same mapping as this skb. 
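	 * This runs under rcu_read_lock() taken in bond_start_xmit(), so the
	 * RCU list iterator is used below.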
*/ - bond_for_each_slave(bond, check_slave) { + bond_for_each_slave_rcu(bond, check_slave) { if (check_slave->queue_id == skb->queue_mapping) { slave = check_slave; break; @@ -4040,14 +4042,12 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) if (is_netpoll_tx_blocked(dev)) return NETDEV_TX_BUSY; - read_lock(&bond->lock); - + rcu_read_lock(); if (!list_empty(&bond->slave_list)) ret = __bond_start_xmit(skb, dev); else kfree_skb(skb); - - read_unlock(&bond->lock); + rcu_read_unlock(); return ret; } diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 0702e91..0f539de 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -1243,16 +1243,16 @@ static ssize_t bonding_show_active_slave(struct device *d, struct device_attribute *attr, char *buf) { - struct slave *curr; struct bonding *bond = to_bond(d); + struct slave *curr; int count = 0; - read_lock(&bond->curr_slave_lock); - curr = bond->curr_active_slave; - read_unlock(&bond->curr_slave_lock); - + rcu_read_lock(); + curr = rcu_dereference(bond->curr_active_slave); if (USES_PRIMARY(bond->params.mode) && curr) count = sprintf(buf, "%s\n", curr->dev->name); + rcu_read_unlock(); + return count; } @@ -1284,7 +1284,7 @@ static ssize_t bonding_store_active_slave(struct device *d, if (!strlen(ifname) || buf[0] == '\n') { pr_info("%s: Clearing current active slave.\n", bond->dev->name); - bond->curr_active_slave = NULL; + rcu_assign_pointer(bond->curr_active_slave, NULL); bond_select_active_slave(bond); goto out; } @@ -1347,14 +1347,9 @@ static ssize_t bonding_show_mii_status(struct device *d, struct device_attribute *attr, char *buf) { - struct slave *curr; struct bonding *bond = to_bond(d); - read_lock(&bond->curr_slave_lock); - curr = bond->curr_active_slave; - read_unlock(&bond->curr_slave_lock); - - return sprintf(buf, "%s\n", curr ? "up" : "down"); + return sprintf(buf, "%s\n", bond->curr_active_slave ? "up" : "down"); } static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL); diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 0995974..4bf52d5 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@ -116,6 +116,10 @@ #define bond_for_each_slave(bond, pos) \ list_for_each_entry(pos, &(bond)->slave_list, list) +/* Caller must have rcu_read_lock */ +#define bond_for_each_slave_rcu(bond, pos) \ + list_for_each_entry_rcu(pos, &(bond)->slave_list, list) + /** * bond_for_each_slave_reverse - iterate in reverse from a given position * @bond: the bond holding this list -- cgit v0.10.2 From b9c119844c42a46a6c6006d158ee33af81fe76ae Mon Sep 17 00:00:00 2001 From: Jitendra Kalsaria Date: Fri, 2 Aug 2013 00:57:39 -0400 Subject: qlcnic: Enhance diagnostic loopback error codes. o Enhanced the driver to use standard Linux error codes o Return a unique error code to indicate loopback is in progress Signed-off-by: Jitendra Kalsaria Signed-off-by: Himanshu Madhani Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index b00cf56..aa0e68e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -950,12 +950,6 @@ struct qlcnic_ipaddr { #define QLCNIC_READD_AGE 20 #define QLCNIC_LB_MAX_FILTERS 64 #define QLCNIC_LB_BUCKET_SIZE 32 - -/* QLCNIC Driver Error Code */ -#define QLCNIC_FW_NOT_RESPOND 51 -#define QLCNIC_TEST_IN_PROGRESS 52 -#define QLCNIC_UNDEFINED_ERROR 53 -#define QLCNIC_LB_CABLE_NOT_CONN 54 #define QLCNIC_ILB_MAX_RCV_LOOP 10 struct qlcnic_filter { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 0913c62..cda188d 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -1652,7 +1652,7 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode) if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { netdev_warn(netdev, "Loopback test not supported in non privileged mode\n"); - return ret; + return -ENOTSUPP; } if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { @@ -1686,13 +1686,13 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode) if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { netdev_info(netdev, "Device is resetting, free LB test resources\n"); - ret = -EIO; + ret = -EBUSY; goto free_diag_res; } if (loop++ > QLC_83XX_LB_WAIT_COUNT) { netdev_info(netdev, "Firmware didn't sent link up event to loopback request\n"); - ret = -QLCNIC_FW_NOT_RESPOND; + ret = -ETIMEDOUT; qlcnic_83xx_clear_lb_mode(adapter, mode); goto free_diag_res; } @@ -1729,6 +1729,15 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) return status; config = ahw->port_config; + + /* Check if port is already in loopback mode */ + if ((config & QLC_83XX_CFG_LOOPBACK_HSS) || + (config & QLC_83XX_CFG_LOOPBACK_EXT)) { + netdev_err(netdev, + "Port already in Loopback mode.\n"); + return -EINPROGRESS; + } + set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); if (mode == QLCNIC_ILB_MODE) @@ -1756,14 +1765,14 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) netdev_info(netdev, "Device is resetting, free LB test resources\n"); clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); - return -EIO; + return -EBUSY; } if (loop++ > QLC_83XX_LB_WAIT_COUNT) { netdev_err(netdev, "Did not receive IDC completion AEN\n"); clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); qlcnic_83xx_clear_lb_mode(adapter, mode); - return -EIO; + return -ETIMEDOUT; } } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status)); @@ -1805,14 +1814,14 @@ int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode) netdev_info(netdev, "Device is resetting, free LB test resources\n"); clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); - return -EIO; + return -EBUSY; } if (loop++ > QLC_83XX_LB_WAIT_COUNT) { netdev_err(netdev, "Did not receive IDC completion AEN\n"); clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); - return -EIO; + return -ETIMEDOUT; } } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status)); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 700a463..94a728d 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -980,9 +980,9 @@ int qlcnic_loopback_test(struct net_device *netdev, u8 mode) msleep(500); 
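+		/* Process the status ring and check whether firmware has
+		 * answered the loopback configure request (diag_cnt is set
+		 * from qlcnic_handle_fw_message()). */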
qlcnic_process_rcv_ring_diag(sds_ring); if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) { - netdev_info(netdev, "firmware didnt respond to loopback" - " configure request\n"); - ret = -QLCNIC_FW_NOT_RESPOND; + netdev_info(netdev, + "Firmware didn't sent link up event to loopback request\n"); + ret = -ETIMEDOUT; goto free_res; } else if (adapter->ahw->diag_cnt) { ret = adapter->ahw->diag_cnt; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index d3f8797..1994b44 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -919,17 +919,17 @@ static void qlcnic_handle_fw_message(int desc_cnt, int index, break; case 1: dev_info(dev, "loopback already in progress\n"); - adapter->ahw->diag_cnt = -QLCNIC_TEST_IN_PROGRESS; + adapter->ahw->diag_cnt = -EINPROGRESS; break; case 2: dev_info(dev, "loopback cable is not connected\n"); - adapter->ahw->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN; + adapter->ahw->diag_cnt = -ENODEV; break; default: dev_info(dev, "loopback configure request failed, err %x\n", ret); - adapter->ahw->diag_cnt = -QLCNIC_UNDEFINED_ERROR; + adapter->ahw->diag_cnt = -EIO; break; } break; -- cgit v0.10.2 From e5c4e6c696aea58fbea5758e8b2841d2b0309cf7 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Fri, 2 Aug 2013 00:57:40 -0400 Subject: qlcnic: Interrupt based driver firmware mailbox mechanism o Driver firmware mailbox interface was operating in polling mode because of limitations with the earlier versions of 83xx adapter firmware. These issues are resolved and we are implementing interrupt based mailbox mechanism. o Data structures and API's for interrupt mode mailbox mechanism. Signed-off-by: Manish Chopra Signed-off-by: Himanshu Madhani Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index aa0e68e..5a49b64 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -20,7 +20,6 @@ #include #include #include - #include #include #include @@ -468,6 +467,7 @@ struct qlcnic_hardware_context { u32 mbox_aen[QLC_83XX_MBX_AEN_CNT]; u32 mbox_reg[4]; spinlock_t mbx_lock; + struct qlcnic_mailbox *mailbox; }; struct qlcnic_adapter_stats { @@ -966,6 +966,21 @@ struct qlcnic_filter_hash { u16 fbucket_size; }; +/* Mailbox specific data structures */ +struct qlcnic_mailbox { + struct workqueue_struct *work_q; + struct qlcnic_adapter *adapter; + struct qlcnic_mbx_ops *ops; + struct work_struct work; + struct completion completion; + struct list_head cmd_q; + unsigned long status; + spinlock_t queue_lock; /* Mailbox queue lock */ + spinlock_t aen_lock; /* Mailbox response/AEN lock */ + atomic_t rsp_status; + u32 num_cmds; +}; + struct qlcnic_adapter { struct qlcnic_hardware_context *ahw; struct qlcnic_recv_context *recv_ctx; @@ -1379,9 +1394,20 @@ struct _cdrp_cmd { }; struct qlcnic_cmd_args { - struct _cdrp_cmd req; - struct _cdrp_cmd rsp; - int op_type; + struct completion completion; + struct list_head list; + struct _cdrp_cmd req; + struct _cdrp_cmd rsp; + atomic_t rsp_status; + int pay_size; + u32 rsp_opcode; + u32 total_cmds; + u32 op_type; + u32 type; + u32 cmd_op; + u32 *hdr; /* Back channel message header */ + u32 *pay; /* Back channel message payload */ + u8 func_num; }; int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter); @@ -1594,6 +1620,20 @@ struct qlcnic_nic_template { int (*resume)(struct qlcnic_adapter *); }; +struct qlcnic_mbx_ops { + int (*enqueue_cmd) (struct qlcnic_adapter *, + struct qlcnic_cmd_args *, unsigned long *); + void (*dequeue_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *); + void (*decode_resp) (struct qlcnic_adapter *, struct qlcnic_cmd_args *); + void (*encode_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *); + void (*nofity_fw) (struct qlcnic_adapter *, u8); +}; + +int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *); +void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *); +void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx); +void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx); + /* Adapter hardware abstraction */ struct qlcnic_hardware_ops { void (*read_crb) (struct qlcnic_adapter *, char *, loff_t, size_t); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index cda188d..74c8d84 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -149,7 +149,7 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = { .get_mac_address = qlcnic_83xx_get_mac_address, .setup_intr = qlcnic_83xx_setup_intr, .alloc_mbx_args = qlcnic_83xx_alloc_mbx_args, - .mbx_cmd = qlcnic_83xx_mbx_op, + .mbx_cmd = qlcnic_83xx_issue_cmd, .get_func_no = qlcnic_83xx_get_func_no, .api_lock = qlcnic_83xx_cam_lock, .api_unlock = qlcnic_83xx_cam_unlock, @@ -398,6 +398,12 @@ irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter) return IRQ_HANDLED; } +static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx) +{ + atomic_set(&mbx->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED); + complete(&mbx->completion); +} + static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter) { u32 resp, event; 
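To make the new mechanism easier to follow, here is a rough sketch of how the pieces introduced in this patch are meant to interact (locking and error handling omitted; the caller-side wait is only wired up when issue_cmd is switched over in the follow-up patch):

	/* command issuer */
	qlcnic_83xx_enqueue_mbx_cmd(adapter, cmd, &timeout);  /* add to mbx->cmd_q, queue_work() */
	/* ...then waits on cmd->completion for the decoded response */

	/* qlcnic_83xx_mailbox_worker() */
	qlcnic_83xx_encode_mbx_cmd(adapter, cmd);             /* request args -> mailbox registers */
	qlcnic_83xx_signal_mbx_cmd(adapter, QLC_83XX_MBX_REQUEST);
	wait_for_completion_timeout(&mbx->completion, QLC_83XX_MBX_TIMEOUT);
	qlcnic_83xx_decode_mbx_rsp(adapter, cmd);             /* sets cmd->rsp_opcode */
	qlcnic_83xx_dequeue_mbx_cmd(adapter, cmd);            /* complete(&cmd->completion) */

	/* mailbox interrupt path */
	qlcnic_83xx_notify_mbx_response(mbx);                 /* complete(&mbx->completion) */
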
@@ -515,7 +521,7 @@ int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter) } /* Enable mailbox interrupt */ - qlcnic_83xx_enable_mbx_intrpt(adapter); + qlcnic_83xx_enable_mbx_interrupt(adapter); return err; } @@ -629,7 +635,7 @@ void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *adapter) ahw->max_uc_count = count; } -void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *adapter) +void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *adapter) { u32 val; @@ -737,8 +743,8 @@ u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter, u32 *wait_time) return data; } -int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter, - struct qlcnic_cmd_args *cmd) +int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) { int i; u16 opcode; @@ -829,6 +835,7 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx, u32 temp; const struct qlcnic_mailbox_metadata *mbx_tbl; + memset(mbx, 0, sizeof(struct qlcnic_cmd_args)); mbx_tbl = qlcnic_83xx_mbx_tbl; size = ARRAY_SIZE(qlcnic_83xx_mbx_tbl); for (i = 0; i < size; i++) { @@ -3455,3 +3462,300 @@ int qlcnic_83xx_resume(struct qlcnic_adapter *adapter) idc->delay); return err; } + +void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx) +{ + INIT_COMPLETION(mbx->completion); + set_bit(QLC_83XX_MBX_READY, &mbx->status); +} + +void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx) +{ + destroy_workqueue(mbx->work_q); + kfree(mbx); +} + +static inline void +qlcnic_83xx_notify_cmd_completion(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED); + + if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) { + qlcnic_free_mbx_args(cmd); + kfree(cmd); + return; + } + complete(&cmd->completion); +} + +static inline void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter) +{ + struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; + struct list_head *head = &mbx->cmd_q; + struct qlcnic_cmd_args *cmd = NULL; + + spin_lock(&mbx->queue_lock); + + while (!list_empty(head)) { + cmd = list_entry(head->next, struct qlcnic_cmd_args, list); + list_del(&cmd->list); + mbx->num_cmds--; + qlcnic_83xx_notify_cmd_completion(adapter, cmd); + } + + spin_unlock(&mbx->queue_lock); +} + +static inline int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_mailbox *mbx = ahw->mailbox; + u32 host_mbx_ctrl; + + if (!test_bit(QLC_83XX_MBX_READY, &mbx->status)) + return -EBUSY; + + host_mbx_ctrl = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL); + if (host_mbx_ctrl) { + ahw->idc.collect_dump = 1; + return -EIO; + } + + return 0; +} + +static inline void qlcnic_83xx_signal_mbx_cmd(struct qlcnic_adapter *adapter, + u8 issue_cmd) +{ + if (issue_cmd) + QLCWRX(adapter->ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER); + else + QLCWRX(adapter->ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER); +} + +static inline void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; + + spin_lock(&mbx->queue_lock); + + list_del(&cmd->list); + mbx->num_cmds--; + + spin_unlock(&mbx->queue_lock); + + qlcnic_83xx_notify_cmd_completion(adapter, cmd); +} + +static void qlcnic_83xx_encode_mbx_cmd(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + u32 mbx_cmd, fw_hal_version, hdr_size, total_size, tmp; + struct qlcnic_hardware_context *ahw = adapter->ahw; + int i, j; + + if (cmd->op_type != QLC_83XX_MBX_POST_BC_OP) { + 
mbx_cmd = cmd->req.arg[0]; + writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0)); + for (i = 1; i < cmd->req.num; i++) + writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i)); + } else { + fw_hal_version = ahw->fw_hal_version; + hdr_size = sizeof(struct qlcnic_bc_hdr) / sizeof(u32); + total_size = cmd->pay_size + hdr_size; + tmp = QLCNIC_CMD_BC_EVENT_SETUP | total_size << 16; + mbx_cmd = tmp | fw_hal_version << 29; + writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0)); + + /* Back channel specific operations bits */ + mbx_cmd = 0x1 | 1 << 4; + + if (qlcnic_sriov_pf_check(adapter)) + mbx_cmd |= cmd->func_num << 5; + + writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1)); + + for (i = 2, j = 0; j < hdr_size; i++, j++) + writel(*(cmd->hdr++), QLCNIC_MBX_HOST(ahw, i)); + for (j = 0; j < cmd->pay_size; j++, i++) + writel(*(cmd->pay++), QLCNIC_MBX_HOST(ahw, i)); + } +} + +void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *adapter) +{ + struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; + + clear_bit(QLC_83XX_MBX_READY, &mbx->status); + complete(&mbx->completion); + cancel_work_sync(&mbx->work); + flush_workqueue(mbx->work_q); + qlcnic_83xx_flush_mbx_queue(adapter); +} + +static inline int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd, + unsigned long *timeout) +{ + struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; + + if (test_bit(QLC_83XX_MBX_READY, &mbx->status)) { + atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_WAIT); + init_completion(&cmd->completion); + cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN; + + spin_lock(&mbx->queue_lock); + + list_add_tail(&cmd->list, &mbx->cmd_q); + mbx->num_cmds++; + cmd->total_cmds = mbx->num_cmds; + *timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT; + queue_work(mbx->work_q, &mbx->work); + + spin_unlock(&mbx->queue_lock); + + return 0; + } + + return -EBUSY; +} + +static inline int qlcnic_83xx_check_mac_rcode(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + u8 mac_cmd_rcode; + u32 fw_data; + + if (cmd->cmd_op == QLCNIC_CMD_CONFIG_MAC_VLAN) { + fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2)); + mac_cmd_rcode = (u8)fw_data; + if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE || + mac_cmd_rcode == QLC_83XX_MAC_PRESENT || + mac_cmd_rcode == QLC_83XX_MAC_ABSENT) { + cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS; + return QLCNIC_RCODE_SUCCESS; + } + } + + return -EINVAL; +} + +static void qlcnic_83xx_decode_mbx_rsp(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct device *dev = &adapter->pdev->dev; + u8 mbx_err_code; + u32 fw_data; + + fw_data = readl(QLCNIC_MBX_FW(ahw, 0)); + mbx_err_code = QLCNIC_MBX_STATUS(fw_data); + qlcnic_83xx_get_mbx_data(adapter, cmd); + + switch (mbx_err_code) { + case QLCNIC_MBX_RSP_OK: + case QLCNIC_MBX_PORT_RSP_OK: + cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS; + break; + default: + if (!qlcnic_83xx_check_mac_rcode(adapter, cmd)) + break; + + dev_err(dev, "%s: Mailbox command failed, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x, error=0x%x\n", + __func__, cmd->cmd_op, cmd->type, ahw->pci_func, + ahw->op_mode, mbx_err_code); + cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_FAILED; + qlcnic_dump_mbx(adapter, cmd); + } + + return; +} + +static void qlcnic_83xx_mailbox_worker(struct work_struct *work) +{ + struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox, + work); + struct qlcnic_adapter *adapter = mbx->adapter; + struct qlcnic_mbx_ops *mbx_ops = mbx->ops; + struct device *dev = &adapter->pdev->dev; + atomic_t 
*rsp_status = &mbx->rsp_status; + struct list_head *head = &mbx->cmd_q; + struct qlcnic_hardware_context *ahw; + struct qlcnic_cmd_args *cmd = NULL; + + ahw = adapter->ahw; + + while (true) { + if (qlcnic_83xx_check_mbx_status(adapter)) + return; + + atomic_set(rsp_status, QLC_83XX_MBX_RESPONSE_WAIT); + + spin_lock(&mbx->queue_lock); + + if (list_empty(head)) { + spin_unlock(&mbx->queue_lock); + return; + } + cmd = list_entry(head->next, struct qlcnic_cmd_args, list); + + spin_unlock(&mbx->queue_lock); + + mbx_ops->encode_cmd(adapter, cmd); + mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST); + + if (wait_for_completion_timeout(&mbx->completion, + QLC_83XX_MBX_TIMEOUT)) { + mbx_ops->decode_resp(adapter, cmd); + mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_COMPLETION); + } else { + dev_err(dev, "%s: Mailbox command timeout, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x\n", + __func__, cmd->cmd_op, cmd->type, ahw->pci_func, + ahw->op_mode); + clear_bit(QLC_83XX_MBX_READY, &mbx->status); + qlcnic_83xx_idc_request_reset(adapter, + QLCNIC_FORCE_FW_DUMP_KEY); + cmd->rsp_opcode = QLCNIC_RCODE_TIMEOUT; + } + mbx_ops->dequeue_cmd(adapter, cmd); + } +} + +static struct qlcnic_mbx_ops qlcnic_83xx_mbx_ops = { + .enqueue_cmd = qlcnic_83xx_enqueue_mbx_cmd, + .dequeue_cmd = qlcnic_83xx_dequeue_mbx_cmd, + .decode_resp = qlcnic_83xx_decode_mbx_rsp, + .encode_cmd = qlcnic_83xx_encode_mbx_cmd, + .nofity_fw = qlcnic_83xx_signal_mbx_cmd, +}; + +int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_mailbox *mbx; + + ahw->mailbox = kzalloc(sizeof(*mbx), GFP_KERNEL); + if (!ahw->mailbox) + return -ENOMEM; + + mbx = ahw->mailbox; + mbx->ops = &qlcnic_83xx_mbx_ops; + mbx->adapter = adapter; + + spin_lock_init(&mbx->queue_lock); + spin_lock_init(&mbx->aen_lock); + INIT_LIST_HEAD(&mbx->cmd_q); + init_completion(&mbx->completion); + + mbx->work_q = create_singlethread_workqueue("qlcnic_mailbox"); + if (mbx->work_q == NULL) { + kfree(mbx); + return -ENOMEM; + } + + INIT_WORK(&mbx->work, qlcnic_83xx_mailbox_worker); + set_bit(QLC_83XX_MBX_READY, &mbx->status); + return 0; +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index 2548d14..542b58d 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -89,6 +89,13 @@ #define QLC_83XX_MAX_RESET_SEQ_ENTRIES 16 +#define QLC_83XX_MBX_POST_BC_OP 0x1 +#define QLC_83XX_MBX_COMPLETION 0x0 +#define QLC_83XX_MBX_REQUEST 0x1 + +#define QLC_83XX_MBX_TIMEOUT (5 * HZ) +#define QLC_83XX_MBX_CMD_LOOP 5000000 + /* status descriptor mailbox data * @phy_addr_{low|high}: physical address of buffer * @sds_ring_size: buffer size @@ -449,6 +456,20 @@ enum qlcnic_83xx_states { #define QLC_83xx_FLASH_MAX_WAIT_USEC 100 #define QLC_83XX_FLASH_LOCK_TIMEOUT 10000 +enum qlc_83xx_mbx_cmd_type { + QLC_83XX_MBX_CMD_WAIT = 0, + QLC_83XX_MBX_CMD_NO_WAIT, + QLC_83XX_MBX_CMD_BUSY_WAIT, +}; + +enum qlc_83xx_mbx_response_states { + QLC_83XX_MBX_RESPONSE_WAIT = 0, + QLC_83XX_MBX_RESPONSE_ARRIVED, +}; + +#define QLC_83XX_MBX_RESPONSE_FAILED 0x2 +#define QLC_83XX_MBX_RESPONSE_UNKNOWN 0x3 + /* Additional registers in 83xx */ enum qlc_83xx_ext_regs { QLCNIC_GLOBAL_RESET = 0, @@ -498,7 +519,7 @@ enum qlc_83xx_ext_regs { /* 83xx funcitons */ int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *); -int qlcnic_83xx_mbx_op(struct qlcnic_adapter *, struct qlcnic_cmd_args *); +int 
qlcnic_83xx_issue_cmd(struct qlcnic_adapter *, struct qlcnic_cmd_args *); int qlcnic_83xx_setup_intr(struct qlcnic_adapter *, u8); void qlcnic_83xx_get_func_no(struct qlcnic_adapter *); int qlcnic_83xx_cam_lock(struct qlcnic_adapter *); @@ -551,7 +572,7 @@ void qlcnic_set_npar_data(struct qlcnic_adapter *, const struct qlcnic_info *, void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *); irqreturn_t qlcnic_83xx_handle_aen(int, void *); int qlcnic_83xx_get_port_info(struct qlcnic_adapter *); -void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *); +void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *); void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *); irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *); irqreturn_t qlcnic_83xx_intr(int, void *); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index f41dfab..0c5110c 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -617,7 +617,7 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter) if (err) return err; - qlcnic_83xx_enable_mbx_intrpt(adapter); + qlcnic_83xx_enable_mbx_interrupt(adapter); if (qlcnic_83xx_configure_opmode(adapter)) { qlcnic_83xx_idc_enter_failed_state(adapter, 1); @@ -2120,7 +2120,7 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac) /* Initilaize 83xx mailbox spinlock */ spin_lock_init(&ahw->mbx_lock); - set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status); + set_bit(QLC_83XX_MBX_READY, &ahw->idc.status); qlcnic_83xx_clear_function_resources(adapter); /* register for NIC IDC AEN Events */ diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 62380ce..d9c6ae5 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -33,7 +33,7 @@ static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32); static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *); static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *); static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *); -static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *, +static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *, struct qlcnic_cmd_args *); static void qlcnic_sriov_process_bc_cmd(struct work_struct *); @@ -45,7 +45,7 @@ static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = { .get_mac_address = qlcnic_83xx_get_mac_address, .setup_intr = qlcnic_83xx_setup_intr, .alloc_mbx_args = qlcnic_83xx_alloc_mbx_args, - .mbx_cmd = qlcnic_sriov_vf_mbx_op, + .mbx_cmd = qlcnic_sriov_issue_cmd, .get_func_no = qlcnic_83xx_get_func_no, .api_lock = qlcnic_83xx_cam_lock, .api_unlock = qlcnic_83xx_cam_unlock, @@ -295,7 +295,7 @@ static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr, opcode = ((struct qlcnic_bc_hdr *)hdr)->cmd_op; - if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) { + if (!test_bit(QLC_83XX_MBX_READY, &ahw->idc.status)) { dev_info(&adapter->pdev->dev, "Mailbox cmd attempted, 0x%x\n", opcode); dev_info(&adapter->pdev->dev, "Mailbox detached\n"); @@ -1083,6 +1083,7 @@ static void qlcnic_sriov_process_bc_cmd(struct work_struct *work) if (test_bit(QLC_BC_VF_FLR, &vf->state)) return; + memset(&cmd, 0, sizeof(struct qlcnic_cmd_args)); trans = list_first_entry(&vf->rcv_act.wait_list, struct 
qlcnic_bc_trans, list); adapter = vf->adapter; @@ -1232,6 +1233,7 @@ static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov, return; } + memset(&cmd, 0, sizeof(struct qlcnic_cmd_args)); cmd_op = hdr->cmd_op; if (qlcnic_sriov_alloc_bc_trans(&trans)) return; @@ -1357,7 +1359,7 @@ int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable) if (enable) cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7); - err = qlcnic_83xx_mbx_op(adapter, &cmd); + err = qlcnic_83xx_issue_cmd(adapter, &cmd); if (err != QLCNIC_RCODE_SUCCESS) { dev_err(&adapter->pdev->dev, @@ -1389,7 +1391,7 @@ static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter, return -EIO; } -static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *adapter, +static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd) { struct qlcnic_hardware_context *ahw = adapter->ahw; @@ -1409,7 +1411,7 @@ static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *adapter, goto cleanup_transaction; retry: - if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) { + if (!test_bit(QLC_83XX_MBX_READY, &ahw->idc.status)) { rsp = -EIO; QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n", QLCNIC_MBX_RSP(cmd->req.arg[0]), func); @@ -1612,7 +1614,7 @@ static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter) int err; set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status); - qlcnic_83xx_enable_mbx_intrpt(adapter); + qlcnic_83xx_enable_mbx_interrupt(adapter); err = qlcnic_sriov_cfg_bc_intr(adapter, 1); if (err) @@ -1988,7 +1990,7 @@ int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter) int err; set_bit(QLC_83XX_MODULE_LOADED, &idc->status); - qlcnic_83xx_enable_mbx_intrpt(adapter); + qlcnic_83xx_enable_mbx_interrupt(adapter); err = qlcnic_sriov_cfg_bc_intr(adapter, 1); if (err) return err; -- cgit v0.10.2 From 068a8d197e009efddf63619b78c53848f19b22ff Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Fri, 2 Aug 2013 00:57:41 -0400 Subject: qlcnic: Replace poll mode mailbox interface with interrupt based mailbox interface Signed-off-by: Manish Chopra Signed-off-by: Himanshu Madhani Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 5a49b64..cd95442 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -466,7 +466,6 @@ struct qlcnic_hardware_context { u32 *ext_reg_tbl; u32 mbox_aen[QLC_83XX_MBX_AEN_CNT]; u32 mbox_reg[4]; - spinlock_t mbx_lock; struct qlcnic_mailbox *mailbox; }; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 74c8d84..1eeeed8 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -362,6 +362,10 @@ static inline void qlcnic_83xx_get_mbx_data(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd) { int i; + + if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP) + return; + for (i = 0; i < cmd->rsp.num; i++) cmd->rsp.arg[i] = readl(QLCNIC_MBX_FW(adapter->ahw, i)); } @@ -406,22 +410,25 @@ static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx) static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter) { - u32 resp, event; + u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED; + struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; unsigned long flags; - spin_lock_irqsave(&adapter->ahw->mbx_lock, flags); - + spin_lock_irqsave(&mbx->aen_lock, flags); resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL); if (!(resp & QLCNIC_SET_OWNER)) goto out; event = readl(QLCNIC_MBX_FW(adapter->ahw, 0)); - if (event & QLCNIC_MBX_ASYNC_EVENT) + if (event & QLCNIC_MBX_ASYNC_EVENT) { __qlcnic_83xx_process_aen(adapter); - + } else { + if (atomic_read(&mbx->rsp_status) != rsp_status) + qlcnic_83xx_notify_mbx_response(mbx); + } out: qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter); - spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags); + spin_unlock_irqrestore(&mbx->aen_lock, flags); } irqreturn_t qlcnic_83xx_intr(int irq, void *data) @@ -694,6 +701,9 @@ static void qlcnic_dump_mbx(struct qlcnic_adapter *adapter, { int i; + if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP) + return; + dev_info(&adapter->pdev->dev, "Host MBX regs(%d)\n", cmd->req.num); for (i = 0; i < cmd->req.num; i++) { @@ -712,120 +722,74 @@ static void qlcnic_dump_mbx(struct qlcnic_adapter *adapter, pr_info("\n"); } -/* Mailbox response for mac rcode */ -u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter) +static inline void +qlcnic_83xx_poll_for_mbx_completion(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) { - u32 fw_data; - u8 mac_cmd_rcode; + struct qlcnic_hardware_context *ahw = adapter->ahw; + int opcode = LSW(cmd->req.arg[0]); + unsigned long max_loops; - fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2)); - mac_cmd_rcode = (u8)fw_data; - if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE || - mac_cmd_rcode == QLC_83XX_MAC_PRESENT || - mac_cmd_rcode == QLC_83XX_MAC_ABSENT) - return QLCNIC_RCODE_SUCCESS; - return 1; -} + max_loops = cmd->total_cmds * QLC_83XX_MBX_CMD_LOOP; -u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter, u32 *wait_time) -{ - u32 data; - struct qlcnic_hardware_context *ahw = adapter->ahw; - /* wait for mailbox completion */ - do { - data = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL); - if (++(*wait_time) > QLCNIC_MBX_TIMEOUT) { - data = QLCNIC_RCODE_TIMEOUT; - break; - } - mdelay(1); - } while (!data); - return data; + for (; max_loops; max_loops--) { + if (atomic_read(&cmd->rsp_status) == + QLC_83XX_MBX_RESPONSE_ARRIVED) + return; + + udelay(1); + } + + 
dev_err(&adapter->pdev->dev, + "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n", + __func__, opcode, cmd->type, ahw->pci_func, ahw->op_mode); + flush_workqueue(ahw->mailbox->work_q); + return; } int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd) { - int i; - u16 opcode; - u8 mbx_err_code; - unsigned long flags; + struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; struct qlcnic_hardware_context *ahw = adapter->ahw; - u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, wait_time = 0; + int cmd_type, err, opcode; + unsigned long timeout; opcode = LSW(cmd->req.arg[0]); - if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) { - dev_info(&adapter->pdev->dev, - "Mailbox cmd attempted, 0x%x\n", opcode); - dev_info(&adapter->pdev->dev, "Mailbox detached\n"); - return 0; + cmd_type = cmd->type; + err = mbx->ops->enqueue_cmd(adapter, cmd, &timeout); + if (err) { + dev_err(&adapter->pdev->dev, + "%s: Mailbox not available, cmd_op=0x%x, cmd_context=0x%x, pci_func=0x%x, op_mode=0x%x\n", + __func__, opcode, cmd->type, ahw->pci_func, + ahw->op_mode); + return err; } - spin_lock_irqsave(&adapter->ahw->mbx_lock, flags); - mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL); - - if (mbx_val) { - QLCDB(adapter, DRV, - "Mailbox cmd attempted, 0x%x\n", opcode); - QLCDB(adapter, DRV, - "Mailbox not available, 0x%x, collect FW dump\n", - mbx_val); - cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT; - spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags); - return cmd->rsp.arg[0]; - } - - /* Fill in mailbox registers */ - mbx_cmd = cmd->req.arg[0]; - writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0)); - for (i = 1; i < cmd->req.num; i++) - writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i)); - - /* Signal FW about the impending command */ - QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER); -poll: - rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time); - if (rsp != QLCNIC_RCODE_TIMEOUT) { - /* Get the FW response data */ - fw_data = readl(QLCNIC_MBX_FW(ahw, 0)); - if (fw_data & QLCNIC_MBX_ASYNC_EVENT) { - __qlcnic_83xx_process_aen(adapter); - goto poll; - } - mbx_err_code = QLCNIC_MBX_STATUS(fw_data); - rsp_num = QLCNIC_MBX_NUM_REGS(fw_data); - opcode = QLCNIC_MBX_RSP(fw_data); - qlcnic_83xx_get_mbx_data(adapter, cmd); - - switch (mbx_err_code) { - case QLCNIC_MBX_RSP_OK: - case QLCNIC_MBX_PORT_RSP_OK: - rsp = QLCNIC_RCODE_SUCCESS; - break; - default: - if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) { - rsp = qlcnic_83xx_mac_rcode(adapter); - if (!rsp) - goto out; - } + switch (cmd_type) { + case QLC_83XX_MBX_CMD_WAIT: + if (!wait_for_completion_timeout(&cmd->completion, timeout)) { dev_err(&adapter->pdev->dev, - "MBX command 0x%x failed with err:0x%x\n", - opcode, mbx_err_code); - rsp = mbx_err_code; - qlcnic_dump_mbx(adapter, cmd); - break; + "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n", + __func__, opcode, cmd_type, ahw->pci_func, + ahw->op_mode); + flush_workqueue(mbx->work_q); } - goto out; + break; + case QLC_83XX_MBX_CMD_NO_WAIT: + return 0; + case QLC_83XX_MBX_CMD_BUSY_WAIT: + qlcnic_83xx_poll_for_mbx_completion(adapter, cmd); + break; + default: + dev_err(&adapter->pdev->dev, + "%s: Invalid mailbox command, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n", + __func__, opcode, cmd_type, ahw->pci_func, + ahw->op_mode); + qlcnic_83xx_detach_mailbox_work(adapter); } - dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n", - QLCNIC_MBX_RSP(mbx_cmd)); - rsp = QLCNIC_RCODE_TIMEOUT; -out: - /* clear fw mbx 
control register */ - QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER); - spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags); - return rsp; + return cmd->rsp_opcode; } int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx, @@ -858,6 +822,7 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx, memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num); temp = adapter->ahw->fw_hal_version << 29; mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp); + mbx->cmd_op = type; return 0; } } @@ -941,20 +906,23 @@ void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter) static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter) { + u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED; struct qlcnic_hardware_context *ahw = adapter->ahw; - u32 resp, event; + struct qlcnic_mailbox *mbx = ahw->mailbox; unsigned long flags; - spin_lock_irqsave(&ahw->mbx_lock, flags); - + spin_lock_irqsave(&mbx->aen_lock, flags); resp = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL); if (resp & QLCNIC_SET_OWNER) { event = readl(QLCNIC_MBX_FW(ahw, 0)); - if (event & QLCNIC_MBX_ASYNC_EVENT) + if (event & QLCNIC_MBX_ASYNC_EVENT) { __qlcnic_83xx_process_aen(adapter); + } else { + if (atomic_read(&mbx->rsp_status) != rsp_status) + qlcnic_83xx_notify_mbx_response(mbx); + } } - - spin_unlock_irqrestore(&ahw->mbx_lock, flags); + spin_unlock_irqrestore(&mbx->aen_lock, flags); } static void qlcnic_83xx_mbx_poll_work(struct work_struct *work) @@ -1627,26 +1595,33 @@ static void qlcnic_83xx_set_interface_id_promisc(struct qlcnic_adapter *adapter, int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode) { - int err; + struct qlcnic_cmd_args *cmd = NULL; u32 temp = 0; - struct qlcnic_cmd_args cmd; + int err; if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED) return -EIO; - err = qlcnic_alloc_mbx_args(&cmd, adapter, + cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); + if (!cmd) + return -ENOMEM; + + err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIGURE_MAC_RX_MODE); if (err) - return err; + goto out; + cmd->type = QLC_83XX_MBX_CMD_NO_WAIT; qlcnic_83xx_set_interface_id_promisc(adapter, &temp); - cmd.req.arg[1] = (mode ? 1 : 0) | temp; - err = qlcnic_issue_cmd(adapter, &cmd); - if (err) - dev_info(&adapter->pdev->dev, - "Promiscous mode config failed\n"); + cmd->req.arg[1] = (mode ? 1 : 0) | temp; + err = qlcnic_issue_cmd(adapter, cmd); + if (!err) + return err; - qlcnic_free_mbx_args(&cmd); + qlcnic_free_mbx_args(cmd); + +out: + kfree(cmd); return err; } @@ -1967,25 +1942,31 @@ static void qlcnic_83xx_set_interface_id_macaddr(struct qlcnic_adapter *adapter, int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr, u16 vlan_id, u8 op) { - int err; - u32 *buf, temp = 0; - struct qlcnic_cmd_args cmd; + struct qlcnic_cmd_args *cmd = NULL; struct qlcnic_macvlan_mbx mv; + u32 *buf, temp = 0; + int err; if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED) return -EIO; - err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN); + cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); + if (!cmd) + return -ENOMEM; + + err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN); if (err) - return err; + goto out; + + cmd->type = QLC_83XX_MBX_CMD_NO_WAIT; if (vlan_id) op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ? 
QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL; - cmd.req.arg[1] = op | (1 << 8); + cmd->req.arg[1] = op | (1 << 8); qlcnic_83xx_set_interface_id_macaddr(adapter, &temp); - cmd.req.arg[1] |= temp; + cmd->req.arg[1] |= temp; mv.vlan = vlan_id; mv.mac_addr0 = addr[0]; mv.mac_addr1 = addr[1]; @@ -1993,14 +1974,15 @@ int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr, mv.mac_addr3 = addr[3]; mv.mac_addr4 = addr[4]; mv.mac_addr5 = addr[5]; - buf = &cmd.req.arg[2]; + buf = &cmd->req.arg[2]; memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx)); - err = qlcnic_issue_cmd(adapter, &cmd); - if (err) - dev_err(&adapter->pdev->dev, - "MAC-VLAN %s to CAM failed, err=%d.\n", - ((op == 1) ? "add " : "delete "), err); - qlcnic_free_mbx_args(&cmd); + err = qlcnic_issue_cmd(adapter, cmd); + if (!err) + return err; + + qlcnic_free_mbx_args(cmd); +out: + kfree(cmd); return err; } @@ -2109,10 +2091,12 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter, irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data) { struct qlcnic_adapter *adapter = data; - unsigned long flags; + struct qlcnic_mailbox *mbx; u32 mask, resp, event; + unsigned long flags; - spin_lock_irqsave(&adapter->ahw->mbx_lock, flags); + mbx = adapter->ahw->mailbox; + spin_lock_irqsave(&mbx->aen_lock, flags); resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL); if (!(resp & QLCNIC_SET_OWNER)) goto out; @@ -2120,11 +2104,13 @@ irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data) event = readl(QLCNIC_MBX_FW(adapter->ahw, 0)); if (event & QLCNIC_MBX_ASYNC_EVENT) __qlcnic_83xx_process_aen(adapter); + else + qlcnic_83xx_notify_mbx_response(mbx); + out: mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK); writel(0, adapter->ahw->pci_base0 + mask); - spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags); - + spin_unlock_irqrestore(&mbx->aen_lock, flags); return IRQ_HANDLED; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index 542b58d..9993705 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -644,8 +644,6 @@ int qlcnic_83xx_set_led(struct net_device *, enum ethtool_phys_id_state); int qlcnic_83xx_flash_test(struct qlcnic_adapter *); int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *); int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *); -u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *); -u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *, u32 *); void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *); void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *); void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 0c5110c..bb7c649 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -399,6 +399,7 @@ static void qlcnic_83xx_idc_detach_driver(struct qlcnic_adapter *adapter) struct net_device *netdev = adapter->netdev; netif_device_detach(netdev); + qlcnic_83xx_detach_mailbox_work(adapter); /* Disable mailbox interrupt */ qlcnic_83xx_disable_mbx_intr(adapter); @@ -610,6 +611,9 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter) { int err; + qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox); + qlcnic_83xx_enable_mbx_interrupt(adapter); + /* register for NIC IDC AEN Events */ qlcnic_83xx_register_nic_idc_func(adapter, 1); @@ 
-640,7 +644,6 @@ static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter) struct qlcnic_hardware_context *ahw = adapter->ahw; qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1); - set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status); qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1); set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status); @@ -810,9 +813,10 @@ static int qlcnic_83xx_idc_init_state(struct qlcnic_adapter *adapter) **/ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter) { - u32 val; struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_mailbox *mbx = ahw->mailbox; int ret = 0; + u32 val; /* Perform NIC configuration based ready state entry actions */ if (ahw->idc.state_entry(adapter)) @@ -824,7 +828,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter) dev_err(&adapter->pdev->dev, "Error: device temperature %d above limits\n", adapter->ahw->temp); - clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status); + clear_bit(QLC_83XX_MBX_READY, &mbx->status); set_bit(__QLCNIC_RESETTING, &adapter->state); qlcnic_83xx_idc_detach_driver(adapter); qlcnic_83xx_idc_enter_failed_state(adapter, 1); @@ -837,7 +841,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter) if (ret) { adapter->flags |= QLCNIC_FW_HANG; if (!(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) { - clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status); + clear_bit(QLC_83XX_MBX_READY, &mbx->status); set_bit(__QLCNIC_RESETTING, &adapter->state); qlcnic_83xx_idc_enter_need_reset_state(adapter, 1); } @@ -845,6 +849,8 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter) } if ((val & QLC_83XX_IDC_GRACEFULL_RESET) || ahw->idc.collect_dump) { + clear_bit(QLC_83XX_MBX_READY, &mbx->status); + /* Move to need reset state and prepare for reset */ qlcnic_83xx_idc_enter_need_reset_state(adapter, 1); return ret; @@ -882,12 +888,13 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter) **/ static int qlcnic_83xx_idc_need_reset_state(struct qlcnic_adapter *adapter) { + struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; int ret = 0; if (adapter->ahw->idc.prev_state != QLC_83XX_IDC_DEV_NEED_RESET) { qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1); set_bit(__QLCNIC_RESETTING, &adapter->state); - clear_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status); + clear_bit(QLC_83XX_MBX_READY, &mbx->status); if (adapter->ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE) qlcnic_83xx_disable_vnic_mode(adapter, 1); @@ -1079,7 +1086,6 @@ static void qlcnic_83xx_setup_idc_parameters(struct qlcnic_adapter *adapter) adapter->ahw->idc.name = (char **)qlc_83xx_idc_states; clear_bit(__QLCNIC_RESETTING, &adapter->state); - set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status); set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status); /* Check if reset recovery is disabled */ @@ -1190,6 +1196,9 @@ void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key) { u32 val; + if (qlcnic_sriov_vf_check(adapter)) + return; + if (qlcnic_83xx_lock_driver(adapter)) { dev_err(&adapter->pdev->dev, "%s:failed, please retry\n", __func__); @@ -2110,17 +2119,35 @@ static void qlcnic_83xx_clear_function_resources(struct qlcnic_adapter *adapter) int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac) { struct qlcnic_hardware_context *ahw = adapter->ahw; + int err = 0; - if (qlcnic_sriov_vf_check(adapter)) - return qlcnic_sriov_vf_init(adapter, pci_using_dac); + ahw->msix_supported = !!qlcnic_use_msi_x; + err = 
qlcnic_83xx_init_mailbox_work(adapter); + if (err) + goto exit; - if (qlcnic_83xx_check_hw_status(adapter)) - return -EIO; + if (qlcnic_sriov_vf_check(adapter)) { + err = qlcnic_sriov_vf_init(adapter, pci_using_dac); + if (err) + goto detach_mbx; + else + return err; + } - /* Initilaize 83xx mailbox spinlock */ - spin_lock_init(&ahw->mbx_lock); + err = qlcnic_83xx_check_hw_status(adapter); + if (err) + goto detach_mbx; + + err = qlcnic_setup_intr(adapter, 0); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n"); + goto disable_intr; + } + + err = qlcnic_83xx_setup_mbx_intr(adapter); + if (err) + goto disable_mbx_intr; - set_bit(QLC_83XX_MBX_READY, &ahw->idc.status); qlcnic_83xx_clear_function_resources(adapter); /* register for NIC IDC AEN Events */ @@ -2129,21 +2156,35 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac) if (!qlcnic_83xx_read_flash_descriptor_table(adapter)) qlcnic_83xx_read_flash_mfg_id(adapter); - if (qlcnic_83xx_idc_init(adapter)) - return -EIO; + err = qlcnic_83xx_idc_init(adapter); + if (err) + goto disable_mbx_intr; /* Configure default, SR-IOV or Virtual NIC mode of operation */ - if (qlcnic_83xx_configure_opmode(adapter)) - return -EIO; + err = qlcnic_83xx_configure_opmode(adapter); + if (err) + goto disable_mbx_intr; /* Perform operating mode specific initialization */ - if (adapter->nic_ops->init_driver(adapter)) - return -EIO; + err = adapter->nic_ops->init_driver(adapter); + if (err) + goto disable_mbx_intr; INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work); /* Periodically monitor device status */ qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work); + return 0; - return adapter->ahw->idc.err_code; +disable_mbx_intr: + qlcnic_83xx_free_mbx_intr(adapter); + +disable_intr: + qlcnic_teardown_intr(adapter); + +detach_mbx: + qlcnic_83xx_detach_mailbox_work(adapter); + qlcnic_83xx_free_mailbox(ahw->mailbox); +exit: + return err; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 4528f8e..cdc24e4 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -2141,16 +2141,12 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_warn(&pdev->dev, "83xx adapter do not support MSI interrupts\n"); - err = qlcnic_setup_intr(adapter, 0); - if (err) { - dev_err(&pdev->dev, "Failed to setup interrupt\n"); - goto err_out_disable_msi; - } - - if (qlcnic_83xx_check(adapter)) { - err = qlcnic_83xx_setup_mbx_intr(adapter); - if (err) + if (qlcnic_82xx_check(adapter)) { + err = qlcnic_setup_intr(adapter, 0); + if (err) { + dev_err(&pdev->dev, "Failed to setup interrupt\n"); goto err_out_disable_msi; + } } err = qlcnic_get_act_pci_func(adapter); @@ -2237,9 +2233,11 @@ static void qlcnic_remove(struct pci_dev *pdev) qlcnic_sriov_cleanup(adapter); if (qlcnic_83xx_check(adapter)) { - qlcnic_83xx_free_mbx_intr(adapter); qlcnic_83xx_register_nic_idc_func(adapter, 0); cancel_delayed_work_sync(&adapter->idc_aen_work); + qlcnic_83xx_free_mbx_intr(adapter); + qlcnic_83xx_detach_mailbox_work(adapter); + qlcnic_83xx_free_mailbox(ahw->mailbox); } qlcnic_detach(adapter); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index d9c6ae5..e58c1d4 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -286,96 +286,38 @@ void 
qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter) static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr, u32 *pay, u8 pci_func, u8 size) { - u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val, wait_time = 0; struct qlcnic_hardware_context *ahw = adapter->ahw; - unsigned long flags; - u16 opcode; - u8 mbx_err_code; - int i, j; - - opcode = ((struct qlcnic_bc_hdr *)hdr)->cmd_op; - - if (!test_bit(QLC_83XX_MBX_READY, &ahw->idc.status)) { - dev_info(&adapter->pdev->dev, - "Mailbox cmd attempted, 0x%x\n", opcode); - dev_info(&adapter->pdev->dev, "Mailbox detached\n"); - return 0; - } - - spin_lock_irqsave(&ahw->mbx_lock, flags); - - mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL); - if (mbx_val) { - QLCDB(adapter, DRV, "Mailbox cmd attempted, 0x%x\n", opcode); - spin_unlock_irqrestore(&ahw->mbx_lock, flags); - return QLCNIC_RCODE_TIMEOUT; - } - /* Fill in mailbox registers */ - val = size + (sizeof(struct qlcnic_bc_hdr) / sizeof(u32)); - mbx_cmd = 0x31 | (val << 16) | (adapter->ahw->fw_hal_version << 29); - - writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0)); - mbx_cmd = 0x1 | (1 << 4); - - if (qlcnic_sriov_pf_check(adapter)) - mbx_cmd |= (pci_func << 5); + struct qlcnic_mailbox *mbx = ahw->mailbox; + struct qlcnic_cmd_args cmd; + unsigned long timeout; + int err; - writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1)); - for (i = 2, j = 0; j < (sizeof(struct qlcnic_bc_hdr) / sizeof(u32)); - i++, j++) { - writel(*(hdr++), QLCNIC_MBX_HOST(ahw, i)); + memset(&cmd, 0, sizeof(struct qlcnic_cmd_args)); + cmd.hdr = hdr; + cmd.pay = pay; + cmd.pay_size = size; + cmd.func_num = pci_func; + cmd.op_type = QLC_83XX_MBX_POST_BC_OP; + cmd.cmd_op = ((struct qlcnic_bc_hdr *)hdr)->cmd_op; + + err = mbx->ops->enqueue_cmd(adapter, &cmd, &timeout); + if (err) { + dev_err(&adapter->pdev->dev, + "%s: Mailbox not available, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n", + __func__, cmd.cmd_op, cmd.type, ahw->pci_func, + ahw->op_mode); + return err; } - for (j = 0; j < size; j++, i++) - writel(*(pay++), QLCNIC_MBX_HOST(ahw, i)); - /* Signal FW about the impending command */ - QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER); - - /* Waiting for the mailbox cmd to complete and while waiting here - * some AEN might arrive. If more than 5 seconds expire we can - * assume something is wrong. 
- */ -poll: - rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time); - if (rsp != QLCNIC_RCODE_TIMEOUT) { - /* Get the FW response data */ - fw_data = readl(QLCNIC_MBX_FW(ahw, 0)); - if (fw_data & QLCNIC_MBX_ASYNC_EVENT) { - __qlcnic_83xx_process_aen(adapter); - goto poll; - } - mbx_err_code = QLCNIC_MBX_STATUS(fw_data); - rsp_num = QLCNIC_MBX_NUM_REGS(fw_data); - opcode = QLCNIC_MBX_RSP(fw_data); - - switch (mbx_err_code) { - case QLCNIC_MBX_RSP_OK: - case QLCNIC_MBX_PORT_RSP_OK: - rsp = QLCNIC_RCODE_SUCCESS; - break; - default: - if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) { - rsp = qlcnic_83xx_mac_rcode(adapter); - if (!rsp) - goto out; - } - dev_err(&adapter->pdev->dev, - "MBX command 0x%x failed with err:0x%x\n", - opcode, mbx_err_code); - rsp = mbx_err_code; - break; - } - goto out; + if (!wait_for_completion_timeout(&cmd.completion, timeout)) { + dev_err(&adapter->pdev->dev, + "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n", + __func__, cmd.cmd_op, cmd.type, ahw->pci_func, + ahw->op_mode); + flush_workqueue(mbx->work_q); } - dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n", - QLCNIC_MBX_RSP(mbx_cmd)); - rsp = QLCNIC_RCODE_TIMEOUT; -out: - /* clear fw mbx control register */ - QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER); - spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags); - return rsp; + return cmd.rsp_opcode; } static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter) @@ -522,8 +464,8 @@ static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter) static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter) { - struct qlcnic_info nic_info; struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_info nic_info; int err; err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0); @@ -637,8 +579,6 @@ int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac) struct qlcnic_hardware_context *ahw = adapter->ahw; int err; - spin_lock_init(&ahw->mbx_lock); - set_bit(QLC_83XX_MBX_READY, &ahw->idc.status); set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status); ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY; ahw->reset_context = 0; @@ -1395,6 +1335,7 @@ static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd) { struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_mailbox *mbx = ahw->mailbox; struct device *dev = &adapter->pdev->dev; struct qlcnic_bc_trans *trans; int err; @@ -1411,7 +1352,7 @@ static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter, goto cleanup_transaction; retry: - if (!test_bit(QLC_83XX_MBX_READY, &ahw->idc.status)) { + if (!test_bit(QLC_83XX_MBX_READY, &mbx->status)) { rsp = -EIO; QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n", QLCNIC_MBX_RSP(cmd->req.arg[0]), func); @@ -1454,7 +1395,7 @@ err_out: if (rsp == QLCNIC_RCODE_TIMEOUT) { ahw->reset_context = 1; adapter->need_fw_reset = 1; - clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status); + clear_bit(QLC_83XX_MBX_READY, &mbx->status); } cleanup_transaction: @@ -1657,8 +1598,10 @@ static void qlcnic_sriov_vf_detach(struct qlcnic_adapter *adapter) struct net_device *netdev = adapter->netdev; u8 i, max_ints = ahw->num_msix - 1; - qlcnic_83xx_disable_mbx_intr(adapter); netif_device_detach(netdev); + qlcnic_83xx_detach_mailbox_work(adapter); + qlcnic_83xx_disable_mbx_intr(adapter); + if (netif_running(netdev)) qlcnic_down(adapter, netdev); @@ -1702,6 +1645,7 @@ static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter) static int 
qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_mailbox *mbx = ahw->mailbox; struct device *dev = &adapter->pdev->dev; struct qlc_83xx_idc *idc = &ahw->idc; u8 func = ahw->pci_func; @@ -1712,7 +1656,7 @@ static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter) /* Skip the context reset and check if FW is hung */ if (adapter->reset_ctx_cnt < 3) { adapter->need_fw_reset = 1; - clear_bit(QLC_83XX_MBX_READY, &idc->status); + clear_bit(QLC_83XX_MBX_READY, &mbx->status); dev_info(dev, "Resetting context, wait here to check if FW is in failed state\n"); return 0; @@ -1737,7 +1681,7 @@ static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter) __func__, adapter->reset_ctx_cnt, func); set_bit(__QLCNIC_RESETTING, &adapter->state); adapter->need_fw_reset = 1; - clear_bit(QLC_83XX_MBX_READY, &idc->status); + clear_bit(QLC_83XX_MBX_READY, &mbx->status); qlcnic_sriov_vf_detach(adapter); adapter->need_fw_reset = 0; @@ -1787,6 +1731,7 @@ static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter *adapter) static int qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter) { + struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; struct qlc_83xx_idc *idc = &adapter->ahw->idc; dev_info(&adapter->pdev->dev, "Device is in quiescent state\n"); @@ -1794,7 +1739,7 @@ qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter) set_bit(__QLCNIC_RESETTING, &adapter->state); adapter->tx_timeo_cnt = 0; adapter->reset_ctx_cnt = 0; - clear_bit(QLC_83XX_MBX_READY, &idc->status); + clear_bit(QLC_83XX_MBX_READY, &mbx->status); qlcnic_sriov_vf_detach(adapter); } @@ -1803,6 +1748,7 @@ qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter) static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter) { + struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; struct qlc_83xx_idc *idc = &adapter->ahw->idc; u8 func = adapter->ahw->pci_func; @@ -1812,7 +1758,7 @@ static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter) set_bit(__QLCNIC_RESETTING, &adapter->state); adapter->tx_timeo_cnt = 0; adapter->reset_ctx_cnt = 0; - clear_bit(QLC_83XX_MBX_READY, &idc->status); + clear_bit(QLC_83XX_MBX_READY, &mbx->status); qlcnic_sriov_vf_detach(adapter); } return 0; -- cgit v0.10.2 From 79da4d0894802398e347fcb1d867c6cbd7f4cc88 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Fri, 2 Aug 2013 00:57:42 -0400 Subject: qlcnic: Enable mailbox interface in poll mode when interrupts are not available Signed-off-by: Manish Chopra Signed-off-by: Himanshu Madhani Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 1eeeed8..948ab72 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -945,6 +945,7 @@ void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *adapter) return; INIT_DELAYED_WORK(&adapter->mbx_poll_work, qlcnic_83xx_mbx_poll_work); + queue_delayed_work(adapter->qlcnic_wq, &adapter->mbx_poll_work, 0); } void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *adapter) @@ -1331,8 +1332,10 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test, if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) { /* disable and free mailbox interrupt */ - if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) + if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { + qlcnic_83xx_enable_mbx_poll(adapter); qlcnic_83xx_free_mbx_intr(adapter); + } adapter->ahw->loopback_state = 0; adapter->ahw->hw_ops->setup_link_event(adapter, 1); } @@ -1353,6 +1356,8 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev, for (ring = 0; ring < adapter->max_sds_rings; ring++) { sds_ring = &adapter->recv_ctx->sds_rings[ring]; qlcnic_83xx_disable_intr(adapter, sds_ring); + if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) + qlcnic_83xx_enable_mbx_poll(adapter); } } @@ -1362,6 +1367,7 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev, if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) { if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { err = qlcnic_83xx_setup_mbx_intr(adapter); + qlcnic_83xx_disable_mbx_poll(adapter); if (err) { dev_err(&adapter->pdev->dev, "%s: failed to setup mbx interrupt\n", @@ -1378,6 +1384,10 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev, if (netif_running(netdev)) __qlcnic_up(adapter, netdev); + + if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST && + !(adapter->flags & QLCNIC_MSIX_ENABLED)) + qlcnic_83xx_disable_mbx_poll(adapter); out: netif_device_attach(netdev); } @@ -1662,8 +1672,6 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode) /* Poll for link up event before running traffic */ do { msleep(QLC_83XX_LB_MSLEEP_COUNT); - if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) - qlcnic_83xx_process_aen(adapter); if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { netdev_info(netdev, @@ -1740,8 +1748,6 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) /* Wait for Link and IDC Completion AEN */ do { msleep(QLC_83XX_LB_MSLEEP_COUNT); - if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) - qlcnic_83xx_process_aen(adapter); if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { netdev_info(netdev, @@ -1789,8 +1795,6 @@ int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode) /* Wait for Link and IDC Completion AEN */ do { msleep(QLC_83XX_LB_MSLEEP_COUNT); - if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) - qlcnic_83xx_process_aen(adapter); if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { netdev_info(netdev, -- cgit v0.10.2 From 375345677c05cf87f1f0ff3d366e3f6000716955 Mon Sep 17 00:00:00 2001 From: Himanshu Madhani Date: Fri, 2 Aug 2013 00:57:43 -0400 Subject: qlcnic: Update version to 5.2.45 Signed-off-by: Himanshu Madhani Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index cd95442..eae41bc 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -37,8 +37,8 @@ #define _QLCNIC_LINUX_MAJOR 5 #define _QLCNIC_LINUX_MINOR 2 -#define _QLCNIC_LINUX_SUBVERSION 44 -#define QLCNIC_LINUX_VERSIONID "5.2.44" +#define _QLCNIC_LINUX_SUBVERSION 45 +#define QLCNIC_LINUX_VERSIONID "5.2.45" #define QLCNIC_DRV_IDC_VER 0x01 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) -- cgit v0.10.2 From 605f81aae7cfe4f61d0a5ee5f588f4956413858c Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Wed, 31 Jul 2013 10:47:56 +0200 Subject: ath10k: implement rx checksum offloading HW supports L3/L4 rx checksum offloading. This should reduce CPU load and improve performance on slow host machines. Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 04f08d9..e784c40 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -804,6 +804,37 @@ static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb) return false; } +static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb) +{ + struct htt_rx_desc *rxd; + u32 flags, info; + bool is_ip4, is_ip6; + bool is_tcp, is_udp; + bool ip_csum_ok, tcpudp_csum_ok; + + rxd = (void *)skb->data - sizeof(*rxd); + flags = __le32_to_cpu(rxd->attention.flags); + info = __le32_to_cpu(rxd->msdu_start.info1); + + is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO); + is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO); + is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO); + is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO); + ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL); + tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL); + + if (!is_ip4 && !is_ip6) + return CHECKSUM_NONE; + if (!is_tcp && !is_udp) + return CHECKSUM_NONE; + if (!ip_csum_ok) + return CHECKSUM_NONE; + if (!tcpudp_csum_ok) + return CHECKSUM_NONE; + + return CHECKSUM_UNNECESSARY; +} + static void ath10k_htt_rx_handler(struct ath10k_htt *htt, struct htt_rx_indication *rx) { @@ -815,6 +846,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt, u8 *fw_desc; int i, j; int ret; + int ip_summed; memset(&info, 0, sizeof(info)); @@ -889,6 +921,11 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt, continue; } + /* The skb is not yet processed and it may be + * reallocated. 
Since the offload is in the original + * skb extract the checksum now and assign it later */ + ip_summed = ath10k_htt_rx_get_csum_state(msdu_head); + info.skb = msdu_head; info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head); info.signal = ATH10K_DEFAULT_NOISE_FLOOR; @@ -914,6 +951,8 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt, if (ath10k_htt_rx_hdr_is_amsdu((void *)info.skb->data)) ath10k_dbg(ATH10K_DBG_HTT, "htt mpdu is amsdu\n"); + info.skb->ip_summed = ip_summed; + ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt mpdu: ", info.skb->data, info.skb->len); ath10k_process_rx(htt->ar, &info); @@ -980,6 +1019,7 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt, info.status = HTT_RX_IND_MPDU_STATUS_OK; info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0), RX_MPDU_START_INFO0_ENCRYPT_TYPE); + info.skb->ip_summed = ath10k_htt_rx_get_csum_state(info.skb); if (tkip_mic_err) { ath10k_warn("tkip mic error\n"); -- cgit v0.10.2 From 7c199997ded6c90fd45a50f49e9ac63adaacb95e Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Wed, 31 Jul 2013 10:47:57 +0200 Subject: ath10k: implement tx checksum offloading HW supports L3/L4 tx checksum offloading. This should reduce CPU load and improve performance on slow host machines. Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index dc3f3e8..656c254 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -465,6 +465,8 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) flags1 = 0; flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID); flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID); + flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD; + flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD; frags_paddr = ATH10K_SKB_CB(txfrag)->paddr; diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 344ad27..5784337 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -3309,6 +3309,8 @@ int ath10k_mac_register(struct ath10k *ar) ar->hw->wiphy->iface_combinations = &ath10k_if_comb; ar->hw->wiphy->n_iface_combinations = 1; + ar->hw->netdev_features = NETIF_F_HW_CSUM; + ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy, ath10k_reg_notifier); if (ret) { -- cgit v0.10.2 From 2e1dea40512d7e99a7e91ac88a6f434a5d7c6fde Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Wed, 31 Jul 2013 10:32:40 +0200 Subject: ath10k: implement get_survey() This implements a limited subset of what can be reported in the survey dump. This can be used for assessing approximate channel load, e.g. for automatic channel selection. Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index ff42bb7..e4bba56 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -38,6 +38,7 @@ #define ATH10K_SCAN_ID 0 #define WMI_READY_TIMEOUT (5 * HZ) #define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ) +#define ATH10K_NUM_CHANS 38 /* Antenna noise floor */ #define ATH10K_DEFAULT_NOISE_FLOOR -95 @@ -375,6 +376,12 @@ struct ath10k { struct work_struct restart_work; + /* cycle count is reported twice for each visited channel during scan. 
+ * access protected by data_lock */ + u32 survey_last_rx_clear_count; + u32 survey_last_cycle_count; + struct survey_info survey[ATH10K_NUM_CHANS]; + #ifdef CONFIG_ATH10K_DEBUGFS struct ath10k_debug debug; #endif diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 5784337..9ea31f8 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -2934,6 +2934,41 @@ static void ath10k_restart_complete(struct ieee80211_hw *hw) mutex_unlock(&ar->conf_mutex); } +static int ath10k_get_survey(struct ieee80211_hw *hw, int idx, + struct survey_info *survey) +{ + struct ath10k *ar = hw->priv; + struct ieee80211_supported_band *sband; + struct survey_info *ar_survey = &ar->survey[idx]; + int ret = 0; + + mutex_lock(&ar->conf_mutex); + + sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ]; + if (sband && idx >= sband->n_channels) { + idx -= sband->n_channels; + sband = NULL; + } + + if (!sband) + sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ]; + + if (!sband || idx >= sband->n_channels) { + ret = -ENOENT; + goto exit; + } + + spin_lock_bh(&ar->data_lock); + memcpy(survey, ar_survey, sizeof(*survey)); + spin_unlock_bh(&ar->data_lock); + + survey->channel = &sband->channels[idx]; + +exit: + mutex_unlock(&ar->conf_mutex); + return ret; +} + static const struct ieee80211_ops ath10k_ops = { .tx = ath10k_tx, .start = ath10k_start, @@ -2955,6 +2990,7 @@ static const struct ieee80211_ops ath10k_ops = { .flush = ath10k_flush, .tx_last_beacon = ath10k_tx_last_beacon, .restart_complete = ath10k_restart_complete, + .get_survey = ath10k_get_survey, #ifdef CONFIG_PM .suspend = ath10k_suspend, .resume = ath10k_resume, diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 1cbcb2e..55f90c7 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -390,9 +390,82 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) return 0; } +static int freq_to_idx(struct ath10k *ar, int freq) +{ + struct ieee80211_supported_band *sband; + int band, ch, idx = 0; + + for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) { + sband = ar->hw->wiphy->bands[band]; + if (!sband) + continue; + + for (ch = 0; ch < sband->n_channels; ch++, idx++) + if (sband->channels[ch].center_freq == freq) + goto exit; + } + +exit: + return idx; +} + static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_CHAN_INFO_EVENTID\n"); + struct wmi_chan_info_event *ev; + struct survey_info *survey; + u32 err_code, freq, cmd_flags, noise_floor, rx_clear_count, cycle_count; + int idx; + + ev = (struct wmi_chan_info_event *)skb->data; + + err_code = __le32_to_cpu(ev->err_code); + freq = __le32_to_cpu(ev->freq); + cmd_flags = __le32_to_cpu(ev->cmd_flags); + noise_floor = __le32_to_cpu(ev->noise_floor); + rx_clear_count = __le32_to_cpu(ev->rx_clear_count); + cycle_count = __le32_to_cpu(ev->cycle_count); + + ath10k_dbg(ATH10K_DBG_WMI, + "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n", + err_code, freq, cmd_flags, noise_floor, rx_clear_count, + cycle_count); + + spin_lock_bh(&ar->data_lock); + + if (!ar->scan.in_progress) { + ath10k_warn("chan info event without a scan request?\n"); + goto exit; + } + + idx = freq_to_idx(ar, freq); + if (idx >= ARRAY_SIZE(ar->survey)) { + ath10k_warn("chan info: invalid frequency %d (idx %d out of bounds)\n", + freq, idx); + goto exit; + } + + 
if (cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) { + /* During scanning chan info is reported twice for each + * visited channel. The reported cycle count is global + * and per-channel cycle count must be calculated */ + + cycle_count -= ar->survey_last_cycle_count; + rx_clear_count -= ar->survey_last_rx_clear_count; + + survey = &ar->survey[idx]; + survey->channel_time = WMI_CHAN_INFO_MSEC(cycle_count); + survey->channel_time_rx = WMI_CHAN_INFO_MSEC(rx_clear_count); + survey->noise = noise_floor; + survey->filled = SURVEY_INFO_CHANNEL_TIME | + SURVEY_INFO_CHANNEL_TIME_RX | + SURVEY_INFO_NOISE_DBM; + } + + ar->survey_last_rx_clear_count = rx_clear_count; + ar->survey_last_cycle_count = cycle_count; + +exit: + spin_unlock_bh(&ar->data_lock); } static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb) diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index da3b2bc..2c5a4f8 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -2931,6 +2931,11 @@ struct wmi_chan_info_event { __le32 cycle_count; } __packed; +#define WMI_CHAN_INFO_FLAG_COMPLETE BIT(0) + +/* FIXME: empirically extrapolated */ +#define WMI_CHAN_INFO_MSEC(x) ((x) / 76595) + /* Beacon filter wmi command info */ #define BCN_FLT_MAX_SUPPORTED_IES 256 #define BCN_FLT_MAX_ELEMS_IE_LIST (BCN_FLT_MAX_SUPPORTED_IES / 32) -- cgit v0.10.2 From 432358ed1d18c19dbf89008325ff6ba662d0996e Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Wed, 31 Jul 2013 10:55:11 +0200 Subject: ath10k: prevent using invalid ringbuffer indexes If the device is removed and hotplug fails ioread32() will return 0xFFFFFFFF. In that case reading ringbuffer during device bringup led to out-of-bounds addressing of a ringbuffer array that in turn led to a paging failure. This could be reproduced by the following: * boot without acpi/prevent hotplug from working * insert and manually detect (pci rescan) the device * remove the device physically * load ath10k driver * kernel crashed Ringbuffer index reading is now protected by using an appropriate mask to prevent addressing an invalid array index. 
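
Aside: the masking works because the CE ring sizes are powers of two, so ANDing whatever value comes back from the hardware with nentries_mask always yields an index inside the ring array, even when a surprise-removed device makes every ioread32() return 0xFFFFFFFF. A minimal, self-contained sketch of that idea follows (illustrative C only, with a made-up ring size; it is not part of the patch below):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t nentries = 512;                    /* ring size, must be a power of two */
		uint32_t nentries_mask = nentries - 1;
		uint32_t hw_read = 0xFFFFFFFFu;             /* what a dead/removed PCI device returns */

		uint32_t unmasked = hw_read;                /* would index far outside the ring array */
		uint32_t masked = hw_read & nentries_mask;  /* always lands in [0, nentries) */

		printf("unmasked index %u, masked index %u (ring has %u entries)\n",
		       unmasked, masked, nentries);
		return 0;
	}
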
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index b407929..f8b969f 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -637,6 +637,7 @@ static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state, ath10k_pci_wake(ar); src_ring->hw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); + src_ring->hw_index &= nentries_mask; ath10k_pci_sleep(ar); } read_index = src_ring->hw_index; @@ -950,10 +951,12 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar, ath10k_pci_wake(ar); src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); + src_ring->sw_index &= src_ring->nentries_mask; src_ring->hw_index = src_ring->sw_index; src_ring->write_index = ath10k_ce_src_ring_write_index_get(ar, ctrl_addr); + src_ring->write_index &= src_ring->nentries_mask; ath10k_pci_sleep(ar); src_ring->per_transfer_context = (void **)ptr; @@ -1035,8 +1038,10 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar, ath10k_pci_wake(ar); dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr); + dest_ring->sw_index &= dest_ring->nentries_mask; dest_ring->write_index = ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr); + dest_ring->write_index &= dest_ring->nentries_mask; ath10k_pci_sleep(ar); dest_ring->per_transfer_context = (void **)ptr; -- cgit v0.10.2 From dcd4a561215b46d2d7c57b855a7da6a3e6e80c0e Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Wed, 31 Jul 2013 10:55:12 +0200 Subject: ath10k: make sure to use passive scan when n_ssids is 0 Normally user specifies broadcast ssid for scanning. If the user wants to do a passive scan it does not pass any ssids. The patch makes sure we ath10k tells firmware to not send anything at all in case it decides no ssids equals broadcast ssid. Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 9ea31f8..6b9f850 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -2338,6 +2338,8 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw, arg.ssids[i].len = req->ssids[i].ssid_len; arg.ssids[i].ssid = req->ssids[i].ssid; } + } else { + arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; } if (req->n_channels) { -- cgit v0.10.2 From d531cb85d538d4a445e3bb3c669af794ea32e558 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Wed, 31 Jul 2013 10:55:13 +0200 Subject: ath10k: advertise more conservative intf combinations Apparently the available firmware has a limit of handling 7 APs, 3 GOs or 8 STAs. This is based on empirical tests and it is still possible some combinations may crash the firmware. 
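
Aside: the limits in the patch below are grouped per interface type, and a requested interface mix is accepted only if every group stays within its own cap and the combined count stays within the combination's overall maximum. A small, hypothetical checker that mirrors that semantics is sketched here (illustrative C only; the per-group caps come from the patch, while the overall maximum of 8 is an assumption about ath10k_if_comb's max_interfaces, which this hunk does not show — the real enforcement lives in cfg80211/mac80211, not in driver code like this):

	#include <stdbool.h>
	#include <stdio.h>

	struct iface_group {
		const char *name;
		int max;        /* cap for this group of interface types */
		int requested;  /* how many interfaces of this group are wanted */
	};

	static bool mix_allowed(const struct iface_group *g, int n, int max_total)
	{
		int total = 0;

		for (int i = 0; i < n; i++) {
			if (g[i].requested > g[i].max)
				return false;
			total += g[i].requested;
		}
		return total <= max_total;
	}

	int main(void)
	{
		struct iface_group groups[] = {
			{ "sta/p2p-client", 8, 2 },
			{ "p2p-go",         3, 1 },
			{ "ap",             7, 4 },
		};

		/* 2 + 1 + 4 = 7 interfaces, each group under its cap -> allowed */
		printf("allowed: %s\n", mix_allowed(groups, 3, 8) ? "yes" : "no");
		return 0;
	}
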
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 6b9f850..47c1163 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -3114,9 +3114,15 @@ static const struct ieee80211_iface_limit ath10k_if_limits[] = { .max = 8, .types = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_P2P_CLIENT) - | BIT(NL80211_IFTYPE_P2P_GO) - | BIT(NL80211_IFTYPE_AP) - } + }, + { + .max = 3, + .types = BIT(NL80211_IFTYPE_P2P_GO) + }, + { + .max = 7, + .types = BIT(NL80211_IFTYPE_AP) + }, }; static const struct ieee80211_iface_combination ath10k_if_comb = { -- cgit v0.10.2 From 0dbd09e6284dc7c3de1470e2f1a3c83e0a0fc591 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Wed, 31 Jul 2013 10:55:14 +0200 Subject: ath10k: zero arvif memory on add_interface() The private memory area in vif provided by mac80211 isn't guaranteed to be zeroed. This patch should fix issues when switching between STA and AP interface types. The tim_bitmap could become polluted by STA bssid field (since it's a union), wep_keys array could also become polluted with invalid pointers and probably much more. Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 47c1163..cf2ba4d 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -1925,6 +1925,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, mutex_lock(&ar->conf_mutex); + memset(arvif, 0, sizeof(*arvif)); + arvif->ar = ar; arvif->vif = vif; -- cgit v0.10.2 From 591ecdb8f276925251bb5f5fad7eb064979ecee1 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Wed, 31 Jul 2013 10:55:15 +0200 Subject: ath10k: fix failpath in MSI-X setup pci_disable_msi() must be called if the initial request_irq() fails. Also add a warning message so it's possible to distinguish request_irq() failure and pci_enable_msi() failure. Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index c71b488..d95439b 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -1990,8 +1990,13 @@ static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num) ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ath10k_pci_msi_fw_handler, IRQF_SHARED, "ath10k_pci", ar); - if (ret) + if (ret) { + ath10k_warn("request_irq(%d) failed %d\n", + ar_pci->pdev->irq + MSI_ASSIGN_FW, ret); + + pci_disable_msi(ar_pci->pdev); return ret; + } for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) { ret = request_irq(ar_pci->pdev->irq + i, -- cgit v0.10.2 From e216975ad97cfcfc436789aa66d59a0e93f337f7 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Thu, 1 Aug 2013 16:17:47 -0700 Subject: uapi: Convert some uses of 6 to ETH_ALEN Use the #define where appropriate. Add #include where appropriate too. Signed-off-by: Joe Perches Signed-off-by: David S. 
Miller diff --git a/include/uapi/linux/dn.h b/include/uapi/linux/dn.h index 9c50445..5fbdd3d 100644 --- a/include/uapi/linux/dn.h +++ b/include/uapi/linux/dn.h @@ -2,6 +2,7 @@ #define _LINUX_DN_H #include +#include /* @@ -120,7 +121,7 @@ struct linkinfo_dn { * Ethernet address format (for DECnet) */ union etheraddress { - __u8 dne_addr[6]; /* Full ethernet address */ + __u8 dne_addr[ETH_ALEN]; /* Full ethernet address */ struct { __u8 dne_hiord[4]; /* DECnet HIORD prefix */ __u8 dne_nodeaddr[2]; /* DECnet node address */ diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index 2d70d79..39f621a 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -14,6 +14,7 @@ #define _UAPI_LINUX_IF_BRIDGE_H #include +#include #define SYSFS_BRIDGE_ATTR "bridge" #define SYSFS_BRIDGE_FDB "brforward" @@ -88,7 +89,7 @@ struct __port_info { }; struct __fdb_entry { - __u8 mac_addr[6]; + __u8 mac_addr[ETH_ALEN]; __u8 port_no; __u8 is_local; __u32 ageing_timer_value; diff --git a/include/uapi/linux/netfilter_bridge/ebt_802_3.h b/include/uapi/linux/netfilter_bridge/ebt_802_3.h index 5bf8491..f37522a 100644 --- a/include/uapi/linux/netfilter_bridge/ebt_802_3.h +++ b/include/uapi/linux/netfilter_bridge/ebt_802_3.h @@ -2,6 +2,7 @@ #define _UAPI__LINUX_BRIDGE_EBT_802_3_H #include +#include #define EBT_802_3_SAP 0x01 #define EBT_802_3_TYPE 0x02 @@ -42,8 +43,8 @@ struct hdr_ni { }; struct ebt_802_3_hdr { - __u8 daddr[6]; - __u8 saddr[6]; + __u8 daddr[ETH_ALEN]; + __u8 saddr[ETH_ALEN]; __be16 len; union { struct hdr_ui ui; diff --git a/include/uapi/linux/netfilter_ipv4/ipt_CLUSTERIP.h b/include/uapi/linux/netfilter_ipv4/ipt_CLUSTERIP.h index c6a204c..eac0f65 100644 --- a/include/uapi/linux/netfilter_ipv4/ipt_CLUSTERIP.h +++ b/include/uapi/linux/netfilter_ipv4/ipt_CLUSTERIP.h @@ -2,6 +2,7 @@ #define _IPT_CLUSTERIP_H_target #include +#include enum clusterip_hashmode { CLUSTERIP_HASHMODE_SIP = 0, @@ -22,7 +23,7 @@ struct ipt_clusterip_tgt_info { __u32 flags; /* only relevant for new ones */ - __u8 clustermac[6]; + __u8 clustermac[ETH_ALEN]; __u16 num_total_nodes; __u16 num_local_nodes; __u16 local_nodes[CLUSTERIP_MAX_NODES]; diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h index 227d4ce..172a7f0 100644 --- a/include/uapi/linux/virtio_net.h +++ b/include/uapi/linux/virtio_net.h @@ -60,7 +60,7 @@ struct virtio_net_config { /* The config defining mac address (if VIRTIO_NET_F_MAC) */ - __u8 mac[6]; + __u8 mac[ETH_ALEN]; /* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */ __u16 status; /* Maximum number of each of transmit and receive queues; diff --git a/include/uapi/linux/wimax/i2400m.h b/include/uapi/linux/wimax/i2400m.h index 62d3561..fd198bc 100644 --- a/include/uapi/linux/wimax/i2400m.h +++ b/include/uapi/linux/wimax/i2400m.h @@ -122,7 +122,7 @@ #define __LINUX__WIMAX__I2400M_H__ #include - +#include /* * Host Device Interface (HDI) common to all busses @@ -487,7 +487,7 @@ struct i2400m_tlv_l4_message_versions { struct i2400m_tlv_detailed_device_info { struct i2400m_tlv_hdr hdr; __u8 reserved1[400]; - __u8 mac_address[6]; + __u8 mac_address[ETH_ALEN]; __u8 reserved2[2]; } __attribute__((packed)); -- cgit v0.10.2 From 574e2af7c0af3273836def5e66f236521bb433c9 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Thu, 1 Aug 2013 16:17:48 -0700 Subject: include: Convert ethernet mac address declarations to use ETH_ALEN It's convenient to have ethernet mac addresses use ETH_ALEN to be able to grep for them a bit easier and also to ensure that 
the addresses are __aligned(2). Add #include as necessary. Signed-off-by: Joe Perches Acked-by: Mauro Carvalho Chehab Signed-off-by: David S. Miller diff --git a/include/linux/dm9000.h b/include/linux/dm9000.h index 96e8769..841925f 100644 --- a/include/linux/dm9000.h +++ b/include/linux/dm9000.h @@ -14,6 +14,8 @@ #ifndef __DM9000_PLATFORM_DATA #define __DM9000_PLATFORM_DATA __FILE__ +#include + /* IO control flags */ #define DM9000_PLATF_8BITONLY (0x0001) @@ -27,7 +29,7 @@ struct dm9000_plat_data { unsigned int flags; - unsigned char dev_addr[6]; + unsigned char dev_addr[ETH_ALEN]; /* allow replacement IO routines */ diff --git a/include/linux/fs_enet_pd.h b/include/linux/fs_enet_pd.h index 51b7934..343d82a 100644 --- a/include/linux/fs_enet_pd.h +++ b/include/linux/fs_enet_pd.h @@ -18,6 +18,7 @@ #include #include +#include #include #define FS_ENET_NAME "fs_enet" @@ -135,7 +136,7 @@ struct fs_platform_info { const struct fs_mii_bus_info *bus_info; int rx_ring, tx_ring; /* number of buffers on rx */ - __u8 macaddr[6]; /* mac address */ + __u8 macaddr[ETH_ALEN]; /* mac address */ int rx_copybreak; /* limit we copy small frames */ int use_napi; /* use NAPI */ int napi_weight; /* NAPI weight */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index b0dc87a..4e101af 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -16,6 +16,7 @@ #define LINUX_IEEE80211_H #include +#include #include /* @@ -209,28 +210,28 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2) struct ieee80211_hdr { __le16 frame_control; __le16 duration_id; - u8 addr1[6]; - u8 addr2[6]; - u8 addr3[6]; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; + u8 addr3[ETH_ALEN]; __le16 seq_ctrl; - u8 addr4[6]; + u8 addr4[ETH_ALEN]; } __packed __aligned(2); struct ieee80211_hdr_3addr { __le16 frame_control; __le16 duration_id; - u8 addr1[6]; - u8 addr2[6]; - u8 addr3[6]; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; + u8 addr3[ETH_ALEN]; __le16 seq_ctrl; } __packed __aligned(2); struct ieee80211_qos_hdr { __le16 frame_control; __le16 duration_id; - u8 addr1[6]; - u8 addr2[6]; - u8 addr3[6]; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; + u8 addr3[ETH_ALEN]; __le16 seq_ctrl; __le16 qos_ctrl; } __packed __aligned(2); @@ -608,8 +609,8 @@ struct ieee80211s_hdr { u8 flags; u8 ttl; __le32 seqnum; - u8 eaddr1[6]; - u8 eaddr2[6]; + u8 eaddr1[ETH_ALEN]; + u8 eaddr2[ETH_ALEN]; } __packed __aligned(2); /* Mesh flags */ @@ -758,7 +759,7 @@ struct ieee80211_rann_ie { u8 rann_flags; u8 rann_hopcount; u8 rann_ttl; - u8 rann_addr[6]; + u8 rann_addr[ETH_ALEN]; __le32 rann_seq; __le32 rann_interval; __le32 rann_metric; @@ -802,9 +803,9 @@ enum ieee80211_vht_opmode_bits { struct ieee80211_mgmt { __le16 frame_control; __le16 duration; - u8 da[6]; - u8 sa[6]; - u8 bssid[6]; + u8 da[ETH_ALEN]; + u8 sa[ETH_ALEN]; + u8 bssid[ETH_ALEN]; __le16 seq_ctrl; union { struct { @@ -833,7 +834,7 @@ struct ieee80211_mgmt { struct { __le16 capab_info; __le16 listen_interval; - u8 current_ap[6]; + u8 current_ap[ETH_ALEN]; /* followed by SSID and Supported rates */ u8 variable[0]; } __packed reassoc_req; @@ -966,21 +967,21 @@ struct ieee80211_vendor_ie { struct ieee80211_rts { __le16 frame_control; __le16 duration; - u8 ra[6]; - u8 ta[6]; + u8 ra[ETH_ALEN]; + u8 ta[ETH_ALEN]; } __packed __aligned(2); struct ieee80211_cts { __le16 frame_control; __le16 duration; - u8 ra[6]; + u8 ra[ETH_ALEN]; } __packed __aligned(2); struct ieee80211_pspoll { __le16 frame_control; __le16 aid; - u8 bssid[6]; - u8 ta[6]; + u8 bssid[ETH_ALEN]; + u8 ta[ETH_ALEN]; 
} __packed __aligned(2); /* TDLS */ @@ -989,14 +990,14 @@ struct ieee80211_pspoll { struct ieee80211_tdls_lnkie { u8 ie_type; /* Link Identifier IE */ u8 ie_len; - u8 bssid[6]; - u8 init_sta[6]; - u8 resp_sta[6]; + u8 bssid[ETH_ALEN]; + u8 init_sta[ETH_ALEN]; + u8 resp_sta[ETH_ALEN]; } __packed; struct ieee80211_tdls_data { - u8 da[6]; - u8 sa[6]; + u8 da[ETH_ALEN]; + u8 sa[ETH_ALEN]; __be16 ether_type; u8 payload_type; u8 category; @@ -1090,8 +1091,8 @@ struct ieee80211_p2p_noa_attr { struct ieee80211_bar { __le16 frame_control; __le16 duration; - __u8 ra[6]; - __u8 ta[6]; + __u8 ra[ETH_ALEN]; + __u8 ta[ETH_ALEN]; __le16 control; __le16 start_seq_num; } __packed; diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 6aebdfe..09ef2f4 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -33,6 +33,7 @@ #ifndef MLX4_DEVICE_H #define MLX4_DEVICE_H +#include #include #include #include @@ -620,7 +621,7 @@ struct mlx4_eth_av { u8 dgid[16]; u32 reserved4[2]; __be16 vlan; - u8 mac[6]; + u8 mac[ETH_ALEN]; }; union mlx4_ext_av { @@ -914,10 +915,10 @@ enum mlx4_net_trans_promisc_mode { }; struct mlx4_spec_eth { - u8 dst_mac[6]; - u8 dst_mac_msk[6]; - u8 src_mac[6]; - u8 src_mac_msk[6]; + u8 dst_mac[ETH_ALEN]; + u8 dst_mac_msk[ETH_ALEN]; + u8 src_mac[ETH_ALEN]; + u8 src_mac_msk[ETH_ALEN]; u8 ether_type_enable; __be16 ether_type; __be16 vlan_id_msk; diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 262deac..6d35147 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -34,6 +34,7 @@ #define MLX4_QP_H #include +#include #include @@ -143,7 +144,7 @@ struct mlx4_qp_path { u8 feup; u8 fvl_rx; u8 reserved4[2]; - u8 dmac[6]; + u8 dmac[ETH_ALEN]; }; enum { /* fl */ @@ -318,7 +319,7 @@ struct mlx4_wqe_datagram_seg { __be32 dqpn; __be32 qkey; __be16 vlan; - u8 mac[6]; + u8 mac[ETH_ALEN]; }; struct mlx4_wqe_lso_seg { diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h index 6e8215b..61a0da3 100644 --- a/include/linux/mv643xx_eth.h +++ b/include/linux/mv643xx_eth.h @@ -6,6 +6,7 @@ #define __LINUX_MV643XX_ETH_H #include +#include #define MV643XX_ETH_SHARED_NAME "mv643xx_eth" #define MV643XX_ETH_NAME "mv643xx_eth_port" @@ -48,7 +49,7 @@ struct mv643xx_eth_platform_data { * Use this MAC address if it is valid, overriding the * address that is already in the hardware. */ - u8 mac_addr[6]; + u8 mac_addr[ETH_ALEN]; /* * If speed is 0, autonegotiation is enabled. 
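(Editorial aside, not part of the patch series: a minimal sketch of the convention these conversions follow, assuming only <linux/if_ether.h> for ETH_ALEN and <linux/etherdevice.h> for is_valid_ether_addr(); the names example_plat_data and example_set_mac are made up for illustration. Spelling the length as ETH_ALEN keeps MAC-address buffers easy to grep for and keeps the size defined in one place.)

#include <linux/if_ether.h>	/* ETH_ALEN (= 6) */
#include <linux/etherdevice.h>	/* is_valid_ether_addr() */
#include <linux/errno.h>
#include <linux/string.h>	/* memcpy() */

struct example_plat_data {
	unsigned char dev_addr[ETH_ALEN];	/* MAC address, 6 bytes */
};

/* Copy a validated MAC address into (hypothetical) platform data. */
static int example_set_mac(struct example_plat_data *pd, const u8 *addr)
{
	if (!is_valid_ether_addr(addr))
		return -EINVAL;
	memcpy(pd->dev_addr, addr, ETH_ALEN);	/* rather than a bare 6 */
	return 0;
}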
diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h index fc30571..6205eeb 100644 --- a/include/linux/sh_eth.h +++ b/include/linux/sh_eth.h @@ -2,6 +2,7 @@ #define __ASM_SH_ETH_H__ #include +#include enum {EDMAC_LITTLE_ENDIAN, EDMAC_BIG_ENDIAN}; enum { @@ -18,7 +19,7 @@ struct sh_eth_plat_data { phy_interface_t phy_interface; void (*set_mdio_gate)(void *addr); - unsigned char mac_addr[6]; + unsigned char mac_addr[ETH_ALEN]; unsigned no_ether_link:1; unsigned ether_link_active_low:1; unsigned needs_init:1; diff --git a/include/linux/smsc911x.h b/include/linux/smsc911x.h index 4dde70e..eec3efd 100644 --- a/include/linux/smsc911x.h +++ b/include/linux/smsc911x.h @@ -22,6 +22,7 @@ #define __LINUX_SMSC911X_H__ #include +#include /* platform_device configuration data, should be assigned to * the platform_device's dev.platform_data */ @@ -31,7 +32,7 @@ struct smsc911x_platform_config { unsigned int flags; unsigned int shift; phy_interface_t phy_interface; - unsigned char mac[6]; + unsigned char mac[ETH_ALEN]; }; /* Constants for platform_device irq polarity configuration */ diff --git a/include/linux/uwb/spec.h b/include/linux/uwb/spec.h index b52e44f..0df24bf 100644 --- a/include/linux/uwb/spec.h +++ b/include/linux/uwb/spec.h @@ -32,6 +32,7 @@ #include #include +#include #define i1480_FW 0x00000303 /* #define i1480_FW 0x00000302 */ @@ -130,7 +131,7 @@ enum { UWB_DRP_BACKOFF_WIN_MAX = 16 }; * it is also used to define headers sent down and up the wire/radio). */ struct uwb_mac_addr { - u8 data[6]; + u8 data[ETH_ALEN]; } __attribute__((packed)); @@ -568,7 +569,7 @@ struct uwb_rc_evt_confirm { /* Device Address Management event. [WHCI] section 3.1.3.2. */ struct uwb_rc_evt_dev_addr_mgmt { struct uwb_rceb rceb; - u8 baAddr[6]; + u8 baAddr[ETH_ALEN]; u8 bResultCode; } __attribute__((packed)); diff --git a/include/media/tveeprom.h b/include/media/tveeprom.h index 4a1191a..f7119ee 100644 --- a/include/media/tveeprom.h +++ b/include/media/tveeprom.h @@ -12,6 +12,8 @@ enum tveeprom_audio_processor { TVEEPROM_AUDPROC_OTHER, }; +#include + struct tveeprom { u32 has_radio; /* If has_ir == 0, then it is unknown what the IR capabilities are, @@ -40,7 +42,7 @@ struct tveeprom { u32 revision; u32 serial_number; char rev_str[5]; - u8 MAC_address[6]; + u8 MAC_address[ETH_ALEN]; }; void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee, diff --git a/include/net/irda/irlan_common.h b/include/net/irda/irlan_common.h index 0af8b8d..550c2d6 100644 --- a/include/net/irda/irlan_common.h +++ b/include/net/irda/irlan_common.h @@ -32,6 +32,7 @@ #include #include #include +#include #include @@ -161,7 +162,7 @@ struct irlan_provider_cb { int access_type; /* Access type */ __u16 send_arb_val; - __u8 mac_address[6]; /* Generated MAC address for peer device */ + __u8 mac_address[ETH_ALEN]; /* Generated MAC address for peer device */ }; /* -- cgit v0.10.2 From 1409a93274bb1b17f0b7a0b255a75e80899eec11 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Thu, 1 Aug 2013 16:17:49 -0700 Subject: ethernet: Convert mac address uses of 6 to ETH_ALEN Use the normal #define to help grep find mac addresses and ensure that addresses are aligned. pasemi.h has an unaligned access to mac_addr, unchanged for now. Signed-off-by: Joe Perches Acked-by: Olof Johansson # pasemi_mac pieces Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index e1d2643..b7232a9 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -707,7 +707,7 @@ static int ax_init_dev(struct net_device *dev) #ifdef CONFIG_AX88796_93CX6 if (ax->plat->flags & AXFLG_HAS_93CX6) { - unsigned char mac_addr[6]; + unsigned char mac_addr[ETH_ALEN]; struct eeprom_93cx6 eeprom; eeprom.data = ei_local; @@ -719,7 +719,7 @@ static int ax_init_dev(struct net_device *dev) (__le16 __force *)mac_addr, sizeof(mac_addr) >> 1); - memcpy(dev->dev_addr, mac_addr, 6); + memcpy(dev->dev_addr, mac_addr, ETH_ALEN); } #endif if (ax->plat->wordlength == 2) { diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index ed21307..2d8e288 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -1521,7 +1521,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) char *chipname; struct net_device *dev; const struct pcnet32_access *a = NULL; - u8 promaddr[6]; + u8 promaddr[ETH_ALEN]; int ret = -ENODEV; /* reset the chip */ @@ -1665,10 +1665,10 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) } /* read PROM address and compare with CSR address */ - for (i = 0; i < 6; i++) + for (i = 0; i < ETH_ALEN; i++) promaddr[i] = inb(ioaddr + i); - if (memcmp(promaddr, dev->dev_addr, 6) || + if (memcmp(promaddr, dev->dev_addr, ETH_ALEN) || !is_valid_ether_addr(dev->dev_addr)) { if (is_valid_ether_addr(promaddr)) { if (pcnet32_debug & NETIF_MSG_PROBE) { diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h index 82a0312..95aff76 100644 --- a/drivers/net/ethernet/broadcom/cnic_if.h +++ b/drivers/net/ethernet/broadcom/cnic_if.h @@ -238,8 +238,8 @@ struct cnic_sock { u16 src_port; u16 dst_port; u16 vlan_id; - unsigned char old_ha[6]; - unsigned char ha[6]; + unsigned char old_ha[ETH_ALEN]; + unsigned char ha[ETH_ALEN]; u32 mtu; u32 cid; u32 l5_cid; @@ -308,7 +308,7 @@ struct cnic_dev { #define CNIC_F_BNX2_CLASS 3 #define CNIC_F_BNX2X_CLASS 4 atomic_t ref_count; - u8 mac_addr[6]; + u8 mac_addr[ETH_ALEN]; int max_iscsi_conn; int max_fcoe_conn; diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index c94152f..4e8cfa2 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -1304,7 +1304,9 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { struct tulip_private *tp; /* See note below on the multiport cards. */ - static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'}; + static unsigned char last_phys_addr[ETH_ALEN] = { + 0x00, 'L', 'i', 'n', 'u', 'x' + }; static int last_irq; static int multiport_cnt; /* For four-port boards w/one EEPROM */ int i, irq; @@ -1627,8 +1629,8 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->dev_addr[i] = last_phys_addr[i] + 1; #if defined(CONFIG_SPARC) addr = of_get_property(dp, "local-mac-address", &len); - if (addr && len == 6) - memcpy(dev->dev_addr, addr, 6); + if (addr && len == ETH_ALEN) + memcpy(dev->dev_addr, addr, ETH_ALEN); #endif #if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. 
*/ if (last_irq) diff --git a/drivers/net/ethernet/i825xx/sun3_82586.h b/drivers/net/ethernet/i825xx/sun3_82586.h index 93346f0..79aef68 100644 --- a/drivers/net/ethernet/i825xx/sun3_82586.h +++ b/drivers/net/ethernet/i825xx/sun3_82586.h @@ -133,8 +133,8 @@ struct rfd_struct unsigned char last; /* Bit15,Last Frame on List / Bit14,suspend */ unsigned short next; /* linkoffset to next RFD */ unsigned short rbd_offset; /* pointeroffset to RBD-buffer */ - unsigned char dest[6]; /* ethernet-address, destination */ - unsigned char source[6]; /* ethernet-address, source */ + unsigned char dest[ETH_ALEN]; /* ethernet-address, destination */ + unsigned char source[ETH_ALEN]; /* ethernet-address, source */ unsigned short length; /* 802.3 frame-length */ unsigned short zero_dummy; /* dummy */ }; diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 967bae8..d4cdf4d 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -244,7 +244,7 @@ struct myri10ge_priv { int fw_ver_minor; int fw_ver_tiny; int adopted_rx_filter_bug; - u8 mac_addr[6]; /* eeprom mac address */ + u8 mac_addr[ETH_ALEN]; /* eeprom mac address */ unsigned long serial_number; int vendor_specific_offset; int fw_multicast_support; diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c index e88bdb1..dcfe58f 100644 --- a/drivers/net/ethernet/nuvoton/w90p910_ether.c +++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c @@ -922,7 +922,7 @@ static void __init get_mac_address(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); struct platform_device *pdev; - char addr[6]; + char addr[ETH_ALEN]; pdev = ether->pdev; @@ -934,7 +934,7 @@ static void __init get_mac_address(struct net_device *dev) addr[5] = 0xa8; if (is_valid_ether_addr(addr)) - memcpy(dev->dev_addr, &addr, 0x06); + memcpy(dev->dev_addr, &addr, ETH_ALEN); else dev_err(&pdev->dev, "invalid mac address\n"); } diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index a5f0b5d..f21ae7b 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -191,7 +191,7 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac) struct device_node *dn = pci_device_to_OF_node(pdev); int len; const u8 *maddr; - u8 addr[6]; + u8 addr[ETH_ALEN]; if (!dn) { dev_dbg(&pdev->dev, @@ -201,8 +201,8 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac) maddr = of_get_property(dn, "local-mac-address", &len); - if (maddr && len == 6) { - memcpy(mac->mac_addr, maddr, 6); + if (maddr && len == ETH_ALEN) { + memcpy(mac->mac_addr, maddr, ETH_ALEN); return 0; } @@ -219,14 +219,15 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac) return -ENOENT; } - if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0], - &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) { + if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", + &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) + != ETH_ALEN) { dev_warn(&pdev->dev, "can't parse mac address, not configuring\n"); return -EINVAL; } - memcpy(mac->mac_addr, addr, 6); + memcpy(mac->mac_addr, addr, ETH_ALEN); return 0; } diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.h b/drivers/net/ethernet/pasemi/pasemi_mac.h index e2f4efa..f2749d4 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.h +++ b/drivers/net/ethernet/pasemi/pasemi_mac.h @@ -83,7 +83,7 @@ struct pasemi_mac { 
#define MAC_TYPE_GMAC 1 #define MAC_TYPE_XAUI 2 - u8 mac_addr[6]; + u8 mac_addr[ETH_ALEN]; struct net_lro_mgr lro_mgr; struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS]; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c index 9fbb1cd..8375cbd 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c @@ -536,10 +536,10 @@ static void netxen_p2_nic_set_multi(struct net_device *netdev) { struct netxen_adapter *adapter = netdev_priv(netdev); struct netdev_hw_addr *ha; - u8 null_addr[6]; + u8 null_addr[ETH_ALEN]; int i; - memset(null_addr, 0, 6); + memset(null_addr, 0, ETH_ALEN); if (netdev->flags & IFF_PROMISC) { diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h index 7e8d682..8994337 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge.h +++ b/drivers/net/ethernet/qlogic/qlge/qlge.h @@ -2149,7 +2149,7 @@ struct ql_adapter { struct timer_list timer; atomic_t lb_count; /* Keep local copy of current mac address. */ - char current_mac_addr[6]; + char current_mac_addr[ETH_ALEN]; }; /* -- cgit v0.10.2 From d27fc78208b53ccdfd6a57d4ac44a459ca66806f Mon Sep 17 00:00:00 2001 From: "fan.du" Date: Fri, 2 Aug 2013 10:45:13 +0800 Subject: sctp: Don't lookup dst if transport dst is still valid When sctp sits on IPv6, sctp_transport_dst_check passes the cookie as zero, so ip6_dst_check always fails. This behaviour makes transport->dst useless, because every sctp_packet_transmit must look up a valid dst. Add a dst_cookie to sctp_transport, and set the cookie whenever we get a new dst for sctp_transport, so dst validity can be checked against it. Since genid has been split for IPv4 and IPv6, deleting/adding an IPv6 address will also bump the IPv6 genid. So the issues we discussed in http://marc.info/?l=linux-netdev&m=137404469219410&w=4 have all been solved by this patch. Signed-off-by: Fan Du Acked-by: Vlad Yasevich Acked-by: Neil Horman Signed-off-by: David S. Miller diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 554cf88..cb28df9 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -613,7 +613,7 @@ static inline void sctp_v4_map_v6(union sctp_addr *addr) */ static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t) { - if (t->dst && !dst_check(t->dst, 0)) { + if (t->dst && !dst_check(t->dst, t->dst_cookie)) { dst_release(t->dst); t->dst = NULL; } diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 75c4c16..c0f4e29 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -946,6 +946,7 @@ struct sctp_transport { __u64 hb_nonce; struct rcu_head rcu; + u32 dst_cookie; }; struct sctp_transport *sctp_transport_new(struct net *, const union sctp_addr *, diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 85d688f..5a9402e 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -351,7 +351,7 @@ out: rt = (struct rt6_info *)dst; t->dst = dst; - + t->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0; pr_debug("rt6_dst:%pI6 rt6_src:%pI6\n", &rt->rt6i_dst.addr, &fl6->saddr); } else { -- cgit v0.10.2 From 0f75b09c798ed00c30d7d5551b896be883bc2aeb Mon Sep 17 00:00:00 2001 From: Phil Sutter Date: Fri, 2 Aug 2013 11:37:39 +0200 Subject: af_packet: when sending ethernet frames, parse header for skb->protocol This may be necessary when the SKB is passed to other layers on the go, which check the protocol field on their own.
An example is a VLAN packet sent out using AF_PACKET on a bridge interface. The bridging code checks the SKB size, accounting for any VLAN header only if the protocol field is set accordingly. Note that eth_type_trans() sets skb->dev to the passed argument, so this can be skipped in packet_snd() for ethernet frames, as well. Signed-off-by: Phil Sutter Signed-off-by: David S. Miller diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 4cb28a7..71db35e 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -88,6 +88,7 @@ #include #include #include +#include #ifdef CONFIG_INET #include @@ -2005,6 +2006,9 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, if (unlikely(err)) return err; + if (dev->type == ARPHRD_ETHER) + skb->protocol = eth_type_trans(skb, dev); + data += dev->hard_header_len; to_write -= dev->hard_header_len; } @@ -2324,6 +2328,13 @@ static int packet_snd(struct socket *sock, sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags); + if (dev->type == ARPHRD_ETHER) { + skb->protocol = eth_type_trans(skb, dev); + } else { + skb->protocol = proto; + skb->dev = dev; + } + if (!gso_type && (len > dev->mtu + reserve + extra_len)) { /* Earlier code assumed this would be a VLAN pkt, * double-check this now that we have the actual @@ -2338,8 +2349,6 @@ static int packet_snd(struct socket *sock, } } - skb->protocol = proto; - skb->dev = dev; skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; -- cgit v0.10.2 From cbd89acb9eb257ed3b2be867142583fdcf7fdc5b Mon Sep 17 00:00:00 2001 From: Phil Sutter Date: Fri, 2 Aug 2013 11:37:40 +0200 Subject: af_packet: fix for sending VLAN frames via packet_mmap Since tpacket_fill_skb() parses the protocol field in ethernet frames' headers, it's easy to see if any passed frame is a VLAN one and account for the extended size. But as the real protocol does not turn up before tpacket_fill_skb() runs which in turn also checks the frame length, move the max frame length calculation into the function. Signed-off-by: Phil Sutter Signed-off-by: David S. Miller diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 71db35e..2b1470d 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1924,7 +1924,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, __be16 proto, unsigned char *addr, int hlen) { union tpacket_uhdr ph; - int to_write, offset, len, tp_len, nr_frags, len_max; + int to_write, offset, len, tp_len, nr_frags, len_max, max_frame_len; struct socket *sock = po->sk.sk_socket; struct page *page; void *data; @@ -1947,10 +1947,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, tp_len = ph.h1->tp_len; break; } - if (unlikely(tp_len > size_max)) { - pr_err("packet size is too long (%d > %d)\n", tp_len, size_max); - return -EMSGSIZE; - } skb_reserve(skb, hlen); skb_reset_network_header(skb); @@ -2013,6 +2009,18 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, to_write -= dev->hard_header_len; } + max_frame_len = dev->mtu + dev->hard_header_len; + if (skb->protocol == htons(ETH_P_8021Q)) + max_frame_len += VLAN_HLEN; + + if (size_max > max_frame_len) + size_max = max_frame_len; + + if (unlikely(tp_len > size_max)) { + pr_err("packet size is too long (%d > %d)\n", tp_len, size_max); + return -EMSGSIZE; + } + offset = offset_in_page(data); len_max = PAGE_SIZE - offset; len = ((to_write > len_max) ? 
len_max : to_write); @@ -2051,7 +2059,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) struct net_device *dev; __be16 proto; bool need_rls_dev = false; - int err, reserve = 0; + int err; void *ph; struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name; int tp_len, size_max; @@ -2084,8 +2092,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) if (unlikely(dev == NULL)) goto out; - reserve = dev->hard_header_len; - err = -ENETDOWN; if (unlikely(!(dev->flags & IFF_UP))) goto out_put; @@ -2093,9 +2099,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) size_max = po->tx_ring.frame_size - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); - if (size_max > dev->mtu + reserve) - size_max = dev->mtu + reserve; - do { ph = packet_current_frame(po, &po->tx_ring, TP_STATUS_SEND_REQUEST); -- cgit v0.10.2 From c483e02614551e44ced3fe6eedda8e36d3277ccc Mon Sep 17 00:00:00 2001 From: Phil Sutter Date: Fri, 2 Aug 2013 11:37:41 +0200 Subject: af_packet: simplify VLAN frame check in packet_snd For ethernet frames, eth_type_trans() already parses the header, so one can skip this when checking the frame size. Signed-off-by: Phil Sutter Signed-off-by: David S. Miller diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 2b1470d..0c0f6c9 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2333,23 +2333,16 @@ static int packet_snd(struct socket *sock, if (dev->type == ARPHRD_ETHER) { skb->protocol = eth_type_trans(skb, dev); + if (skb->protocol == htons(ETH_P_8021Q)) + reserve += VLAN_HLEN; } else { skb->protocol = proto; skb->dev = dev; } if (!gso_type && (len > dev->mtu + reserve + extra_len)) { - /* Earlier code assumed this would be a VLAN pkt, - * double-check this now that we have the actual - * packet in hand. - */ - struct ethhdr *ehdr; - skb_reset_mac_header(skb); - ehdr = eth_hdr(skb); - if (ehdr->h_proto != htons(ETH_P_8021Q)) { - err = -EMSGSIZE; - goto out_free; - } + err = -EMSGSIZE; + goto out_free; } skb->priority = sk->sk_priority; -- cgit v0.10.2 From 9cc08af3a1d9d1687cb2ad6063ac1552ec2f695a Mon Sep 17 00:00:00 2001 From: Werner Almesberger Date: Fri, 2 Aug 2013 10:51:19 -0300 Subject: icmpv6_filter: fix "_hdr" incorrectly being a pointer "_hdr" should hold the ICMPv6 header while "hdr" is the pointer to it. This worked by accident. Signed-off-by: Werner Almesberger Signed-off-by: David S. Miller diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index c45f7a5..164fb5f 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -108,7 +108,7 @@ found: */ static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb) { - struct icmp6hdr *_hdr; + struct icmp6hdr _hdr; const struct icmp6hdr *hdr; hdr = skb_header_pointer(skb, skb_transport_offset(skb), -- cgit v0.10.2 From d1c53c8e870cdedb6fc9550f41c558bab45b5219 Mon Sep 17 00:00:00 2001 From: Werner Almesberger Date: Fri, 2 Aug 2013 10:51:34 -0300 Subject: icmpv6_filter: allow ICMPv6 messages with bodies < 4 bytes By using sizeof(_hdr), net/ipv6/raw.c:icmpv6_filter implicitly assumes that any valid ICMPv6 message is at least eight bytes long, i.e., that the message body is at least four bytes. The DIS message of RPL (RFC 6550 section 6.2, from the 6LoWPAN world), has a minimum length of only six bytes, and is thus blocked by icmpv6_filter. RFC 4443 seems to allow even a zero-sized body, making the minimum allowable message size four bytes. Signed-off-by: Werner Almesberger Signed-off-by: David S. 
Miller diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 164fb5f..c1e5334 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -63,6 +63,8 @@ #include #include +#define ICMPV6_HDRLEN 4 /* ICMPv6 header, RFC 4443 Section 2.1 */ + static struct raw_hashinfo raw_v6_hashinfo = { .lock = __RW_LOCK_UNLOCKED(raw_v6_hashinfo.lock), }; @@ -111,8 +113,11 @@ static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb) struct icmp6hdr _hdr; const struct icmp6hdr *hdr; + /* We require only the four bytes of the ICMPv6 header, not any + * additional bytes of message body in "struct icmp6hdr". + */ hdr = skb_header_pointer(skb, skb_transport_offset(skb), - sizeof(_hdr), &_hdr); + ICMPV6_HDRLEN, &_hdr); if (hdr) { const __u32 *data = &raw6_sk(sk)->filter.data[0]; unsigned int type = hdr->icmp6_type; -- cgit v0.10.2 From 6ef94cfafba159d6b1a902ccb3349ac6a34ff6ad Mon Sep 17 00:00:00 2001 From: Stefan Tomanek Date: Fri, 2 Aug 2013 17:19:56 +0200 Subject: fib_rules: add route suppression based on ifgroup This change adds the ability to suppress a routing decision based upon the interface group the selected interface belongs to. This allows it to exclude specific devices from a routing decision. Signed-off-by: Stefan Tomanek Signed-off-by: David S. Miller diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h index 2f286dc..d13c461 100644 --- a/include/net/fib_rules.h +++ b/include/net/fib_rules.h @@ -18,6 +18,7 @@ struct fib_rule { u32 pref; u32 flags; u32 table; + int suppress_ifgroup; u8 table_prefixlen_min; u8 action; u32 target; @@ -84,6 +85,7 @@ struct fib_rules_ops { [FRA_FWMASK] = { .type = NLA_U32 }, \ [FRA_TABLE] = { .type = NLA_U32 }, \ [FRA_TABLE_PREFIXLEN_MIN] = { .type = NLA_U8 }, \ + [FRA_SUPPRESS_IFGROUP] = { .type = NLA_U32 }, \ [FRA_GOTO] = { .type = NLA_U32 } static inline void fib_rule_get(struct fib_rule *rule) diff --git a/include/uapi/linux/fib_rules.h b/include/uapi/linux/fib_rules.h index 59cd31b..63e3116 100644 --- a/include/uapi/linux/fib_rules.h +++ b/include/uapi/linux/fib_rules.h @@ -44,7 +44,7 @@ enum { FRA_FWMARK, /* mark */ FRA_FLOW, /* flow/class id */ FRA_UNUSED6, - FRA_UNUSED7, + FRA_SUPPRESS_IFGROUP, FRA_TABLE_PREFIXLEN_MIN, FRA_TABLE, /* Extended table id */ FRA_FWMASK, /* mask for netfilter mark */ diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 2ef5040..5040a61 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -343,6 +343,9 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh) if (tb[FRA_TABLE_PREFIXLEN_MIN]) rule->table_prefixlen_min = nla_get_u8(tb[FRA_TABLE_PREFIXLEN_MIN]); + if (tb[FRA_SUPPRESS_IFGROUP]) + rule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]); + if (!tb[FRA_PRIORITY] && ops->default_pref) rule->pref = ops->default_pref(ops); @@ -529,6 +532,7 @@ static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops, + nla_total_size(4) /* FRA_PRIORITY */ + nla_total_size(4) /* FRA_TABLE */ + nla_total_size(1) /* FRA_TABLE_PREFIXLEN_MIN */ + + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */ + nla_total_size(4) /* FRA_FWMARK */ + nla_total_size(4); /* FRA_FWMASK */ @@ -588,6 +592,12 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule, (rule->target && nla_put_u32(skb, FRA_GOTO, rule->target))) goto nla_put_failure; + + if (rule->suppress_ifgroup != -1) { + if (nla_put_u32(skb, FRA_SUPPRESS_IFGROUP, rule->suppress_ifgroup)) + goto nla_put_failure; + } + if (ops->fill(rule, skb, frh) < 0) goto nla_put_failure; diff --git a/net/ipv4/fib_rules.c 
b/net/ipv4/fib_rules.c index 9f29066..b78fd28 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -103,16 +103,27 @@ errout: static bool fib4_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg) { + struct fib_result *result = (struct fib_result *) arg->result; + struct net_device *dev = result->fi->fib_dev; + /* do not accept result if the route does * not meet the required prefix length */ - struct fib_result *result = (struct fib_result *) arg->result; - if (result->prefixlen < rule->table_prefixlen_min) { - if (!(arg->flags & FIB_LOOKUP_NOREF)) - fib_info_put(result->fi); - return true; - } + if (result->prefixlen < rule->table_prefixlen_min) + goto suppress_route; + + /* do not accept result if the route uses a device + * belonging to a forbidden interface group + */ + if (rule->suppress_ifgroup != -1 && dev && dev->group == rule->suppress_ifgroup) + goto suppress_route; + return false; + +suppress_route: + if (!(arg->flags & FIB_LOOKUP_NOREF)) + fib_info_put(result->fi); + return true; } static int fib4_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index 554a4fb..3628326 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -122,14 +122,24 @@ out: static bool fib6_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg) { struct rt6_info *rt = (struct rt6_info *) arg->result; + struct net_device *dev = rt->rt6i_idev->dev; /* do not accept result if the route does * not meet the required prefix length */ - if (rt->rt6i_dst.plen < rule->table_prefixlen_min) { + if (rt->rt6i_dst.plen < rule->table_prefixlen_min) + goto suppress_route; + + /* do not accept result if the route uses a device + * belonging to a forbidden interface group + */ + if (rule->suppress_ifgroup != -1 && dev && dev->group == rule->suppress_ifgroup) + goto suppress_route; + + return false; + +suppress_route: ip6_rt_put(rt); return true; - } - return false; } static int fib6_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) -- cgit v0.10.2 From 8a849bb7f0ac513c2b7202620f611d94301c4a93 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 2 Aug 2013 17:32:39 +0200 Subject: net: netlink: minor: remove unused pointer in alloc_pg_vec Variable ptr is being assigned, but never used, so just remove it. Signed-off-by: Daniel Borkmann Signed-off-by: David S. Miller diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 0c61b59..6273772 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -294,14 +294,14 @@ static void **alloc_pg_vec(struct netlink_sock *nlk, { unsigned int block_nr = req->nm_block_nr; unsigned int i; - void **pg_vec, *ptr; + void **pg_vec; pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL); if (pg_vec == NULL) return NULL; for (i = 0; i < block_nr; i++) { - pg_vec[i] = ptr = alloc_one_pg_vec_page(order); + pg_vec[i] = alloc_one_pg_vec_page(order); if (pg_vec[i] == NULL) goto err1; } -- cgit v0.10.2 From 63134803a6369dcf7dddf7f0d5e37b9566b308d2 Mon Sep 17 00:00:00 2001 From: Veaceslav Falico Date: Fri, 2 Aug 2013 19:07:38 +0200 Subject: neighbour: populate neigh_parms on alloc before calling ndo_neigh_setup dev->ndo_neigh_setup() might need some of the values of neigh_parms, so populate them before calling it. Signed-off-by: Veaceslav Falico Signed-off-by: David S. 
Miller diff --git a/net/core/neighbour.c b/net/core/neighbour.c index b7de821..576d46f 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1441,16 +1441,18 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev, atomic_set(&p->refcnt, 1); p->reachable_time = neigh_rand_reach_time(p->base_reachable_time); + dev_hold(dev); + p->dev = dev; + write_pnet(&p->net, hold_net(net)); + p->sysctl_table = NULL; if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) { + release_net(net); + dev_put(dev); kfree(p); return NULL; } - dev_hold(dev); - p->dev = dev; - write_pnet(&p->net, hold_net(net)); - p->sysctl_table = NULL; write_lock_bh(&tbl->lock); p->next = tbl->parms.next; tbl->parms.next = p; -- cgit v0.10.2 From 9918d5bf329d0dc5bb2d9d293bcb772bdb626e65 Mon Sep 17 00:00:00 2001 From: Veaceslav Falico Date: Fri, 2 Aug 2013 19:07:39 +0200 Subject: bonding: modify only neigh_parms owned by us Otherwise, on neighbour creation, bond_neigh_init() will be called with a foreign netdev. Signed-off-by: Veaceslav Falico Signed-off-by: David S. Miller diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 1d37a96..476df7d 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3630,11 +3630,17 @@ static int bond_neigh_init(struct neighbour *n) * The bonding ndo_neigh_setup is called at init time beofre any * slave exists. So we must declare proxy setup function which will * be used at run time to resolve the actual slave neigh param setup. + * + * It's also called by master devices (such as vlans) to setup their + * underlying devices. In that case - do nothing, we're already set up from + * our init. */ static int bond_neigh_setup(struct net_device *dev, struct neigh_parms *parms) { - parms->neigh_setup = bond_neigh_init; + /* modify only our neigh_parms */ + if (parms->dev == dev) + parms->neigh_setup = bond_neigh_init; return 0; } -- cgit v0.10.2 From 9e9402eb450f1c921cc130765a4554a84821feab Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Fri, 2 Aug 2013 11:28:23 -0700 Subject: cnic, bnx2i: Fix bug on some bnx2x devices that don't support iSCSI On some bnx2x devices, iSCSI is determined to be unsupported only after firmware is downloaded. We need to check max_iscsi_conn again after NETDEV_UP and block iSCSI init operations. Without this fix, iscsiadm can hang as the firmware will not respond to the iSCSI init message. Signed-off-by: Eddie Wai Signed-off-by: Michael Chan Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 6bd6d14..4f8a535 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -5276,6 +5276,13 @@ static int cnic_register_netdev(struct cnic_dev *dev) if (err) netdev_err(dev->netdev, "register_cnic failed\n"); + /* Read iSCSI config again. On some bnx2x device, iSCSI config + * can change after firmware is downloaded. 
+ */ + dev->max_iscsi_conn = ethdev->max_iscsi_conn; + if (ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI) + dev->max_iscsi_conn = 0; + return err; } diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c index 50fef69..3104202 100644 --- a/drivers/scsi/bnx2i/bnx2i_init.c +++ b/drivers/scsi/bnx2i/bnx2i_init.c @@ -172,16 +172,14 @@ void bnx2i_start(void *handle) struct bnx2i_hba *hba = handle; int i = HZ; - /* - * We should never register devices that don't support iSCSI - * (see bnx2i_init_one), so something is wrong if we try to - * start a iSCSI adapter on hardware with 0 supported iSCSI - * connections + /* On some bnx2x devices, it is possible that iSCSI is no + * longer supported after firmware is downloaded. In that + * case, the iscsi_init_msg will return failure. */ - BUG_ON(!hba->cnic->max_iscsi_conn); bnx2i_send_fw_iscsi_init_msg(hba); - while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--) + while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && + !test_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state) && i--) msleep(BNX2I_INIT_POLL_TIME); } -- cgit v0.10.2 From 0c0667a8548ef2985038a5a1d0fa0f64e2774694 Mon Sep 17 00:00:00 2001 From: Wang Sheng-Hui Date: Sat, 3 Aug 2013 16:54:50 +0800 Subject: vlan: cleanup the usage of vlan_dev_priv(dev) This patch cleanup 2 points for the usage of vlan_dev_priv(dev): * In vlan_dev.c/vlan_dev_hard_header, we should use the var *vlan directly after grabing the pointer at the beginning with *vlan = vlan_dev_priv(dev); when we need to access the fields of *vlan. * In vlan.c/register_vlan_device, add the var *vlan pointer struct vlan_dev_priv *vlan; to cleanup the code to access the fields of vlan_dev_priv(new_dev). Signed-off-by: Wang Sheng-Hui Signed-off-by: David S. Miller diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 03a92e1..61fc573 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -210,6 +210,7 @@ out_vid_del: static int register_vlan_device(struct net_device *real_dev, u16 vlan_id) { struct net_device *new_dev; + struct vlan_dev_priv *vlan; struct net *net = dev_net(real_dev); struct vlan_net *vn = net_generic(net, vlan_net_id); char name[IFNAMSIZ]; @@ -260,11 +261,12 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id) new_dev->mtu = real_dev->mtu; new_dev->priv_flags |= (real_dev->priv_flags & IFF_UNICAST_FLT); - vlan_dev_priv(new_dev)->vlan_proto = htons(ETH_P_8021Q); - vlan_dev_priv(new_dev)->vlan_id = vlan_id; - vlan_dev_priv(new_dev)->real_dev = real_dev; - vlan_dev_priv(new_dev)->dent = NULL; - vlan_dev_priv(new_dev)->flags = VLAN_FLAG_REORDER_HDR; + vlan = vlan_dev_priv(new_dev); + vlan->vlan_proto = htons(ETH_P_8021Q); + vlan->vlan_id = vlan_id; + vlan->real_dev = real_dev; + vlan->dent = NULL; + vlan->flags = VLAN_FLAG_REORDER_HDR; new_dev->rtnl_link_ops = &vlan_link_ops; err = register_vlan_dev(new_dev); diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 1cd3d2a..9ab8a7e 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -107,10 +107,10 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev, u16 vlan_tci = 0; int rc; - if (!(vlan_dev_priv(dev)->flags & VLAN_FLAG_REORDER_HDR)) { + if (!(vlan->flags & VLAN_FLAG_REORDER_HDR)) { vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN); - vlan_tci = vlan_dev_priv(dev)->vlan_id; + vlan_tci = vlan->vlan_id; vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb); vhdr->h_vlan_TCI = htons(vlan_tci); @@ -133,7 +133,7 @@ static int vlan_dev_hard_header(struct sk_buff *skb, 
struct net_device *dev, saddr = dev->dev_addr; /* Now make the underlying real hard header */ - dev = vlan_dev_priv(dev)->real_dev; + dev = vlan->real_dev; rc = dev_hard_header(skb, dev, type, daddr, saddr, len + vhdrlen); if (rc > 0) rc += vhdrlen; -- cgit v0.10.2 From 73f5698e77219bfc3ea1903759fe8e20ab5b285e Mon Sep 17 00:00:00 2001 From: Stefan Tomanek Date: Sat, 3 Aug 2013 14:14:43 +0200 Subject: fib_rules: fix suppressor names and default values This change brings the suppressor attribute names into line; it also changes the data types to provide a more consistent interface. While -1 indicates that the suppressor is not enabled, values >= 0 for suppress_prefixlen or suppress_ifgroup reject routing decisions violating the constraint. This changes the previously presented behaviour of suppress_prefixlen, where a prefix length _less_ than the attribute value was rejected. After this change, a prefix length less than *or* equal to the value is considered a violation of the rule constraint. It also changes the default values for default and newly added rules (disabling any suppression for those). Signed-off-by: Stefan Tomanek Signed-off-by: David S. Miller diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h index d13c461..9d0fcbaa 100644 --- a/include/net/fib_rules.h +++ b/include/net/fib_rules.h @@ -19,7 +19,7 @@ struct fib_rule { u32 flags; u32 table; int suppress_ifgroup; - u8 table_prefixlen_min; + int suppress_prefixlen; u8 action; u32 target; struct fib_rule __rcu *ctarget; @@ -84,7 +84,7 @@ struct fib_rules_ops { [FRA_FWMARK] = { .type = NLA_U32 }, \ [FRA_FWMASK] = { .type = NLA_U32 }, \ [FRA_TABLE] = { .type = NLA_U32 }, \ - [FRA_TABLE_PREFIXLEN_MIN] = { .type = NLA_U8 }, \ + [FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 }, \ [FRA_SUPPRESS_IFGROUP] = { .type = NLA_U32 }, \ [FRA_GOTO] = { .type = NLA_U32 } diff --git a/include/uapi/linux/fib_rules.h b/include/uapi/linux/fib_rules.h index 63e3116..2b82d7e 100644 --- a/include/uapi/linux/fib_rules.h +++ b/include/uapi/linux/fib_rules.h @@ -45,7 +45,7 @@ enum { FRA_FLOW, /* flow/class id */ FRA_UNUSED6, FRA_SUPPRESS_IFGROUP, - FRA_TABLE_PREFIXLEN_MIN, + FRA_SUPPRESS_PREFIXLEN, FRA_TABLE, /* Extended table id */ FRA_FWMASK, /* mask for netfilter mark */ FRA_OIFNAME, diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 5040a61..2e65413 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -33,6 +33,9 @@ int fib_default_rule_add(struct fib_rules_ops *ops, r->flags = flags; r->fr_net = hold_net(ops->fro_net); + r->suppress_prefixlen = -1; + r->suppress_ifgroup = -1; + /* The lock is not required here, the list in unreacheable * at the moment this function is called */ list_add_tail(&r->list, &ops->rules_list); @@ -340,11 +343,15 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh) rule->action = frh->action; rule->flags = frh->flags; rule->table = frh_get_table(frh, tb); - if (tb[FRA_TABLE_PREFIXLEN_MIN]) - rule->table_prefixlen_min = nla_get_u8(tb[FRA_TABLE_PREFIXLEN_MIN]); + if (tb[FRA_SUPPRESS_PREFIXLEN]) + rule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]); + else + rule->suppress_prefixlen = -1; if (tb[FRA_SUPPRESS_IFGROUP]) rule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]); + else + rule->suppress_ifgroup = -1; if (!tb[FRA_PRIORITY] && ops->default_pref) rule->pref = ops->default_pref(ops); @@ -531,7 +538,7 @@ static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops, + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */ + nla_total_size(4) /* FRA_PRIORITY 
*/ + nla_total_size(4) /* FRA_TABLE */ - + nla_total_size(1) /* FRA_TABLE_PREFIXLEN_MIN */ + + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */ + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */ + nla_total_size(4) /* FRA_FWMARK */ + nla_total_size(4); /* FRA_FWMASK */ @@ -558,7 +565,7 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule, frh->table = rule->table; if (nla_put_u32(skb, FRA_TABLE, rule->table)) goto nla_put_failure; - if (nla_put_u8(skb, FRA_TABLE_PREFIXLEN_MIN, rule->table_prefixlen_min)) + if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen)) goto nla_put_failure; frh->res1 = 0; frh->res2 = 0; diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index b78fd28..523be38 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -109,7 +109,7 @@ static bool fib4_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg /* do not accept result if the route does * not meet the required prefix length */ - if (result->prefixlen < rule->table_prefixlen_min) + if (result->prefixlen <= rule->suppress_prefixlen) goto suppress_route; /* do not accept result if the route uses a device diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index 3628326..a6c58ce 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -126,7 +126,7 @@ static bool fib6_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg /* do not accept result if the route does * not meet the required prefix length */ - if (rt->rt6i_dst.plen < rule->table_prefixlen_min) + if (rt->rt6i_dst.plen <= rule->suppress_prefixlen) goto suppress_route; /* do not accept result if the route uses a device -- cgit v0.10.2 From fba3679d34511c42bf452e89dda457a1219eb43a Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 3 Aug 2013 11:50:35 -0700 Subject: fib_rules: reorder struct fib_rules fields Move refcnt, pref, suppress_ifgroup, suppress_prefixlen out of first cache line, as they are not used in fast path. Make sure ctarget & fr_net are in first cache line. (Assuming 64 bit arches and 64 bytes cache lines) Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h index 9d0fcbaa..4b2b557 100644 --- a/include/net/fib_rules.h +++ b/include/net/fib_rules.h @@ -10,23 +10,25 @@ struct fib_rule { struct list_head list; - atomic_t refcnt; int iifindex; int oifindex; u32 mark; u32 mark_mask; - u32 pref; u32 flags; u32 table; - int suppress_ifgroup; - int suppress_prefixlen; u8 action; + /* 3 bytes hole, try to use */ u32 target; struct fib_rule __rcu *ctarget; + struct net *fr_net; + + atomic_t refcnt; + u32 pref; + int suppress_ifgroup; + int suppress_prefixlen; char iifname[IFNAMSIZ]; char oifname[IFNAMSIZ]; struct rcu_head rcu; - struct net * fr_net; }; struct fib_lookup_arg { -- cgit v0.10.2 From f27070158d6754765f2f5fd1617e8e42a0ea2318 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 1 Aug 2013 06:49:52 -0700 Subject: ax88179_178a: avoid copy of tx tcp packets ax88179_tx_fixup() has quite complex code trying to push 8 bytes of control data (len/mss), but fails to do it properly for TCP packets, incurring an extra copy and point of memory allocation failure. Lets use the simple and approved way. dev->needed_headroom being 8, all frames should have 8 bytes of headroom, so the extra copy should be unlikely anyway. This patch should improve performance for TCP xmits. Reported-by: Ming Lei Tested-by: Ming Lei Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 5a468f3..fb0caa2 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1169,31 +1169,18 @@ ax88179_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) int frame_size = dev->maxpacket; int mss = skb_shinfo(skb)->gso_size; int headroom; - int tailroom; tx_hdr1 = skb->len; tx_hdr2 = mss; if (((skb->len + 8) % frame_size) == 0) tx_hdr2 |= 0x80008000; /* Enable padding */ - headroom = skb_headroom(skb); - tailroom = skb_tailroom(skb); + headroom = skb_headroom(skb) - 8; - if (!skb_header_cloned(skb) && - !skb_cloned(skb) && - (headroom + tailroom) >= 8) { - if (headroom < 8) { - skb->data = memmove(skb->head + 8, skb->data, skb->len); - skb_set_tail_pointer(skb, skb->len); - } - } else { - struct sk_buff *skb2; - - skb2 = skb_copy_expand(skb, 8, 0, flags); + if ((skb_header_cloned(skb) || headroom < 0) && + pskb_expand_head(skb, headroom < 0 ? 8 : 0, 0, GFP_ATOMIC)) { dev_kfree_skb_any(skb); - skb = skb2; - if (!skb) - return NULL; + return NULL; } skb_push(skb, 4); -- cgit v0.10.2 From 762a3d89ebf5873f71b3839449ac6562049ef1ce Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Sun, 4 Aug 2013 17:19:38 -0700 Subject: bridge: fix rcu check warning in multicast port group Use of RCU here with out marked pointer and function doesn't match prototype with sparse. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index 0daae3e..e4d5cd4 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c @@ -61,7 +61,8 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb, for (i = 0; i < mdb->max; i++) { struct net_bridge_mdb_entry *mp; - struct net_bridge_port_group *p, **pp; + struct net_bridge_port_group *p; + struct net_bridge_port_group __rcu **pp; struct net_bridge_port *port; hlist_for_each_entry_rcu(mp, &mdb->mhash[i], hlist[mdb->ver]) { diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 1514c9f..d41283c 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -466,7 +466,7 @@ extern void br_multicast_free_pg(struct rcu_head *head); extern struct net_bridge_port_group *br_multicast_new_port_group( struct net_bridge_port *port, struct br_ip *group, - struct net_bridge_port_group *next, + struct net_bridge_port_group __rcu *next, unsigned char state); extern void br_mdb_init(void); extern void br_mdb_uninit(void); -- cgit v0.10.2 From 5ca5461c3ee8b306c04ac833e5eacb5755b85d88 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Sun, 4 Aug 2013 17:17:39 -0700 Subject: vxlan: fix rcu related warning Vxlan remote list is protected by RCU and guaranteed to be non-empty. Split out the rcu and non-rcu access to the list to fix warning Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index c3dd6e4..8bf31d9 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -177,9 +177,14 @@ static inline struct hlist_head *vs_head(struct net *net, __be16 port) /* First remote destination for a forwarding entry. * Guaranteed to be non-NULL because remotes are never deleted. 
*/ -static inline struct vxlan_rdst *first_remote(struct vxlan_fdb *fdb) +static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb) { - return list_first_or_null_rcu(&fdb->remotes, struct vxlan_rdst, list); + return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list); +} + +static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb) +{ + return list_first_entry(&fdb->remotes, struct vxlan_rdst, list); } /* Find VXLAN socket based on network namespace and UDP port */ @@ -297,7 +302,8 @@ static void vxlan_fdb_notify(struct vxlan_dev *vxlan, if (skb == NULL) goto errout; - err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, first_remote(fdb)); + err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, + first_remote_rtnl(fdb)); if (err < 0) { /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */ WARN_ON(err == -EMSGSIZE); @@ -740,7 +746,7 @@ static bool vxlan_snoop(struct net_device *dev, f = vxlan_find_mac(vxlan, src_mac); if (likely(f)) { - struct vxlan_rdst *rdst = first_remote(f); + struct vxlan_rdst *rdst = first_remote_rcu(f); if (likely(rdst->remote_ip == src_ip)) return false; @@ -1005,7 +1011,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb) } f = vxlan_find_mac(vxlan, n->ha); - if (f && first_remote(f)->remote_ip == htonl(INADDR_ANY)) { + if (f && first_remote_rcu(f)->remote_ip == htonl(INADDR_ANY)) { /* bridge-local neighbor */ neigh_release(n); goto out; -- cgit v0.10.2 From e473fcb472574de978e47f980aeca510020a1286 Mon Sep 17 00:00:00 2001 From: Mathias Krause Date: Wed, 26 Jun 2013 23:56:58 +0200 Subject: xfrm: constify mark argument of xfrm_find_acq() The mark argument is read only, so constify it. Also make dummy_mark in af_key const -- only used as dummy argument for this very function. Signed-off-by: Mathias Krause Cc: "David S. 
Miller" Cc: Herbert Xu Signed-off-by: Steffen Klassert diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 94ce082..89d3d8a 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1548,7 +1548,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir, u32 int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info); u32 xfrm_get_acqseq(void); extern int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi); -struct xfrm_state *xfrm_find_acq(struct net *net, struct xfrm_mark *mark, +struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, u8 mode, u32 reqid, u8 proto, const xfrm_address_t *daddr, const xfrm_address_t *saddr, int create, diff --git a/net/key/af_key.c b/net/key/af_key.c index ab8bd2c..4089a21 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -45,7 +45,7 @@ struct netns_pfkey { static DEFINE_MUTEX(pfkey_mutex); #define DUMMY_MARK 0 -static struct xfrm_mark dummy_mark = {0, 0}; +static const struct xfrm_mark dummy_mark = {0, 0}; struct pfkey_sock { /* struct sock must be the first member of struct pfkey_sock */ struct sock sk; diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 78f66fa..b2cd806 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -990,11 +990,13 @@ void xfrm_state_insert(struct xfrm_state *x) EXPORT_SYMBOL(xfrm_state_insert); /* xfrm_state_lock is held */ -static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m, +static struct xfrm_state *__find_acq_core(struct net *net, + const struct xfrm_mark *m, unsigned short family, u8 mode, u32 reqid, u8 proto, const xfrm_address_t *daddr, - const xfrm_address_t *saddr, int create) + const xfrm_address_t *saddr, + int create) { unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family); struct xfrm_state *x; @@ -1399,9 +1401,9 @@ xfrm_state_lookup_byaddr(struct net *net, u32 mark, EXPORT_SYMBOL(xfrm_state_lookup_byaddr); struct xfrm_state * -xfrm_find_acq(struct net *net, struct xfrm_mark *mark, u8 mode, u32 reqid, u8 proto, - const xfrm_address_t *daddr, const xfrm_address_t *saddr, - int create, unsigned short family) +xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, u8 mode, u32 reqid, + u8 proto, const xfrm_address_t *daddr, + const xfrm_address_t *saddr, int create, unsigned short family) { struct xfrm_state *x; -- cgit v0.10.2 From 8603b9556e1727f0de7e43ef448c85ff93347f27 Mon Sep 17 00:00:00 2001 From: Mathias Krause Date: Wed, 26 Jun 2013 23:56:59 +0200 Subject: af_key: constify lookup tables The lookup tables for minimum sizes of extensions and for the pfkey handler functions are read only, therefore can be const. Signed-off-by: Mathias Krause Cc: "David S. 
Miller" Cc: Herbert Xu Signed-off-by: Steffen Klassert diff --git a/net/key/af_key.c b/net/key/af_key.c index 4089a21..d49f676 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -338,7 +338,7 @@ static int pfkey_error(const struct sadb_msg *orig, int err, struct sock *sk) return 0; } -static u8 sadb_ext_min_len[] = { +static const u8 sadb_ext_min_len[] = { [SADB_EXT_RESERVED] = (u8) 0, [SADB_EXT_SA] = (u8) sizeof(struct sadb_sa), [SADB_EXT_LIFETIME_CURRENT] = (u8) sizeof(struct sadb_lifetime), @@ -2737,7 +2737,7 @@ static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, const struct sad typedef int (*pfkey_handler)(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs); -static pfkey_handler pfkey_funcs[SADB_MAX + 1] = { +static const pfkey_handler pfkey_funcs[SADB_MAX + 1] = { [SADB_RESERVED] = pfkey_reserved, [SADB_GETSPI] = pfkey_getspi, [SADB_UPDATE] = pfkey_add, -- cgit v0.10.2 From d8b3bfc253d8063fcce9c447ecc4cf3b1735b13a Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sun, 4 Aug 2013 11:37:29 +0200 Subject: netfilter: tproxy: fix build with IP6_NF_IPTABLES=n after commit 93742cf (netfilter: tproxy: remove nf_tproxy_core.h) CONFIG_IPV6=y CONFIG_IP6_NF_IPTABLES=n gives us: net/netfilter/xt_TPROXY.c: In function 'nf_tproxy_get_sock_v6': net/netfilter/xt_TPROXY.c:178:4: error: implicit declaration of function 'inet6_lookup_listener' Reported-by: kbuild test robot Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c index 851383a..5d8a3a3 100644 --- a/net/netfilter/xt_TPROXY.c +++ b/net/netfilter/xt_TPROXY.c @@ -161,7 +161,7 @@ nf_tproxy_get_sock_v4(struct net *net, const u8 protocol, return sk; } -#if IS_ENABLED(CONFIG_IPV6) +#ifdef XT_TPROXY_HAVE_IPV6 static inline struct sock * nf_tproxy_get_sock_v6(struct net *net, const u8 protocol, const struct in6_addr *saddr, const struct in6_addr *daddr, -- cgit v0.10.2 From 32270b61b3fcdce3495c7b746576d49f70587150 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Fri, 2 Aug 2013 09:15:47 +0200 Subject: ath10k: fix device teardown This fixes interrupt-related issue when no interfaces were running thus the device was considered powered down. The power_down() function isn't really powering down the device. It simply assumed it won't interrupt. This wasn't true in some cases and could lead to paging failures upon FW indication interrupt (i.e. FW crash) because some structures aren't allocated in that device state. One reason for that was that ar_pci->started wasn't reset. The other is interrupts should've been masked when teardown starts. The patch reorganized interrupt setup and makes sure ar_pci->started is reset accordingly. 
Reported-by: Ben Greear Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index d95439b..503e380 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -56,6 +56,8 @@ static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info); static void ath10k_pci_stop_ce(struct ath10k *ar); static void ath10k_pci_device_reset(struct ath10k *ar); static int ath10k_pci_reset_target(struct ath10k *ar); +static int ath10k_pci_start_intr(struct ath10k *ar); +static void ath10k_pci_stop_intr(struct ath10k *ar); static const struct ce_attr host_ce_config_wlan[] = { /* host->target HTC control and raw streams */ @@ -1254,10 +1256,25 @@ static void ath10k_pci_ce_deinit(struct ath10k *ar) } } +static void ath10k_pci_disable_irqs(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + int i; + + for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++) + disable_irq(ar_pci->pdev->irq + i); +} + static void ath10k_pci_hif_stop(struct ath10k *ar) { + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); + /* Irqs are never explicitly re-enabled. They are implicitly re-enabled + * by ath10k_pci_start_intr(). */ + ath10k_pci_disable_irqs(ar); + ath10k_pci_stop_ce(ar); /* At this point, asynchronous threads are stopped, the target should @@ -1267,6 +1284,8 @@ static void ath10k_pci_hif_stop(struct ath10k *ar) ath10k_pci_process_ce(ar); ath10k_pci_cleanup_ce(ar); ath10k_pci_buffer_cleanup(ar); + + ar_pci->started = 0; } static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, @@ -1742,6 +1761,12 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar) { int ret; + ret = ath10k_pci_start_intr(ar); + if (ret) { + ath10k_err("could not start interrupt handling (%d)\n", ret); + goto err; + } + /* * Bring the target up cleanly. 
* @@ -1756,7 +1781,7 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar) ret = ath10k_pci_reset_target(ar); if (ret) - goto err; + goto err_irq; if (ath10k_target_ps) { ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save enabled\n"); @@ -1787,12 +1812,15 @@ err_ce: err_ps: if (!ath10k_target_ps) ath10k_do_pci_sleep(ar); +err_irq: + ath10k_pci_stop_intr(ar); err: return ret; } static void ath10k_pci_hif_power_down(struct ath10k *ar) { + ath10k_pci_stop_intr(ar); ath10k_pci_ce_deinit(ar); if (!ath10k_target_ps) ath10k_do_pci_sleep(ar); @@ -2363,22 +2391,14 @@ static int ath10k_pci_probe(struct pci_dev *pdev, ar_pci->cacheline_sz = dma_get_cache_alignment(); - ret = ath10k_pci_start_intr(ar); - if (ret) { - ath10k_err("could not start interrupt handling (%d)\n", ret); - goto err_iomap; - } - ret = ath10k_core_register(ar); if (ret) { ath10k_err("could not register driver core (%d)\n", ret); - goto err_intr; + goto err_iomap; } return 0; -err_intr: - ath10k_pci_stop_intr(ar); err_iomap: pci_iounmap(pdev, mem); err_master: @@ -2415,7 +2435,6 @@ static void ath10k_pci_remove(struct pci_dev *pdev) tasklet_kill(&ar_pci->msi_fw_err); ath10k_core_unregister(ar); - ath10k_pci_stop_intr(ar); pci_set_drvdata(pdev, NULL); pci_iounmap(pdev, ar_pci->mem); -- cgit v0.10.2 From 7f4341fef3f2daa073fab5727d142a68b0edd37e Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Mon, 5 Aug 2013 18:00:04 +0900 Subject: bna: Staticize local functions bna_rx_sm_stop_wait_entry(), bna_rx_sm_rxf_stop_wait_entry(), bna_rx_sm_started_entry(), bna_rx_sm_cleanup_wait_entry(), and bna_rx_sm_cleanup_wait() are used only in this file. Fix the following sparse warnings: drivers/net/ethernet/brocade/bna/bna_tx_rx.c:1423:1: warning: symbol 'bna_rx_sm_stop_wait_entry' was not declared. Should it be static? drivers/net/ethernet/brocade/bna/bna_tx_rx.c:1476:1: warning: symbol 'bna_rx_sm_rxf_stop_wait_entry' was not declared. Should it be static? drivers/net/ethernet/brocade/bna/bna_tx_rx.c:1532:1: warning: symbol 'bna_rx_sm_started_entry' was not declared. Should it be static? drivers/net/ethernet/brocade/bna/bna_tx_rx.c:1597:1: warning: symbol 'bna_rx_sm_cleanup_wait_entry' was not declared. Should it be static? drivers/net/ethernet/brocade/bna/bna_tx_rx.c:1602:1: warning: symbol 'bna_rx_sm_cleanup_wait' was not declared. Should it be static? Signed-off-by: Jingoo Han Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c index 57cd1bf..3c07064 100644 --- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c +++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c @@ -1419,7 +1419,7 @@ static void bna_rx_sm_start_wait_entry(struct bna_rx *rx) bna_bfi_rx_enet_start(rx); } -void +static void bna_rx_sm_stop_wait_entry(struct bna_rx *rx) { } @@ -1472,7 +1472,7 @@ static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx) bna_rxf_start(&rx->rxf); } -void +static void bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx) { } @@ -1528,7 +1528,7 @@ bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event) } } -void +static void bna_rx_sm_started_entry(struct bna_rx *rx) { struct bna_rxp *rxp; @@ -1593,12 +1593,12 @@ static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx, } } -void +static void bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx) { } -void +static void bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event) { switch (event) { -- cgit v0.10.2 From 4188e7df44bf316a691bf0efa260859d978c7e9d Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Mon, 5 Aug 2013 18:02:02 +0900 Subject: be2net: Staticize local functions These local functions are used only in this file. Fix the following sparse warnings: drivers/net/ethernet/emulex/benet/be_main.c:475:6: warning: symbol 'populate_erx_stats' was not declared. Should it be static? drivers/net/ethernet/emulex/benet/be_main.c:1485:6: warning: symbol 'be_rx_compl_process_gro' was not declared. Should it be static? drivers/net/ethernet/emulex/benet/be_main.c:2262:5: warning: symbol 'be_poll' was not declared. Should it be static? drivers/net/ethernet/emulex/benet/be_main.c:3223:6: warning: symbol 'flash_cookie' was not declared. Should it be static? drivers/net/ethernet/emulex/benet/be_main.c:3280:27: warning: symbol 'get_fsec_info' was not declared. Should it be static? drivers/net/ethernet/emulex/benet/be_cmds.c:1013:5: warning: symbol 'be_cmd_mccq_ext_create' was not declared. Should it be static? drivers/net/ethernet/emulex/benet/be_cmds.c:1071:5: warning: symbol 'be_cmd_mccq_org_create' was not declared. Should it be static? drivers/net/ethernet/emulex/benet/be_cmds.c:3166:5: warning: symbol 'be_cmd_get_profile_config_mbox' was not declared. Should it be static? drivers/net/ethernet/emulex/benet/be_cmds.c:3194:5: warning: symbol 'be_cmd_get_profile_config_mccq' was not declared. Should it be static? drivers/net/ethernet/emulex/benet/be_roce.c:96:6: warning: symbol '_be_roce_dev_remove' was not declared. Should it be static? drivers/net/ethernet/emulex/benet/be_roce.c:113:6: warning: symbol '_be_roce_dev_open' was not declared. Should it be static? drivers/net/ethernet/emulex/benet/be_roce.c:129:6: warning: symbol '_be_roce_dev_close' was not declared. Should it be static? Signed-off-by: Jingoo Han Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 613d887..2bd2d1e 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1010,9 +1010,9 @@ static u32 be_encoded_q_len(int q_len) return len_encoded; } -int be_cmd_mccq_ext_create(struct be_adapter *adapter, - struct be_queue_info *mccq, - struct be_queue_info *cq) +static int be_cmd_mccq_ext_create(struct be_adapter *adapter, + struct be_queue_info *mccq, + struct be_queue_info *cq) { struct be_mcc_wrb *wrb; struct be_cmd_req_mcc_ext_create *req; @@ -1068,9 +1068,9 @@ int be_cmd_mccq_ext_create(struct be_adapter *adapter, return status; } -int be_cmd_mccq_org_create(struct be_adapter *adapter, - struct be_queue_info *mccq, - struct be_queue_info *cq) +static int be_cmd_mccq_org_create(struct be_adapter *adapter, + struct be_queue_info *mccq, + struct be_queue_info *cq) { struct be_mcc_wrb *wrb; struct be_cmd_req_mcc_create *req; @@ -3163,8 +3163,8 @@ err: } /* Uses mbox */ -int be_cmd_get_profile_config_mbox(struct be_adapter *adapter, - u8 domain, struct be_dma_mem *cmd) +static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter, + u8 domain, struct be_dma_mem *cmd) { struct be_mcc_wrb *wrb; struct be_cmd_req_get_profile_config *req; @@ -3191,8 +3191,8 @@ int be_cmd_get_profile_config_mbox(struct be_adapter *adapter, } /* Uses sync mcc */ -int be_cmd_get_profile_config_mccq(struct be_adapter *adapter, - u8 domain, struct be_dma_mem *cmd) +static int be_cmd_get_profile_config_mccq(struct be_adapter *adapter, + u8 domain, struct be_dma_mem *cmd) { struct be_mcc_wrb *wrb; struct be_cmd_req_get_profile_config *req; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 3df1503..4c40e3e 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -472,7 +472,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val) ACCESS_ONCE(*acc) = newacc; } -void populate_erx_stats(struct be_adapter *adapter, +static void populate_erx_stats(struct be_adapter *adapter, struct be_rx_obj *rxo, u32 erx_stat) { @@ -1482,8 +1482,9 @@ static void be_rx_compl_process(struct be_rx_obj *rxo, } /* Process the RX completion indicated by rxcp when GRO is enabled */ -void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi, - struct be_rx_compl_info *rxcp) +static void be_rx_compl_process_gro(struct be_rx_obj *rxo, + struct napi_struct *napi, + struct be_rx_compl_info *rxcp) { struct be_adapter *adapter = rxo->adapter; struct be_rx_page_info *page_info; @@ -2259,7 +2260,7 @@ static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo, return (work_done < budget); /* Done */ } -int be_poll(struct napi_struct *napi, int budget) +static int be_poll(struct napi_struct *napi, int budget) { struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi); struct be_adapter *adapter = eqo->adapter; @@ -3220,7 +3221,7 @@ static void be_netpoll(struct net_device *netdev) #endif #define FW_FILE_HDR_SIGN "ServerEngines Corp. 
" -char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "}; +static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "}; static bool be_flash_redboot(struct be_adapter *adapter, const u8 *p, u32 img_start, int image_size, @@ -3277,7 +3278,7 @@ static bool is_comp_in_ufi(struct be_adapter *adapter, } -struct flash_section_info *get_fsec_info(struct be_adapter *adapter, +static struct flash_section_info *get_fsec_info(struct be_adapter *adapter, int header_size, const struct firmware *fw) { diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c index f3d126d..645e846 100644 --- a/drivers/net/ethernet/emulex/benet/be_roce.c +++ b/drivers/net/ethernet/emulex/benet/be_roce.c @@ -93,7 +93,7 @@ void be_roce_dev_add(struct be_adapter *adapter) } } -void _be_roce_dev_remove(struct be_adapter *adapter) +static void _be_roce_dev_remove(struct be_adapter *adapter) { if (ocrdma_drv && ocrdma_drv->remove && adapter->ocrdma_dev) ocrdma_drv->remove(adapter->ocrdma_dev); @@ -110,7 +110,7 @@ void be_roce_dev_remove(struct be_adapter *adapter) } } -void _be_roce_dev_open(struct be_adapter *adapter) +static void _be_roce_dev_open(struct be_adapter *adapter) { if (ocrdma_drv && adapter->ocrdma_dev && ocrdma_drv->state_change_handler) @@ -126,7 +126,7 @@ void be_roce_dev_open(struct be_adapter *adapter) } } -void _be_roce_dev_close(struct be_adapter *adapter) +static void _be_roce_dev_close(struct be_adapter *adapter) { if (ocrdma_drv && adapter->ocrdma_dev && ocrdma_drv->state_change_handler) -- cgit v0.10.2 From 51a700db96367b079edd19988c8f91ddb33ec987 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Mon, 5 Aug 2013 18:03:00 +0900 Subject: net: micrel: Staticize local functions These local functions are used only in this file. Fix the following sparse warnings: drivers/net/ethernet/micrel/ks8842.c:708:6: warning: symbol 'ks8842_handle_rx' was not declared. Should it be static? drivers/net/ethernet/micrel/ks8842.c:718:6: warning: symbol 'ks8842_handle_tx' was not declared. Should it be static? drivers/net/ethernet/micrel/ks8842.c:727:6: warning: symbol 'ks8842_handle_rx_overrun' was not declared. Should it be static? drivers/net/ethernet/micrel/ks8842.c:735:6: warning: symbol 'ks8842_tasklet' was not declared. Should it be static? drivers/net/ethernet/micrel/ks8851_mll.c:691:6: warning: symbol 'ks_enable_qmu' was not declared. Should it be static? Signed-off-by: Jingoo Han Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c index e393d99..94b3bd6 100644 --- a/drivers/net/ethernet/micrel/ks8842.c +++ b/drivers/net/ethernet/micrel/ks8842.c @@ -705,7 +705,8 @@ static void ks8842_rx_frame(struct net_device *netdev, ks8842_enable_bits(adapter, 0, 1 << 12, REG_QRFCR); } -void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter) +static void ks8842_handle_rx(struct net_device *netdev, + struct ks8842_adapter *adapter) { u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff; netdev_dbg(netdev, "%s Entry - rx_data: %d\n", __func__, rx_data); @@ -715,7 +716,8 @@ void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter) } } -void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter) +static void ks8842_handle_tx(struct net_device *netdev, + struct ks8842_adapter *adapter) { u16 sr = ks8842_read16(adapter, 16, REG_TXSR); netdev_dbg(netdev, "%s - entry, sr: %x\n", __func__, sr); @@ -724,7 +726,7 @@ void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter) netif_wake_queue(netdev); } -void ks8842_handle_rx_overrun(struct net_device *netdev, +static void ks8842_handle_rx_overrun(struct net_device *netdev, struct ks8842_adapter *adapter) { netdev_dbg(netdev, "%s: entry\n", __func__); @@ -732,7 +734,7 @@ void ks8842_handle_rx_overrun(struct net_device *netdev, netdev->stats.rx_fifo_errors++; } -void ks8842_tasklet(unsigned long arg) +static void ks8842_tasklet(unsigned long arg) { struct net_device *netdev = (struct net_device *)arg; struct ks8842_adapter *adapter = netdev_priv(netdev); diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c index ac20098..9f3f5db 100644 --- a/drivers/net/ethernet/micrel/ks8851_mll.c +++ b/drivers/net/ethernet/micrel/ks8851_mll.c @@ -688,7 +688,7 @@ static void ks_soft_reset(struct ks_net *ks, unsigned op) } -void ks_enable_qmu(struct ks_net *ks) +static void ks_enable_qmu(struct ks_net *ks) { u16 w; -- cgit v0.10.2 From f094668ca1a3e326e4f3d75a5e3b32f949f417b8 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Mon, 5 Aug 2013 18:04:51 +0900 Subject: net: mlx4: Staticize local functions These local functions are used only in this file. Fix the following sparse warnings: drivers/net/ethernet/mellanox/mlx4/cmd.c:803:5: warning: symbol 'MLX4_CMD_UPDATE_QP_wrapper' was not declared. Should it be static? drivers/net/ethernet/mellanox/mlx4/cmd.c:812:5: warning: symbol 'MLX4_CMD_GET_OP_REQ_wrapper' was not declared. Should it be static? drivers/net/ethernet/mellanox/mlx4/cmd.c:1547:5: warning: symbol 'mlx4_master_immediate_activate_vlan_qos' was not declared. Should it be static? Signed-off-by: Jingoo Han Acked-By: Amir Vadai Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 141322c..ea20182 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -800,7 +800,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave, vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); } -int MLX4_CMD_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, +static int MLX4_CMD_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, @@ -809,7 +809,7 @@ int MLX4_CMD_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, return -EPERM; } -int MLX4_CMD_GET_OP_REQ_wrapper(struct mlx4_dev *dev, int slave, +static int MLX4_CMD_GET_OP_REQ_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, @@ -1544,7 +1544,7 @@ static int calculate_transition(u16 oper_vlan, u16 admin_vlan) return (2 * (oper_vlan == MLX4_VGT) + (admin_vlan == MLX4_VGT)); } -int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv, +static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv, int slave, int port) { struct mlx4_vport_oper_state *vp_oper; -- cgit v0.10.2 From 1155e964a616a9bd4b603f6f8bf4f47576b010c5 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Mon, 5 Aug 2013 12:42:00 +0800 Subject: tile: fix missing unlock on error in tile_net_open() Add the missing unlock before return from function tile_net_open() in the error handling case. Introduced by commit f3286a3af89d6db7a488f3e8f02b98d67d50f00c. (tile: support multiple mPIPE shims in tilegx network driver) Signed-off-by: Wei Yongjun Acked-by: Chris Metcalf Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 907b577..5d2a719 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -1510,8 +1510,10 @@ static int tile_net_open(struct net_device *dev) /* Get the instance info. */ rc = gxio_mpipe_link_instance(dev->name); - if (rc < 0 || rc >= NR_MPIPE_MAX) + if (rc < 0 || rc >= NR_MPIPE_MAX) { + mutex_unlock(&tile_net_devs_for_channel_mutex); return -EIO; + } priv->instance = rc; instance = rc; -- cgit v0.10.2 From fd7f838731a8b5f7567305dfc4c032c9ea135127 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Sun, 4 Aug 2013 14:21:52 +0530 Subject: ath9k: Add information about antenna diversity Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c index 291ca01..4cd2bce 100644 --- a/drivers/net/wireless/ath/ath9k/antenna.c +++ b/drivers/net/wireless/ath/ath9k/antenna.c @@ -16,6 +16,58 @@ #include "ath9k.h" +/* + * AR9285 + * ====== + * + * EEPROM has 2 4-bit fields containing the card configuration. + * + * antdiv_ctl1: + * ------------ + * bb_enable_ant_div_lnadiv : 1 + * bb_ant_div_alt_gaintb : 1 + * bb_ant_div_main_gaintb : 1 + * bb_enable_ant_fast_div : 1 + * + * antdiv_ctl2: + * ----------- + * bb_ant_div_alt_lnaconf : 2 + * bb_ant_div_main_lnaconf : 2 + * + * The EEPROM bits are used as follows: + * ------------------------------------ + * + * bb_enable_ant_div_lnadiv - Enable LNA path rx antenna diversity/combining. + * Set in AR_PHY_MULTICHAIN_GAIN_CTL. 
+ * + * bb_ant_div_[alt/main]_gaintb - 0 -> Antenna config Alt/Main uses gaintable 0 + * 1 -> Antenna config Alt/Main uses gaintable 1 + * Set in AR_PHY_MULTICHAIN_GAIN_CTL. + * + * bb_enable_ant_fast_div - Enable fast antenna diversity. + * Set in AR_PHY_CCK_DETECT. + * + * bb_ant_div_[alt/main]_lnaconf - Alt/Main LNA diversity/combining input config. + * Set in AR_PHY_MULTICHAIN_GAIN_CTL. + * 10=LNA1 + * 01=LNA2 + * 11=LNA1+LNA2 + * 00=LNA1-LNA2 + * + * AR9485 / AR9565 / AR9331 + * ======================== + * + * The same bits are present in the EEPROM, but the location in the + * EEPROM is different (ant_div_control in ar9300_BaseExtension_1). + * + * ant_div_alt_lnaconf ==> bit 0~1 + * ant_div_main_lnaconf ==> bit 2~3 + * ant_div_alt_gaintb ==> bit 4 + * ant_div_main_gaintb ==> bit 5 + * enable_ant_div_lnadiv ==> bit 6 + * enable_ant_fast_div ==> bit 7 + */ + static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta, int mindelta, int main_rssi_avg, int alt_rssi_avg, int pkt_count) -- cgit v0.10.2 From f85c3371aee070a65d837afdd5377e9dc66fbd52 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Sun, 4 Aug 2013 14:21:53 +0530 Subject: ath9k: Print LNA combining mode during init Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index d55d97c..8bd602c 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -2559,34 +2559,28 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) if (AR_SREV_9287_11_OR_LATER(ah) || AR_SREV_9271(ah)) pCap->hw_caps |= ATH9K_HW_CAP_SGI_20; - if (AR_SREV_9285(ah)) + if (AR_SREV_9285(ah)) { if (ah->eep_ops->get_eeprom(ah, EEP_MODAL_VER) >= 3) { ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1); - if ((ant_div_ctl1 & 0x1) && ((ant_div_ctl1 >> 3) & 0x1)) + if ((ant_div_ctl1 & 0x1) && ((ant_div_ctl1 >> 3) & 0x1)) { pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB; + ath_info(common, "Enable LNA combining\n"); + } } + } + if (AR_SREV_9300_20_OR_LATER(ah)) { if (ah->eep_ops->get_eeprom(ah, EEP_CHAIN_MASK_REDUCE)) pCap->hw_caps |= ATH9K_HW_CAP_APM; } - if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah)) { ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1); - /* - * enable the diversity-combining algorithm only when - * both enable_lna_div and enable_fast_div are set - * Table for Diversity - * ant_div_alt_lnaconf bit 0-1 - * ant_div_main_lnaconf bit 2-3 - * ant_div_alt_gaintb bit 4 - * ant_div_main_gaintb bit 5 - * enable_ant_div_lnadiv bit 6 - * enable_ant_fast_div bit 7 - */ - if ((ant_div_ctl1 >> 0x6) == 0x3) + if ((ant_div_ctl1 >> 0x6) == 0x3) { pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB; + ath_info(common, "Enable LNA combining\n"); + } } if (ath9k_hw_dfs_tested(ah)) -- cgit v0.10.2 From 3afa6b4f54181e88814680e71b2f12fae64057f4 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Sun, 4 Aug 2013 14:21:54 +0530 Subject: ath9k: Fix antenna diversity for CUS198 CUS198/CUS230 need a few tweaks in the antenna diversity algorithm to accomodate RSSI variation. Add a couple of knobs to control low RSSI threshold and fast antenna diversity bias values. Signed-off-by: Sujith Manoharan Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c index 4cd2bce..8de314c 100644 --- a/drivers/net/wireless/ath/ath9k/antenna.c +++ b/drivers/net/wireless/ath/ath9k/antenna.c @@ -68,16 +68,27 @@ * enable_ant_fast_div ==> bit 7 */ -static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta, +static inline bool ath_is_alt_ant_ratio_better(struct ath_ant_comb *antcomb, + int alt_ratio, int maxdelta, int mindelta, int main_rssi_avg, int alt_rssi_avg, int pkt_count) { - return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) && - (alt_rssi_avg > main_rssi_avg + maxdelta)) || - (alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50); + if (pkt_count <= 50) + return false; + + if (alt_rssi_avg > main_rssi_avg + mindelta) + return true; + + if (alt_ratio >= antcomb->ant_ratio2 && + alt_rssi_avg >= antcomb->low_rssi_thresh && + (alt_rssi_avg > main_rssi_avg + maxdelta)) + return true; + + return false; } static inline bool ath_ant_div_comb_alt_check(struct ath_hw_antcomb_conf *conf, + struct ath_ant_comb *antcomb, int alt_ratio, int alt_rssi_avg, int main_rssi_avg) { @@ -100,20 +111,22 @@ static inline bool ath_ant_div_comb_alt_check(struct ath_hw_antcomb_conf *conf, break; case 1: case 2: - if (alt_rssi_avg < 4) + if (alt_rssi_avg < 4 || alt_rssi_avg < antcomb->low_rssi_thresh) break; if ((set1 && (alt_rssi_avg >= (main_rssi_avg - 5))) || - (set2 && (alt_rssi_avg >= (main_rssi_avg - 2)))) + (set2 && (alt_rssi_avg >= (main_rssi_avg - 2))) || + (alt_ratio > antcomb->ant_ratio)) result = true; break; case 3: - if (alt_rssi_avg < 4) + if (alt_rssi_avg < 4 || alt_rssi_avg < antcomb->low_rssi_thresh) break; if ((set1 && (alt_rssi_avg >= (main_rssi_avg - 3))) || - (set2 && (alt_rssi_avg >= (main_rssi_avg + 3)))) + (set2 && (alt_rssi_avg >= (main_rssi_avg + 3))) || + (alt_ratio > antcomb->ant_ratio)) result = true; break; @@ -266,7 +279,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb, if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) { /* main is LNA1 */ - if (ath_is_alt_ant_ratio_better(alt_ratio, + if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio, ATH_ANT_DIV_COMB_LNA1_DELTA_HI, ATH_ANT_DIV_COMB_LNA1_DELTA_LOW, main_rssi_avg, alt_rssi_avg, @@ -275,7 +288,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb, else antcomb->first_ratio = false; } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) { - if (ath_is_alt_ant_ratio_better(alt_ratio, + if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio, ATH_ANT_DIV_COMB_LNA1_DELTA_MID, ATH_ANT_DIV_COMB_LNA1_DELTA_LOW, main_rssi_avg, alt_rssi_avg, @@ -284,7 +297,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb, else antcomb->first_ratio = false; } else { - if (ath_is_alt_ant_ratio_better(alt_ratio, + if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio, ATH_ANT_DIV_COMB_LNA1_DELTA_HI, 0, main_rssi_avg, alt_rssi_avg, @@ -325,7 +338,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb, div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1; if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) { - if (ath_is_alt_ant_ratio_better(alt_ratio, + if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio, ATH_ANT_DIV_COMB_LNA1_DELTA_HI, ATH_ANT_DIV_COMB_LNA1_DELTA_LOW, main_rssi_avg, alt_rssi_avg, @@ -334,7 +347,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb, else antcomb->second_ratio = false; } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) { - if 
(ath_is_alt_ant_ratio_better(alt_ratio, + if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio, ATH_ANT_DIV_COMB_LNA1_DELTA_MID, ATH_ANT_DIV_COMB_LNA1_DELTA_LOW, main_rssi_avg, alt_rssi_avg, @@ -343,7 +356,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb, else antcomb->second_ratio = false; } else { - if (ath_is_alt_ant_ratio_better(alt_ratio, + if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio, ATH_ANT_DIV_COMB_LNA1_DELTA_HI, 0, main_rssi_avg, alt_rssi_avg, @@ -484,8 +497,7 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf, ant_conf->fast_div_bias = 0x1; break; case 0x10: /* LNA2 A-B */ - if (!(antcomb->scan) && - (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) + if (!antcomb->scan && (alt_ratio > antcomb->ant_ratio)) ant_conf->fast_div_bias = 0x1; else ant_conf->fast_div_bias = 0x2; @@ -494,15 +506,13 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf, ant_conf->fast_div_bias = 0x1; break; case 0x13: /* LNA2 A+B */ - if (!(antcomb->scan) && - (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) + if (!antcomb->scan && (alt_ratio > antcomb->ant_ratio)) ant_conf->fast_div_bias = 0x1; else ant_conf->fast_div_bias = 0x2; break; case 0x20: /* LNA1 A-B */ - if (!(antcomb->scan) && - (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) + if (!antcomb->scan && (alt_ratio > antcomb->ant_ratio)) ant_conf->fast_div_bias = 0x1; else ant_conf->fast_div_bias = 0x2; @@ -511,8 +521,7 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf, ant_conf->fast_div_bias = 0x1; break; case 0x23: /* LNA1 A+B */ - if (!(antcomb->scan) && - (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) + if (!antcomb->scan && (alt_ratio > antcomb->ant_ratio)) ant_conf->fast_div_bias = 0x1; else ant_conf->fast_div_bias = 0x2; @@ -529,6 +538,9 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf, default: break; } + + if (antcomb->fast_div_bias) + ant_conf->fast_div_bias = antcomb->fast_div_bias; } else if (ant_conf->div_group == 3) { switch ((ant_conf->main_lna_conf << 4) | ant_conf->alt_lna_conf) { @@ -668,13 +680,14 @@ static void ath_ant_try_scan(struct ath_ant_comb *antcomb, } static bool ath_ant_try_switch(struct ath_hw_antcomb_conf *div_ant_conf, + struct ath_ant_comb *antcomb, int alt_ratio, int alt_rssi_avg, int main_rssi_avg, int curr_main_set, int curr_alt_set) { bool ret = false; - if (ath_ant_div_comb_alt_check(div_ant_conf, alt_ratio, + if (ath_ant_div_comb_alt_check(div_ant_conf, antcomb, alt_ratio, alt_rssi_avg, main_rssi_avg)) { if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) { /* @@ -718,7 +731,7 @@ static bool ath_ant_short_scan_check(struct ath_ant_comb *antcomb) if (antcomb->total_pkt_count == ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) { alt_ratio = ((antcomb->alt_recv_cnt * 100) / antcomb->total_pkt_count); - if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO) + if (alt_ratio < antcomb->ant_ratio) return true; } @@ -741,6 +754,14 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs) main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) & ATH_ANT_RX_MASK; + if (alt_rssi >= antcomb->low_rssi_thresh) { + antcomb->ant_ratio = ATH_ANT_DIV_COMB_ALT_ANT_RATIO; + antcomb->ant_ratio2 = ATH_ANT_DIV_COMB_ALT_ANT_RATIO2; + } else { + antcomb->ant_ratio = ATH_ANT_DIV_COMB_ALT_ANT_RATIO_LOW_RSSI; + antcomb->ant_ratio2 = ATH_ANT_DIV_COMB_ALT_ANT_RATIO2_LOW_RSSI; + } + /* Record packet only when both main_rssi and alt_rssi is positive */ if (main_rssi > 0 && alt_rssi > 0) { 
antcomb->total_pkt_count++; @@ -783,7 +804,7 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs) antcomb->count++; if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) { - if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) { + if (alt_ratio > antcomb->ant_ratio) { ath_lnaconf_alt_good_scan(antcomb, div_ant_conf, main_rssi_avg); antcomb->alt_good = true; @@ -797,7 +818,7 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs) } if (!antcomb->scan) { - ret = ath_ant_try_switch(&div_ant_conf, alt_ratio, + ret = ath_ant_try_switch(&div_ant_conf, antcomb, alt_ratio, alt_rssi_avg, main_rssi_avg, curr_main_set, curr_alt_set); if (ret) diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index ed8f8ef..74009f1 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -585,6 +585,8 @@ static inline void ath_fill_led_pin(struct ath_softc *sc) #define ATH_ANT_DIV_COMB_MAX_COUNT 100 #define ATH_ANT_DIV_COMB_ALT_ANT_RATIO 30 #define ATH_ANT_DIV_COMB_ALT_ANT_RATIO2 20 +#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO_LOW_RSSI 50 +#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO2_LOW_RSSI 50 #define ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA -1 #define ATH_ANT_DIV_COMB_LNA1_DELTA_HI -4 @@ -607,6 +609,8 @@ struct ath_ant_comb { int rssi_first; int rssi_second; int rssi_third; + int ant_ratio; + int ant_ratio2; bool alt_good; int quick_scan_cnt; enum ath9k_ant_div_comb_lna_conf main_conf; @@ -615,6 +619,12 @@ struct ath_ant_comb { bool first_ratio; bool second_ratio; unsigned long scan_start_time; + + /* + * Card-specific config values. + */ + int low_rssi_thresh; + int fast_div_bias; }; void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs); diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 16f8b20..7ded9d6 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -525,6 +525,8 @@ static void ath9k_init_platform(struct ath_softc *sc) ATH9K_PCI_CUS230)) { ah->config.xlna_gpio = 9; ah->config.xatten_margin_cfg = true; + sc->ant_comb.low_rssi_thresh = 20; + sc->ant_comb.fast_div_bias = 3; ath_info(common, "Set parameters for %s\n", (sc->driver_data & ATH9K_PCI_CUS198) ? -- cgit v0.10.2 From 6308130542e2ad913b678436c8f7b833e1420d65 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Sun, 4 Aug 2013 14:21:55 +0530 Subject: ath9k: Cleanup WLAN/BT RX diversity For single-chain WLAN+BT cards, the BT antenna can be used for WLAN RX when the BT interface is disabled. Rename the modparam "antenna_diversity" to "bt_ant_diversity" to clarify this. Signed-off-by: Sujith Manoharan Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h index daeafef..e0ba7cd 100644 --- a/drivers/net/wireless/ath/ath.h +++ b/drivers/net/wireless/ath/ath.h @@ -159,7 +159,7 @@ struct ath_common { bool btcoex_enabled; bool disable_ani; - bool antenna_diversity; + bool bt_ant_diversity; }; struct sk_buff *ath_rxbuf_alloc(struct ath_common *common, diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c index 8de314c..354dc66 100644 --- a/drivers/net/wireless/ath/ath9k/antenna.c +++ b/drivers/net/wireless/ath/ath9k/antenna.c @@ -887,6 +887,6 @@ void ath_ant_comb_update(struct ath_softc *sc) ath9k_hw_antdiv_comb_conf_set(ah, &div_ant_conf); - if (common->antenna_diversity) + if (common->bt_ant_diversity) ath9k_hw_antctrl_shared_chain_lnadiv(ah, true); } diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index e744d97..881ca6c 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -270,25 +270,26 @@ static const struct file_operations fops_ani = { .llseek = default_llseek, }; -static ssize_t read_file_ant_diversity(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) +static ssize_t read_file_bt_ant_diversity(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { struct ath_softc *sc = file->private_data; struct ath_common *common = ath9k_hw_common(sc->sc_ah); char buf[32]; unsigned int len; - len = sprintf(buf, "%d\n", common->antenna_diversity); + len = sprintf(buf, "%d\n", common->bt_ant_diversity); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } -static ssize_t write_file_ant_diversity(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) +static ssize_t write_file_bt_ant_diversity(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) { struct ath_softc *sc = file->private_data; struct ath_common *common = ath9k_hw_common(sc->sc_ah); - unsigned long antenna_diversity; + unsigned long bt_ant_diversity; char buf[32]; ssize_t len; @@ -296,26 +297,23 @@ static ssize_t write_file_ant_diversity(struct file *file, if (copy_from_user(buf, user_buf, len)) return -EFAULT; - if (!AR_SREV_9565(sc->sc_ah)) - goto exit; - buf[len] = '\0'; - if (kstrtoul(buf, 0, &antenna_diversity)) + if (kstrtoul(buf, 0, &bt_ant_diversity)) return -EINVAL; - common->antenna_diversity = !!antenna_diversity; + common->bt_ant_diversity = !!bt_ant_diversity; ath9k_ps_wakeup(sc); ath_ant_comb_update(sc); - ath_dbg(common, CONFIG, "Antenna diversity: %d\n", - common->antenna_diversity); + ath_dbg(common, CONFIG, "Enable WLAN/BT RX Antenna diversity: %d\n", + common->bt_ant_diversity); ath9k_ps_restore(sc); -exit: + return count; } -static const struct file_operations fops_ant_diversity = { - .read = read_file_ant_diversity, - .write = write_file_ant_diversity, +static const struct file_operations fops_bt_ant_diversity = { + .read = read_file_bt_ant_diversity, + .write = write_file_bt_ant_diversity, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, @@ -1933,8 +1931,8 @@ int ath9k_init_debug(struct ath_hw *ah) sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask); debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, &sc->sc_ah->gpio_val); - debugfs_create_file("diversity", S_IRUSR | S_IWUSR, - sc->debug.debugfs_phy, sc, &fops_ant_diversity); + debugfs_create_file("bt_ant_diversity", S_IRUSR | S_IWUSR, + sc->debug.debugfs_phy, sc, 
&fops_bt_ant_diversity); debugfs_create_file("antenna_diversity", S_IRUSR, sc->debug.debugfs_phy, sc, &fops_antenna_diversity); #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 7ded9d6..5e81b2c 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -53,9 +53,9 @@ static int ath9k_btcoex_enable; module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444); MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence"); -static int ath9k_enable_diversity; -module_param_named(enable_diversity, ath9k_enable_diversity, int, 0444); -MODULE_PARM_DESC(enable_diversity, "Enable Antenna diversity for AR9565"); +static int ath9k_bt_ant_diversity; +module_param_named(bt_ant_diversity, ath9k_bt_ant_diversity, int, 0444); +MODULE_PARM_DESC(bt_ant_diversity, "Enable WLAN/BT RX antenna diversity"); bool is_ath9k_unloaded; /* We use the hw_value as an index into our private channel structure */ @@ -633,11 +633,11 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, ath9k_init_platform(sc); /* - * Enable Antenna diversity only when BTCOEX is disabled + * Enable WLAN/BT RX Antenna diversity only when BTCOEX is enabled * and the user manually requests the feature. */ - if (!common->btcoex_enabled && ath9k_enable_diversity) - common->antenna_diversity = 1; + if (common->btcoex_enabled && ath9k_bt_ant_diversity) + common->bt_ant_diversity = 1; spin_lock_init(&common->cc_lock); -- cgit v0.10.2 From 3f2da95517029d88365a074ef81a928a99871964 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Sun, 4 Aug 2013 14:21:56 +0530 Subject: ath9k: Add a HW capability for WLAN/BT RX diversity Make use of this capability to restrict the usage of the debugfs file and modparam using which this feature can be enabled. Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 74009f1..18898ab 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -634,10 +634,11 @@ void ath_ant_comb_update(struct ath_softc *sc); /* Main driver core */ /********************/ -#define ATH9K_PCI_CUS198 0x0001 -#define ATH9K_PCI_CUS230 0x0002 -#define ATH9K_PCI_CUS217 0x0004 -#define ATH9K_PCI_WOW 0x0008 +#define ATH9K_PCI_CUS198 0x0001 +#define ATH9K_PCI_CUS230 0x0002 +#define ATH9K_PCI_CUS217 0x0004 +#define ATH9K_PCI_WOW 0x0008 +#define ATH9K_PCI_BT_ANT_DIV 0x0010 /* * Default cache line size, in bytes. 
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index 881ca6c..daa316b 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -289,6 +289,7 @@ static ssize_t write_file_bt_ant_diversity(struct file *file, { struct ath_softc *sc = file->private_data; struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ath9k_hw_capabilities *pCap = &sc->sc_ah->caps; unsigned long bt_ant_diversity; char buf[32]; ssize_t len; @@ -297,6 +298,9 @@ static ssize_t write_file_bt_ant_diversity(struct file *file, if (copy_from_user(buf, user_buf, len)) return -EFAULT; + if (!(pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV)) + goto exit; + buf[len] = '\0'; if (kstrtoul(buf, 0, &bt_ant_diversity)) return -EINVAL; @@ -307,7 +311,7 @@ static ssize_t write_file_bt_ant_diversity(struct file *file, ath_dbg(common, CONFIG, "Enable WLAN/BT RX Antenna diversity: %d\n", common->bt_ant_diversity); ath9k_ps_restore(sc); - +exit: return count; } diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index fd009e5..4ab8f58 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -248,6 +248,7 @@ enum ath9k_hw_caps { ATH9K_HW_WOW_DEVICE_CAPABLE = BIT(17), ATH9K_HW_CAP_PAPRD = BIT(18), ATH9K_HW_CAP_FCC_BAND_SWITCH = BIT(19), + ATH9K_HW_CAP_BT_ANT_DIV = BIT(20), }; /* diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 5e81b2c..b678add 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -516,6 +516,7 @@ static void ath9k_init_misc(struct ath_softc *sc) static void ath9k_init_platform(struct ath_softc *sc) { struct ath_hw *ah = sc->sc_ah; + struct ath9k_hw_capabilities *pCap = &ah->caps; struct ath_common *common = ath9k_hw_common(ah); if (common->bus_ops->ath_bus_type != ATH_PCI) @@ -531,8 +532,14 @@ static void ath9k_init_platform(struct ath_softc *sc) ath_info(common, "Set parameters for %s\n", (sc->driver_data & ATH9K_PCI_CUS198) ? "CUS198" : "CUS230"); - } else if (sc->driver_data & ATH9K_PCI_CUS217) { + } + + if (sc->driver_data & ATH9K_PCI_CUS217) ath_info(common, "CUS217 card detected\n"); + + if (sc->driver_data & ATH9K_PCI_BT_ANT_DIV) { + pCap->hw_caps |= ATH9K_HW_CAP_BT_ANT_DIV; + ath_info(common, "Set BT/WLAN RX diversity capability\n"); } } @@ -586,6 +593,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, { struct ath9k_platform_data *pdata = sc->dev->platform_data; struct ath_hw *ah = NULL; + struct ath9k_hw_capabilities *pCap; struct ath_common *common; int ret = 0, i; int csz = 0; @@ -602,6 +610,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, ah->reg_ops.rmw = ath9k_reg_rmw; atomic_set(&ah->intr_ref_cnt, -1); sc->sc_ah = ah; + pCap = &ah->caps; sc->dfs_detector = dfs_pattern_detector_init(ah, NL80211_DFS_UNSET); @@ -633,10 +642,14 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, ath9k_init_platform(sc); /* - * Enable WLAN/BT RX Antenna diversity only when BTCOEX is enabled - * and the user manually requests the feature. + * Enable WLAN/BT RX Antenna diversity only when: + * + * - BTCOEX is enabled + * - the user manually requests the feature. + * - the HW cap is set using the platform data. 
*/ - if (common->btcoex_enabled && ath9k_bt_ant_diversity) + if (common->btcoex_enabled && ath9k_bt_ant_diversity && + (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV)) common->bt_ant_diversity = 1; spin_lock_init(&common->cc_lock); -- cgit v0.10.2 From d8d7744b72b56587b39fb4ecc2cbfb3d8cb734a4 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Sun, 4 Aug 2013 14:21:57 +0530 Subject: ath9k: Rename ath9k_hw_antctrl_shared_chain_lnadiv Use "ath9k_hw_set_bt_ant_diversity" instead. Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c index 354dc66..85391e6 100644 --- a/drivers/net/wireless/ath/ath9k/antenna.c +++ b/drivers/net/wireless/ath/ath9k/antenna.c @@ -888,5 +888,5 @@ void ath_ant_comb_update(struct ath_softc *sc) ath9k_hw_antdiv_comb_conf_set(ah, &div_ant_conf); if (common->bt_ant_diversity) - ath9k_hw_antctrl_shared_chain_lnadiv(ah, true); + ath9k_hw_set_bt_ant_diversity(ah, true); } diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 3ec33ce..f9fe9c8 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -1412,8 +1412,7 @@ static void ar9003_hw_antdiv_comb_conf_set(struct ath_hw *ah, REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval); } -static void ar9003_hw_antctrl_shared_chain_lnadiv(struct ath_hw *ah, - bool enable) +static void ar9003_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable) { u8 ant_div_ctl1; u32 regval; @@ -1646,7 +1645,7 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah) ops->antdiv_comb_conf_get = ar9003_hw_antdiv_comb_conf_get; ops->antdiv_comb_conf_set = ar9003_hw_antdiv_comb_conf_set; - ops->antctrl_shared_chain_lnadiv = ar9003_hw_antctrl_shared_chain_lnadiv; + ops->set_bt_ant_diversity = ar9003_hw_set_bt_ant_diversity; ops->spectral_scan_config = ar9003_hw_spectral_scan_config; ops->spectral_scan_trigger = ar9003_hw_spectral_scan_trigger; ops->spectral_scan_wait = ar9003_hw_spectral_scan_wait; diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h index 14b7011..a78d48c 100644 --- a/drivers/net/wireless/ath/ath9k/hw-ops.h +++ b/drivers/net/wireless/ath/ath9k/hw-ops.h @@ -78,11 +78,10 @@ static inline void ath9k_hw_antdiv_comb_conf_set(struct ath_hw *ah, ath9k_hw_ops(ah)->antdiv_comb_conf_set(ah, antconf); } -static inline void ath9k_hw_antctrl_shared_chain_lnadiv(struct ath_hw *ah, - bool enable) +static inline void ath9k_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable) { - if (ath9k_hw_ops(ah)->antctrl_shared_chain_lnadiv) - ath9k_hw_ops(ah)->antctrl_shared_chain_lnadiv(ah, enable); + if (ath9k_hw_ops(ah)->set_bt_ant_diversity) + ath9k_hw_ops(ah)->set_bt_ant_diversity(ah, enable); } /* Private hardware call ops */ diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index 4ab8f58..c037718 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -718,7 +718,7 @@ struct ath_hw_ops { struct ath_hw_antcomb_conf *antconf); void (*antdiv_comb_conf_set)(struct ath_hw *ah, struct ath_hw_antcomb_conf *antconf); - void (*antctrl_shared_chain_lnadiv)(struct ath_hw *hw, bool enable); + void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable); void (*spectral_scan_config)(struct ath_hw *ah, struct ath_spec_scan *param); void (*spectral_scan_trigger)(struct ath_hw *ah); -- cgit v0.10.2 From a5354ccaaf54ac61c6d1b350e8d3e4234dd28849 Mon Sep 17 
00:00:00 2001 From: Sujith Manoharan Date: Sun, 4 Aug 2013 14:21:58 +0530 Subject: ath9k: Enable WLAN/BT Ant Diversity for WB225/WB195 A custom solution for Asus is WB195 based and supports WLAN/BT Rx diversity. Identify this card and set the capability. CUS198/CUS230, which are based on WB225 also support WLAN/BT Rx diversity. Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index c585c9b..30652b4 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -29,6 +29,14 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = { { PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */ { PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */ { PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */ + + /* AR9285 card for Asus */ + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x002B, + PCI_VENDOR_ID_AZWAVE, + 0x2C37), + .driver_data = ATH9K_PCI_BT_ANT_DIV }, + { PCI_VDEVICE(ATHEROS, 0x002B) }, /* PCI-E */ { PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */ { PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */ @@ -40,29 +48,29 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = { 0x0032, PCI_VENDOR_ID_AZWAVE, 0x2086), - .driver_data = ATH9K_PCI_CUS198 }, + .driver_data = ATH9K_PCI_CUS198 | ATH9K_PCI_BT_ANT_DIV }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, 0x0032, PCI_VENDOR_ID_AZWAVE, 0x1237), - .driver_data = ATH9K_PCI_CUS198 }, + .driver_data = ATH9K_PCI_CUS198 | ATH9K_PCI_BT_ANT_DIV }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, 0x0032, PCI_VENDOR_ID_AZWAVE, 0x2126), - .driver_data = ATH9K_PCI_CUS198 }, + .driver_data = ATH9K_PCI_CUS198 | ATH9K_PCI_BT_ANT_DIV }, /* PCI-E CUS230 */ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, 0x0032, PCI_VENDOR_ID_AZWAVE, 0x2152), - .driver_data = ATH9K_PCI_CUS230 }, + .driver_data = ATH9K_PCI_CUS230 | ATH9K_PCI_BT_ANT_DIV }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, 0x0032, PCI_VENDOR_ID_FOXCONN, 0xE075), - .driver_data = ATH9K_PCI_CUS230 }, + .driver_data = ATH9K_PCI_CUS230 | ATH9K_PCI_BT_ANT_DIV }, { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */ { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */ -- cgit v0.10.2 From d7150908248bc76eff48c16045802c7f0b1cfd74 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Sun, 4 Aug 2013 14:21:59 +0530 Subject: ath9k: Program HW for WB195 diversity The MC_GAIN_CTL/CCK_DETECT registers have to be programmed with the correct configuration values if WLAN/BT RX diversity is enabled. Add this and also take care of the BTCOEX mode when fast diversity is enabled/disabled. Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c index f400351..456d8b9 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c @@ -555,6 +555,65 @@ static void ar9002_hw_antdiv_comb_conf_set(struct ath_hw *ah, REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regval); } +static void ar9002_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable) +{ + struct ath_btcoex_hw *btcoex = &ah->btcoex_hw; + u8 antdiv_ctrl1, antdiv_ctrl2; + u32 regval; + + if (enable) { + antdiv_ctrl1 = ATH_BT_COEX_ANTDIV_CONTROL1_ENABLE; + antdiv_ctrl2 = ATH_BT_COEX_ANTDIV_CONTROL2_ENABLE; + + /* + * Don't disable BT ant to allow BB to control SWCOM. 
+ */ + btcoex->bt_coex_mode2 &= (~(AR_BT_DISABLE_BT_ANT)); + REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2); + + REG_WRITE(ah, AR_PHY_SWITCH_COM, ATH_BT_COEX_ANT_DIV_SWITCH_COM); + REG_RMW(ah, AR_PHY_SWITCH_CHAIN_0, 0, 0xf0000000); + } else { + /* + * Disable antenna diversity, use LNA1 only. + */ + antdiv_ctrl1 = ATH_BT_COEX_ANTDIV_CONTROL1_FIXED_A; + antdiv_ctrl2 = ATH_BT_COEX_ANTDIV_CONTROL2_FIXED_A; + + /* + * Disable BT Ant. to allow concurrent BT and WLAN receive. + */ + btcoex->bt_coex_mode2 |= AR_BT_DISABLE_BT_ANT; + REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2); + + /* + * Program SWCOM table to make sure RF switch always parks + * at BT side. + */ + REG_WRITE(ah, AR_PHY_SWITCH_COM, 0); + REG_RMW(ah, AR_PHY_SWITCH_CHAIN_0, 0, 0xf0000000); + } + + regval = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL); + regval &= (~(AR_PHY_9285_ANT_DIV_CTL_ALL)); + /* + * Clear ant_fast_div_bias [14:9] since for WB195, + * the main LNA is always LNA1. + */ + regval &= (~(AR_PHY_9285_FAST_DIV_BIAS)); + regval |= SM(antdiv_ctrl1, AR_PHY_9285_ANT_DIV_CTL); + regval |= SM(antdiv_ctrl2, AR_PHY_9285_ANT_DIV_ALT_LNACONF); + regval |= SM((antdiv_ctrl2 >> 2), AR_PHY_9285_ANT_DIV_MAIN_LNACONF); + regval |= SM((antdiv_ctrl1 >> 1), AR_PHY_9285_ANT_DIV_ALT_GAINTB); + regval |= SM((antdiv_ctrl1 >> 2), AR_PHY_9285_ANT_DIV_MAIN_GAINTB); + REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regval); + + regval = REG_READ(ah, AR_PHY_CCK_DETECT); + regval &= (~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV); + regval |= SM((antdiv_ctrl1 >> 3), AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV); + REG_WRITE(ah, AR_PHY_CCK_DETECT, regval); +} + static void ar9002_hw_spectral_scan_config(struct ath_hw *ah, struct ath_spec_scan *param) { @@ -630,6 +689,7 @@ void ar9002_hw_attach_phy_ops(struct ath_hw *ah) ops->antdiv_comb_conf_get = ar9002_hw_antdiv_comb_conf_get; ops->antdiv_comb_conf_set = ar9002_hw_antdiv_comb_conf_set; + ops->set_bt_ant_diversity = ar9002_hw_set_bt_ant_diversity; ops->spectral_scan_config = ar9002_hw_spectral_scan_config; ops->spectral_scan_trigger = ar9002_hw_spectral_scan_trigger; ops->spectral_scan_wait = ar9002_hw_spectral_scan_wait; diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.h b/drivers/net/wireless/ath/ath9k/ar9002_phy.h index d3f0928..6314ae2 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_phy.h +++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.h @@ -320,6 +320,12 @@ #define AR_PHY_9285_ANT_DIV_GAINTB_0 0 #define AR_PHY_9285_ANT_DIV_GAINTB_1 1 +#define ATH_BT_COEX_ANTDIV_CONTROL1_ENABLE 0x0b +#define ATH_BT_COEX_ANTDIV_CONTROL2_ENABLE 0x09 +#define ATH_BT_COEX_ANTDIV_CONTROL1_FIXED_A 0x04 +#define ATH_BT_COEX_ANTDIV_CONTROL2_FIXED_A 0x09 +#define ATH_BT_COEX_ANT_DIV_SWITCH_COM 0x66666666 + #define AR_PHY_EXT_CCA0 0x99b8 #define AR_PHY_EXT_CCA0_THRESH62 0x000000FF #define AR_PHY_EXT_CCA0_THRESH62_S 0 -- cgit v0.10.2 From 7bdea96a1bbae75e7922584e3ae37fb9ad6cb79a Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Sun, 4 Aug 2013 14:22:00 +0530 Subject: ath9k: Remove "shared_chain_lnadiv" This variable is redundant since we can use common->bt_ant_diversity to determine if diversity has to be enabled/disabled. Signed-off-by: Sujith Manoharan Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index a98e6a3..c2f1f18 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -3561,6 +3561,7 @@ static u16 ar9003_hw_ant_ctrl_chain_get(struct ath_hw *ah, int chain, static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz) { + struct ath_common *common = ath9k_hw_common(ah); struct ath9k_hw_capabilities *pCap = &ah->caps; int chain; u32 regval, value, gpio; @@ -3646,7 +3647,7 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz) regval |= ((value >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S; if (AR_SREV_9565(ah)) { - if (ah->shared_chain_lnadiv) { + if (common->bt_ant_diversity) { regval |= (1 << AR_PHY_ANT_SW_RX_PROT_S); } else { regval &= ~(1 << AR_PHY_ANT_DIV_LNADIV_S); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index f9fe9c8..55021f1 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -1420,7 +1420,6 @@ static void ar9003_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable) if (!AR_SREV_9565(ah)) return; - ah->shared_chain_lnadiv = enable; ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1); regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL); diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 8bd602c..151443b 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -2056,7 +2056,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, ath9k_hw_apply_gpio_override(ah); - if (AR_SREV_9565(ah) && ah->shared_chain_lnadiv) + if (AR_SREV_9565(ah) && common->bt_ant_diversity) REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV, AR_BTCOEX_WL_LNADIV_FORCE_ON); return 0; diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index c037718..9dc6773 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -767,7 +767,6 @@ struct ath_hw { bool aspm_enabled; bool is_monitoring; bool need_an_top2_fixup; - bool shared_chain_lnadiv; u16 tx_trig_level; u32 nf_regs[6]; -- cgit v0.10.2 From 31fd216db9cb7a50e0e64aff813bc6c12e9437d3 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Sun, 4 Aug 2013 14:22:01 +0530 Subject: ath9k: Set SWCOM value for CUS198 CUS198/CUS230 cards require a custom value to be programmed into the SWCOM register. Assign this during init time. Signed-off-by: Sujith Manoharan Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index 9dc6773..38f461c 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -312,6 +312,7 @@ struct ath9k_ops_config { /* Platform specific config */ u32 xlna_gpio; + u32 ant_ctrl_comm2g_switch_enable; bool xatten_margin_cfg; }; diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index b678add..4afe30e 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -526,6 +526,7 @@ static void ath9k_init_platform(struct ath_softc *sc) ATH9K_PCI_CUS230)) { ah->config.xlna_gpio = 9; ah->config.xatten_margin_cfg = true; + ah->config.ant_ctrl_comm2g_switch_enable = 0x000BBB88; sc->ant_comb.low_rssi_thresh = 20; sc->ant_comb.fast_div_bias = 3; -- cgit v0.10.2 From 84893817aa382b923e3d7491e7081eeb6d9ec213 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Sun, 4 Aug 2013 14:22:02 +0530 Subject: ath9k: Support ANT diversity for WB225 WB225 based cards like CUS198 and CUS230 support both fast antenna diversity and LNA combining. Add support for this and also program the SWCOM register with the correct "ant_ctrl_comm2g_switch_enable" value. Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index c2f1f18..178052f 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -3541,13 +3541,12 @@ static u16 ar9003_switch_com_spdt_get(struct ath_hw *ah, bool is2ghz) return le16_to_cpu(ar9003_modal_header(ah, is2ghz)->switchcomspdt); } - -static u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz) +u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz) { return le32_to_cpu(ar9003_modal_header(ah, is2ghz)->antCtrlCommon); } -static u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz) +u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz) { return le32_to_cpu(ar9003_modal_header(ah, is2ghz)->antCtrlCommon2); } diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h index 874f657..75d4fb4 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h @@ -334,6 +334,8 @@ struct ar9300_eeprom { s32 ar9003_hw_get_tx_gain_idx(struct ath_hw *ah); s32 ar9003_hw_get_rx_gain_idx(struct ath_hw *ah); +u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz); +u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz); u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is_2ghz); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 55021f1..4898829 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -1414,58 +1414,102 @@ static void ar9003_hw_antdiv_comb_conf_set(struct ath_hw *ah, static void ar9003_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable) { + struct ath9k_hw_capabilities *pCap = &ah->caps; u8 ant_div_ctl1; u32 regval; - if (!AR_SREV_9565(ah)) + if (!AR_SREV_9485(ah) && !AR_SREV_9565(ah)) return; + if (AR_SREV_9485(ah)) { + regval = ar9003_hw_ant_ctrl_common_2_get(ah, + IS_CHAN_2GHZ(ah->curchan)); + if (enable) { + regval &= ~AR_SWITCH_TABLE_COM2_ALL; + regval |= ah->config.ant_ctrl_comm2g_switch_enable; + } + REG_RMW_FIELD(ah, 
AR_PHY_SWITCH_COM_2, + AR_SWITCH_TABLE_COM2_ALL, regval); + } + ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1); + /* + * Set MAIN/ALT LNA conf. + * Set MAIN/ALT gain_tb. + */ regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL); regval &= (~AR_ANT_DIV_CTRL_ALL); regval |= (ant_div_ctl1 & 0x3f) << AR_ANT_DIV_CTRL_ALL_S; - regval &= ~AR_PHY_ANT_DIV_LNADIV; - regval |= ((ant_div_ctl1 >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S; - - if (enable) - regval |= AR_ANT_DIV_ENABLE; - REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval); - regval = REG_READ(ah, AR_PHY_CCK_DETECT); - regval &= ~AR_FAST_DIV_ENABLE; - regval |= ((ant_div_ctl1 >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S; - - if (enable) - regval |= AR_FAST_DIV_ENABLE; - - REG_WRITE(ah, AR_PHY_CCK_DETECT, regval); - - if (enable) { - REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL, - (1 << AR_PHY_ANT_SW_RX_PROT_S)); - if (ah->curchan && IS_CHAN_2GHZ(ah->curchan)) - REG_SET_BIT(ah, AR_PHY_RESTART, - AR_PHY_RESTART_ENABLE_DIV_M2FLAG); - REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV, - AR_BTCOEX_WL_LNADIV_FORCE_ON); - } else { - REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_ENABLE); - REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL, - (1 << AR_PHY_ANT_SW_RX_PROT_S)); - REG_CLR_BIT(ah, AR_PHY_CCK_DETECT, AR_FAST_DIV_ENABLE); - REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV, - AR_BTCOEX_WL_LNADIV_FORCE_ON); - + if (AR_SREV_9485_11(ah)) { + /* + * Enable LNA diversity. + */ regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL); - regval &= ~(AR_PHY_ANT_DIV_MAIN_LNACONF | - AR_PHY_ANT_DIV_ALT_LNACONF | - AR_PHY_ANT_DIV_MAIN_GAINTB | - AR_PHY_ANT_DIV_ALT_GAINTB); - regval |= (ATH_ANT_DIV_COMB_LNA1 << AR_PHY_ANT_DIV_MAIN_LNACONF_S); - regval |= (ATH_ANT_DIV_COMB_LNA2 << AR_PHY_ANT_DIV_ALT_LNACONF_S); + regval &= ~AR_PHY_ANT_DIV_LNADIV; + regval |= ((ant_div_ctl1 >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S; + if (enable) + regval |= AR_ANT_DIV_ENABLE; + REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval); + + /* + * Enable fast antenna diversity. + */ + regval = REG_READ(ah, AR_PHY_CCK_DETECT); + regval &= ~AR_FAST_DIV_ENABLE; + regval |= ((ant_div_ctl1 >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S; + if (enable) + regval |= AR_FAST_DIV_ENABLE; + + REG_WRITE(ah, AR_PHY_CCK_DETECT, regval); + + if (pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) { + regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL); + regval &= (~(AR_PHY_ANT_DIV_MAIN_LNACONF | + AR_PHY_ANT_DIV_ALT_LNACONF | + AR_PHY_ANT_DIV_ALT_GAINTB | + AR_PHY_ANT_DIV_MAIN_GAINTB)); + /* + * Set MAIN to LNA1 and ALT to LNA2 at the + * beginning. 
+ */ + regval |= (ATH_ANT_DIV_COMB_LNA1 << + AR_PHY_ANT_DIV_MAIN_LNACONF_S); + regval |= (ATH_ANT_DIV_COMB_LNA2 << + AR_PHY_ANT_DIV_ALT_LNACONF_S); + REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval); + } + } else if (AR_SREV_9565(ah)) { + if (enable) { + REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL, + (1 << AR_PHY_ANT_SW_RX_PROT_S)); + if (ah->curchan && IS_CHAN_2GHZ(ah->curchan)) + REG_SET_BIT(ah, AR_PHY_RESTART, + AR_PHY_RESTART_ENABLE_DIV_M2FLAG); + REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV, + AR_BTCOEX_WL_LNADIV_FORCE_ON); + } else { + REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_ENABLE); + REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL, + (1 << AR_PHY_ANT_SW_RX_PROT_S)); + REG_CLR_BIT(ah, AR_PHY_CCK_DETECT, AR_FAST_DIV_ENABLE); + REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV, + AR_BTCOEX_WL_LNADIV_FORCE_ON); + + regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL); + regval &= ~(AR_PHY_ANT_DIV_MAIN_LNACONF | + AR_PHY_ANT_DIV_ALT_LNACONF | + AR_PHY_ANT_DIV_MAIN_GAINTB | + AR_PHY_ANT_DIV_ALT_GAINTB); + regval |= (ATH_ANT_DIV_COMB_LNA1 << + AR_PHY_ANT_DIV_MAIN_LNACONF_S); + regval |= (ATH_ANT_DIV_COMB_LNA2 << + AR_PHY_ANT_DIV_ALT_LNACONF_S); + REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval); + } } } -- cgit v0.10.2 From 0cbe8c87fe02513e6de981c18502d220ed4afe41 Mon Sep 17 00:00:00 2001 From: Alexey Khoroshilov Date: Mon, 5 Aug 2013 07:18:28 +0400 Subject: hostap: do not return positive number on failure path in prism2_open() prism2_open() as an .ndo_open handler should not return positive numbers in case of failure, but it does return 1 in a couple of places. Found by Linux Driver Verification project (linuxtesting.org). Signed-off-by: Alexey Khoroshilov Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c index 15f0fad..e4f56ad 100644 --- a/drivers/net/wireless/hostap/hostap_main.c +++ b/drivers/net/wireless/hostap/hostap_main.c @@ -667,7 +667,7 @@ static int prism2_open(struct net_device *dev) if (local->no_pri) { printk(KERN_DEBUG "%s: could not set interface UP - no PRI " "f/w\n", dev->name); - return 1; + return -ENODEV; } if ((local->func->card_present && !local->func->card_present(local)) || @@ -682,7 +682,7 @@ static int prism2_open(struct net_device *dev) printk(KERN_WARNING "%s: could not enable MAC port\n", dev->name); prism2_close(dev); - return 1; + return -ENODEV; } if (!local->dev_enabled) prism2_callback(local, PRISM2_CALLBACK_ENABLE); -- cgit v0.10.2 From 047dc3ac884f3285bc123461e5e3c38f44fb32a6 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Mon, 5 Aug 2013 15:08:26 +0530 Subject: ath9k: Remove ath_ant_comb_update() During a HW reset, the diversity config is programmed in the set_board_values() eeprom callback, there is no need to do it again by calling ath_ant_comb_update(). Fixed antenna support is not fully handled for 1-stream cards, it can be done later. Signed-off-by: Sujith Manoharan Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c index 85391e6..dd1cc73 100644 --- a/drivers/net/wireless/ath/ath9k/antenna.c +++ b/drivers/net/wireless/ath/ath9k/antenna.c @@ -867,26 +867,3 @@ div_comb_done: antcomb->main_recv_cnt = 0; antcomb->alt_recv_cnt = 0; } - -void ath_ant_comb_update(struct ath_softc *sc) -{ - struct ath_hw *ah = sc->sc_ah; - struct ath_common *common = ath9k_hw_common(ah); - struct ath_hw_antcomb_conf div_ant_conf; - u8 lna_conf; - - ath9k_hw_antdiv_comb_conf_get(ah, &div_ant_conf); - - if (sc->ant_rx == 1) - lna_conf = ATH_ANT_DIV_COMB_LNA1; - else - lna_conf = ATH_ANT_DIV_COMB_LNA2; - - div_ant_conf.main_lna_conf = lna_conf; - div_ant_conf.alt_lna_conf = lna_conf; - - ath9k_hw_antdiv_comb_conf_set(ah, &div_ant_conf); - - if (common->bt_ant_diversity) - ath9k_hw_set_bt_ant_diversity(ah, true); -} diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 18898ab..c497d52 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -628,7 +628,6 @@ struct ath_ant_comb { }; void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs); -void ath_ant_comb_update(struct ath_softc *sc); /********************/ /* Main driver core */ diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index daa316b..a155190 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -307,7 +307,7 @@ static ssize_t write_file_bt_ant_diversity(struct file *file, common->bt_ant_diversity = !!bt_ant_diversity; ath9k_ps_wakeup(sc); - ath_ant_comb_update(sc); + ath9k_hw_set_bt_ant_diversity(sc->sc_ah, common->bt_ant_diversity); ath_dbg(common, CONFIG, "Enable WLAN/BT RX Antenna diversity: %d\n", common->bt_ant_diversity); ath9k_ps_restore(sc); diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 1adb803..afeab3c 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -238,9 +238,6 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start) ath_restart_work(sc); } - if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3) - ath_ant_comb_update(sc); - ieee80211_wake_queues(sc->hw); return true; -- cgit v0.10.2 From b21e3e14aee617ef9f8d4b52a61913d6a359edf0 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Mon, 5 Aug 2013 15:08:27 +0530 Subject: ath9k: Fix antenna control init for AR9485 Signed-off-by: Sujith Manoharan Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index 178052f..abdc7ee 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -3614,6 +3614,11 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz) } value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz); + if (AR_SREV_9485(ah) && common->bt_ant_diversity) { + regval &= ~AR_SWITCH_TABLE_COM2_ALL; + regval |= ah->config.ant_ctrl_comm2g_switch_enable; + + } REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2, AR_SWITCH_TABLE_COM2_ALL, value); if ((AR_SREV_9462(ah)) && (ah->rxchainmask == 0x2)) { @@ -3645,6 +3650,9 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz) regval &= (~AR_PHY_ANT_DIV_LNADIV); regval |= ((value >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S; + if (AR_SREV_9485(ah) && common->bt_ant_diversity) + regval |= AR_ANT_DIV_ENABLE; + if (AR_SREV_9565(ah)) { if (common->bt_ant_diversity) { regval |= (1 << AR_PHY_ANT_SW_RX_PROT_S); @@ -3656,10 +3664,14 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz) REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval); - /*enable fast_div */ + /* enable fast_div */ regval = REG_READ(ah, AR_PHY_CCK_DETECT); regval &= (~AR_FAST_DIV_ENABLE); regval |= ((value >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S; + + if (AR_SREV_9485(ah) && common->bt_ant_diversity) + regval |= AR_FAST_DIV_ENABLE; + REG_WRITE(ah, AR_PHY_CCK_DETECT, regval); if (pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) { -- cgit v0.10.2 From 2952f6ef5195ea76279f7370f0a6571867e54438 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Mon, 5 Aug 2013 15:08:28 +0530 Subject: ath9k: Add more PCI IDs for WB225 cards Signed-off-by: Sujith Manoharan Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index 30652b4..76e8c35 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -72,6 +72,78 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = { 0xE075), .driver_data = ATH9K_PCI_CUS230 | ATH9K_PCI_BT_ANT_DIV }, + /* WB225 */ + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_ATHEROS, + 0x3119), + .driver_data = ATH9K_PCI_BT_ANT_DIV }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_ATHEROS, + 0x3122), + .driver_data = ATH9K_PCI_BT_ANT_DIV }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + 0x185F, /* WNC */ + 0x3119), + .driver_data = ATH9K_PCI_BT_ANT_DIV }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + 0x185F, /* WNC */ + 0x3027), + .driver_data = ATH9K_PCI_BT_ANT_DIV }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_SAMSUNG, + 0x4105), + .driver_data = ATH9K_PCI_BT_ANT_DIV }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_SAMSUNG, + 0x4106), + .driver_data = ATH9K_PCI_BT_ANT_DIV }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_SAMSUNG, + 0x410D), + .driver_data = ATH9K_PCI_BT_ANT_DIV }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_SAMSUNG, + 0x410E), + .driver_data = ATH9K_PCI_BT_ANT_DIV }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_SAMSUNG, + 0x410F), + .driver_data = ATH9K_PCI_BT_ANT_DIV }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_SAMSUNG, + 0xC706), + .driver_data = ATH9K_PCI_BT_ANT_DIV }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_SAMSUNG, + 0xC680), + .driver_data = ATH9K_PCI_BT_ANT_DIV }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_SAMSUNG, + 0xC708), + .driver_data = ATH9K_PCI_BT_ANT_DIV }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_LENOVO, + 0x3218), + .driver_data = ATH9K_PCI_BT_ANT_DIV }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_LENOVO, + 0x3219), + .driver_data = ATH9K_PCI_BT_ANT_DIV }, + { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */ { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */ -- cgit v0.10.2 From def4460cdb171d64309c927907c18c4efcb0204b Mon Sep 17 00:00:00 2001 From: Veaceslav Falico Date: Sat, 3 Aug 2013 03:50:35 +0200 Subject: bonding: call slave_last_rx() only once per slave Simple cleanup to not call slave_last_rx() on every time function. It won't give any measurable boost - but looks cleaner and easier to understand. There are no time-consuming functions in between these calls, so it's safe to call it in the beginning only once. CC: Jay Vosburgh CC: Andy Gospodarek Signed-off-by: Veaceslav Falico Signed-off-by: David S. 
Miller diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 476df7d..cc13bfe 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2764,7 +2764,7 @@ re_arm: */ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) { - unsigned long trans_start; + unsigned long trans_start, last_rx; struct slave *slave; int extra_ticks; int commit = 0; @@ -2778,11 +2778,12 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) bond_for_each_slave(bond, slave) { slave->new_link = BOND_LINK_NOCHANGE; + last_rx = slave_last_rx(bond, slave); if (slave->link != BOND_LINK_UP) { if (time_in_range(jiffies, - slave_last_rx(bond, slave) - delta_in_ticks, - slave_last_rx(bond, slave) + delta_in_ticks + extra_ticks)) { + last_rx - delta_in_ticks, + last_rx + delta_in_ticks + extra_ticks)) { slave->new_link = BOND_LINK_UP; commit++; @@ -2817,8 +2818,8 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) if (!bond_is_active_slave(slave) && !bond->current_arp_slave && !time_in_range(jiffies, - slave_last_rx(bond, slave) - delta_in_ticks, - slave_last_rx(bond, slave) + 3 * delta_in_ticks + extra_ticks)) { + last_rx - delta_in_ticks, + last_rx + 3 * delta_in_ticks + extra_ticks)) { slave->new_link = BOND_LINK_DOWN; commit++; @@ -2836,8 +2837,8 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) trans_start - delta_in_ticks, trans_start + 2 * delta_in_ticks + extra_ticks) || !time_in_range(jiffies, - slave_last_rx(bond, slave) - delta_in_ticks, - slave_last_rx(bond, slave) + 2 * delta_in_ticks + extra_ticks))) { + last_rx - delta_in_ticks, + last_rx + 2 * delta_in_ticks + extra_ticks))) { slave->new_link = BOND_LINK_DOWN; commit++; -- cgit v0.10.2 From e7f63f1dc4bd643d9249c653e60c530d4a438147 Mon Sep 17 00:00:00 2001 From: Veaceslav Falico Date: Sat, 3 Aug 2013 03:50:36 +0200 Subject: bonding: add bond_time_in_interval() and use it for time comparison Currently we use a lot of time comparison math for arp_interval comparisons, which are sometimes quite hard to read and understand. All the time comparisons have one pattern: (time - arp_interval_jiffies) <= jiffies <= (time + mod * arp_interval_jiffies + arp_interval_jiffies/2) Introduce a new helper - bond_time_in_interval(), which will do the math in one place and, thus, will clean up the logical code. This helper introduces a bit of overhead (by always calculating the jiffies from arp_interval), however it's really not visible, considering that functions using it usually run once in arp_interval milliseconds. There are several lines slightly over 80 chars, however breaking them would result in more hard-to-read code than several character after the 80 mark. CC: Jay Vosburgh CC: Andy Gospodarek Signed-off-by: Veaceslav Falico Signed-off-by: David S. Miller diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index cc13bfe..d58237b 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2635,6 +2635,20 @@ out_unlock: return RX_HANDLER_ANOTHER; } +/* function to verify if we're in the arp_interval timeslice, returns true if + * (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval + + * arp_interval/2) . the arp_interval/2 is needed for really fast networks. 
+ */ +static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act, + int mod) +{ + int delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval); + + return time_in_range(jiffies, + last_act - delta_in_ticks, + last_act + mod * delta_in_ticks + delta_in_ticks/2); +} + /* * this function is called regularly to monitor each slave's link * ensuring that traffic is being sent and received when arp monitoring @@ -2648,13 +2662,9 @@ void bond_loadbalance_arp_mon(struct work_struct *work) arp_work.work); struct slave *slave, *oldcurrent; int do_failover = 0; - int delta_in_ticks, extra_ticks; read_lock(&bond->lock); - delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval); - extra_ticks = delta_in_ticks / 2; - if (list_empty(&bond->slave_list)) goto re_arm; @@ -2671,12 +2681,8 @@ void bond_loadbalance_arp_mon(struct work_struct *work) unsigned long trans_start = dev_trans_start(slave->dev); if (slave->link != BOND_LINK_UP) { - if (time_in_range(jiffies, - trans_start - delta_in_ticks, - trans_start + delta_in_ticks + extra_ticks) && - time_in_range(jiffies, - slave->dev->last_rx - delta_in_ticks, - slave->dev->last_rx + delta_in_ticks + extra_ticks)) { + if (bond_time_in_interval(bond, trans_start, 1) && + bond_time_in_interval(bond, slave->dev->last_rx, 1)) { slave->link = BOND_LINK_UP; bond_set_active_slave(slave); @@ -2704,12 +2710,8 @@ void bond_loadbalance_arp_mon(struct work_struct *work) * when the source ip is 0, so don't take the link down * if we don't know our ip yet */ - if (!time_in_range(jiffies, - trans_start - delta_in_ticks, - trans_start + 2 * delta_in_ticks + extra_ticks) || - !time_in_range(jiffies, - slave->dev->last_rx - delta_in_ticks, - slave->dev->last_rx + 2 * delta_in_ticks + extra_ticks)) { + if (!bond_time_in_interval(bond, trans_start, 2) || + !bond_time_in_interval(bond, slave->dev->last_rx, 2)) { slave->link = BOND_LINK_DOWN; bond_set_backup_slave(slave); @@ -2749,7 +2751,8 @@ void bond_loadbalance_arp_mon(struct work_struct *work) re_arm: if (bond->params.arp_interval) - queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); + queue_delayed_work(bond->wq, &bond->arp_work, + msecs_to_jiffies(bond->params.arp_interval)); read_unlock(&bond->lock); } @@ -2762,33 +2765,21 @@ re_arm: * * Called with bond->lock held for read. */ -static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) +static int bond_ab_arp_inspect(struct bonding *bond) { unsigned long trans_start, last_rx; struct slave *slave; - int extra_ticks; int commit = 0; - /* All the time comparisons below need some extra time. Otherwise, on - * fast networks the ARP probe/reply may arrive within the same jiffy - * as it was sent. Then, the next time the ARP monitor is run, one - * arp_interval will already have passed in the comparisons. - */ - extra_ticks = delta_in_ticks / 2; - bond_for_each_slave(bond, slave) { slave->new_link = BOND_LINK_NOCHANGE; last_rx = slave_last_rx(bond, slave); if (slave->link != BOND_LINK_UP) { - if (time_in_range(jiffies, - last_rx - delta_in_ticks, - last_rx + delta_in_ticks + extra_ticks)) { - + if (bond_time_in_interval(bond, last_rx, 1)) { slave->new_link = BOND_LINK_UP; commit++; } - continue; } @@ -2797,9 +2788,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) * active. This avoids bouncing, as the last receive * times need a full ARP monitor cycle to be updated. 
*/ - if (time_in_range(jiffies, - slave->jiffies - delta_in_ticks, - slave->jiffies + 2 * delta_in_ticks + extra_ticks)) + if (bond_time_in_interval(bond, slave->jiffies, 2)) continue; /* @@ -2817,10 +2806,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) */ if (!bond_is_active_slave(slave) && !bond->current_arp_slave && - !time_in_range(jiffies, - last_rx - delta_in_ticks, - last_rx + 3 * delta_in_ticks + extra_ticks)) { - + !bond_time_in_interval(bond, last_rx, 3)) { slave->new_link = BOND_LINK_DOWN; commit++; } @@ -2833,13 +2819,8 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) */ trans_start = dev_trans_start(slave->dev); if (bond_is_active_slave(slave) && - (!time_in_range(jiffies, - trans_start - delta_in_ticks, - trans_start + 2 * delta_in_ticks + extra_ticks) || - !time_in_range(jiffies, - last_rx - delta_in_ticks, - last_rx + 2 * delta_in_ticks + extra_ticks))) { - + (!bond_time_in_interval(bond, trans_start, 2) || + !bond_time_in_interval(bond, last_rx, 2))) { slave->new_link = BOND_LINK_DOWN; commit++; } @@ -2854,7 +2835,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) * * Called with RTNL and bond->lock for read. */ -static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks) +static void bond_ab_arp_commit(struct bonding *bond) { unsigned long trans_start; struct slave *slave; @@ -2866,11 +2847,9 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks) case BOND_LINK_UP: trans_start = dev_trans_start(slave->dev); - if ((!bond->curr_active_slave && - time_in_range(jiffies, - trans_start - delta_in_ticks, - trans_start + delta_in_ticks + delta_in_ticks / 2)) || - bond->curr_active_slave != slave) { + if (bond->curr_active_slave != slave || + (!bond->curr_active_slave && + bond_time_in_interval(bond, trans_start, 1))) { slave->link = BOND_LINK_UP; if (bond->current_arp_slave) { bond_set_slave_inactive_flags( @@ -3011,7 +2990,7 @@ void bond_activebackup_arp_mon(struct work_struct *work) should_notify_peers = bond_should_notify_peers(bond); - if (bond_ab_arp_inspect(bond, delta_in_ticks)) { + if (bond_ab_arp_inspect(bond)) { read_unlock(&bond->lock); /* Race avoidance with bond_close flush of workqueue */ @@ -3024,7 +3003,7 @@ void bond_activebackup_arp_mon(struct work_struct *work) read_lock(&bond->lock); - bond_ab_arp_commit(bond, delta_in_ticks); + bond_ab_arp_commit(bond); read_unlock(&bond->lock); rtnl_unlock(); -- cgit v0.10.2 From 5a139296f872d438f17f7219411321b603c1622b Mon Sep 17 00:00:00 2001 From: "fan.du" Date: Mon, 5 Aug 2013 17:13:03 +0800 Subject: sctp: Pack dst_cookie into 1st cacheline hole for 64bit host As dst_cookie is used in fast path sctp_transport_dst_check. 
Before: struct sctp_transport { struct list_head transports; /* 0 16 */ atomic_t refcnt; /* 16 4 */ __u32 dead:1; /* 20:31 4 */ __u32 rto_pending:1; /* 20:30 4 */ __u32 hb_sent:1; /* 20:29 4 */ __u32 pmtu_pending:1; /* 20:28 4 */ /* XXX 28 bits hole, try to pack */ __u32 sack_generation; /* 24 4 */ /* XXX 4 bytes hole, try to pack */ struct flowi fl; /* 32 64 */ /* --- cacheline 1 boundary (64 bytes) was 32 bytes ago --- */ union sctp_addr ipaddr; /* 96 28 */ After: struct sctp_transport { struct list_head transports; /* 0 16 */ atomic_t refcnt; /* 16 4 */ __u32 dead:1; /* 20:31 4 */ __u32 rto_pending:1; /* 20:30 4 */ __u32 hb_sent:1; /* 20:29 4 */ __u32 pmtu_pending:1; /* 20:28 4 */ /* XXX 28 bits hole, try to pack */ __u32 sack_generation; /* 24 4 */ u32 dst_cookie; /* 28 4 */ struct flowi fl; /* 32 64 */ /* --- cacheline 1 boundary (64 bytes) was 32 bytes ago --- */ union sctp_addr ipaddr; /* 96 28 */ Signed-off-by: Fan Du Acked-by: Neil Horman Signed-off-by: David S. Miller diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index c0f4e29..d9c93a7 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -782,6 +782,7 @@ struct sctp_transport { /* Has this transport moved the ctsn since we last sacked */ __u32 sack_generation; + u32 dst_cookie; struct flowi fl; @@ -946,7 +947,6 @@ struct sctp_transport { __u64 hb_nonce; struct rcu_head rcu; - u32 dst_cookie; }; struct sctp_transport *sctp_transport_new(struct net *, const union sctp_addr *, -- cgit v0.10.2 From c193f3655d9b916a3b8cb685c6b4327397507b2f Mon Sep 17 00:00:00 2001 From: Mugunthan V N Date: Mon, 5 Aug 2013 17:30:05 +0530 Subject: drivers: net: cpsw: Add support for new CPSW IP version The new IP version has a minor changes and the offsets are same as the previous version, so adding new IP version support in the driver. Signed-off-by: Mugunthan V N Reviewed-by: Felipe Balbi Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index db6933e..cd95671 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -82,6 +82,7 @@ do { \ #define CPSW_VERSION_1 0x19010a #define CPSW_VERSION_2 0x19010c +#define CPSW_VERSION_3 0x19010f #define HOST_PORT_NUM 0 #define SLIVER_SIZE 0x40 @@ -991,6 +992,7 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP); break; case CPSW_VERSION_2: + case CPSW_VERSION_3: slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP); break; } @@ -2015,6 +2017,7 @@ static int cpsw_probe(struct platform_device *pdev) dma_params.desc_mem_phys = 0; break; case CPSW_VERSION_2: + case CPSW_VERSION_3: priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET; priv->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET; priv->hw_stats = ss_regs + CPSW2_HW_STATS; -- cgit v0.10.2 From 7864a1adf7291993d74923fdd0a45459ce9da27e Mon Sep 17 00:00:00 2001 From: Veaceslav Falico Date: Mon, 5 Aug 2013 14:56:06 +0200 Subject: bonding: remove locking from bond_set_rx_mode() We're already protected by RTNL lock, so nothing can happen to bond/its slaves, and thus the locking is useless here (both bond->lock and bond->curr_active_slave). Also, add ASSERT_RTNL() both to bond_set_rx_mode() and bond_hw_addr_swap() to catch possible uses of it without RTNL locking. This patch also saves us from a lockdep false-positive in bond_set_rx_mode() vs bond_hw_addr_swap(). 
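[Editor's note, not part of the patch: a minimal sketch of the locking rule this change relies on. Any helper that is only ever invoked under RTNL can drop its own read locks and instead document/enforce the requirement with ASSERT_RTNL(). The helper name below is hypothetical and the snippet assumes the bonding driver's usual internal headers; it only illustrates the pattern the diff below applies to bond_set_rx_mode().]

	static void rtnl_only_rx_mode_helper(struct bonding *bond)
	{
		struct slave *slave;

		ASSERT_RTNL();	/* warn loudly if a caller forgot rtnl_lock() */

		/* RTNL already serializes slave list changes, so no bond->lock */
		bond_for_each_slave(bond, slave)
			dev_mc_sync_multiple(slave->dev, bond->dev);
	}
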
CC: Jay Vosburgh CC: Andy Gospodarek CC: Nikolay Aleksandrov Signed-off-by: Veaceslav Falico Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index d58237b..5697043 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -776,6 +776,8 @@ static void bond_hw_addr_flush(struct net_device *bond_dev, static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active, struct slave *old_active) { + ASSERT_RTNL(); + if (old_active) { if (bond->dev->flags & IFF_PROMISC) dev_set_promiscuity(old_active->dev, -1); @@ -3551,24 +3553,20 @@ static void bond_set_rx_mode(struct net_device *bond_dev) struct bonding *bond = netdev_priv(bond_dev); struct slave *slave; - read_lock(&bond->lock); + ASSERT_RTNL(); if (USES_PRIMARY(bond->params.mode)) { - read_lock(&bond->curr_slave_lock); - slave = bond->curr_active_slave; + slave = rtnl_dereference(bond->curr_active_slave); if (slave) { dev_uc_sync(slave->dev, bond_dev); dev_mc_sync(slave->dev, bond_dev); } - read_unlock(&bond->curr_slave_lock); } else { bond_for_each_slave(bond, slave) { dev_uc_sync_multiple(slave->dev, bond_dev); dev_mc_sync_multiple(slave->dev, bond_dev); } } - - read_unlock(&bond->lock); } static int bond_neigh_init(struct neighbour *n) -- cgit v0.10.2 From 02d88fb4fb6a580b0224b3bf07bbfd9d4806f6fb Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Mon, 5 Aug 2013 17:20:09 +0300 Subject: gianfar: Fix Tx csum generation errata handling Both [eTSEC76] and [eTSEC12] errata relate to Tx checksum generation (for some MPC83xx and MCP8548 older revisions). They require the same workaround: manual checksum computation and insertion, and disabling the H/W Tx csum acceleration feature (per frame) through Tx FCB (Frame Control Block) csum offload settings. The workaround for [eTSEC76] needs to be fixed because it currently fails to disable H/W Tx csum insertion via FCB. This patch fixes it and provides a common workaround implementation for both Tx csum errata. Signed-off-by: Claudiu Manoil Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index dbb34f7..352035e 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -2051,6 +2051,24 @@ static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base, return skip_txbd(bdp, 1, base, ring_size); } +/* eTSEC12: csum generation not supported for some fcb offsets */ +static inline bool gfar_csum_errata_12(struct gfar_private *priv, + unsigned long fcb_addr) +{ + return (gfar_has_errata(priv, GFAR_ERRATA_12) && + (fcb_addr % 0x20) > 0x18); +} + +/* eTSEC76: csum generation for frames larger than 2500 may + * cause excess delays before start of transmission + */ +static inline bool gfar_csum_errata_76(struct gfar_private *priv, + unsigned int len) +{ + return (gfar_has_errata(priv, GFAR_ERRATA_76) && + (len > 2500)); +} + /* This is called by the kernel when a frame is ready for transmission. * It is pointed to by the dev->hard_start_xmit function pointer */ @@ -2068,19 +2086,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) unsigned long flags; unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN; - /* TOE=1 frames larger than 2500 bytes may see excess delays - * before start of transmission. 
- */ - if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) && - skb->ip_summed == CHECKSUM_PARTIAL && - skb->len > 2500)) { - int ret; - - ret = skb_checksum_help(skb); - if (ret) - return ret; - } - rq = skb->queue_mapping; tx_queue = priv->tx_queue[rq]; txq = netdev_get_tx_queue(dev, rq); @@ -2187,14 +2192,15 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Set up checksumming */ if (CHECKSUM_PARTIAL == skb->ip_summed) { fcb = gfar_add_fcb(skb); - /* as specified by errata */ - if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12) && - ((unsigned long)fcb % 0x20) > 0x18)) { + lstatus |= BD_LFLAG(TXBD_TOE); + gfar_tx_checksum(skb, fcb, fcb_length); + + if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) || + unlikely(gfar_csum_errata_76(priv, skb->len))) { __skb_pull(skb, GMAC_FCB_LEN); skb_checksum_help(skb); - } else { - lstatus |= BD_LFLAG(TXBD_TOE); - gfar_tx_checksum(skb, fcb, fcb_length); + lstatus &= ~(BD_LFLAG(TXBD_TOE)); + fcb = NULL; } } -- cgit v0.10.2 From 0d0cffdcc6fc4f57080b9a443401e8dad5ca77f8 Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Mon, 5 Aug 2013 17:20:10 +0300 Subject: gianfar: Cleanup TxFCB insertion on xmit Cleanup gfar_start_xmit()'s fast path by factoring out "redundant" FCB insertion code (repeated gfar_add_fcb() calls and related) and by reducing the number of if() clauses (i.e. if(fcb) checks). Improve maintainability (e.g. there's less code and easier to read) also by introducing do_csum and do_vlan to mark the other 2 Tx TOE functionalities, following the same model as do_tstamp. fcb_len may also be 0 now, to mark that Tx FCB insertion conditions (do_csum, do_vlan, do_tstamp) have not been met. Signed-off-by: Claudiu Manoil Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 352035e..3cb4647 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -2081,10 +2081,11 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) struct txfcb *fcb = NULL; struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL; u32 lstatus; - int i, rq = 0, do_tstamp = 0; + int i, rq = 0; + int do_tstamp, do_csum, do_vlan; u32 bufaddr; unsigned long flags; - unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN; + unsigned int nr_frags, nr_txbds, length, fcb_len = 0; rq = skb->queue_mapping; tx_queue = priv->tx_queue[rq]; @@ -2092,21 +2093,23 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) base = tx_queue->tx_bd_base; regs = tx_queue->grp->regs; + do_csum = (CHECKSUM_PARTIAL == skb->ip_summed); + do_vlan = vlan_tx_tag_present(skb); + do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + priv->hwts_tx_en; + + if (do_csum || do_vlan) + fcb_len = GMAC_FCB_LEN; + /* check if time stamp should be generated */ - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && - priv->hwts_tx_en)) { - do_tstamp = 1; - fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN; - } + if (unlikely(do_tstamp)) + fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN; /* make space for additional header when fcb is needed */ - if (((skb->ip_summed == CHECKSUM_PARTIAL) || - vlan_tx_tag_present(skb) || - unlikely(do_tstamp)) && - (skb_headroom(skb) < fcb_length)) { + if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) { struct sk_buff *skb_new; - skb_new = skb_realloc_headroom(skb, fcb_length); + skb_new = skb_realloc_headroom(skb, fcb_len); if (!skb_new) { dev->stats.tx_errors++; 
kfree_skb(skb); @@ -2189,37 +2192,38 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) memset(skb->data, 0, GMAC_TXPAL_LEN); } - /* Set up checksumming */ - if (CHECKSUM_PARTIAL == skb->ip_summed) { + /* Add TxFCB if required */ + if (fcb_len) { fcb = gfar_add_fcb(skb); lstatus |= BD_LFLAG(TXBD_TOE); - gfar_tx_checksum(skb, fcb, fcb_length); + } + + /* Set up checksumming */ + if (do_csum) { + gfar_tx_checksum(skb, fcb, fcb_len); if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) || unlikely(gfar_csum_errata_76(priv, skb->len))) { __skb_pull(skb, GMAC_FCB_LEN); skb_checksum_help(skb); - lstatus &= ~(BD_LFLAG(TXBD_TOE)); - fcb = NULL; + if (do_vlan || do_tstamp) { + /* put back a new fcb for vlan/tstamp TOE */ + fcb = gfar_add_fcb(skb); + } else { + /* Tx TOE not used */ + lstatus &= ~(BD_LFLAG(TXBD_TOE)); + fcb = NULL; + } } } - if (vlan_tx_tag_present(skb)) { - if (unlikely(NULL == fcb)) { - fcb = gfar_add_fcb(skb); - lstatus |= BD_LFLAG(TXBD_TOE); - } - + if (do_vlan) gfar_tx_vlan(skb, fcb); - } /* Setup tx hardware time stamping if requested */ if (unlikely(do_tstamp)) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - if (fcb == NULL) - fcb = gfar_add_fcb(skb); fcb->ptp = 1; - lstatus |= BD_LFLAG(TXBD_TOE); } txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data, @@ -2231,9 +2235,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) * the full frame length. */ if (unlikely(do_tstamp)) { - txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length; + txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len; txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) | - (skb_headlen(skb) - fcb_length); + (skb_headlen(skb) - fcb_len); lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN; } else { lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); -- cgit v0.10.2 From 0659eea912cf2ce65298784b918904ba0f91f5e1 Mon Sep 17 00:00:00 2001 From: Fan Du Date: Thu, 1 Aug 2013 18:08:36 +0800 Subject: xfrm: Delete hold_timer when destroy policy Both policy timer and hold_timer need to be deleted when destroy policy Signed-off-by: Fan Du Signed-off-by: Steffen Klassert diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index d8da6b8..f7078eb 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -308,7 +308,7 @@ void xfrm_policy_destroy(struct xfrm_policy *policy) { BUG_ON(!policy->walk.dead); - if (del_timer(&policy->timer)) + if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer)) BUG(); security_xfrm_policy_free(policy->security); -- cgit v0.10.2 From 70e3ca79cd0e98d4191aa10b3393c23a9b84a2e8 Mon Sep 17 00:00:00 2001 From: Dragos Foianu Date: Thu, 11 Jul 2013 09:45:42 +0300 Subject: ipvs: fixed spacing at for statements found using checkpatch.pl Signed-off-by: Dragos Foianu Signed-off-by: Simon Horman diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c index 3cd85b2..5199448 100644 --- a/net/netfilter/ipvs/ip_vs_lblcr.c +++ b/net/netfilter/ipvs/ip_vs_lblcr.c @@ -414,7 +414,7 @@ static void ip_vs_lblcr_flush(struct ip_vs_service *svc) spin_lock_bh(&svc->sched_lock); tbl->dead = 1; - for (i=0; ibucket[i], list) { ip_vs_lblcr_free(en); } @@ -440,7 +440,7 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc) struct ip_vs_lblcr_entry *en; struct hlist_node *next; - for (i=0, j=tbl->rover; irover; i < IP_VS_LBLCR_TAB_SIZE; i++) { j = (j + 1) & IP_VS_LBLCR_TAB_MASK; spin_lock(&svc->sched_lock); @@ -495,7 +495,7 @@ static void ip_vs_lblcr_check_expire(unsigned long 
data) if (goal > tbl->max_size/2) goal = tbl->max_size/2; - for (i=0, j=tbl->rover; irover; i < IP_VS_LBLCR_TAB_SIZE; i++) { j = (j + 1) & IP_VS_LBLCR_TAB_MASK; spin_lock(&svc->sched_lock); @@ -536,7 +536,7 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc) /* * Initialize the hash buckets */ - for (i=0; ibucket[i]); } tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16; -- cgit v0.10.2 From 3848ab66827bddd7eb760c58dec909f0af1c00a5 Mon Sep 17 00:00:00 2001 From: Matti Gottlieb Date: Tue, 30 Jul 2013 15:29:37 +0300 Subject: iwlwifi: mvm: Add RX statistics debugfs entry Add a debugfs entry for the RX statistics received from the firmware. Signed-off-by: Matti Gottlieb Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c index 15d52ba..2ee256d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c @@ -592,6 +592,142 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf, } #undef BT_MBOX_PRINT +#define PRINT_STATS_LE32(_str, _val) \ + pos += scnprintf(buf + pos, bufsz - pos, \ + fmt_table, _str, \ + le32_to_cpu(_val)) + +static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file, + char __user *user_buf, size_t count, + loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + static const char *fmt_table = "\t%-30s %10u\n"; + static const char *fmt_header = "%-32s\n"; + int pos = 0; + char *buf; + int ret; + int bufsz = sizeof(struct mvm_statistics_rx_phy) * 20 + + sizeof(struct mvm_statistics_rx_non_phy) * 10 + + sizeof(struct mvm_statistics_rx_ht_phy) * 10 + 200; + struct mvm_statistics_rx_phy *ofdm; + struct mvm_statistics_rx_phy *cck; + struct mvm_statistics_rx_non_phy *general; + struct mvm_statistics_rx_ht_phy *ht; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + mutex_lock(&mvm->mutex); + + ofdm = &mvm->rx_stats.ofdm; + cck = &mvm->rx_stats.cck; + general = &mvm->rx_stats.general; + ht = &mvm->rx_stats.ofdm_ht; + + pos += scnprintf(buf + pos, bufsz - pos, fmt_header, + "Statistics_Rx - OFDM"); + PRINT_STATS_LE32("ina_cnt", ofdm->ina_cnt); + PRINT_STATS_LE32("fina_cnt", ofdm->fina_cnt); + PRINT_STATS_LE32("plcp_err", ofdm->plcp_err); + PRINT_STATS_LE32("crc32_err", ofdm->crc32_err); + PRINT_STATS_LE32("overrun_err", ofdm->overrun_err); + PRINT_STATS_LE32("early_overrun_err", ofdm->early_overrun_err); + PRINT_STATS_LE32("crc32_good", ofdm->crc32_good); + PRINT_STATS_LE32("false_alarm_cnt", ofdm->false_alarm_cnt); + PRINT_STATS_LE32("fina_sync_err_cnt", ofdm->fina_sync_err_cnt); + PRINT_STATS_LE32("sfd_timeout", ofdm->sfd_timeout); + PRINT_STATS_LE32("fina_timeout", ofdm->fina_timeout); + PRINT_STATS_LE32("unresponded_rts", ofdm->unresponded_rts); + PRINT_STATS_LE32("rxe_frame_lmt_overrun", + ofdm->rxe_frame_limit_overrun); + PRINT_STATS_LE32("sent_ack_cnt", ofdm->sent_ack_cnt); + PRINT_STATS_LE32("sent_cts_cnt", ofdm->sent_cts_cnt); + PRINT_STATS_LE32("sent_ba_rsp_cnt", ofdm->sent_ba_rsp_cnt); + PRINT_STATS_LE32("dsp_self_kill", ofdm->dsp_self_kill); + PRINT_STATS_LE32("mh_format_err", ofdm->mh_format_err); + PRINT_STATS_LE32("re_acq_main_rssi_sum", ofdm->re_acq_main_rssi_sum); + PRINT_STATS_LE32("reserved", ofdm->reserved); + + pos += scnprintf(buf + pos, bufsz - pos, fmt_header, + "Statistics_Rx - CCK"); + PRINT_STATS_LE32("ina_cnt", cck->ina_cnt); + PRINT_STATS_LE32("fina_cnt", cck->fina_cnt); + PRINT_STATS_LE32("plcp_err", cck->plcp_err); + PRINT_STATS_LE32("crc32_err", 
cck->crc32_err); + PRINT_STATS_LE32("overrun_err", cck->overrun_err); + PRINT_STATS_LE32("early_overrun_err", cck->early_overrun_err); + PRINT_STATS_LE32("crc32_good", cck->crc32_good); + PRINT_STATS_LE32("false_alarm_cnt", cck->false_alarm_cnt); + PRINT_STATS_LE32("fina_sync_err_cnt", cck->fina_sync_err_cnt); + PRINT_STATS_LE32("sfd_timeout", cck->sfd_timeout); + PRINT_STATS_LE32("fina_timeout", cck->fina_timeout); + PRINT_STATS_LE32("unresponded_rts", cck->unresponded_rts); + PRINT_STATS_LE32("rxe_frame_lmt_overrun", + cck->rxe_frame_limit_overrun); + PRINT_STATS_LE32("sent_ack_cnt", cck->sent_ack_cnt); + PRINT_STATS_LE32("sent_cts_cnt", cck->sent_cts_cnt); + PRINT_STATS_LE32("sent_ba_rsp_cnt", cck->sent_ba_rsp_cnt); + PRINT_STATS_LE32("dsp_self_kill", cck->dsp_self_kill); + PRINT_STATS_LE32("mh_format_err", cck->mh_format_err); + PRINT_STATS_LE32("re_acq_main_rssi_sum", cck->re_acq_main_rssi_sum); + PRINT_STATS_LE32("reserved", cck->reserved); + + pos += scnprintf(buf + pos, bufsz - pos, fmt_header, + "Statistics_Rx - GENERAL"); + PRINT_STATS_LE32("bogus_cts", general->bogus_cts); + PRINT_STATS_LE32("bogus_ack", general->bogus_ack); + PRINT_STATS_LE32("non_bssid_frames", general->non_bssid_frames); + PRINT_STATS_LE32("filtered_frames", general->filtered_frames); + PRINT_STATS_LE32("non_channel_beacons", general->non_channel_beacons); + PRINT_STATS_LE32("channel_beacons", general->channel_beacons); + PRINT_STATS_LE32("num_missed_bcon", general->num_missed_bcon); + PRINT_STATS_LE32("adc_rx_saturation_time", + general->adc_rx_saturation_time); + PRINT_STATS_LE32("ina_detection_search_time", + general->ina_detection_search_time); + PRINT_STATS_LE32("beacon_silence_rssi_a", + general->beacon_silence_rssi_a); + PRINT_STATS_LE32("beacon_silence_rssi_b", + general->beacon_silence_rssi_b); + PRINT_STATS_LE32("beacon_silence_rssi_c", + general->beacon_silence_rssi_c); + PRINT_STATS_LE32("interference_data_flag", + general->interference_data_flag); + PRINT_STATS_LE32("channel_load", general->channel_load); + PRINT_STATS_LE32("dsp_false_alarms", general->dsp_false_alarms); + PRINT_STATS_LE32("beacon_rssi_a", general->beacon_rssi_a); + PRINT_STATS_LE32("beacon_rssi_b", general->beacon_rssi_b); + PRINT_STATS_LE32("beacon_rssi_c", general->beacon_rssi_c); + PRINT_STATS_LE32("beacon_energy_a", general->beacon_energy_a); + PRINT_STATS_LE32("beacon_energy_b", general->beacon_energy_b); + PRINT_STATS_LE32("beacon_energy_c", general->beacon_energy_c); + PRINT_STATS_LE32("num_bt_kills", general->num_bt_kills); + PRINT_STATS_LE32("directed_data_mpdu", general->directed_data_mpdu); + + pos += scnprintf(buf + pos, bufsz - pos, fmt_header, + "Statistics_Rx - HT"); + PRINT_STATS_LE32("plcp_err", ht->plcp_err); + PRINT_STATS_LE32("overrun_err", ht->overrun_err); + PRINT_STATS_LE32("early_overrun_err", ht->early_overrun_err); + PRINT_STATS_LE32("crc32_good", ht->crc32_good); + PRINT_STATS_LE32("crc32_err", ht->crc32_err); + PRINT_STATS_LE32("mh_format_err", ht->mh_format_err); + PRINT_STATS_LE32("agg_crc32_good", ht->agg_crc32_good); + PRINT_STATS_LE32("agg_mpdu_cnt", ht->agg_mpdu_cnt); + PRINT_STATS_LE32("agg_cnt", ht->agg_cnt); + PRINT_STATS_LE32("unsupport_mcs", ht->unsupport_mcs); + + mutex_unlock(&mvm->mutex); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + + return ret; +} +#undef PRINT_STAT_LE32 + static ssize_t iwl_dbgfs_fw_restart_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) @@ -924,6 +1060,7 @@ MVM_DEBUGFS_READ_FILE_OPS(stations); 
MVM_DEBUGFS_READ_FILE_OPS(bt_notif); MVM_DEBUGFS_WRITE_FILE_OPS(power_down_allow); MVM_DEBUGFS_WRITE_FILE_OPS(power_down_d3_allow); +MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats); MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart); #ifdef CONFIG_PM_SLEEP MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram); @@ -947,6 +1084,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR); MVM_DEBUGFS_ADD_FILE(power_down_allow, mvm->debugfs_dir, S_IWUSR); MVM_DEBUGFS_ADD_FILE(power_down_d3_allow, mvm->debugfs_dir, S_IWUSR); + MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR); MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR); #ifdef CONFIG_PM_SLEEP MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR); diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index 5dc5dfd..76f6a1f 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -421,6 +421,8 @@ struct iwl_mvm { struct iwl_notif_wait_data notif_wait; + struct mvm_statistics_rx rx_stats; + unsigned long transport_queue_stop; u8 queue_to_mac80211[IWL_MAX_HW_QUEUES]; atomic_t queue_stop_count[IWL_MAX_HW_QUEUES]; diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index 65e5fb8..2fcc8ef 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -440,6 +440,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, else mvm->pm_ops = &pm_legacy_ops; + memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx)); + return op_mode; out_unregister: diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c index 6fd7fae..5057fd3 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/iwlwifi/mvm/rx.c @@ -378,6 +378,18 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, return 0; } +static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm, + struct iwl_notif_statistics *stats) +{ + /* + * NOTE FW aggregates the statistics - BUT the statistics are cleared + * when the driver issues REPLY_STATISTICS_CMD 0x9c with CLEAR_STATS + * bit set. + */ + lockdep_assert_held(&mvm->mutex); + memcpy(&mvm->rx_stats, &stats->rx, sizeof(struct mvm_statistics_rx)); +} + /* * iwl_mvm_rx_statistics - STATISTICS_NOTIFICATION handler * @@ -396,6 +408,7 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm, mvm->temperature = le32_to_cpu(common->temperature); iwl_mvm_tt_handler(mvm); } + iwl_mvm_update_rx_statistics(mvm, stats); return 0; } -- cgit v0.10.2 From eeb89ab1f081a62bee1695d82442996b99a5b2b3 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 30 Jul 2013 20:10:46 +0200 Subject: iwlwifi: pcie: fix resume when no opmode is present If no opmode is present during suspend/resume (i.e. if the iwldvm or iwlmvm isn't loaded) the driver crashes during resume, trying to call the rfkill notification. Avoid that, and also don't enable the rfkill interrupt in this case (to avoid crashing trying to handle the interrupt later.) 
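[Editor's note, not part of the patch: an illustrative sketch of the guard pattern the fix uses. A transport callback that can run before any op-mode (iwldvm/iwlmvm) has been bound must bail out early rather than notify a NULL trans->op_mode. The function name and body below are hypothetical; the real change is the two-line guard shown in the diff that follows.]

	static int example_pcie_resume(struct iwl_trans *trans)
	{
		/* No op-mode loaded yet: nothing to notify about rfkill,
		 * and enabling the rfkill interrupt now would only invite
		 * callbacks into a NULL trans->op_mode later.
		 */
		if (!trans->op_mode)
			return 0;

		/* ...normal resume work (enable rfkill int, read state)... */
		return 0;
	}
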
Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index cec0c89..601ee59 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -825,6 +825,9 @@ static int iwl_trans_pcie_resume(struct iwl_trans *trans) { bool hw_rfkill; + if (!trans->op_mode) + return 0; + iwl_enable_rfkill_int(trans); hw_rfkill = iwl_is_rfkill_set(trans); -- cgit v0.10.2 From 977342bc3d672f6d11386c19ccee3f8b2400bcde Mon Sep 17 00:00:00 2001 From: Eyal Shapira Date: Sun, 28 Jul 2013 23:02:44 +0000 Subject: iwlwifi: mvm: remove traffic load monitoring in rs The traffic load monitoring isn't used anymore to decide whether a Tx aggregation on a specific TID should be started. No point in collecting these statistics. Remove the relevant code. Signed-off-by: Eyal Shapira Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index e4a7b387..940f39d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@ -260,82 +260,6 @@ static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type) return (ant_type & valid_antenna) == ant_type; } -/* - * removes the old data from the statistics. All data that is older than - * TID_MAX_TIME_DIFF, will be deleted. - */ -static void rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time) -{ - /* The oldest age we want to keep */ - u32 oldest_time = curr_time - TID_MAX_TIME_DIFF; - - while (tl->queue_count && - (tl->time_stamp < oldest_time)) { - tl->total -= tl->packet_count[tl->head]; - tl->packet_count[tl->head] = 0; - tl->time_stamp += TID_QUEUE_CELL_SPACING; - tl->queue_count--; - tl->head++; - if (tl->head >= TID_QUEUE_MAX_SIZE) - tl->head = 0; - } -} - -/* - * increment traffic load value for tid and also remove - * any old values if passed the certain time period - */ -static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data, - struct ieee80211_hdr *hdr) -{ - u32 curr_time = jiffies_to_msecs(jiffies); - u32 time_diff; - s32 index; - struct iwl_traffic_load *tl = NULL; - u8 tid; - - if (ieee80211_is_data_qos(hdr->frame_control)) { - u8 *qc = ieee80211_get_qos_ctl(hdr); - tid = qc[0] & 0xf; - } else { - return IWL_MAX_TID_COUNT; - } - - if (unlikely(tid >= IWL_MAX_TID_COUNT)) - return IWL_MAX_TID_COUNT; - - tl = &lq_data->load[tid]; - - curr_time -= curr_time % TID_ROUND_VALUE; - - /* Happens only for the first packet. 
Initialize the data */ - if (!(tl->queue_count)) { - tl->total = 1; - tl->time_stamp = curr_time; - tl->queue_count = 1; - tl->head = 0; - tl->packet_count[0] = 1; - return IWL_MAX_TID_COUNT; - } - - time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time); - index = time_diff / TID_QUEUE_CELL_SPACING; - - /* The history is too long: remove data that is older than */ - /* TID_MAX_TIME_DIFF */ - if (index >= TID_QUEUE_MAX_SIZE) - rs_tl_rm_old_stats(tl, curr_time); - - index = (tl->head + index) % TID_QUEUE_MAX_SIZE; - tl->packet_count[index] = tl->packet_count[index] + 1; - tl->total = tl->total + 1; - - if ((index + 1) > tl->queue_count) - tl->queue_count = index + 1; - - return tid; -} - #ifdef CONFIG_MAC80211_DEBUGFS /** * Program the device to use fixed rate for frame transmit @@ -361,45 +285,11 @@ static void rs_program_fix_rate(struct iwl_mvm *mvm, } #endif -/* - get the traffic load value for tid -*/ -static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid) -{ - u32 curr_time = jiffies_to_msecs(jiffies); - u32 time_diff; - s32 index; - struct iwl_traffic_load *tl = NULL; - - if (tid >= IWL_MAX_TID_COUNT) - return 0; - - tl = &(lq_data->load[tid]); - - curr_time -= curr_time % TID_ROUND_VALUE; - - if (!(tl->queue_count)) - return 0; - - time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time); - index = time_diff / TID_QUEUE_CELL_SPACING; - - /* The history is too long: remove data that is older than */ - /* TID_MAX_TIME_DIFF */ - if (index >= TID_QUEUE_MAX_SIZE) - rs_tl_rm_old_stats(tl, curr_time); - - return tl->total; -} - static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm, struct iwl_lq_sta *lq_data, u8 tid, struct ieee80211_sta *sta) { int ret = -EAGAIN; - u32 load; - - load = rs_tl_get_load(lq_data, tid); /* * Don't create TX aggregation sessions when in high @@ -2086,6 +1976,22 @@ static void rs_update_rate_tbl(struct iwl_mvm *mvm, iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false); } +static u8 rs_get_tid(struct iwl_lq_sta *lq_data, + struct ieee80211_hdr *hdr) +{ + u8 tid = IWL_MAX_TID_COUNT; + + if (ieee80211_is_data_qos(hdr->frame_control)) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & 0xf; + } + + if (unlikely(tid > IWL_MAX_TID_COUNT)) + tid = IWL_MAX_TID_COUNT; + + return tid; +} + /* * Do rate scaling and search for new modulation mode. 
*/ @@ -2129,7 +2035,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm, lq_sta->supp_rates = sta->supp_rates[lq_sta->band]; - tid = rs_tl_add_packet(lq_sta, hdr); + tid = rs_get_tid(lq_sta, hdr); if ((tid != IWL_MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) { tid_data = &sta_priv->tid_data[tid]; diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h index 29d699a..4a99a4d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/iwlwifi/mvm/rs.h @@ -290,17 +290,6 @@ struct iwl_scale_tbl_info { struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */ }; -struct iwl_traffic_load { - unsigned long time_stamp; /* age of the oldest statistics */ - u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time - * slice */ - u32 total; /* total num of packets during the - * last TID_MAX_TIME_DIFF */ - u8 queue_count; /* number of queues that has - * been used since the last cleanup */ - u8 head; /* start of the circular buffer */ -}; - /** * struct iwl_lq_sta -- driver's rate scaling private structure * @@ -337,7 +326,6 @@ struct iwl_lq_sta { struct iwl_lq_cmd lq; struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */ - struct iwl_traffic_load load[IWL_MAX_TID_COUNT]; u8 tx_agg_tid_en; #ifdef CONFIG_MAC80211_DEBUGFS struct dentry *rs_sta_dbgfs_scale_table_file; -- cgit v0.10.2 From 17dbe56445328fcc275f2ca318c06e55d7d403df Mon Sep 17 00:00:00 2001 From: Avri Altman Date: Thu, 1 Aug 2013 07:19:27 +0300 Subject: iwlwifi: mvm: fix signal reporting for < 3 antennas When fewer than three antennas are connected (as is always the case for the current devices), the signal strength reporting was wrong; fix it. Signed-off-by: Avri Altman Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c index 5057fd3..ee6547d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/iwlwifi/mvm/rx.c @@ -167,6 +167,9 @@ static void iwl_mvm_calc_rssi(struct iwl_mvm *mvm, /* * iwl_mvm_get_signal_strength - use new rx PHY INFO API + * values are reported by the fw as positive values - need to negate + * to obtain their dBM. Account for missing antennas by replacing 0 + * values by -256dBm: practically 0 power and a non-feasible 8 bit value. */ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, struct iwl_rx_phy_info *phy_info, @@ -177,12 +180,15 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]); - energy_a = -((val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >> - IWL_RX_INFO_ENERGY_ANT_A_POS); - energy_b = -((val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >> - IWL_RX_INFO_ENERGY_ANT_B_POS); - energy_c = -((val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >> - IWL_RX_INFO_ENERGY_ANT_C_POS); + energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >> + IWL_RX_INFO_ENERGY_ANT_A_POS; + energy_a = energy_a ? -energy_a : -256; + energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >> + IWL_RX_INFO_ENERGY_ANT_B_POS; + energy_b = energy_b ? -energy_b : -256; + energy_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >> + IWL_RX_INFO_ENERGY_ANT_C_POS; + energy_c = energy_c ? 
-energy_c : -256; max_energy = max(energy_a, energy_b); max_energy = max(max_energy, energy_c); -- cgit v0.10.2 From f5e45f2d960cea226b9d173e232c94c4c388c1bc Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 25 Jul 2013 22:36:27 +0200 Subject: iwlwifi: mvm: small cleanups in quota management code Use a C99 initializer to clear the command and move the lockdep assertion before the restart check. Since this causes problems with the BUILD_BUG_ON() with some compilers, change that a bit. Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c index 1897387..5c6ae16 100644 --- a/drivers/net/wireless/iwlwifi/mvm/quota.c +++ b/drivers/net/wireless/iwlwifi/mvm/quota.c @@ -131,7 +131,7 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac, int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif) { - struct iwl_time_quota_cmd cmd; + struct iwl_time_quota_cmd cmd = {}; int i, idx, ret, num_active_macs, quota, quota_rem; struct iwl_mvm_quota_iterator_data data = { .n_interfaces = {}, @@ -139,15 +139,14 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif) .new_vif = newvif, }; + lockdep_assert_held(&mvm->mutex); + /* update all upon completion */ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) return 0; - BUILD_BUG_ON(data.colors[MAX_BINDINGS - 1] != -1); - - lockdep_assert_held(&mvm->mutex); - - memset(&cmd, 0, sizeof(cmd)); + /* iterator data above must match */ + BUILD_BUG_ON(MAX_BINDINGS != 4); ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, -- cgit v0.10.2 From e89044d75e50e047bd34db81ac8d18500626f40f Mon Sep 17 00:00:00 2001 From: Eliad Peller Date: Tue, 16 Jul 2013 17:33:26 +0300 Subject: iwlwifi: fix some documentation typos Fix some typos. Signed-off-by: Eliad Peller Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h index 98c7aa7..976448a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h +++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h @@ -93,7 +93,7 @@ struct iwl_cfg; * 1) The driver layer (iwl-drv.c) chooses the op_mode based on the * capabilities advertized by the fw file (in TLV format). * 2) The driver layer starts the op_mode (ops->start) - * 3) The op_mode registers registers mac80211 + * 3) The op_mode registers mac80211 * 4) The op_mode is governed by mac80211 * 5) The driver layer stops the op_mode */ @@ -112,7 +112,7 @@ struct iwl_cfg; * @stop: stop the op_mode. Must free all the memory allocated. * May sleep * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the - * HCMD the this Rx responds to. + * HCMD this Rx responds to. * This callback may sleep, it is called from a threaded IRQ handler. * @queue_full: notifies that a HW queue is full. * Must be atomic and called with BH disabled. diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h index 8d91422c..523dc7c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/iwlwifi/iwl-trans.h @@ -180,7 +180,7 @@ struct iwl_rx_packet { * enum CMD_MODE - how to send the host commands ? * * @CMD_SYNC: The caller will be stalled until the fw responds to the command - * @CMD_ASYNC: Return right away and don't want for the response + * @CMD_ASYNC: Return right away and don't wait for the response * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the * response. 
The caller needs to call iwl_free_resp when done. */ @@ -218,7 +218,7 @@ struct iwl_device_cmd { * * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's * ring. The transport layer doesn't map the command's buffer to DMA, but - * rather copies it to an previously allocated DMA buffer. This flag tells + * rather copies it to a previously allocated DMA buffer. This flag tells * the transport layer not to copy the command, but to map the existing * buffer (that is passed in) instead. This saves the memcpy and allows * commands that are bigger than the fixed buffer to be submitted. @@ -243,7 +243,7 @@ enum iwl_hcmd_dataflag { * @handler_status: return value of the handler of the command * (put in setup_rx_handlers) - valid for SYNC mode only * @flags: can be CMD_* - * @len: array of the lenths of the chunks in data + * @len: array of the lengths of the chunks in data * @dataflags: IWL_HCMD_DFL_* * @id: id of the host command */ diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 76cdba6..deebe8f 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -503,7 +503,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); - /* Allocate resources for the MAC context, and add it the the fw */ + /* Allocate resources for the MAC context, and add it to the fw */ ret = iwl_mvm_mac_ctxt_init(mvm, vif); if (ret) goto out_unlock; diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 7694b8f..f68ef9d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -173,7 +173,7 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, } /* - * for data packets, rate info comes from the table inside he fw. This + * for data packets, rate info comes from the table inside the fw. This * table is controlled by LINK_QUALITY commands */ diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 134f7a1..f29cd5c 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -1153,10 +1153,10 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id) /* * iwl_pcie_enqueue_hcmd - enqueue a uCode command * @priv: device private data point - * @cmd: a point to the ucode command structure + * @cmd: a pointer to the ucode command structure * - * The function returns < 0 values to indicate the operation is - * failed. On success, it turns the index (> 0) of command in the + * The function returns < 0 values to indicate the operation + * failed. On success, it returns the index (>= 0) of command in the * command queue. */ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, -- cgit v0.10.2 From 1092b9bc0cdc1d8d456d2b9b1f857b06ef5523da Mon Sep 17 00:00:00 2001 From: Eliad Peller Date: Tue, 16 Jul 2013 17:53:43 +0300 Subject: iwlwifi: pcie: some little cleanups do some little cleanups in tx.c - eliminate duplicate checks, use locally cached fields and predefined macros. 
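(As a hedged aside, not part of the patch itself: the ALIGN() macro that this cleanup switches to is just the conventional power-of-two round-up. A minimal sketch of the identity, using a hypothetically named macro so as not to restate the kernel's own definition:)

        /* Illustration only: round x up to a multiple of a (a must be a power of two). */
        #define EXAMPLE_ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

        /* For len = 13:
         *   (len + 3) & ~3        == 16
         *   EXAMPLE_ALIGN(len, 4) == 16
         * i.e. ALIGN(len, 4) expresses the same rounding more readably.
         */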
Signed-off-by: Eliad Peller Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index f29cd5c..011167c 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -451,13 +451,10 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, return -EINVAL; } - if (WARN_ON(addr & ~DMA_BIT_MASK(36))) + if (WARN(addr & ~IWL_TX_DMA_MASK, + "Unaligned address = %llx\n", (unsigned long long)addr)) return -EINVAL; - if (unlikely(addr & ~IWL_TX_DMA_MASK)) - IWL_ERR(trans, "Unaligned address = %llx\n", - (unsigned long long)addr); - iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len); return 0; @@ -1631,7 +1628,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, * Check here that the packets are in the right place on the ring. */ wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); - WARN_ONCE(trans_pcie->txq[txq_id].ampdu && + WARN_ONCE(txq->ampdu && (wifi_seq & 0xff) != q->write_ptr, "Q: %d WiFi Seq %d tfdNum %d", txq_id, wifi_seq, q->write_ptr); @@ -1663,7 +1660,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, */ len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) + hdr_len - IWL_HCMD_SCRATCHBUF_SIZE; - tb1_len = (len + 3) & ~3; + tb1_len = ALIGN(len, 4); /* Tell NIC about any 2-byte padding after MAC header */ if (tb1_len != len) -- cgit v0.10.2 From 5fdda0476c28ff2097ae37f43f40557b91eb0160 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 2 Aug 2013 10:51:22 +0200 Subject: iwlwifi: remove transport suspend/resume indirection There's no reason for the transport to call itself through indirect function pointers, inline the (little) code there is and remove the indirection completely. Reviewed-by: Gregory Greenman Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h index 523dc7c..dd57a36 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/iwlwifi/iwl-trans.h @@ -396,8 +396,6 @@ struct iwl_trans; * May sleep * @dbgfs_register: add the dbgfs files under this directory. Files will be * automatically deleted. 
- * @suspend: stop the device unless WoWLAN is configured - * @resume: resume activity of the device * @write8: write a u8 to a register at offset ofs from the BAR * @write32: write a u32 to a register at offset ofs from the BAR * @read32: read a u32 register at offset ofs from the BAR @@ -443,10 +441,7 @@ struct iwl_trans_ops { int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir); int (*wait_tx_queue_empty)(struct iwl_trans *trans); -#ifdef CONFIG_PM_SLEEP - int (*suspend)(struct iwl_trans *trans); - int (*resume)(struct iwl_trans *trans); -#endif + void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val); void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val); u32 (*read32)(struct iwl_trans *trans, u32 ofs); @@ -700,18 +695,6 @@ static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans, return trans->ops->dbgfs_register(trans, dir); } -#ifdef CONFIG_PM_SLEEP -static inline int iwl_trans_suspend(struct iwl_trans *trans) -{ - return trans->ops->suspend(trans); -} - -static inline int iwl_trans_resume(struct iwl_trans *trans) -{ - return trans->ops->resume(trans); -} -#endif - static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val) { trans->ops->write8(trans, ofs, val); diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index db6be24..9ec8dfe 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -367,21 +367,19 @@ static void iwl_pci_remove(struct pci_dev *pdev) static int iwl_pci_suspend(struct device *device) { - struct pci_dev *pdev = to_pci_dev(device); - struct iwl_trans *iwl_trans = pci_get_drvdata(pdev); - /* Before you put code here, think about WoWLAN. You cannot check here * whether WoWLAN is enabled or not, and your code will run even if * WoWLAN is enabled - don't kill the NIC, someone may need it in Sx. */ - return iwl_trans_suspend(iwl_trans); + return 0; } static int iwl_pci_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); - struct iwl_trans *iwl_trans = pci_get_drvdata(pdev); + struct iwl_trans *trans = pci_get_drvdata(pdev); + bool hw_rfkill; /* Before you put code here, think about WoWLAN. 
You cannot check here * whether WoWLAN is enabled or not, and your code will run even if @@ -394,7 +392,15 @@ static int iwl_pci_resume(struct device *device) */ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); - return iwl_trans_resume(iwl_trans); + if (!trans->op_mode) + return 0; + + iwl_enable_rfkill_int(trans); + + hw_rfkill = iwl_is_rfkill_set(trans); + iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); + + return 0; } static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume); diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 601ee59..d88c0c7 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -815,28 +815,6 @@ static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state) clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status); } -#ifdef CONFIG_PM_SLEEP -static int iwl_trans_pcie_suspend(struct iwl_trans *trans) -{ - return 0; -} - -static int iwl_trans_pcie_resume(struct iwl_trans *trans) -{ - bool hw_rfkill; - - if (!trans->op_mode) - return 0; - - iwl_enable_rfkill_int(trans); - - hw_rfkill = iwl_is_rfkill_set(trans); - iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); - - return 0; -} -#endif /* CONFIG_PM_SLEEP */ - static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent, unsigned long *flags) { @@ -1378,10 +1356,6 @@ static const struct iwl_trans_ops trans_ops_pcie = { .wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty, -#ifdef CONFIG_PM_SLEEP - .suspend = iwl_trans_pcie_suspend, - .resume = iwl_trans_pcie_resume, -#endif .write8 = iwl_trans_pcie_write8, .write32 = iwl_trans_pcie_write32, .read32 = iwl_trans_pcie_read32, -- cgit v0.10.2 From bd3351ba3e5e3e5b635532fab63da502129141f9 Mon Sep 17 00:00:00 2001 From: Eliad Peller Date: Tue, 16 Jul 2013 17:50:17 +0300 Subject: iwlwifi: mvm: add some missing cleanups in iwl_mvm_mac_add_interface iwl_mvm_mac_add_interface() didn't clean up beacon filtering configuration and ctxt allocation in some error cases. 
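To make the ordering point concrete, here is a minimal, hypothetical sketch of the goto-unwind idiom the fix restores (illustration only; the acquire_*/release_* helpers are made up, this is not driver code). Each successful setup step gets a matching unwind label in reverse order, so a failure releases exactly what was already acquired:

        /* Hypothetical helpers, declared only to keep the sketch self-contained. */
        static int acquire_a(void), acquire_b(void), acquire_c(void);
        static void release_a(void), release_b(void);

        static int example_setup(void)
        {
                int ret;

                ret = acquire_a();
                if (ret)
                        return ret;

                ret = acquire_b();
                if (ret)
                        goto out_release_a;

                ret = acquire_c();
                if (ret)
                        goto out_release_b;

                return 0;

        out_release_b:
                release_b();
        out_release_a:
                release_a();
                return ret;
        }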
Signed-off-by: Eliad Peller Reviewed-by: Alexander Bondar Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index deebe8f..05daa90 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -564,6 +564,10 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, iwl_mvm_power_update_mode(mvm, vif); /* beacon filtering */ + ret = iwl_mvm_disable_beacon_filter(mvm, vif); + if (ret) + goto out_remove_mac; + if (!mvm->bf_allowed_vif && vif->type == NL80211_IFTYPE_STATION && !vif->p2p && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED){ @@ -571,10 +575,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; } - ret = iwl_mvm_disable_beacon_filter(mvm, vif); - if (ret) - goto out_release; - /* * P2P_DEVICE interface does not have a channel context assigned to it, * so a dedicated PHY context is allocated to it and the corresponding @@ -585,7 +585,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); if (!mvmvif->phy_ctxt) { ret = -ENOSPC; - goto out_remove_mac; + goto out_free_bf; } iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); @@ -609,6 +609,11 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, iwl_mvm_binding_remove_vif(mvm, vif); out_unref_phy: iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); + out_free_bf: + if (mvm->bf_allowed_vif == mvmvif) { + mvm->bf_allowed_vif = NULL; + vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER; + } out_remove_mac: mvmvif->phy_ctxt = NULL; iwl_mvm_mac_ctxt_remove(mvm, vif); -- cgit v0.10.2 From ef4394b9477f9078d78ae3e8359eae094c9b19d8 Mon Sep 17 00:00:00 2001 From: Eliad Peller Date: Tue, 16 Jul 2013 18:07:20 +0300 Subject: iwlwifi: mvm: use designated initialization for some arrays rs_ht_to_legacy and ant_toggle_lookup are arrays that represent a simple state machine. Initialize them explicitly with designated initializers to make them clearer and less error-prone.
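As a brief illustration of designated array initialization in general (a hedged sketch with hypothetical names, not code from rs.c): indexing each element by its enumerator makes the table self-describing and immune to reordering of the enum.

        enum example_state { STATE_IDLE, STATE_RUN, STATE_STOP, NUM_EXAMPLE_STATES };

        static const u8 example_next_state[NUM_EXAMPLE_STATES] = {
                [STATE_IDLE] = STATE_RUN,       /* index named explicitly        */
                [STATE_RUN]  = STATE_STOP,      /* order in the table irrelevant */
                [STATE_STOP] = STATE_IDLE,
        };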
Signed-off-by: Eliad Peller Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index 940f39d..d680c89 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@ -56,24 +56,30 @@ #define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ) static u8 rs_ht_to_legacy[] = { - IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX, - IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX, - IWL_RATE_6M_INDEX, - IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX, - IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX, - IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX, - IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX + [IWL_RATE_1M_INDEX] = IWL_RATE_6M_INDEX, + [IWL_RATE_2M_INDEX] = IWL_RATE_6M_INDEX, + [IWL_RATE_5M_INDEX] = IWL_RATE_6M_INDEX, + [IWL_RATE_11M_INDEX] = IWL_RATE_6M_INDEX, + [IWL_RATE_6M_INDEX] = IWL_RATE_6M_INDEX, + [IWL_RATE_9M_INDEX] = IWL_RATE_6M_INDEX, + [IWL_RATE_12M_INDEX] = IWL_RATE_9M_INDEX, + [IWL_RATE_18M_INDEX] = IWL_RATE_12M_INDEX, + [IWL_RATE_24M_INDEX] = IWL_RATE_18M_INDEX, + [IWL_RATE_36M_INDEX] = IWL_RATE_24M_INDEX, + [IWL_RATE_48M_INDEX] = IWL_RATE_36M_INDEX, + [IWL_RATE_54M_INDEX] = IWL_RATE_48M_INDEX, + [IWL_RATE_60M_INDEX] = IWL_RATE_54M_INDEX, }; static const u8 ant_toggle_lookup[] = { - /*ANT_NONE -> */ ANT_NONE, - /*ANT_A -> */ ANT_B, - /*ANT_B -> */ ANT_C, - /*ANT_AB -> */ ANT_BC, - /*ANT_C -> */ ANT_A, - /*ANT_AC -> */ ANT_AB, - /*ANT_BC -> */ ANT_AC, - /*ANT_ABC -> */ ANT_ABC, + [ANT_NONE] = ANT_NONE, + [ANT_A] = ANT_B, + [ANT_B] = ANT_C, + [ANT_AB] = ANT_BC, + [ANT_C] = ANT_A, + [ANT_AC] = ANT_AB, + [ANT_BC] = ANT_AC, + [ANT_ABC] = ANT_ABC, }; #define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \ -- cgit v0.10.2 From e7f1935c11269bc53cd52425b1025657adddb839 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 25 Jul 2013 21:45:17 +0200 Subject: wireless: make TU conversion macros available A few places in the code (mac80211 and iwlmvm) use the same TU_TO_JIFFIES() macro and could use TU_TO_EXP_TIME() that mac80211 has. Make these available to everyone and use them. 
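A short usage illustration (a hedged fragment that assumes only the two macros added below, where 1 TU == 1024 microseconds): a 100 TU duration converts to the jiffies equivalent of 102400 us, and TU_TO_EXP_TIME() yields the corresponding absolute expiry time.

        /* Illustration only; relies on the TU_TO_JIFFIES()/TU_TO_EXP_TIME()
         * macros made public by this patch.
         */
        u32 duration_tu = 100;                                  /* 100 TU == 102400 us */
        unsigned long end = TU_TO_EXP_TIME(duration_tu);        /* jiffies + TU_TO_JIFFIES(100) */

        /* ... later, e.g. in a notification handler ... */
        if (time_after(end, jiffies))
                pr_debug("time event still has %u ms left\n",
                         jiffies_to_msecs(end - jiffies));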
Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c index ad9bbca..9f10036 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c @@ -73,7 +73,6 @@ #include "iwl-prph.h" /* A TimeUnit is 1024 microsecond */ -#define TU_TO_JIFFIES(_tu) (usecs_to_jiffies((_tu) * 1024)) #define MSEC_TO_TU(_msec) (_msec*1000/1024) /* @@ -191,8 +190,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, iwl_mvm_te_clear_data(mvm, te_data); } else if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_START) { te_data->running = true; - te_data->end_jiffies = jiffies + - TU_TO_JIFFIES(te_data->duration); + te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration); if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status); @@ -329,8 +327,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); if (te_data->running && - time_after(te_data->end_jiffies, - jiffies + TU_TO_JIFFIES(min_duration))) { + time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) { IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n", jiffies_to_msecs(te_data->end_jiffies - jiffies)); return; diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index b3ce299..23a8877 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -2288,4 +2288,8 @@ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim, return !!(tim->virtual_map[index] & mask); } +/* convert time units */ +#define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024)) +#define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x)) + #endif /* LINUX_IEEE80211_H */ diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index e94c840..b618651 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -53,9 +53,6 @@ struct ieee80211_local; * increased memory use (about 2 kB of RAM per entry). */ #define IEEE80211_FRAGMENT_MAX 4 -#define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024)) -#define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x)) - /* power level hasn't been configured (or set to automatic) */ #define IEEE80211_UNSET_POWER_LEVEL INT_MIN -- cgit v0.10.2 From 54e35cc52346149a7bce8a2f622e215ed17bb56d Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 6 Aug 2013 11:20:23 +0200 Subject: ipvs: ip_vs_sh: ip_vs_sh_get_port: check skb_header_pointer for NULL skb_header_pointer could return NULL, so check for it as we do it everywhere else in ipvs code. This fixes a coverity warning. 
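For illustration, a minimal sketch of the skb_header_pointer() NULL-check pattern in general (the surrounding skb and thoff variables are assumed context; this is not the ipvs code itself): the helper copies the requested header into a caller-supplied buffer when the skb data is non-linear, and returns NULL if the requested bytes are not present in the packet at all, so the result must always be checked before dereferencing.

        struct tcphdr _tcph, *th;
        __be16 port = 0;

        th = skb_header_pointer(skb, thoff, sizeof(_tcph), &_tcph);    /* skb, thoff assumed */
        if (th == NULL)
                return 0;       /* truncated packet: bail out, as the ipvs fix does */

        port = th->source;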
Signed-off-by: Daniel Borkmann Acked-by: Julian Anastasov Signed-off-by: Simon Horman diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c index f16c027..3588fae 100644 --- a/net/netfilter/ipvs/ip_vs_sh.c +++ b/net/netfilter/ipvs/ip_vs_sh.c @@ -269,14 +269,20 @@ ip_vs_sh_get_port(const struct sk_buff *skb, struct ip_vs_iphdr *iph) switch (iph->protocol) { case IPPROTO_TCP: th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph); + if (unlikely(th == NULL)) + return 0; port = th->source; break; case IPPROTO_UDP: uh = skb_header_pointer(skb, iph->len, sizeof(_udph), &_udph); + if (unlikely(uh == NULL)) + return 0; port = uh->source; break; case IPPROTO_SCTP: sh = skb_header_pointer(skb, iph->len, sizeof(_sctph), &_sctph); + if (unlikely(sh == NULL)) + return 0; port = sh->source; break; default: -- cgit v0.10.2 From 8cc8df906f953ae0cfe785720989928021d7fe2d Mon Sep 17 00:00:00 2001 From: Bartosz Markowski Date: Fri, 2 Aug 2013 09:58:49 +0200 Subject: ath10k: add SoC power save option to PCI features map Unify the PCI options location. By default the SoC PS option is disabled to boost the performance and due to poor stability on early HW revisions. In future we can remove the module parameter and turn on/off the PS for given hardware. This change also makes the pci module parameter for SoC PS static. Signed-off-by: Bartosz Markowski Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 503e380..e2f9ef5 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -32,7 +32,7 @@ #include "ce.h" #include "pci.h" -unsigned int ath10k_target_ps; +static unsigned int ath10k_target_ps; module_param(ath10k_target_ps, uint, 0644); MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option"); @@ -1759,6 +1759,7 @@ static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar) static int ath10k_pci_hif_power_up(struct ath10k *ar) { + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ret; ret = ath10k_pci_start_intr(ar); @@ -1783,13 +1784,9 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar) if (ret) goto err_irq; - if (ath10k_target_ps) { - ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save enabled\n"); - } else { + if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) /* Force AWAKE forever */ - ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save disabled\n"); ath10k_do_pci_wake(ar); - } ret = ath10k_pci_ce_init(ar); if (ret) @@ -1810,7 +1807,7 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar) err_ce: ath10k_pci_ce_deinit(ar); err_ps: - if (!ath10k_target_ps) + if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) ath10k_do_pci_sleep(ar); err_irq: ath10k_pci_stop_intr(ar); @@ -1820,9 +1817,12 @@ err: static void ath10k_pci_hif_power_down(struct ath10k *ar) { + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + ath10k_pci_stop_intr(ar); + ath10k_pci_ce_deinit(ar); - if (!ath10k_target_ps) + if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) ath10k_do_pci_sleep(ar); } @@ -2272,6 +2272,9 @@ static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci) case ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND: ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n"); break; + case ATH10K_PCI_FEATURE_SOC_POWER_SAVE: + ath10k_dbg(ATH10K_DBG_PCI, "QCA98XX SoC power save enabled\n"); + break; } } } @@ -2307,6 +2310,9 @@ static int ath10k_pci_probe(struct pci_dev *pdev, goto err_ar_pci; } + if (ath10k_target_ps) + 
set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features); + ath10k_pci_dump_features(ar_pci); ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops); diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index d3a2e6c..871bb33 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -153,6 +153,7 @@ struct service_to_pipe { enum ath10k_pci_features { ATH10K_PCI_FEATURE_MSI_X = 0, ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND = 1, + ATH10K_PCI_FEATURE_SOC_POWER_SAVE = 2, /* keep last */ ATH10K_PCI_FEATURE_COUNT @@ -335,20 +336,22 @@ static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset) return ioread32(ar_pci->mem + offset); } -extern unsigned int ath10k_target_ps; - void ath10k_do_pci_wake(struct ath10k *ar); void ath10k_do_pci_sleep(struct ath10k *ar); static inline void ath10k_pci_wake(struct ath10k *ar) { - if (ath10k_target_ps) + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) ath10k_do_pci_wake(ar); } static inline void ath10k_pci_sleep(struct ath10k *ar) { - if (ath10k_target_ps) + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) ath10k_do_pci_sleep(ar); } -- cgit v0.10.2 From f32036e823c45cb4974aab1d0ae66d716bfc9aa6 Mon Sep 17 00:00:00 2001 From: Vasanthakumar Thiagarajan Date: Thu, 20 Jun 2013 12:47:20 +0530 Subject: ath6kl: Fix race in heart beat polling Make sure to cancel the heartbeat timer before freeing wmi to avoid a potential NULL pointer dereference. Signed-off-by: Vasanthakumar Thiagarajan Signed-off-by: Mohammed Shafi Shajakhan Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c index 6a67881..4f316bd 100644 --- a/drivers/net/wireless/ath/ath6kl/init.c +++ b/drivers/net/wireless/ath/ath6kl/init.c @@ -1836,6 +1836,9 @@ void ath6kl_stop_txrx(struct ath6kl *ar) clear_bit(WMI_READY, &ar->flag); + if (ar->fw_recovery.enable) + del_timer_sync(&ar->fw_recovery.hb_timer); + /* * After wmi_shudown all WMI events will be dropped. We * need to cleanup the buffers allocated in AP mode and -- cgit v0.10.2 From 9d0e2f0772d394060bf3b17cd1f3a35574365103 Mon Sep 17 00:00:00 2001 From: Mohammed Shafi Shajakhan Date: Mon, 5 Aug 2013 10:19:22 +0530 Subject: ath6kl: Fix invalid pointer access on fuzz testing with AP mode In our fuzz testing, the reference client corrupts the destination MAC to "00:00:00:00:00:00" in message 2 of the WPA2 handshake. During driver init, the sta_list entries' MAC addresses are all "00:00:00:00:00:00" by default. The driver then returns an invalid pointer (conn) and will crash if the rxtids (aggr_conn) skb queues are accessed, since they would not be initialized. Signed-off-by: Mohammed Shafi Shajakhan Signed-off-by: Kalle Valo diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c index d4fcfca..5839fc2 100644 --- a/drivers/net/wireless/ath/ath6kl/main.c +++ b/drivers/net/wireless/ath/ath6kl/main.c @@ -29,6 +29,9 @@ struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 *node_addr) struct ath6kl_sta *conn = NULL; u8 i, max_conn; + if (is_zero_ether_addr(node_addr)) + return NULL; + max_conn = (vif->nw_type == AP_NETWORK) ?
AP_MAX_NUM_STA : 0; for (i = 0; i < max_conn; i++) { -- cgit v0.10.2 From af83fde7513b6c4acd869ad4fb93893704439470 Mon Sep 17 00:00:00 2001 From: Fan Du Date: Tue, 6 Aug 2013 15:50:56 +0800 Subject: xfrm: Remove redundant address family checking present_and_same_family has already checked address family validity for both SADB_EXT_ADDRESS_SRC and SADB_EXT_ADDRESS_DST at the beginning. Thereafter pfkey_sadb_addr2xfrm_addr doesn't need to do the checking again. Signed-off-by: Fan Du Signed-off-by: Steffen Klassert diff --git a/net/key/af_key.c b/net/key/af_key.c index d49f676..9d58537 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -1196,10 +1196,6 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net, x->props.family = pfkey_sadb_addr2xfrm_addr((struct sadb_address *) ext_hdrs[SADB_EXT_ADDRESS_SRC-1], &x->props.saddr); - if (!x->props.family) { - err = -EAFNOSUPPORT; - goto out; - } pfkey_sadb_addr2xfrm_addr((struct sadb_address *) ext_hdrs[SADB_EXT_ADDRESS_DST-1], &x->id.daddr); @@ -2205,10 +2201,6 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_ sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1]; xp->family = pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.saddr); - if (!xp->family) { - err = -EINVAL; - goto out; - } xp->selector.family = xp->family; xp->selector.prefixlen_s = sa->sadb_address_prefixlen; xp->selector.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto); -- cgit v0.10.2 From b971f847cd754bb75eb65bfcf60c750af67bedd2 Mon Sep 17 00:00:00 2001 From: Vasundhara Volam Date: Tue, 6 Aug 2013 09:27:15 +0530 Subject: be2net: Adding more speeds reported by get_settings The new speeds are supported by variants of the Skyhawk-R chip. Signed-off-by: Vasundhara Volam Signed-off-by: Sathya Perla Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 2bd2d1e..ce15b10 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1464,6 +1464,12 @@ static int be_mac_to_link_speed(int mac_speed) return 1000; case PHY_LINK_SPEED_10GBPS: return 10000; + case PHY_LINK_SPEED_20GBPS: + return 20000; + case PHY_LINK_SPEED_25GBPS: + return 25000; + case PHY_LINK_SPEED_40GBPS: + return 40000; } return 0; } diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index eb541f0..9b91608 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -960,7 +960,10 @@ enum { PHY_LINK_SPEED_10MBPS = 0x1, PHY_LINK_SPEED_100MBPS = 0x2, PHY_LINK_SPEED_1GBPS = 0x3, - PHY_LINK_SPEED_10GBPS = 0x4 + PHY_LINK_SPEED_10GBPS = 0x4, + PHY_LINK_SPEED_20GBPS = 0x5, + PHY_LINK_SPEED_25GBPS = 0x6, + PHY_LINK_SPEED_40GBPS = 0x7 }; struct be_cmd_resp_link_status { -- cgit v0.10.2 From d696b5e26c9963cc68dc559f2fcaa7e0e653aa8a Mon Sep 17 00:00:00 2001 From: Vasundhara Volam Date: Tue, 6 Aug 2013 09:27:16 +0530 Subject: be2net: Do not call get_die_temperature cmd for VF This is a chip wide value and the PFs already report it. Signed-off-by: Vasundhara Volam Signed-off-by: Sathya Perla Signed-off-by: David S.
Miller diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 4c40e3e..82597e5 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4144,7 +4144,8 @@ static void be_worker(struct work_struct *work) be_cmd_get_stats(adapter, &adapter->stats_cmd); } - if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0) + if (be_physfn(adapter) && + MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0) be_cmd_get_die_temperature(adapter); for_all_rx_queues(adapter, rxo, i) { -- cgit v0.10.2 From fd8c7be722e96fa658cd5e7a3305389b923b454d Mon Sep 17 00:00:00 2001 From: Vasundhara Volam Date: Tue, 6 Aug 2013 09:27:17 +0530 Subject: be2net: don't limit max MAC and VLAN counts For SH-R and Lancer-R, use the FW supported values for Max unicast MACs, Max VLANs and Max multicast MACs. Signed-off-by: Vasundhara Volam Signed-off-by: Sathya Perla Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 82597e5..ff2b40d 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -2996,13 +2996,6 @@ static void be_get_resources(struct be_adapter *adapter) } if (profile_present) { - /* Sanity fixes for Lancer */ - adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt, - BE_UC_PMAC_COUNT); - adapter->max_vlans = min_t(u16, adapter->max_vlans, - BE_NUM_VLANS_SUPPORTED); - adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac, - BE_MAX_MC); adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues, MAX_TX_QS); adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues, -- cgit v0.10.2 From 68cb7e47f0682633704e48c12aa4166c56f194ad Mon Sep 17 00:00:00 2001 From: Vasundhara Volam Date: Tue, 6 Aug 2013 09:27:18 +0530 Subject: be2net: Fix displaying supported speeds for BE2 The BE2 FW GET_PHY_DETAILS cmd does not return fixed speeds supported. Signed-off-by: Vasundhara Volam Signed-off-by: Sathya Perla Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index ce15b10..48e266a 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -2454,6 +2454,12 @@ int be_cmd_get_phy_info(struct be_adapter *adapter) le16_to_cpu(resp_phy_info->fixed_speeds_supported); adapter->phy.misc_params = le32_to_cpu(resp_phy_info->misc_params); + + if (BE2_chip(adapter)) { + adapter->phy.fixed_speeds_supported = + BE_SUPPORTED_SPEED_10GBPS | + BE_SUPPORTED_SPEED_1GBPS; + } } pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); -- cgit v0.10.2 From 05ccaa2b24d402bb16949239b5525ce7236c2e74 Mon Sep 17 00:00:00 2001 From: Vasundhara Volam Date: Tue, 6 Aug 2013 09:27:19 +0530 Subject: be2net: fixup log msgs for async events Log the event type for unknown async events Signed-off-by: Vasundhara Volam Signed-off-by: Sathya Perla Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 48e266a..e49e8cd 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -258,7 +258,8 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter, (struct be_async_event_grp5_pvid_state *)evt); break; default: - dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n"); + dev_warn(&adapter->pdev->dev, "Unknown grp5 event 0x%x!\n", + event_type); break; } } @@ -279,7 +280,8 @@ static void be_async_dbg_evt_process(struct be_adapter *adapter, adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD; break; default: - dev_warn(&adapter->pdev->dev, "Unknown debug event\n"); + dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n", + event_type); break; } } -- cgit v0.10.2 From 117affe3adb5dc26e7cc2af891934b7123ef40fa Mon Sep 17 00:00:00 2001 From: Vasundhara Volam Date: Tue, 6 Aug 2013 09:27:20 +0530 Subject: be2net: Initialize "status" in be_cmd_get_die_temperature() Uninitialized value was being returned in the non-failure case. Signed-off-by: Vasundhara Volam Signed-off-by: Sathya Perla Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index e49e8cd..5175f97 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1532,7 +1532,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter) { struct be_mcc_wrb *wrb; struct be_cmd_req_get_cntl_addnl_attribs *req; - int status; + int status = 0; spin_lock_bh(&adapter->mcc_lock); -- cgit v0.10.2 From 5721f9432de025065b42375f0cbeaf936958b8b6 Mon Sep 17 00:00:00 2001 From: Sathya Perla Date: Tue, 6 Aug 2013 09:27:21 +0530 Subject: be2net: update driver version Signed-off-by: Sathya Perla Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index c827b1b..11c815d 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -34,7 +34,7 @@ #include "be_hw.h" #include "be_roce.h" -#define DRV_VER "4.6.62.0u" +#define DRV_VER "4.9.134.0u" #define DRV_NAME "be2net" #define BE_NAME "Emulex BladeEngine2" #define BE3_NAME "Emulex BladeEngine3" -- cgit v0.10.2 From 02481bc678076f05643e7b240b2ea7c9aa1e72e1 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Tue, 6 Aug 2013 15:50:07 -0700 Subject: bnx2: Handle error condition in ->slot_reset() by closing the device if necessary. Otherwise, since NAPI state is already disabled, a subsequent close will hang the system. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 6a2de1d..3baf8b5 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -8694,14 +8694,13 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct bnx2 *bp = netdev_priv(dev); - pci_ers_result_t result; - int err; + pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT; + int err = 0; rtnl_lock(); if (pci_enable_device(pdev)) { dev_err(&pdev->dev, "Cannot re-enable PCI device after reset\n"); - result = PCI_ERS_RESULT_DISCONNECT; } else { pci_set_master(pdev); pci_restore_state(pdev); @@ -8709,9 +8708,15 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev) if (netif_running(dev)) { bnx2_set_power_state(bp, PCI_D0); - bnx2_init_nic(bp, 1); + err = bnx2_init_nic(bp, 1); } - result = PCI_ERS_RESULT_RECOVERED; + if (!err) + result = PCI_ERS_RESULT_RECOVERED; + } + + if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) { + bnx2_napi_enable(bp); + dev_close(dev); } rtnl_unlock(); -- cgit v0.10.2 From 6d5e85c71bf02fa10b0ea716776a71619b677f87 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Tue, 6 Aug 2013 15:50:08 -0700 Subject: bnx2: Use kernel APIs for WoL and power state changes. Simple API changes with no functional changes. Signed-off-by: Michael Chan Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 3baf8b5..ac72f80 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -3911,21 +3911,12 @@ init_cpu_err: static int bnx2_set_power_state(struct bnx2 *bp, pci_power_t state) { - u16 pmcsr; - - pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr); - switch (state) { case PCI_D0: { u32 val; - pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, - (pmcsr & ~PCI_PM_CTRL_STATE_MASK) | - PCI_PM_CTRL_PME_STATUS); - - if (pmcsr & PCI_PM_CTRL_STATE_MASK) - /* delay required during transition out of D3hot */ - msleep(20); + pci_enable_wake(bp->pdev, PCI_D0, false); + pci_set_power_state(bp->pdev, PCI_D0); val = BNX2_RD(bp, BNX2_EMAC_MODE); val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD; @@ -4018,26 +4009,19 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state) bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0); - pmcsr &= ~PCI_PM_CTRL_STATE_MASK; + pci_wake_from_d3(bp->pdev, bp->wol); if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) || (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) { if (bp->wol) - pmcsr |= 3; - } - else { - pmcsr |= 3; - } - if (bp->wol) { - pmcsr |= PCI_PM_CTRL_PME_ENABLE; + pci_set_power_state(bp->pdev, PCI_D3hot); + } else { + pci_set_power_state(bp->pdev, PCI_D3hot); } - pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, - pmcsr); /* No more memory access after this point until * device is brought back to D0. 
*/ - udelay(50); break; } default: @@ -7081,6 +7065,9 @@ bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) else { bp->wol = 0; } + + device_set_wakeup_enable(&bp->pdev->dev, bp->wol); + return 0; } @@ -8369,6 +8356,11 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) bp->wol = 0; } + if (bp->flags & BNX2_FLAG_NO_WOL) + device_set_wakeup_capable(&bp->pdev->dev, false); + else + device_set_wakeup_enable(&bp->pdev->dev, bp->wol); + if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) { bp->tx_quick_cons_trip_int = bp->tx_quick_cons_trip; -- cgit v0.10.2 From b6a23e91cccf7118c04edb71f28eac8c7bedc8d4 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Tue, 6 Aug 2013 15:50:09 -0700 Subject: bnx2: Refactor WoL setup into a separate function. Separate MAC and PHY WoL setup code into a separate function. Signed-off-by: Michael Chan Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index ac72f80..0b54ca0 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -3908,6 +3908,86 @@ init_cpu_err: return rc; } +static void +bnx2_setup_wol(struct bnx2 *bp) +{ + int i; + u32 val, wol_msg; + + if (bp->wol) { + u32 advertising; + u8 autoneg; + + autoneg = bp->autoneg; + advertising = bp->advertising; + + if (bp->phy_port == PORT_TP) { + bp->autoneg = AUTONEG_SPEED; + bp->advertising = ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_Autoneg; + } + + spin_lock_bh(&bp->phy_lock); + bnx2_setup_phy(bp, bp->phy_port); + spin_unlock_bh(&bp->phy_lock); + + bp->autoneg = autoneg; + bp->advertising = advertising; + + bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0); + + val = BNX2_RD(bp, BNX2_EMAC_MODE); + + /* Enable port mode. */ + val &= ~BNX2_EMAC_MODE_PORT; + val |= BNX2_EMAC_MODE_MPKT_RCVD | + BNX2_EMAC_MODE_ACPI_RCVD | + BNX2_EMAC_MODE_MPKT; + if (bp->phy_port == PORT_TP) { + val |= BNX2_EMAC_MODE_PORT_MII; + } else { + val |= BNX2_EMAC_MODE_PORT_GMII; + if (bp->line_speed == SPEED_2500) + val |= BNX2_EMAC_MODE_25G_MODE; + } + + BNX2_WR(bp, BNX2_EMAC_MODE, val); + + /* receive all multicast */ + for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { + BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), + 0xffffffff); + } + BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE); + + val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN; + BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0); + BNX2_WR(bp, BNX2_RPM_SORT_USER0, val); + BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA); + + /* Need to enable EMAC and RPM for WOL. 
*/ + BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, + BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE | + BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE | + BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE); + + val = BNX2_RD(bp, BNX2_RPM_CONFIG); + val &= ~BNX2_RPM_CONFIG_ACPI_ENA; + BNX2_WR(bp, BNX2_RPM_CONFIG, val); + + wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL; + } else { + wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL; + } + + if (!(bp->flags & BNX2_FLAG_NO_WOL)) + bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0); + +} + static int bnx2_set_power_state(struct bnx2 *bp, pci_power_t state) { @@ -3929,86 +4009,7 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state) break; } case PCI_D3hot: { - int i; - u32 val, wol_msg; - - if (bp->wol) { - u32 advertising; - u8 autoneg; - - autoneg = bp->autoneg; - advertising = bp->advertising; - - if (bp->phy_port == PORT_TP) { - bp->autoneg = AUTONEG_SPEED; - bp->advertising = ADVERTISED_10baseT_Half | - ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full | - ADVERTISED_Autoneg; - } - - spin_lock_bh(&bp->phy_lock); - bnx2_setup_phy(bp, bp->phy_port); - spin_unlock_bh(&bp->phy_lock); - - bp->autoneg = autoneg; - bp->advertising = advertising; - - bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0); - - val = BNX2_RD(bp, BNX2_EMAC_MODE); - - /* Enable port mode. */ - val &= ~BNX2_EMAC_MODE_PORT; - val |= BNX2_EMAC_MODE_MPKT_RCVD | - BNX2_EMAC_MODE_ACPI_RCVD | - BNX2_EMAC_MODE_MPKT; - if (bp->phy_port == PORT_TP) - val |= BNX2_EMAC_MODE_PORT_MII; - else { - val |= BNX2_EMAC_MODE_PORT_GMII; - if (bp->line_speed == SPEED_2500) - val |= BNX2_EMAC_MODE_25G_MODE; - } - - BNX2_WR(bp, BNX2_EMAC_MODE, val); - - /* receive all multicast */ - for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { - BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), - 0xffffffff); - } - BNX2_WR(bp, BNX2_EMAC_RX_MODE, - BNX2_EMAC_RX_MODE_SORT_MODE); - - val = 1 | BNX2_RPM_SORT_USER0_BC_EN | - BNX2_RPM_SORT_USER0_MC_EN; - BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0); - BNX2_WR(bp, BNX2_RPM_SORT_USER0, val); - BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | - BNX2_RPM_SORT_USER0_ENA); - - /* Need to enable EMAC and RPM for WOL. */ - BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, - BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE | - BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE | - BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE); - - val = BNX2_RD(bp, BNX2_RPM_CONFIG); - val &= ~BNX2_RPM_CONFIG_ACPI_ENA; - BNX2_WR(bp, BNX2_RPM_CONFIG, val); - - wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL; - } - else { - wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL; - } - - if (!(bp->flags & BNX2_FLAG_NO_WOL)) - bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, - 1, 0); - + bnx2_setup_wol(bp); pci_wake_from_d3(bp->pdev, bp->wol); if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) || (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) { -- cgit v0.10.2 From 28fb4eb4b7f29abaef3e79be5fc049a904221698 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Tue, 6 Aug 2013 15:50:10 -0700 Subject: bnx2: Use SIMPLE_DEV_PM_OPS. This simplifies the suspend/resume code. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 0b54ca0..27a128c 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -8602,46 +8602,52 @@ bnx2_remove_one(struct pci_dev *pdev) } static int -bnx2_suspend(struct pci_dev *pdev, pm_message_t state) +bnx2_suspend(struct device *device) { + struct pci_dev *pdev = to_pci_dev(device); struct net_device *dev = pci_get_drvdata(pdev); struct bnx2 *bp = netdev_priv(dev); - /* PCI register 4 needs to be saved whether netif_running() or not. - * MSI address and data need to be saved if using MSI and - * netif_running(). - */ - pci_save_state(pdev); - if (!netif_running(dev)) - return 0; - - cancel_work_sync(&bp->reset_task); - bnx2_netif_stop(bp, true); - netif_device_detach(dev); - del_timer_sync(&bp->timer); - bnx2_shutdown_chip(bp); - bnx2_free_skbs(bp); - bnx2_set_power_state(bp, pci_choose_state(pdev, state)); + if (netif_running(dev)) { + cancel_work_sync(&bp->reset_task); + bnx2_netif_stop(bp, true); + netif_device_detach(dev); + del_timer_sync(&bp->timer); + bnx2_shutdown_chip(bp); + __bnx2_free_irq(bp); + bnx2_free_skbs(bp); + } + bnx2_setup_wol(bp); return 0; } static int -bnx2_resume(struct pci_dev *pdev) +bnx2_resume(struct device *device) { + struct pci_dev *pdev = to_pci_dev(device); struct net_device *dev = pci_get_drvdata(pdev); struct bnx2 *bp = netdev_priv(dev); - pci_restore_state(pdev); if (!netif_running(dev)) return 0; bnx2_set_power_state(bp, PCI_D0); netif_device_attach(dev); + bnx2_request_irq(bp); bnx2_init_nic(bp, 1); bnx2_netif_start(bp, true); return 0; } +#ifdef CONFIG_PM_SLEEP +static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume); +#define BNX2_PM_OPS (&bnx2_pm_ops) + +#else + +#define BNX2_PM_OPS NULL + +#endif /* CONFIG_PM_SLEEP */ /** * bnx2_io_error_detected - called when PCI error is detected * @pdev: Pointer to PCI device @@ -8757,8 +8763,7 @@ static struct pci_driver bnx2_pci_driver = { .id_table = bnx2_pci_tbl, .probe = bnx2_init_one, .remove = bnx2_remove_one, - .suspend = bnx2_suspend, - .resume = bnx2_resume, + .driver.pm = BNX2_PM_OPS, .err_handler = &bnx2_err_handler, }; -- cgit v0.10.2 From 25bfb1dd4ba3b2d9a49ce9d9b0cd7be1840e15ed Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Tue, 6 Aug 2013 15:50:11 -0700 Subject: bnx2: Add pci shutdown handler. WoL and power state changes will now be done in the shutdown handler. open/close/ethtool will no longer change the power state. NVRAM operations can now be permitted whether the device is up or down. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 27a128c..88f5ab1 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -6302,7 +6302,6 @@ bnx2_open(struct net_device *dev) netif_carrier_off(dev); - bnx2_set_power_state(bp, PCI_D0); bnx2_disable_int(bp); rc = bnx2_setup_int_mode(bp, disable_msi); @@ -6709,7 +6708,6 @@ bnx2_close(struct net_device *dev) bnx2_del_napi(bp); bp->link_up = 0; netif_carrier_off(bp->dev); - bnx2_set_power_state(bp, PCI_D3hot); return 0; } @@ -7144,9 +7142,6 @@ bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, struct bnx2 *bp = netdev_priv(dev); int rc; - if (!netif_running(dev)) - return -EAGAIN; - /* parameters already validated in ethtool_get_eeprom */ rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len); @@ -7161,9 +7156,6 @@ bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, struct bnx2 *bp = netdev_priv(dev); int rc; - if (!netif_running(dev)) - return -EAGAIN; - /* parameters already validated in ethtool_set_eeprom */ rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len); @@ -7523,8 +7515,6 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf) { struct bnx2 *bp = netdev_priv(dev); - bnx2_set_power_state(bp, PCI_D0); - memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS); if (etest->flags & ETH_TEST_FL_OFFLINE) { int i; @@ -7573,8 +7563,6 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf) etest->flags |= ETH_TEST_FL_FAILED; } - if (!netif_running(bp->dev)) - bnx2_set_power_state(bp, PCI_D3hot); } static void @@ -7646,8 +7634,6 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) switch (state) { case ETHTOOL_ID_ACTIVE: - bnx2_set_power_state(bp, PCI_D0); - bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG); BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC); return 1; /* cycle on/off once per second */ @@ -7668,9 +7654,6 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) case ETHTOOL_ID_INACTIVE: BNX2_WR(bp, BNX2_EMAC_LED, 0); BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save); - - if (!netif_running(dev)) - bnx2_set_power_state(bp, PCI_D3hot); break; } @@ -8118,8 +8101,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) goto err_out_release; } - bnx2_set_power_state(bp, PCI_D0); - /* Configure byte swap and enable write to the reg_window registers. 
* Rely on CPU to do target byte swapping on big endian systems * The chip's target access swapping will not swap all accesses @@ -8705,10 +8686,9 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev) pci_restore_state(pdev); pci_save_state(pdev); - if (netif_running(dev)) { - bnx2_set_power_state(bp, PCI_D0); + if (netif_running(dev)) err = bnx2_init_nic(bp, 1); - } + if (!err) result = PCI_ERS_RESULT_RECOVERED; } @@ -8752,6 +8732,28 @@ static void bnx2_io_resume(struct pci_dev *pdev) rtnl_unlock(); } +static void bnx2_shutdown(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2 *bp; + + if (!dev) + return; + + bp = netdev_priv(dev); + if (!bp) + return; + + rtnl_lock(); + if (netif_running(dev)) + dev_close(bp->dev); + + if (system_state == SYSTEM_POWER_OFF) + bnx2_set_power_state(bp, PCI_D3hot); + + rtnl_unlock(); +} + static const struct pci_error_handlers bnx2_err_handler = { .error_detected = bnx2_io_error_detected, .slot_reset = bnx2_io_slot_reset, @@ -8765,6 +8767,7 @@ static struct pci_driver bnx2_pci_driver = { .remove = bnx2_remove_one, .driver.pm = BNX2_PM_OPS, .err_handler = &bnx2_err_handler, + .shutdown = bnx2_shutdown, }; module_pci_driver(bnx2_pci_driver); -- cgit v0.10.2 From 8a56d243ca60c92c0b80307422ae75676873a715 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Tue, 6 Aug 2013 15:50:12 -0700 Subject: bnx2: Update version to 2.2.4 and update copyright year. Signed-off-by: Michael Chan Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 88f5ab1..6fdfc18 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -1,6 +1,6 @@ /* bnx2.c: Broadcom NX2 network driver. * - * Copyright (c) 2004-2011 Broadcom Corporation + * Copyright (c) 2004-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -58,8 +58,8 @@ #include "bnx2_fw.h" #define DRV_MODULE_NAME "bnx2" -#define DRV_MODULE_VERSION "2.2.3" -#define DRV_MODULE_RELDATE "June 27, 2012" +#define DRV_MODULE_VERSION "2.2.4" +#define DRV_MODULE_RELDATE "Aug 05, 2013" #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw" #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw" #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw" diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h index 172efbe..18cb2d2 100644 --- a/drivers/net/ethernet/broadcom/bnx2.h +++ b/drivers/net/ethernet/broadcom/bnx2.h @@ -1,6 +1,6 @@ /* bnx2.h: Broadcom NX2 network driver. * - * Copyright (c) 2004-2011 Broadcom Corporation + * Copyright (c) 2004-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by -- cgit v0.10.2 From 6261d983f226f0a6a8d4d32b57a032bc23a5ebb6 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Mon, 5 Aug 2013 22:51:37 -0700 Subject: ip_tunnel: embed hash list head The IP tunnel hash heads can be embedded in the per-net structure since it is a fixed size. Reduce the size so that the total structure fits in a page size. The original size was overly large, even NETDEV_HASHBITS is only 8 bits! Also, add some white space for readability. Signed-off-by: Stephen Hemminger Acked-by: Pravin B Shelar . Signed-off-by: David S. 
Miller diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 781b3cf..c6acd9f 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -86,12 +86,12 @@ struct tnl_ptk_info { #define PACKET_RCVD 0 #define PACKET_REJECT 1 -#define IP_TNL_HASH_BITS 10 +#define IP_TNL_HASH_BITS 7 #define IP_TNL_HASH_SIZE (1 << IP_TNL_HASH_BITS) struct ip_tunnel_net { - struct hlist_head *tunnels; struct net_device *fb_tunnel_dev; + struct hlist_head tunnels[IP_TNL_HASH_SIZE]; }; #ifdef CONFIG_INET diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index ca1cb2d..9fdf8a6 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -838,15 +838,16 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, { struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id); struct ip_tunnel_parm parms; + unsigned int i; - itn->tunnels = kzalloc(IP_TNL_HASH_SIZE * sizeof(struct hlist_head), GFP_KERNEL); - if (!itn->tunnels) - return -ENOMEM; + for (i = 0; i < IP_TNL_HASH_SIZE; i++) + INIT_HLIST_HEAD(&itn->tunnels[i]); if (!ops) { itn->fb_tunnel_dev = NULL; return 0; } + memset(&parms, 0, sizeof(parms)); if (devname) strlcpy(parms.name, devname, IFNAMSIZ); @@ -854,10 +855,9 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, rtnl_lock(); itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms); rtnl_unlock(); - if (IS_ERR(itn->fb_tunnel_dev)) { - kfree(itn->tunnels); + + if (IS_ERR(itn->fb_tunnel_dev)) return PTR_ERR(itn->fb_tunnel_dev); - } return 0; } @@ -887,7 +887,6 @@ void ip_tunnel_delete_net(struct ip_tunnel_net *itn) ip_tunnel_destroy(itn, &list); unregister_netdevice_many(&list); rtnl_unlock(); - kfree(itn->tunnels); } EXPORT_SYMBOL_GPL(ip_tunnel_delete_net); -- cgit v0.10.2 From b4bf07771faaf959b0a916d35b1b930c030e30a8 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Tue, 6 Aug 2013 17:45:03 +0800 Subject: net: move iov_pages() to net/core/iovec.c To let it be reused and reduce code duplication. Signed-off-by: Jason Wang Signed-off-by: David S. 
Miller diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index a98fb0e..dfec20d 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -698,29 +698,6 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb, return 0; } -static unsigned long iov_pages(const struct iovec *iv, int offset, - unsigned long nr_segs) -{ - unsigned long seg, base; - int pages = 0, len, size; - - while (nr_segs && (offset >= iv->iov_len)) { - offset -= iv->iov_len; - ++iv; - --nr_segs; - } - - for (seg = 0; seg < nr_segs; seg++) { - base = (unsigned long)iv[seg].iov_base + offset; - len = iv[seg].iov_len - offset; - size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT; - pages += size; - offset = 0; - } - - return pages; -} - /* Get packet from user space buffer */ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, const struct iovec *iv, unsigned long total_len, diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 5dce262..18527ef 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1041,29 +1041,6 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from, return 0; } -static unsigned long iov_pages(const struct iovec *iv, int offset, - unsigned long nr_segs) -{ - unsigned long seg, base; - int pages = 0, len, size; - - while (nr_segs && (offset >= iv->iov_len)) { - offset -= iv->iov_len; - ++iv; - --nr_segs; - } - - for (seg = 0; seg < nr_segs; seg++) { - base = (unsigned long)iv[seg].iov_base + offset; - len = iv[seg].iov_len - offset; - size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT; - pages += size; - offset = 0; - } - - return pages; -} - /* Get packet from user space buffer */ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, void *msg_control, const struct iovec *iv, diff --git a/include/linux/socket.h b/include/linux/socket.h index 230c04b..445ef75 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -313,6 +313,8 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov, int offset, unsigned int len, __wsum *csump); +extern unsigned long iov_pages(const struct iovec *iov, int offset, + unsigned long nr_segs); extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode); extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata, diff --git a/net/core/iovec.c b/net/core/iovec.c index de178e4..b77eeec 100644 --- a/net/core/iovec.c +++ b/net/core/iovec.c @@ -212,3 +212,27 @@ out_fault: goto out; } EXPORT_SYMBOL(csum_partial_copy_fromiovecend); + +unsigned long iov_pages(const struct iovec *iov, int offset, + unsigned long nr_segs) +{ + unsigned long seg, base; + int pages = 0, len, size; + + while (nr_segs && (offset >= iov->iov_len)) { + offset -= iov->iov_len; + ++iov; + --nr_segs; + } + + for (seg = 0; seg < nr_segs; seg++) { + base = (unsigned long)iov[seg].iov_base + offset; + len = iov[seg].iov_len - offset; + size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT; + pages += size; + offset = 0; + } + + return pages; +} +EXPORT_SYMBOL(iov_pages); -- cgit v0.10.2 From c3bdeb5c7cc073ccf5ff9624642022a8613a956e Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Tue, 6 Aug 2013 17:45:04 +0800 Subject: net: move zerocopy_sg_from_iovec() to net/core/datagram.c To let it be reused and reduce code duplication. Also document this function. Signed-off-by: Jason Wang Signed-off-by: David S. 
Miller diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index dfec20d..182364a 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -536,86 +536,6 @@ static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad, return skb; } -/* set skb frags from iovec, this can move to core network code for reuse */ -static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from, - int offset, size_t count) -{ - int len = iov_length(from, count) - offset; - int copy = skb_headlen(skb); - int size, offset1 = 0; - int i = 0; - - /* Skip over from offset */ - while (count && (offset >= from->iov_len)) { - offset -= from->iov_len; - ++from; - --count; - } - - /* copy up to skb headlen */ - while (count && (copy > 0)) { - size = min_t(unsigned int, copy, from->iov_len - offset); - if (copy_from_user(skb->data + offset1, from->iov_base + offset, - size)) - return -EFAULT; - if (copy > size) { - ++from; - --count; - offset = 0; - } else - offset += size; - copy -= size; - offset1 += size; - } - - if (len == offset1) - return 0; - - while (count--) { - struct page *page[MAX_SKB_FRAGS]; - int num_pages; - unsigned long base; - unsigned long truesize; - - len = from->iov_len - offset; - if (!len) { - offset = 0; - ++from; - continue; - } - base = (unsigned long)from->iov_base + offset; - size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT; - if (i + size > MAX_SKB_FRAGS) - return -EMSGSIZE; - num_pages = get_user_pages_fast(base, size, 0, &page[i]); - if (num_pages != size) { - int j; - - for (j = 0; j < num_pages; j++) - put_page(page[i + j]); - return -EFAULT; - } - truesize = size * PAGE_SIZE; - skb->data_len += len; - skb->len += len; - skb->truesize += truesize; - atomic_add(truesize, &skb->sk->sk_wmem_alloc); - while (len) { - int off = base & ~PAGE_MASK; - int size = min_t(int, len, PAGE_SIZE - off); - __skb_fill_page_desc(skb, i, page[i], off, size); - skb_shinfo(skb)->nr_frags++; - /* increase sk_wmem_alloc */ - base += size; - len -= size; - i++; - } - offset = 0; - ++from; - } - return 0; -} - /* * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should * be shared with the tun/tap driver. 
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 18527ef..b163047 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -961,86 +961,6 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile, return skb; } -/* set skb frags from iovec, this can move to core network code for reuse */ -static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from, - int offset, size_t count) -{ - int len = iov_length(from, count) - offset; - int copy = skb_headlen(skb); - int size, offset1 = 0; - int i = 0; - - /* Skip over from offset */ - while (count && (offset >= from->iov_len)) { - offset -= from->iov_len; - ++from; - --count; - } - - /* copy up to skb headlen */ - while (count && (copy > 0)) { - size = min_t(unsigned int, copy, from->iov_len - offset); - if (copy_from_user(skb->data + offset1, from->iov_base + offset, - size)) - return -EFAULT; - if (copy > size) { - ++from; - --count; - offset = 0; - } else - offset += size; - copy -= size; - offset1 += size; - } - - if (len == offset1) - return 0; - - while (count--) { - struct page *page[MAX_SKB_FRAGS]; - int num_pages; - unsigned long base; - unsigned long truesize; - - len = from->iov_len - offset; - if (!len) { - offset = 0; - ++from; - continue; - } - base = (unsigned long)from->iov_base + offset; - size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT; - if (i + size > MAX_SKB_FRAGS) - return -EMSGSIZE; - num_pages = get_user_pages_fast(base, size, 0, &page[i]); - if (num_pages != size) { - int j; - - for (j = 0; j < num_pages; j++) - put_page(page[i + j]); - return -EFAULT; - } - truesize = size * PAGE_SIZE; - skb->data_len += len; - skb->len += len; - skb->truesize += truesize; - atomic_add(truesize, &skb->sk->sk_wmem_alloc); - while (len) { - int off = base & ~PAGE_MASK; - int size = min_t(int, len, PAGE_SIZE - off); - __skb_fill_page_desc(skb, i, page[i], off, size); - skb_shinfo(skb)->nr_frags++; - /* increase sk_wmem_alloc */ - base += size; - len -= size; - i++; - } - offset = 0; - ++from; - } - return 0; -} - /* Get packet from user space buffer */ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, void *msg_control, const struct iovec *iv, diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index ae46475..5ac96f3 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2359,6 +2359,10 @@ extern int skb_copy_datagram_from_iovec(struct sk_buff *skb, const struct iovec *from, int from_offset, int len); +extern int zerocopy_sg_from_iovec(struct sk_buff *skb, + const struct iovec *frm, + int offset, + size_t count); extern int skb_copy_datagram_const_iovec(const struct sk_buff *from, int offset, const struct iovec *to, diff --git a/net/core/datagram.c b/net/core/datagram.c index 8ab48cd..f987fae 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -573,6 +573,99 @@ fault: } EXPORT_SYMBOL(skb_copy_datagram_from_iovec); +/** + * zerocopy_sg_from_iovec - Build a zerocopy datagram from an iovec + * @skb: buffer to copy + * @from: io vector to copy to + * @offset: offset in the io vector to start copying from + * @count: amount of vectors to copy to buffer from + * + * The function will first copy up to headlen, and then pin the userspace + * pages and build frags through them. + * + * Returns 0, -EFAULT or -EMSGSIZE. 
+ * Note: the iovec is not modified during the copy + */ +int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from, + int offset, size_t count) +{ + int len = iov_length(from, count) - offset; + int copy = skb_headlen(skb); + int size, offset1 = 0; + int i = 0; + + /* Skip over from offset */ + while (count && (offset >= from->iov_len)) { + offset -= from->iov_len; + ++from; + --count; + } + + /* copy up to skb headlen */ + while (count && (copy > 0)) { + size = min_t(unsigned int, copy, from->iov_len - offset); + if (copy_from_user(skb->data + offset1, from->iov_base + offset, + size)) + return -EFAULT; + if (copy > size) { + ++from; + --count; + offset = 0; + } else + offset += size; + copy -= size; + offset1 += size; + } + + if (len == offset1) + return 0; + + while (count--) { + struct page *page[MAX_SKB_FRAGS]; + int num_pages; + unsigned long base; + unsigned long truesize; + + len = from->iov_len - offset; + if (!len) { + offset = 0; + ++from; + continue; + } + base = (unsigned long)from->iov_base + offset; + size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT; + if (i + size > MAX_SKB_FRAGS) + return -EMSGSIZE; + num_pages = get_user_pages_fast(base, size, 0, &page[i]); + if (num_pages != size) { + int j; + + for (j = 0; j < num_pages; j++) + put_page(page[i + j]); + return -EFAULT; + } + truesize = size * PAGE_SIZE; + skb->data_len += len; + skb->len += len; + skb->truesize += truesize; + atomic_add(truesize, &skb->sk->sk_wmem_alloc); + while (len) { + int off = base & ~PAGE_MASK; + int size = min_t(int, len, PAGE_SIZE - off); + __skb_fill_page_desc(skb, i, page[i], off, size); + skb_shinfo(skb)->nr_frags++; + /* increase sk_wmem_alloc */ + base += size; + len -= size; + i++; + } + offset = 0; + ++from; + } + return 0; +} +EXPORT_SYMBOL(zerocopy_sg_from_iovec); + static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, u8 __user *to, int len, __wsum *csump) -- cgit v0.10.2 From 0a57ec62dfa4b639d86aba60c236904def1d0356 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Tue, 6 Aug 2013 17:45:05 +0800 Subject: net: use skb_fill_page_desc() in zerocopy_sg_from_iovec() Signed-off-by: Jason Wang Signed-off-by: David S. Miller diff --git a/net/core/datagram.c b/net/core/datagram.c index f987fae..bec0867 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -652,8 +652,7 @@ int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from, while (len) { int off = base & ~PAGE_MASK; int size = min_t(int, len, PAGE_SIZE - off); - __skb_fill_page_desc(skb, i, page[i], off, size); - skb_shinfo(skb)->nr_frags++; + skb_fill_page_desc(skb, i, page[i], off, size); /* increase sk_wmem_alloc */ base += size; len -= size; -- cgit v0.10.2 From 234a4267085e85d0cc5552c340833bb2d82a50e7 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Tue, 6 Aug 2013 17:45:06 +0800 Subject: net: remove the useless comment in zerocopy_sg_from_iovec() Signed-off-by: Jason Wang Signed-off-by: David S. 
Miller diff --git a/net/core/datagram.c b/net/core/datagram.c index bec0867..a8a795c 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -653,7 +653,6 @@ int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from, int off = base & ~PAGE_MASK; int size = min_t(int, len, PAGE_SIZE - off); skb_fill_page_desc(skb, i, page[i], off, size); - /* increase sk_wmem_alloc */ base += size; len -= size; i++; -- cgit v0.10.2 From 0433547aa7443cefc89d9b533169bdc50f1206b8 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Tue, 6 Aug 2013 17:45:07 +0800 Subject: net: use release_pages() in zerocopy_sg_from_iovec() To reduce the duplicated codes. Signed-off-by: Jason Wang Signed-off-by: David S. Miller diff --git a/net/core/datagram.c b/net/core/datagram.c index a8a795c..badcd93 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -48,6 +48,7 @@ #include #include #include +#include #include #include @@ -638,10 +639,7 @@ int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from, return -EMSGSIZE; num_pages = get_user_pages_fast(base, size, 0, &page[i]); if (num_pages != size) { - int j; - - for (j = 0; j < num_pages; j++) - put_page(page[i + j]); + release_pages(&page[i], num_pages, 0); return -EFAULT; } truesize = size * PAGE_SIZE; -- cgit v0.10.2 From 3d9953a2ef2182d56e268742259b11dedb8e281d Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Tue, 6 Aug 2013 17:45:08 +0800 Subject: net: use skb_copy_datagram_from_iovec() in zerocopy_sg_from_iovec() Use skb_copy_datagram_from_iovec() to avoid code duplication and make it easy to be read. Also we can do the skipping inside the zero-copy loop. Signed-off-by: Jason Wang Signed-off-by: David S. Miller diff --git a/net/core/datagram.c b/net/core/datagram.c index badcd93..af814e7 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -591,48 +591,31 @@ int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from, int offset, size_t count) { int len = iov_length(from, count) - offset; - int copy = skb_headlen(skb); - int size, offset1 = 0; + int copy = min_t(int, skb_headlen(skb), len); + int size; int i = 0; - /* Skip over from offset */ - while (count && (offset >= from->iov_len)) { - offset -= from->iov_len; - ++from; - --count; - } - /* copy up to skb headlen */ - while (count && (copy > 0)) { - size = min_t(unsigned int, copy, from->iov_len - offset); - if (copy_from_user(skb->data + offset1, from->iov_base + offset, - size)) - return -EFAULT; - if (copy > size) { - ++from; - --count; - offset = 0; - } else - offset += size; - copy -= size; - offset1 += size; - } + if (skb_copy_datagram_from_iovec(skb, 0, from, offset, copy)) + return -EFAULT; - if (len == offset1) + if (len == copy) return 0; + offset += copy; while (count--) { struct page *page[MAX_SKB_FRAGS]; int num_pages; unsigned long base; unsigned long truesize; - len = from->iov_len - offset; - if (!len) { - offset = 0; + /* Skip over from offset and copied */ + if (offset >= from->iov_len) { + offset -= from->iov_len; ++from; continue; } + len = from->iov_len - offset; base = (unsigned long)from->iov_base + offset; size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT; if (i + size > MAX_SKB_FRAGS) -- cgit v0.10.2 From 32d73b144eacce83871199251081763c26659c0f Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Tue, 6 Aug 2013 17:29:35 +0900 Subject: net: phy: micrel: Staticize ksz8873mll_read_status() ksz8873mll_read_status() is used only in this file. 
Fix the following sparse warning: drivers/net/phy/micrel.c:147:5: warning: symbol 'ksz8873mll_read_status' was not declared. Should it be static? Signed-off-by: Jingoo Han Signed-off-by: David S. Miller diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 2510435..9ca4945 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -144,7 +144,7 @@ static int ks8051_config_init(struct phy_device *phydev) #define KSZ8873MLL_GLOBAL_CONTROL_4 0x06 #define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX (1 << 6) #define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED (1 << 4) -int ksz8873mll_read_status(struct phy_device *phydev) +static int ksz8873mll_read_status(struct phy_device *phydev) { int regval; -- cgit v0.10.2 From 16dda493312b15898025177c109a55aef84288ff Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Tue, 6 Aug 2013 17:32:58 +0900 Subject: net: phy: mdio: add missing __iomem annotation Added missing __iomem annotation in order to fix the following sparse warnings: drivers/net/phy/mdio-mux-mmioreg.c:51:27: warning: incorrect type in initializer (different address spaces) drivers/net/phy/mdio-mux-mmioreg.c:51:27: expected void *p drivers/net/phy/mdio-mux-mmioreg.c:51:27: got void [noderef] * drivers/net/phy/mdio-mux-mmioreg.c:57:21: warning: incorrect type in argument 1 (different address spaces) drivers/net/phy/mdio-mux-mmioreg.c:57:21: expected void const volatile [noderef] *addr drivers/net/phy/mdio-mux-mmioreg.c:57:21: got void *p drivers/net/phy/mdio-mux-mmioreg.c:60:25: warning: incorrect type in argument 2 (different address spaces) drivers/net/phy/mdio-mux-mmioreg.c:60:25: expected void volatile [noderef] *addr drivers/net/phy/mdio-mux-mmioreg.c:60:25: got void *p drivers/net/phy/mdio-mux-mmioreg.c:64:25: warning: incorrect type in argument 1 (different address spaces) drivers/net/phy/mdio-mux-mmioreg.c:64:25: expected void volatile [noderef] *addr drivers/net/phy/mdio-mux-mmioreg.c:64:25: got void *p Signed-off-by: Jingoo Han Signed-off-by: David S. Miller diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c index 9733bd2..f8e305d 100644 --- a/drivers/net/phy/mdio-mux-mmioreg.c +++ b/drivers/net/phy/mdio-mux-mmioreg.c @@ -48,7 +48,7 @@ static int mdio_mux_mmioreg_switch_fn(int current_child, int desired_child, struct mdio_mux_mmioreg_state *s = data; if (current_child ^ desired_child) { - void *p = ioremap(s->phys, 1); + void __iomem *p = ioremap(s->phys, 1); uint8_t x, y; if (!p) -- cgit v0.10.2 From 09effa67a18d893fc4e6f81a3659fc9efef1445e Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 7 Aug 2013 17:11:00 -0700 Subject: packet: Revert recent header parsing changes. This reverts commits: 0f75b09c798ed00c30d7d5551b896be883bc2aeb cbd89acb9eb257ed3b2be867142583fdcf7fdc5b c483e02614551e44ced3fe6eedda8e36d3277ccc Amongst other things, it's modifies the SKB header to pull the ethernet headers off via eth_type_trans() on the output path which is bogus. It's causing serious regressions for people. Signed-off-by: David S. 
Miller diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 0c0f6c9..4cb28a7 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -88,7 +88,6 @@ #include #include #include -#include #ifdef CONFIG_INET #include @@ -1924,7 +1923,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, __be16 proto, unsigned char *addr, int hlen) { union tpacket_uhdr ph; - int to_write, offset, len, tp_len, nr_frags, len_max, max_frame_len; + int to_write, offset, len, tp_len, nr_frags, len_max; struct socket *sock = po->sk.sk_socket; struct page *page; void *data; @@ -1947,6 +1946,10 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, tp_len = ph.h1->tp_len; break; } + if (unlikely(tp_len > size_max)) { + pr_err("packet size is too long (%d > %d)\n", tp_len, size_max); + return -EMSGSIZE; + } skb_reserve(skb, hlen); skb_reset_network_header(skb); @@ -2002,25 +2005,10 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, if (unlikely(err)) return err; - if (dev->type == ARPHRD_ETHER) - skb->protocol = eth_type_trans(skb, dev); - data += dev->hard_header_len; to_write -= dev->hard_header_len; } - max_frame_len = dev->mtu + dev->hard_header_len; - if (skb->protocol == htons(ETH_P_8021Q)) - max_frame_len += VLAN_HLEN; - - if (size_max > max_frame_len) - size_max = max_frame_len; - - if (unlikely(tp_len > size_max)) { - pr_err("packet size is too long (%d > %d)\n", tp_len, size_max); - return -EMSGSIZE; - } - offset = offset_in_page(data); len_max = PAGE_SIZE - offset; len = ((to_write > len_max) ? len_max : to_write); @@ -2059,7 +2047,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) struct net_device *dev; __be16 proto; bool need_rls_dev = false; - int err; + int err, reserve = 0; void *ph; struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name; int tp_len, size_max; @@ -2092,6 +2080,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) if (unlikely(dev == NULL)) goto out; + reserve = dev->hard_header_len; + err = -ENETDOWN; if (unlikely(!(dev->flags & IFF_UP))) goto out_put; @@ -2099,6 +2089,9 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) size_max = po->tx_ring.frame_size - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); + if (size_max > dev->mtu + reserve) + size_max = dev->mtu + reserve; + do { ph = packet_current_frame(po, &po->tx_ring, TP_STATUS_SEND_REQUEST); @@ -2331,20 +2324,22 @@ static int packet_snd(struct socket *sock, sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags); - if (dev->type == ARPHRD_ETHER) { - skb->protocol = eth_type_trans(skb, dev); - if (skb->protocol == htons(ETH_P_8021Q)) - reserve += VLAN_HLEN; - } else { - skb->protocol = proto; - skb->dev = dev; - } - if (!gso_type && (len > dev->mtu + reserve + extra_len)) { - err = -EMSGSIZE; - goto out_free; + /* Earlier code assumed this would be a VLAN pkt, + * double-check this now that we have the actual + * packet in hand. 
+ */ + struct ethhdr *ehdr; + skb_reset_mac_header(skb); + ehdr = eth_hdr(skb); + if (ehdr->h_proto != htons(ETH_P_8021Q)) { + err = -EMSGSIZE; + goto out_free; + } } + skb->protocol = proto; + skb->dev = dev; skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; -- cgit v0.10.2 From 1f07d03e2069df2ecd82301936b598a3b257c6d6 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 6 Aug 2013 03:32:11 -0700 Subject: net: add SNMP counters tracking incoming ECN bits With GRO/LRO processing, there is a problem because Ip[6]InReceives SNMP counters do not count the number of frames, but number of aggregated segments. Its probably too late to change this now. This patch adds four new counters, tracking number of frames, regardless of LRO/GRO, and on a per ECN status basis, for IPv4 and IPv6. Ip[6]NoECTPkts : Number of packets received with NOECT Ip[6]ECT1Pkts : Number of packets received with ECT(1) Ip[6]ECT0Pkts : Number of packets received with ECT(0) Ip[6]CEPkts : Number of packets received with Congestion Experienced lph37:~# nstat | egrep "Pkts|InReceive" IpInReceives 1634137 0.0 Ip6InReceives 3714107 0.0 Ip6InNoECTPkts 19205 0.0 Ip6InECT0Pkts 52651828 0.0 IpExtInNoECTPkts 33630 0.0 IpExtInECT0Pkts 15581379 0.0 IpExtInCEPkts 6 0.0 Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index af0a674..60601d2 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -51,6 +51,10 @@ enum IPSTATS_MIB_INBCASTOCTETS, /* InBcastOctets */ IPSTATS_MIB_OUTBCASTOCTETS, /* OutBcastOctets */ IPSTATS_MIB_CSUMERRORS, /* InCsumErrors */ + IPSTATS_MIB_NOECTPKTS, /* InNoECTPkts */ + IPSTATS_MIB_ECT1PKTS, /* InECT1Pkts */ + IPSTATS_MIB_ECT0PKTS, /* InECT0Pkts */ + IPSTATS_MIB_CEPKTS, /* InCEPkts */ __IPSTATS_MIB_MAX }; diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 15e3e68..054a3e9 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -141,6 +141,7 @@ #include #include #include +#include #include #include #include @@ -410,6 +411,13 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, if (iph->ihl < 5 || iph->version != 4) goto inhdr_error; + BUILD_BUG_ON(IPSTATS_MIB_ECT1PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_1); + BUILD_BUG_ON(IPSTATS_MIB_ECT0PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_0); + BUILD_BUG_ON(IPSTATS_MIB_CEPKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_CE); + IP_ADD_STATS_BH(dev_net(dev), + IPSTATS_MIB_NOECTPKTS + (iph->tos & INET_ECN_MASK), + max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs)); + if (!pskb_may_pull(skb, iph->ihl*4)) goto inhdr_error; diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 6577a11..5f5fa61 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -111,7 +111,7 @@ static const struct snmp_mib snmp4_ipstats_list[] = { SNMP_MIB_SENTINEL }; -/* Following RFC4293 items are displayed in /proc/net/netstat */ +/* Following items are displayed in /proc/net/netstat */ static const struct snmp_mib snmp4_ipextstats_list[] = { SNMP_MIB_ITEM("InNoRoutes", IPSTATS_MIB_INNOROUTES), SNMP_MIB_ITEM("InTruncatedPkts", IPSTATS_MIB_INTRUNCATEDPKTS), @@ -125,7 +125,12 @@ static const struct snmp_mib snmp4_ipextstats_list[] = { SNMP_MIB_ITEM("OutMcastOctets", IPSTATS_MIB_OUTMCASTOCTETS), SNMP_MIB_ITEM("InBcastOctets", IPSTATS_MIB_INBCASTOCTETS), SNMP_MIB_ITEM("OutBcastOctets", IPSTATS_MIB_OUTBCASTOCTETS), + /* Non RFC4293 fields */ SNMP_MIB_ITEM("InCsumErrors", IPSTATS_MIB_CSUMERRORS), + SNMP_MIB_ITEM("InNoECTPkts", IPSTATS_MIB_NOECTPKTS), + 
SNMP_MIB_ITEM("InECT1Pkts", IPSTATS_MIB_ECT1PKTS), + SNMP_MIB_ITEM("InECT0Pkts", IPSTATS_MIB_ECT0PKTS), + SNMP_MIB_ITEM("InCEPkts", IPSTATS_MIB_CEPKTS), SNMP_MIB_SENTINEL }; diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 2bab2aa..302d6fb 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -44,7 +44,7 @@ #include #include #include - +#include int ip6_rcv_finish(struct sk_buff *skb) @@ -109,6 +109,10 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt if (hdr->version != 6) goto err; + IP6_ADD_STATS_BH(dev_net(dev), idev, + IPSTATS_MIB_NOECTPKTS + + (ipv6_get_dsfield(hdr) & INET_ECN_MASK), + max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs)); /* * RFC4291 2.5.3 * A packet received on an interface with a destination address diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index 51c3285..091d066 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c @@ -91,6 +91,10 @@ static const struct snmp_mib snmp6_ipstats_list[] = { SNMP_MIB_ITEM("Ip6InBcastOctets", IPSTATS_MIB_INBCASTOCTETS), SNMP_MIB_ITEM("Ip6OutBcastOctets", IPSTATS_MIB_OUTBCASTOCTETS), /* IPSTATS_MIB_CSUMERRORS is not relevant in IPv6 (no checksum) */ + SNMP_MIB_ITEM("Ip6InNoECTPkts", IPSTATS_MIB_NOECTPKTS), + SNMP_MIB_ITEM("Ip6InECT1Pkts", IPSTATS_MIB_ECT1PKTS), + SNMP_MIB_ITEM("Ip6InECT0Pkts", IPSTATS_MIB_ECT0PKTS), + SNMP_MIB_ITEM("Ip6InCEPkts", IPSTATS_MIB_CEPKTS), SNMP_MIB_SENTINEL }; -- cgit v0.10.2 From 1ff412ad7714f6952f76ffd77f0a7f2f563288a1 Mon Sep 17 00:00:00 2001 From: "nikolay@redhat.com" Date: Tue, 6 Aug 2013 12:40:15 +0200 Subject: bonding: change the bond's vlan syncing functions with the standard ones Now we have vlan_vids_add/del_by_dev() which serve the same purpose as bond's bond_add/del_vlans_on_slave() with the good side effect of reverting the changes if one of the additions fails. There's only 1 change in the behaviour of enslave: if adding of the vlans to the slave fails, we'll fail the enslaving because otherwise we might delete some vlan that wasn't added by the bonding. The only way this may happen is with ENOMEM currently, so we're in trouble anyway. Signed-off-by: Nikolay Aleksandrov Acked-by: Veaceslav Falico Signed-off-by: David S. 
Miller diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 5697043..78b0aeb 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -493,33 +493,6 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, return 0; } -static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *slave_dev) -{ - struct vlan_entry *vlan; - int res; - - list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { - res = vlan_vid_add(slave_dev, htons(ETH_P_8021Q), - vlan->vlan_id); - if (res) - pr_warning("%s: Failed to add vlan id %d to device %s\n", - bond->dev->name, vlan->vlan_id, - slave_dev->name); - } -} - -static void bond_del_vlans_from_slave(struct bonding *bond, - struct net_device *slave_dev) -{ - struct vlan_entry *vlan; - - list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { - if (!vlan->vlan_id) - continue; - vlan_vid_del(slave_dev, htons(ETH_P_8021Q), vlan->vlan_id); - } -} - /*------------------------------- Link status -------------------------------*/ /* @@ -1630,7 +1603,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) dev_mc_add(slave_dev, lacpdu_multicast); } - bond_add_vlans_on_slave(bond, slave_dev); + if (vlan_vids_add_by_dev(slave_dev, bond_dev)) { + pr_err("%s: Error: Couldn't add bond vlan ids to %s\n", + bond_dev->name, slave_dev->name); + goto err_close; + } write_lock_bh(&bond->lock); @@ -1806,7 +1783,7 @@ err_detach: if (!USES_PRIMARY(bond->params.mode)) bond_hw_addr_flush(bond_dev, slave_dev); - bond_del_vlans_from_slave(bond, slave_dev); + vlan_vids_del_by_dev(slave_dev, bond_dev); write_lock_bh(&bond->lock); bond_detach_slave(bond, new_slave); if (bond->primary_slave == new_slave) @@ -2002,7 +1979,7 @@ static int __bond_release_one(struct net_device *bond_dev, /* must do this from outside any spinlocks */ bond_destroy_slave_symlinks(bond_dev, slave_dev); - bond_del_vlans_from_slave(bond, slave_dev); + vlan_vids_del_by_dev(slave_dev, bond_dev); /* If the mode USES_PRIMARY, then this cases was handled above by * bond_change_active_slave(..., NULL) -- cgit v0.10.2 From b20903f2a9ee5bd9ca80ad72867c8d137aaf4d62 Mon Sep 17 00:00:00 2001 From: "nikolay@redhat.com" Date: Tue, 6 Aug 2013 12:40:16 +0200 Subject: bonding: unwind on bond_add_vlan failure In case of bond_add_vlan() failure currently we'll have the vlan's refcnt bumped up in all slaves, but it will never go down because it failed to get added to the bond, so properly unwind the added vlan if bond_add_vlan fails. Signed-off-by: Nikolay Aleksandrov Acked-by: Veaceslav Falico Signed-off-by: David S. Miller diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 78b0aeb..4264a76 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -455,13 +455,13 @@ static int bond_vlan_rx_add_vid(struct net_device *bond_dev, if (res) { pr_err("%s: Error: Failed to add vlan id %d\n", bond_dev->name, vid); - return res; + goto unwind; } return 0; unwind: - /* unwind from head to the slave that failed */ + /* unwind from the slave that failed */ bond_for_each_slave_continue_reverse(bond, slave) vlan_vid_del(slave->dev, proto, vid); -- cgit v0.10.2 From c655bc6896b94ee0223393f26155c6daf1e2d148 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 29 Jul 2013 15:41:55 +0200 Subject: netfilter: nf_conntrack: don't send destroy events from iterator Let nf_ct_delete handle delivery of the DESTROY event. Based on earlier patch from Pablo Neira. 
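Both bonding patches above rely on the same add-then-unwind idiom: apply an operation to every member in order and, if one fails, undo it on the members already touched, walking back in reverse. A self-contained sketch of that shape, with a hypothetical struct member and member_add()/member_del() standing in for a bonding slave and vlan_vid_add()/vlan_vid_del():

	#include <stdio.h>

	struct member { int id; int added; };

	/* hypothetical per-member operations */
	static int member_add(struct member *m)
	{
		if (m->id == 3)		/* pretend the third member fails */
			return -1;
		m->added = 1;
		return 0;
	}

	static void member_del(struct member *m)
	{
		m->added = 0;
	}

	/* add to every member; on failure unwind what was added, in reverse */
	static int add_to_all(struct member *members, int count)
	{
		int i, err;

		for (i = 0; i < count; i++) {
			err = member_add(&members[i]);
			if (err)
				goto unwind;
		}
		return 0;

	unwind:
		while (--i >= 0)
			member_del(&members[i]);
		return err;
	}

	int main(void)
	{
		struct member m[4] = { {1, 0}, {2, 0}, {3, 0}, {4, 0} };

		printf("add_to_all: %d\n", add_to_all(m, 4));	/* fails on id 3 */
		printf("still added: %d%d%d%d\n",
		       m[0].added, m[1].added, m[2].added, m[3].added); /* 0000 */
		return 0;
	}

Failing the whole operation when one member cannot be updated, as the enslave path above now does, also guarantees the unwind never removes a vlan that this code did not add itself.
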
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index e5eb8b6..0c1288a 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -248,7 +248,9 @@ extern void nf_ct_untracked_status_or(unsigned long bits); /* Iterate over all conntracks: if iter returns true, it's deleted. */ extern void -nf_ct_iterate_cleanup(struct net *net, int (*iter)(struct nf_conn *i, void *data), void *data); +nf_ct_iterate_cleanup(struct net *net, + int (*iter)(struct nf_conn *i, void *data), + void *data, u32 portid, int report); extern void nf_conntrack_free(struct nf_conn *ct); extern struct nf_conn * nf_conntrack_alloc(struct net *net, u16 zone, diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c index 30e4de9..00352ce 100644 --- a/net/ipv4/netfilter/ipt_MASQUERADE.c +++ b/net/ipv4/netfilter/ipt_MASQUERADE.c @@ -118,7 +118,7 @@ static int masq_device_event(struct notifier_block *this, NF_CT_ASSERT(dev->ifindex != 0); nf_ct_iterate_cleanup(net, device_cmp, - (void *)(long)dev->ifindex); + (void *)(long)dev->ifindex, 0, 0); } return NOTIFY_DONE; diff --git a/net/ipv6/netfilter/ip6t_MASQUERADE.c b/net/ipv6/netfilter/ip6t_MASQUERADE.c index 47bff61..3e4e92d 100644 --- a/net/ipv6/netfilter/ip6t_MASQUERADE.c +++ b/net/ipv6/netfilter/ip6t_MASQUERADE.c @@ -76,7 +76,7 @@ static int masq_device_event(struct notifier_block *this, if (event == NETDEV_DOWN) nf_ct_iterate_cleanup(net, device_cmp, - (void *)(long)dev->ifindex); + (void *)(long)dev->ifindex, 0, 0); return NOTIFY_DONE; } diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 0934611..da6f178 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1246,7 +1246,7 @@ found: void nf_ct_iterate_cleanup(struct net *net, int (*iter)(struct nf_conn *i, void *data), - void *data) + void *data, u32 portid, int report) { struct nf_conn *ct; unsigned int bucket = 0; @@ -1254,7 +1254,7 @@ void nf_ct_iterate_cleanup(struct net *net, while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) { /* Time to push up daises... */ if (del_timer(&ct->timeout)) - death_by_timeout((unsigned long)ct); + nf_ct_delete(ct, portid, report); /* ... else the timer will get him soon. */ @@ -1263,30 +1263,6 @@ void nf_ct_iterate_cleanup(struct net *net, } EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup); -struct __nf_ct_flush_report { - u32 portid; - int report; -}; - -static int kill_report(struct nf_conn *i, void *data) -{ - struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data; - struct nf_conn_tstamp *tstamp; - - tstamp = nf_conn_tstamp_find(i); - if (tstamp && tstamp->stop == 0) - tstamp->stop = ktime_to_ns(ktime_get_real()); - - /* If we fail to deliver the event, death_by_timeout() will retry */ - if (nf_conntrack_event_report(IPCT_DESTROY, i, - fr->portid, fr->report) < 0) - return 1; - - /* Avoid the delivery of the destroy event in death_by_timeout(). 
*/ - set_bit(IPS_DYING_BIT, &i->status); - return 1; -} - static int kill_all(struct nf_conn *i, void *data) { return 1; @@ -1304,11 +1280,7 @@ EXPORT_SYMBOL_GPL(nf_ct_free_hashtable); void nf_conntrack_flush_report(struct net *net, u32 portid, int report) { - struct __nf_ct_flush_report fr = { - .portid = portid, - .report = report, - }; - nf_ct_iterate_cleanup(net, kill_report, &fr); + nf_ct_iterate_cleanup(net, kill_all, NULL, portid, report); } EXPORT_SYMBOL_GPL(nf_conntrack_flush_report); @@ -1389,7 +1361,7 @@ void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list) i_see_dead_people: busy = 0; list_for_each_entry(net, net_exit_list, exit_list) { - nf_ct_iterate_cleanup(net, kill_all, NULL); + nf_ct_iterate_cleanup(net, kill_all, NULL, 0, 0); nf_ct_release_dying_list(net); if (atomic_read(&net->ct.count) != 0) busy = 1; diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index 0ab9636..ce30041 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c @@ -281,7 +281,7 @@ void nf_ct_l3proto_pernet_unregister(struct net *net, nf_ct_l3proto_unregister_sysctl(net, proto); /* Remove all contrack entries for this protocol */ - nf_ct_iterate_cleanup(net, kill_l3proto, proto); + nf_ct_iterate_cleanup(net, kill_l3proto, proto, 0, 0); } EXPORT_SYMBOL_GPL(nf_ct_l3proto_pernet_unregister); @@ -476,7 +476,7 @@ void nf_ct_l4proto_pernet_unregister(struct net *net, nf_ct_l4proto_unregister_sysctl(net, pn, l4proto); /* Remove all contrack entries for this protocol */ - nf_ct_iterate_cleanup(net, kill_l4proto, l4proto); + nf_ct_iterate_cleanup(net, kill_l4proto, l4proto, 0, 0); } EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_unregister); diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index 038eee5..6ff8083 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -497,7 +497,7 @@ static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto) rtnl_lock(); for_each_net(net) - nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean); + nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0); rtnl_unlock(); } @@ -511,7 +511,7 @@ static void nf_nat_l3proto_clean(u8 l3proto) rtnl_lock(); for_each_net(net) - nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean); + nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0); rtnl_unlock(); } @@ -749,7 +749,7 @@ static void __net_exit nf_nat_net_exit(struct net *net) { struct nf_nat_proto_clean clean = {}; - nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean); + nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean, 0, 0); synchronize_rcu(); nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size); } -- cgit v0.10.2 From 495191c79f378462cc85d1e669be3f8b9ef1c8e7 Mon Sep 17 00:00:00 2001 From: Alexander Bondar Date: Mon, 5 Aug 2013 14:16:45 +0300 Subject: iwlwifi: mvm: Fix beacon filtering enablement via debugfs The code was only enabling it when already enabled, which obviously can't work. 
Signed-off-by: Alexander Bondar Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c index 2ee256d..3cdc005 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c @@ -895,10 +895,7 @@ static ssize_t iwl_dbgfs_bf_params_write(struct file *file, if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value) { ret = iwl_mvm_disable_beacon_filter(mvm, vif); } else { - if (mvmvif->bf_enabled) - ret = iwl_mvm_enable_beacon_filter(mvm, vif); - else - ret = iwl_mvm_disable_beacon_filter(mvm, vif); + ret = iwl_mvm_enable_beacon_filter(mvm, vif); } mutex_unlock(&mvm->mutex); -- cgit v0.10.2 From f4f3c659846587aae9043d2df31f6434934965e1 Mon Sep 17 00:00:00 2001 From: Matti Gottlieb Date: Tue, 6 Aug 2013 18:17:42 +0300 Subject: iwlwifi: introduce external debug level This debug level will be used in the future for logging interaction with external modules. Signed-off-by: Matti Gottlieb Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h index 7edb851..b2bb32a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h @@ -145,6 +145,7 @@ do { \ #define IWL_DL_RX 0x01000000 #define IWL_DL_ISR 0x02000000 #define IWL_DL_HT 0x04000000 +#define IWL_DL_EXTERNAL 0x08000000 /* 0xF0000000 - 0x10000000 */ #define IWL_DL_11H 0x10000000 #define IWL_DL_STATS 0x20000000 @@ -153,6 +154,7 @@ do { \ #define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a) #define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a) +#define IWL_DEBUG_EXTERNAL(p, f, a...) IWL_DEBUG(p, IWL_DL_EXTERNAL, f, ## a) #define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a) #define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a) #define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a) -- cgit v0.10.2 From a9b292464311d32441cd3284109263d92c413a48 Mon Sep 17 00:00:00 2001 From: Ido Yariv Date: Mon, 15 Jul 2013 11:51:48 -0400 Subject: iwlwifi: pcie: Refactor iwl_queue_space Reduce the ambiguity spares to a single element if the window size is not smaller than the queue size. If smaller, no spares are required at all. Signed-off-by: Ido Yariv Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 011167c..12c9c00 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -65,18 +65,30 @@ ***************************************************/ static int iwl_queue_space(const struct iwl_queue *q) { - int s = q->read_ptr - q->write_ptr; - - if (q->read_ptr > q->write_ptr) - s -= q->n_bd; - - if (s <= 0) - s += q->n_window; - /* keep some reserve to not confuse empty and full situations */ - s -= 2; - if (s < 0) - s = 0; - return s; + unsigned int max; + unsigned int used; + + /* + * To avoid ambiguity between empty and completely full queues, there + * should always be less than q->n_bd elements in the queue. + * If q->n_window is smaller than q->n_bd, there is no need to reserve + * any queue entries for this purpose. + */ + if (q->n_window < q->n_bd) + max = q->n_window; + else + max = q->n_bd - 1; + + /* + * q->n_bd is a power of 2, so the following is equivalent to modulo by + * q->n_bd and is well defined for negative dividends. 
+ */ + used = (q->write_ptr - q->read_ptr) & (q->n_bd - 1); + + if (WARN_ON(used > max)) + return 0; + + return max - used; } /* -- cgit v0.10.2 From 351746c9ad790260ffe5cc78f41e2a8a1e6ab8b6 Mon Sep 17 00:00:00 2001 From: Ido Yariv Date: Mon, 15 Jul 2013 12:41:27 -0400 Subject: iwlwifi: pcie: Refactor iwl_rxq_space Simplify iwl_rxq_space to improve readability and reduce the ambiguity spares to a single element. Signed-off-by: Ido Yariv Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index 5fdb4ee..8c9641b 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c @@ -112,15 +112,16 @@ */ static int iwl_rxq_space(const struct iwl_rxq *rxq) { - int s = rxq->read - rxq->write; - - if (s <= 0) - s += RX_QUEUE_SIZE; - /* keep some buffer to not confuse full and empty queue */ - s -= 2; - if (s < 0) - s = 0; - return s; + /* Make sure RX_QUEUE_SIZE is a power of 2 */ + BUILD_BUG_ON(RX_QUEUE_SIZE & (RX_QUEUE_SIZE - 1)); + + /* + * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity + * between empty and completely full queues. + * The following is equivalent to modulo by RX_QUEUE_SIZE and is well + * defined for negative dividends. + */ + return (rxq->read - rxq->write - 1) & (RX_QUEUE_SIZE - 1); } /* -- cgit v0.10.2 From 6e8773c31497867b0dbbcaa60391ab3ec9156371 Mon Sep 17 00:00:00 2001 From: Ido Yariv Date: Mon, 15 Jul 2013 16:01:48 -0400 Subject: iwlwifi: pcie: Remove duplicate code from pcie irq handlers Instead of having the same code sequentially, fall-through. Signed-off-by: Ido Yariv Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index 8c9641b..3f237b4 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c @@ -1121,6 +1121,7 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data) struct iwl_trans *trans = data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 inta, inta_mask; + irqreturn_t ret = IRQ_NONE; lockdep_assert_held(&trans_pcie->irq_lock); @@ -1169,10 +1170,8 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data) /* the thread will service interrupts and re-enable them */ if (likely(inta)) return IRQ_WAKE_THREAD; - else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) && - !trans_pcie->inta) - iwl_enable_interrupts(trans); - return IRQ_HANDLED; + + ret = IRQ_HANDLED; none: /* re-enable interrupts here since we don't have anything to service. */ @@ -1181,7 +1180,7 @@ none: !trans_pcie->inta) iwl_enable_interrupts(trans); - return IRQ_NONE; + return ret; } /* interrupt handler using ict table, with this interrupt driver will @@ -1200,6 +1199,7 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data) u32 val = 0; u32 read; unsigned long flags; + irqreturn_t ret = IRQ_NONE; if (!trans) return IRQ_NONE; @@ -1212,7 +1212,7 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data) * use legacy interrupt. 
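The two queue-space refactors above use the same trick: with a power-of-two ring size, (write - read) & (size - 1) yields the number of used slots even after the unsigned counters wrap, and keeping one slot in reserve makes a completely full ring distinguishable from an empty one. A small stand-alone sketch with a hypothetical 8-entry ring (struct ring, ring_used() and ring_space() are illustrative names, not the iwlwifi structures):

	#include <stdio.h>

	#define RING_SIZE 8	/* must be a power of two */

	struct ring {
		unsigned int read;
		unsigned int write;
	};

	/* used entries: well defined even when the counters wrap, because
	 * masking with (RING_SIZE - 1) is equivalent to modulo RING_SIZE */
	static unsigned int ring_used(const struct ring *r)
	{
		return (r->write - r->read) & (RING_SIZE - 1);
	}

	/* free entries, keeping one slot spare so "full" != "empty" */
	static unsigned int ring_space(const struct ring *r)
	{
		return (r->read - r->write - 1) & (RING_SIZE - 1);
	}

	int main(void)
	{
		struct ring r = { .read = 0xfffffffe, .write = 3 };	/* wrapped */

		printf("used=%u space=%u\n", ring_used(&r), ring_space(&r));
		/* prints used=5 space=2: 7 usable slots in an 8-entry ring */
		return 0;
	}

Reserving that single spare slot is exactly the ambiguity the comments in iwl_queue_space() and iwl_rxq_space() above call out, and is why at most size - 1 entries are ever reported free.
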
*/ if (unlikely(!trans_pcie->use_ict)) { - irqreturn_t ret = iwl_pcie_isr(irq, data); + ret = iwl_pcie_isr(irq, data); spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); return ret; } @@ -1281,17 +1281,9 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data) if (likely(inta)) { spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); return IRQ_WAKE_THREAD; - } else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) && - !trans_pcie->inta) { - /* Allow interrupt if was disabled by this handler and - * no tasklet was schedules, We should not enable interrupt, - * tasklet will enable it. - */ - iwl_enable_interrupts(trans); } - spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); - return IRQ_HANDLED; + ret = IRQ_HANDLED; none: /* re-enable interrupts here since we don't have anything to service. @@ -1302,5 +1294,5 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data) iwl_enable_interrupts(trans); spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); - return IRQ_NONE; + return ret; } -- cgit v0.10.2 From 0670307992a69a2028e01e6a5fb5f4ab23167954 Mon Sep 17 00:00:00 2001 From: Chun-Yeow Yeoh Date: Tue, 6 Aug 2013 15:39:56 -0700 Subject: mac80211: allow lowest basic rate for unicast management for mesh Allow lowest basic rate to be used for unicast management frame in mesh. Otherwise, the lowest supported rate is used for unicast management frame, such as 1Mbps for 2.4GHz and 6Mbps for 5GHz. Rename the rc_send_low_broadcast to re_send_low_basicrate since now it is also applied to unicast management frame in mesh. Signed-off-by: Chun-Yeow Yeoh Signed-off-by: Johannes Berg diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index ba63ac8..e126605 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c @@ -210,7 +210,7 @@ static bool rc_no_data_or_no_ack_use_min(struct ieee80211_tx_rate_control *txrc) !ieee80211_is_data(fc); } -static void rc_send_low_broadcast(s8 *idx, u32 basic_rates, +static void rc_send_low_basicrate(s8 *idx, u32 basic_rates, struct ieee80211_supported_band *sband) { u8 i; @@ -263,28 +263,37 @@ static void __rate_control_send_low(struct ieee80211_hw *hw, } -bool rate_control_send_low(struct ieee80211_sta *sta, +bool rate_control_send_low(struct ieee80211_sta *pubsta, void *priv_sta, struct ieee80211_tx_rate_control *txrc) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb); struct ieee80211_supported_band *sband = txrc->sband; + struct sta_info *sta; int mcast_rate; + bool use_basicrate = false; - if (!sta || !priv_sta || rc_no_data_or_no_ack_use_min(txrc)) { - __rate_control_send_low(txrc->hw, sband, sta, info); + if (!pubsta || !priv_sta || rc_no_data_or_no_ack_use_min(txrc)) { + __rate_control_send_low(txrc->hw, sband, pubsta, info); - if (!sta && txrc->bss) { + if (!pubsta && txrc->bss) { mcast_rate = txrc->bss_conf->mcast_rate[sband->band]; if (mcast_rate > 0) { info->control.rates[0].idx = mcast_rate - 1; return true; } + use_basicrate = true; + } else if (pubsta) { + sta = container_of(pubsta, struct sta_info, sta); + if (ieee80211_vif_is_mesh(&sta->sdata->vif)) + use_basicrate = true; + } - rc_send_low_broadcast(&info->control.rates[0].idx, + if (use_basicrate) + rc_send_low_basicrate(&info->control.rates[0].idx, txrc->bss_conf->basic_rates, sband); - } + return true; } return false; -- cgit v0.10.2 From ab1e8ad3b463fd15b99a9b3980ec0f84294f6207 Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Fri, 2 Aug 2013 17:38:01 +0200 Subject: mac80211: fix ieee80211_sta_process_chanswitch for 5/10 MHz channels Signed-off-by: Simon Wunderlich Signed-off-by: 
Mathias Kretschmer Signed-off-by: Johannes Berg diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 211246b..45a87ee 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1102,6 +1102,15 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, case -1: cfg80211_chandef_create(&new_chandef, new_chan, NL80211_CHAN_NO_HT); + /* keep width for 5/10 MHz channels */ + switch (sdata->vif.bss_conf.chandef.width) { + case NL80211_CHAN_WIDTH_5: + case NL80211_CHAN_WIDTH_10: + new_chandef.width = sdata->vif.bss_conf.chandef.width; + break; + default: + break; + } break; } -- cgit v0.10.2 From cab70040dfd95ee32144f02fade64f0cb94f31a0 Mon Sep 17 00:00:00 2001 From: William Manley Date: Tue, 6 Aug 2013 19:03:13 +0100 Subject: net: igmp: Reduce Unsolicited report interval to 1s when using IGMPv3 If an IGMP join packet is lost you will not receive data sent to the multicast group so if no data arrives from that multicast group in a period of time after the IGMP join a second IGMP join will be sent. The delay between joins is the "IGMP Unsolicited Report Interval". Previously this value was hard coded to be chosen randomly between 0-10s. This can be too long for some use-cases, such as IPTV as it can cause channel change to be slow in the presence of packet loss. The value 10s has come from IGMPv2 RFC2236, which was reduced to 1s in IGMPv3 RFC3376. This patch makes the kernel use the 1s value from the later RFC if we are operating in IGMPv3 mode. IGMPv2 behaviour is unaffected. Tested with Wireshark and a simple program to join a (non-existent) multicast group. The distribution of timings for the second join differ based upon setting /proc/sys/net/ipv4/conf/eth0/force_igmp_version. Signed-off-by: William Manley Acked-by: Hannes Frederic Sowa Acked-by: Benjamin LaHaise Signed-off-by: David S. 
Miller diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index ef76186..ceb19ab 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -114,7 +114,8 @@ #define IGMP_V1_Router_Present_Timeout (400*HZ) #define IGMP_V2_Router_Present_Timeout (400*HZ) -#define IGMP_Unsolicited_Report_Interval (10*HZ) +#define IGMP_V2_Unsolicited_Report_Interval (10*HZ) +#define IGMP_V3_Unsolicited_Report_Interval (1*HZ) #define IGMP_Query_Response_Interval (10*HZ) #define IGMP_Unsolicited_Report_Count 2 @@ -139,6 +140,14 @@ ((in_dev)->mr_v2_seen && \ time_before(jiffies, (in_dev)->mr_v2_seen))) +static int unsolicited_report_interval(struct in_device *in_dev) +{ + if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) + return IGMP_V2_Unsolicited_Report_Interval; + else /* v3 */ + return IGMP_V3_Unsolicited_Report_Interval; +} + static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im); static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr); static void igmpv3_clear_delrec(struct in_device *in_dev); @@ -722,7 +731,8 @@ static void igmp_ifc_timer_expire(unsigned long data) igmpv3_send_cr(in_dev); if (in_dev->mr_ifc_count) { in_dev->mr_ifc_count--; - igmp_ifc_start_timer(in_dev, IGMP_Unsolicited_Report_Interval); + igmp_ifc_start_timer(in_dev, + unsolicited_report_interval(in_dev)); } __in_dev_put(in_dev); } @@ -747,7 +757,7 @@ static void igmp_timer_expire(unsigned long data) if (im->unsolicit_count) { im->unsolicit_count--; - igmp_start_timer(im, IGMP_Unsolicited_Report_Interval); + igmp_start_timer(im, unsolicited_report_interval(in_dev)); } im->reporter = 1; spin_unlock(&im->lock); -- cgit v0.10.2 From 5c6fe01c1fe3140a8fb088d2a9c32548b731c35e Mon Sep 17 00:00:00 2001 From: William Manley Date: Tue, 6 Aug 2013 19:03:14 +0100 Subject: net: igmp: Don't flush routing cache when force_igmp_version is modified The procfs knob /proc/sys/net/ipv4/conf/*/force_igmp_version allows the IGMP protocol version to use to be explicitly set. As a side effect this caused the routing cache to be flushed as it was declared as a DEVINET_SYSCTL_FLUSHING_ENTRY. Flushing is unnecessary and this patch makes it so flushing does not occur. Requested by Hannes Frederic Sowa as he was reviewing other patches adding procfs entries. Suggested-by: Hannes Frederic Sowa Signed-off-by: William Manley Acked-by: Hannes Frederic Sowa Acked-by: Benjamin LaHaise Signed-off-by: David S. 
Miller diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index b99cd23..0a4a6cb 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -27,9 +27,9 @@ enum IPV4_DEVCONF_TAG, IPV4_DEVCONF_ARPFILTER, IPV4_DEVCONF_MEDIUM_ID, + IPV4_DEVCONF_FORCE_IGMP_VERSION, IPV4_DEVCONF_NOXFRM, IPV4_DEVCONF_NOPOLICY, - IPV4_DEVCONF_FORCE_IGMP_VERSION, IPV4_DEVCONF_ARP_ANNOUNCE, IPV4_DEVCONF_ARP_IGNORE, IPV4_DEVCONF_PROMOTE_SECONDARIES, diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 43923dc..87d47ce 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -2094,11 +2094,11 @@ static struct devinet_sysctl_table { DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"), DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"), DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"), + DEVINET_SYSCTL_RW_ENTRY(FORCE_IGMP_VERSION, + "force_igmp_version"), DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"), DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"), - DEVINET_SYSCTL_FLUSHING_ENTRY(FORCE_IGMP_VERSION, - "force_igmp_version"), DEVINET_SYSCTL_FLUSHING_ENTRY(PROMOTE_SECONDARIES, "promote_secondaries"), DEVINET_SYSCTL_FLUSHING_ENTRY(ROUTE_LOCALNET, -- cgit v0.10.2 From 2690048c01f32bf45d1c1e1ab3079bc10ad2aea7 Mon Sep 17 00:00:00 2001 From: William Manley Date: Tue, 6 Aug 2013 19:03:15 +0100 Subject: net: igmp: Allow user-space configuration of igmp unsolicited report interval Adds the new procfs knobs: /proc/sys/net/ipv4/conf/*/igmpv2_unsolicited_report_interval /proc/sys/net/ipv4/conf/*/igmpv3_unsolicited_report_interval Which will allow userspace configuration of the IGMP unsolicited report interval (see below) in milliseconds. The defaults are 10000ms for IGMPv2 and 1000ms for IGMPv3 in accordance with RFC2236 and RFC3376. Background: If an IGMP join packet is lost you will not receive data sent to the multicast group so if no data arrives from that multicast group in a period of time after the IGMP join a second IGMP join will be sent. The delay between joins is the "IGMP Unsolicited Report Interval". Prior to this patch this value was hard coded in the kernel to 10s for IGMPv2 and 1s for IGMPv3. 10s is unsuitable for some use-cases, such as IPTV as it can cause channel change to be slow in the presence of packet loss. This patch allows the value to be overridden from userspace for both IGMPv2 and IGMPv3 such that it can be tuned accoding to the network. Tested with Wireshark and a simple program to join a (non-existent) multicast group. The distribution of timings for the second join differ based upon setting the procfs knobs. igmpvX_unsolicited_report_interval is intended to follow the pattern established by force_igmp_version, and while a procfs entry has been added a corresponding sysctl knob has not as it is my understanding that sysctl is deprecated[1]. [1]: http://lwn.net/Articles/247243/ Signed-off-by: William Manley Acked-by: Hannes Frederic Sowa Acked-by: Benjamin LaHaise Signed-off-by: David S. 
Miller diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 0a4a6cb..c796ce2 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -28,6 +28,8 @@ enum IPV4_DEVCONF_ARPFILTER, IPV4_DEVCONF_MEDIUM_ID, IPV4_DEVCONF_FORCE_IGMP_VERSION, + IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL, + IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL, IPV4_DEVCONF_NOXFRM, IPV4_DEVCONF_NOPOLICY, IPV4_DEVCONF_ARP_ANNOUNCE, diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 87d47ce..a1b5bcb 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -73,6 +73,8 @@ static struct ipv4_devconf ipv4_devconf = { [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1, [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1, [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1, + [IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/, + [IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/, }, }; @@ -83,6 +85,8 @@ static struct ipv4_devconf ipv4_devconf_dflt = { [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1, [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1, [IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE - 1] = 1, + [IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/, + [IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/, }, }; @@ -2096,6 +2100,10 @@ static struct devinet_sysctl_table { DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"), DEVINET_SYSCTL_RW_ENTRY(FORCE_IGMP_VERSION, "force_igmp_version"), + DEVINET_SYSCTL_RW_ENTRY(IGMPV2_UNSOLICITED_REPORT_INTERVAL, + "igmpv2_unsolicited_report_interval"), + DEVINET_SYSCTL_RW_ENTRY(IGMPV3_UNSOLICITED_REPORT_INTERVAL, + "igmpv3_unsolicited_report_interval"), DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"), DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"), diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index ceb19ab..d6c0e64 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -142,10 +142,25 @@ static int unsolicited_report_interval(struct in_device *in_dev) { + int interval_ms, interval_jiffies; + if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) - return IGMP_V2_Unsolicited_Report_Interval; + interval_ms = IN_DEV_CONF_GET( + in_dev, + IGMPV2_UNSOLICITED_REPORT_INTERVAL); else /* v3 */ - return IGMP_V3_Unsolicited_Report_Interval; + interval_ms = IN_DEV_CONF_GET( + in_dev, + IGMPV3_UNSOLICITED_REPORT_INTERVAL); + + interval_jiffies = msecs_to_jiffies(interval_ms); + + /* _timer functions can't handle a delay of 0 jiffies so ensure + * we always return a positive value. + */ + if (interval_jiffies <= 0) + interval_jiffies = 1; + return interval_jiffies; } static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im); -- cgit v0.10.2 From cda5f98e36576596b9230483ec52bff3cc97eb21 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 6 Aug 2013 21:18:12 +0200 Subject: net: sctp: convert sctp_checksum_disable module param into sctp sysctl Get rid of the last module parameter for SCTP and make this configurable via sysctl for SCTP like all the rest of SCTP's configuration knobs. Signed-off-by: Daniel Borkmann Signed-off-by: David S. Miller diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 36be26b..1b27b0b 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -1507,6 +1507,14 @@ sack_timeout - INTEGER Default: 200 +checksum_disable - BOOLEAN + Disable SCTP checksum computing and verification for debugging purpose. 
+ + 1: Disable checksumming + 0: Enable checksumming + + Default: 0 + valid_cookie_life - INTEGER The default lifetime of the SCTP cookie (in milliseconds). The cookie is used during association establishment. diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h index 3573a81..ebfdf1e 100644 --- a/include/net/netns/sctp.h +++ b/include/net/netns/sctp.h @@ -129,6 +129,9 @@ struct netns_sctp { /* Threshold for autoclose timeout, in seconds. */ unsigned long max_autoclose; + + /* Flag to disable SCTP checksumming. */ + int checksum_disable; }; #endif /* __NETNS_SCTP_H__ */ diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index d9c93a7..06ebeaa 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -141,10 +141,6 @@ extern struct sctp_globals { /* This is the sctp port control hash. */ int port_hashsize; struct sctp_bind_hashbucket *port_hashtable; - - /* Flag to indicate whether computing and verifying checksum - * is disabled. */ - bool checksum_disable; } sctp_globals; #define sctp_max_instreams (sctp_globals.max_instreams) @@ -156,7 +152,6 @@ extern struct sctp_globals { #define sctp_assoc_hashtable (sctp_globals.assoc_hashtable) #define sctp_port_hashsize (sctp_globals.port_hashsize) #define sctp_port_hashtable (sctp_globals.port_hashtable) -#define sctp_checksum_disable (sctp_globals.checksum_disable) /* SCTP Socket type: UDP or TCP style. */ typedef enum { diff --git a/net/sctp/input.c b/net/sctp/input.c index fa91aff..b9a25e1 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -140,8 +140,8 @@ int sctp_rcv(struct sk_buff *skb) __skb_pull(skb, skb_transport_offset(skb)); if (skb->len < sizeof(struct sctphdr)) goto discard_it; - if (!sctp_checksum_disable && !skb_csum_unnecessary(skb) && - sctp_rcv_checksum(net, skb) < 0) + if (!net->sctp.checksum_disable && !skb_csum_unnecessary(skb) && + sctp_rcv_checksum(net, skb) < 0) goto discard_it; skb_pull(skb, sizeof(struct sctphdr)); diff --git a/net/sctp/output.c b/net/sctp/output.c index 5a55c55d..cdb5f49 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -395,6 +395,7 @@ int sctp_packet_transmit(struct sctp_packet *packet) int padding; /* How much padding do we need? */ __u8 has_data = 0; struct dst_entry *dst = tp->dst; + struct net *net; unsigned char *auth = NULL; /* pointer to auth in skb data */ __u32 cksum_buf_len = sizeof(struct sctphdr); @@ -541,7 +542,9 @@ int sctp_packet_transmit(struct sctp_packet *packet) * Note: Adler-32 is no longer applicable, as has been replaced * by CRC32-C as described in . 
*/ - if (!sctp_checksum_disable) { + net = dev_net(dst->dev); + + if (!net->sctp.checksum_disable) { if (!(dst->dev->features & NETIF_F_SCTP_CSUM)) { __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len); diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index b52ec25..a570a63 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -1193,6 +1193,9 @@ static int __net_init sctp_net_init(struct net *net) /* Whether Cookie Preservative is enabled(1) or not(0) */ net->sctp.cookie_preserve_enable = 1; + /* Whether SCTP checksumming is disabled(1) or not(0) */ + net->sctp.checksum_disable = 0; + /* Default sctp sockets to use md5 as their hmac alg */ #if defined (CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5) net->sctp.sctp_hmac_alg = "md5"; @@ -1549,6 +1552,4 @@ MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-132"); MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-132"); MODULE_AUTHOR("Linux Kernel SCTP developers "); MODULE_DESCRIPTION("Support for the SCTP protocol (RFC2960)"); -module_param_named(no_checksums, sctp_checksum_disable, bool, 0644); -MODULE_PARM_DESC(no_checksums, "Disable checksums computing and verification"); MODULE_LICENSE("GPL"); diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index 1906747..754809a 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c @@ -296,7 +296,15 @@ static struct ctl_table sctp_net_table[] = { .extra1 = &max_autoclose_min, .extra2 = &max_autoclose_max, }, - + { + .procname = "checksum_disable", + .data = &init_net.sctp.checksum_disable, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &one, + }, { /* sentinel */ } }; -- cgit v0.10.2 From 477143e3fece3dc12629bb1ebd7b47e8e6e72b2b Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 6 Aug 2013 21:18:13 +0200 Subject: net: sctp: trivial: update bug report in header comment With the restructuring of the lksctp.org site, we only allow bug reports through the SCTP mailing list linux-sctp@vger.kernel.org, not via SF, as SF is only used for web hosting and nothing more. While at it, also remove the obvious statement that bugs will be fixed and incooperated into the kernel. Signed-off-by: Daniel Borkmann Acked-by: Vlad Yasevich Signed-off-by: David S. Miller diff --git a/include/net/sctp/auth.h b/include/net/sctp/auth.h index 754c511..aa80bef 100644 --- a/include/net/sctp/auth.h +++ b/include/net/sctp/auth.h @@ -24,14 +24,8 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * Vlad Yasevich - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #ifndef __sctp_auth_h__ diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h index 483e630..259924d 100644 --- a/include/net/sctp/checksum.h +++ b/include/net/sctp/checksum.h @@ -27,9 +27,6 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * Dinakaran Joseph * Jon Grimm @@ -37,9 +34,6 @@ * * Rewritten to use libcrc32c by: * Vlad Yasevich - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. 
*/ #ifndef __sctp_checksum_h__ diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h index 3524727..832f219 100644 --- a/include/net/sctp/command.h +++ b/include/net/sctp/command.h @@ -23,19 +23,17 @@ * the Free Software Foundation, 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. * - * Please send any bug reports or fixes you make to one of the - * following email addresses: + * Please send any bug reports or fixes you make to the + * email address(es): + * lksctp developers * - * La Monte H.P. Yarroll - * Karl Knutson - * Ardelle Fan - * Sridhar Samudrala - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. + * Written or modified by: + * La Monte H.P. Yarroll + * Karl Knutson + * Ardelle Fan + * Sridhar Samudrala */ - #ifndef __net_sctp_command_h__ #define __net_sctp_command_h__ diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index a1e7aa5..2f0a565 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -27,9 +27,6 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * La Monte H.P. Yarroll * Karl Knutson @@ -39,9 +36,6 @@ * Xingang Guo * Sridhar Samudrala * Daisy Chang - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #ifndef __sctp_constants_h__ diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index cb28df9..3794c5a 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -29,9 +29,6 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * La Monte H.P. Yarroll * Xingang Guo @@ -41,9 +38,6 @@ * Ardelle Fan * Ryan Layer * Kevin Gao - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #ifndef __net_sctp_h__ diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 2aa66dd..4ef75af 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -29,9 +29,6 @@ * email addresses: * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * La Monte H.P. Yarroll * Karl Knutson @@ -42,9 +39,6 @@ * Daisy Chang * Ardelle Fan * Kevin Gao - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #include diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 06ebeaa..78eedf0 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -27,9 +27,6 @@ * email addresses: * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * Randall Stewart * Ken Morneau @@ -46,9 +43,6 @@ * Ryan Layer * Anup Pemmaiah * Kevin Gao - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. 
*/ #ifndef __sctp_structs_h__ diff --git a/include/net/sctp/tsnmap.h b/include/net/sctp/tsnmap.h index c361d6f..54bbbe5 100644 --- a/include/net/sctp/tsnmap.h +++ b/include/net/sctp/tsnmap.h @@ -30,17 +30,11 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * Jon Grimm * La Monte H.P. Yarroll * Karl Knutson * Sridhar Samudrala - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #include diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h index 34f0d34..27b9f5c 100644 --- a/include/net/sctp/ulpevent.h +++ b/include/net/sctp/ulpevent.h @@ -33,17 +33,11 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * Jon Grimm * La Monte H.P. Yarroll * Karl Knutson * Sridhar Samudrala - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #ifndef __sctp_ulpevent_h__ diff --git a/include/net/sctp/ulpqueue.h b/include/net/sctp/ulpqueue.h index add3237..b0cf5d5 100644 --- a/include/net/sctp/ulpqueue.h +++ b/include/net/sctp/ulpqueue.h @@ -32,16 +32,10 @@ * email addresses: * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * Jon Grimm * La Monte H.P. Yarroll * Sridhar Samudrala - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #ifndef __sctp_ulpqueue_h__ diff --git a/net/sctp/associola.c b/net/sctp/associola.c index e425ba0..e051920 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -30,9 +30,6 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * La Monte H.P. Yarroll * Karl Knutson @@ -43,9 +40,6 @@ * Daisy Chang * Ryan Layer * Kevin Gao - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 3aab967..8c4fa5d 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c @@ -24,14 +24,8 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * Vlad Yasevich - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #include diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c index f34ce8b..077bb07 100644 --- a/net/sctp/bind_addr.c +++ b/net/sctp/bind_addr.c @@ -29,17 +29,11 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * La Monte H.P. Yarroll * Karl Knutson * Jon Grimm * Daisy Chang - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. 
*/ #include diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c index b50b90c..bd0bdd0 100644 --- a/net/sctp/chunk.c +++ b/net/sctp/chunk.c @@ -26,15 +26,9 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * Jon Grimm * Sridhar Samudrala - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/net/sctp/command.c b/net/sctp/command.c index f4bebda..3d9a9ff 100644 --- a/net/sctp/command.c +++ b/net/sctp/command.c @@ -27,15 +27,9 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * La Monte H.P. Yarroll * Karl Knutson - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #include diff --git a/net/sctp/debug.c b/net/sctp/debug.c index 44aa4e7..e89015d 100644 --- a/net/sctp/debug.c +++ b/net/sctp/debug.c @@ -30,9 +30,6 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * La Monte H.P. Yarroll * Karl Knutson @@ -40,9 +37,6 @@ * Jon Grimm * Daisy Chang * Sridhar Samudrala - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #include diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 825b754..09b8daa 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c @@ -31,18 +31,12 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * La Monte H.P. Yarroll * Karl Knutson * Jon Grimm * Daisy Chang * Dajiang Zhang - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #include diff --git a/net/sctp/input.c b/net/sctp/input.c index b9a25e1..2873e22 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -31,9 +31,6 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * La Monte H.P. Yarroll * Karl Knutson @@ -43,9 +40,6 @@ * Daisy Chang * Sridhar Samudrala * Ardelle Fan - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #include diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c index e70f60a..5856932 100644 --- a/net/sctp/inqueue.c +++ b/net/sctp/inqueue.c @@ -32,15 +32,9 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * La Monte H.P. Yarroll * Karl Knutson - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 5a9402e..da613ce 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -29,9 +29,6 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * Le Yanqun * Hui Huang @@ -42,9 +39,6 @@ * * Based on: * linux/net/ipv6/tcp_ipv6.c - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/net/sctp/objcnt.c b/net/sctp/objcnt.c index aec346c..5ea573b 100644 --- a/net/sctp/objcnt.c +++ b/net/sctp/objcnt.c @@ -28,14 +28,8 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * Jon Grimm - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/net/sctp/output.c b/net/sctp/output.c index cdb5f49..e35b84c 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -28,17 +28,11 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * La Monte H.P. Yarroll * Karl Knutson * Jon Grimm * Sridhar Samudrala - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 5131323..94df758 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -30,9 +30,6 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * La Monte H.P. Yarroll * Karl Knutson @@ -41,9 +38,6 @@ * Hui Huang * Sridhar Samudrala * Jon Grimm - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/net/sctp/primitive.c b/net/sctp/primitive.c index 31fa437..ce1ffd8 100644 --- a/net/sctp/primitive.c +++ b/net/sctp/primitive.c @@ -31,18 +31,12 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * La Monte H.P. Yarroll * Narasimha Budihal * Karl Knutson * Ardelle Fan * Kevin Gao - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #include diff --git a/net/sctp/proc.c b/net/sctp/proc.c index aff0cac..82432bf 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c @@ -24,14 +24,8 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * Sridhar Samudrala - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. 
*/ #include diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index a570a63..5448297 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -31,9 +31,6 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * La Monte H.P. Yarroll * Karl Knutson @@ -41,9 +38,6 @@ * Sridhar Samudrala * Daisy Chang * Ardelle Fan - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 780a2d4..9c13133 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -31,9 +31,6 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * La Monte H.P. Yarroll * Karl Knutson @@ -45,9 +42,6 @@ * Daisy Chang * Ardelle Fan * Kevin Gao - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index f1f3aac..666c668 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -30,9 +30,6 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * La Monte H.P. Yarroll * Karl Knutson @@ -42,9 +39,6 @@ * Daisy Chang * Sridhar Samudrala * Ardelle Fan - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 93271f0..dfe3f36 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -30,9 +30,6 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * La Monte H.P. Yarroll * Karl Knutson @@ -45,9 +42,6 @@ * Ardelle Fan * Ryan Layer * Kevin Gao - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c index 64ea5fd..c5999b2 100644 --- a/net/sctp/sm_statetable.c +++ b/net/sctp/sm_statetable.c @@ -30,9 +30,6 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * La Monte H.P. Yarroll * Karl Knutson @@ -41,9 +38,6 @@ * Daisy Chang * Ardelle Fan * Sridhar Samudrala - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 0245712..d5d5882 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -36,9 +36,6 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * La Monte H.P. 
Yarroll * Narasimha Budihal @@ -52,9 +49,6 @@ * Ryan Layer * Anup Pemmaiah * Kevin Gao - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/net/sctp/ssnmap.c b/net/sctp/ssnmap.c index 72b5939..6007124 100644 --- a/net/sctp/ssnmap.c +++ b/net/sctp/ssnmap.c @@ -26,14 +26,8 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * Jon Grimm - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #include diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index 754809a..1b1ee76 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c @@ -27,18 +27,12 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * Mingqin Liu * Jon Grimm * Ardelle Fan * Ryan Layer * Sridhar Samudrala - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #include diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 9602c52..6836ead 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -32,9 +32,6 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * La Monte H.P. Yarroll * Karl Knutson @@ -43,9 +40,6 @@ * Hui Huang * Sridhar Samudrala * Ardelle Fan - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c index 0eff866..fbda200 100644 --- a/net/sctp/tsnmap.c +++ b/net/sctp/tsnmap.c @@ -29,17 +29,11 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * La Monte H.P. Yarroll * Jon Grimm * Karl Knutson * Sridhar Samudrala - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #include diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index c07624f..81089ed 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c @@ -30,17 +30,11 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * Jon Grimm * La Monte H.P. Yarroll * Ardelle Fan * Sridhar Samudrala - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. */ #include diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index 2cdb301..1c1484e 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c @@ -29,16 +29,10 @@ * email address(es): * lksctp developers * - * Or submit a bug report through the following website: - * http://www.sf.net/projects/lksctp - * * Written or modified by: * Jon Grimm * La Monte H.P. Yarroll * Sridhar Samudrala - * - * Any bugs reported given to us we will try to fix... any fixes shared will - * be incorporated into the next SCTP release. 
*/ #include -- cgit v0.10.2 From 3f4e854a2d7d5fdb2f6e9e693903085a21ea0d5e Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Mon, 5 Aug 2013 18:51:57 -0700 Subject: mwifiex: rename mef macros Their names were generic. We need to define similar macros for coalesce feature. Hence they are renamed here. Signed-off-by: Amitkumar Karwar Signed-off-by: Bing Zhao Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index c5e21ed..326f4d9 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -2327,12 +2327,12 @@ mwifiex_is_pattern_supported(struct cfg80211_pkt_pattern *pat, s8 *byte_seq) dont_care_byte = true; } - if (valid_byte_cnt > MAX_BYTESEQ) + if (valid_byte_cnt > MWIFIEX_MEF_MAX_BYTESEQ) return false; } } - byte_seq[MAX_BYTESEQ] = valid_byte_cnt; + byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] = valid_byte_cnt; return true; } @@ -2345,7 +2345,7 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy, struct mwifiex_mef_entry *mef_entry; int i, filt_num = 0, ret; bool first_pat = true; - u8 byte_seq[MAX_BYTESEQ + 1]; + u8 byte_seq[MWIFIEX_MEF_MAX_BYTESEQ + 1]; const u8 ipv4_mc_mac[] = {0x33, 0x33}; const u8 ipv6_mc_mac[] = {0x01, 0x00, 0x5e}; struct mwifiex_private *priv = @@ -2383,16 +2383,16 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy, if (!wowlan->patterns[i].pkt_offset) { if (!(byte_seq[0] & 0x01) && - (byte_seq[MAX_BYTESEQ] == 1)) { + (byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 1)) { mef_cfg.criteria |= MWIFIEX_CRITERIA_UNICAST; continue; } else if (is_broadcast_ether_addr(byte_seq)) { mef_cfg.criteria |= MWIFIEX_CRITERIA_BROADCAST; continue; } else if ((!memcmp(byte_seq, ipv4_mc_mac, 2) && - (byte_seq[MAX_BYTESEQ] == 2)) || + (byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 2)) || (!memcmp(byte_seq, ipv6_mc_mac, 3) && - (byte_seq[MAX_BYTESEQ] == 3))) { + (byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 3))) { mef_cfg.criteria |= MWIFIEX_CRITERIA_MULTICAST; continue; } @@ -2418,7 +2418,8 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy, mef_entry->filter[filt_num].repeat = 16; memcpy(mef_entry->filter[filt_num].byte_seq, priv->curr_addr, ETH_ALEN); - mef_entry->filter[filt_num].byte_seq[MAX_BYTESEQ] = ETH_ALEN; + mef_entry->filter[filt_num].byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] = + ETH_ALEN; mef_entry->filter[filt_num].offset = 14; mef_entry->filter[filt_num].filt_type = TYPE_EQ; if (filt_num) @@ -2491,7 +2492,7 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = { #ifdef CONFIG_PM static const struct wiphy_wowlan_support mwifiex_wowlan_support = { .flags = WIPHY_WOWLAN_MAGIC_PKT, - .n_patterns = MWIFIEX_MAX_FILTERS, + .n_patterns = MWIFIEX_MEF_MAX_FILTERS, .pattern_min_len = 1, .pattern_max_len = MWIFIEX_MAX_PATTERN_LEN, .max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN, diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h index 7f27e45..5ecda45 100644 --- a/drivers/net/wireless/mwifiex/ioctl.h +++ b/drivers/net/wireless/mwifiex/ioctl.h @@ -362,13 +362,13 @@ struct mwifiex_ds_misc_subsc_evt { struct subsc_evt_cfg bcn_h_rssi_cfg; }; -#define MAX_BYTESEQ 6 /* non-adjustable */ -#define MWIFIEX_MAX_FILTERS 10 +#define MWIFIEX_MEF_MAX_BYTESEQ 6 /* non-adjustable */ +#define MWIFIEX_MEF_MAX_FILTERS 10 struct mwifiex_mef_filter { u16 repeat; u16 offset; - s8 byte_seq[MAX_BYTESEQ + 1]; + s8 byte_seq[MWIFIEX_MEF_MAX_BYTESEQ + 1]; u8 filt_type; u8 filt_action; }; @@ -376,7 +376,7 @@ struct mwifiex_mef_filter { struct mwifiex_mef_entry { u8 mode; u8 action; - 
struct mwifiex_mef_filter filter[MWIFIEX_MAX_FILTERS]; + struct mwifiex_mef_filter filter[MWIFIEX_MEF_MAX_FILTERS]; }; struct mwifiex_ds_mef_cfg { diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c index 9b75ed8..448baf1 100644 --- a/drivers/net/wireless/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/mwifiex/sta_cmd.c @@ -1070,7 +1070,7 @@ mwifiex_cmd_append_rpn_expression(struct mwifiex_private *priv, int i, byte_len; u8 *stack_ptr = *buffer; - for (i = 0; i < MWIFIEX_MAX_FILTERS; i++) { + for (i = 0; i < MWIFIEX_MEF_MAX_FILTERS; i++) { filter = &mef_entry->filter[i]; if (!filter->filt_type) break; @@ -1079,7 +1079,7 @@ mwifiex_cmd_append_rpn_expression(struct mwifiex_private *priv, *stack_ptr = TYPE_DNUM; stack_ptr += 1; - byte_len = filter->byte_seq[MAX_BYTESEQ]; + byte_len = filter->byte_seq[MWIFIEX_MEF_MAX_BYTESEQ]; memcpy(stack_ptr, filter->byte_seq, byte_len); stack_ptr += byte_len; *stack_ptr = byte_len; -- cgit v0.10.2 From 0434c464999ca73586be2c513aaa09fd1dc38595 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Mon, 5 Aug 2013 18:51:58 -0700 Subject: mwifiex: modify mwifiex_is_pattern_supported() routine It is modified so that it can be reused for coalesce feature. Signed-off-by: Amitkumar Karwar Signed-off-by: Bing Zhao Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index 326f4d9..62c3d98 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -2309,7 +2309,8 @@ EXPORT_SYMBOL_GPL(mwifiex_del_virtual_intf); #ifdef CONFIG_PM static bool -mwifiex_is_pattern_supported(struct cfg80211_pkt_pattern *pat, s8 *byte_seq) +mwifiex_is_pattern_supported(struct cfg80211_pkt_pattern *pat, s8 *byte_seq, + u8 max_byte_seq) { int j, k, valid_byte_cnt = 0; bool dont_care_byte = false; @@ -2327,12 +2328,12 @@ mwifiex_is_pattern_supported(struct cfg80211_pkt_pattern *pat, s8 *byte_seq) dont_care_byte = true; } - if (valid_byte_cnt > MWIFIEX_MEF_MAX_BYTESEQ) + if (valid_byte_cnt > max_byte_seq) return false; } } - byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] = valid_byte_cnt; + byte_seq[max_byte_seq] = valid_byte_cnt; return true; } @@ -2375,7 +2376,8 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy, for (i = 0; i < wowlan->n_patterns; i++) { memset(byte_seq, 0, sizeof(byte_seq)); if (!mwifiex_is_pattern_supported(&wowlan->patterns[i], - byte_seq)) { + byte_seq, + MWIFIEX_MEF_MAX_BYTESEQ)) { wiphy_err(wiphy, "Pattern not supported\n"); kfree(mef_entry); return -EOPNOTSUPP; -- cgit v0.10.2 From afd84de47309cbdf96ea80d073233834f28393a5 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Mon, 5 Aug 2013 18:51:59 -0700 Subject: mwifiex: increase max supported pattern offset The offset number is increased to accomodate requests from user to match more fields in a Rx packet. Signed-off-by: Amitkumar Karwar Signed-off-by: Bing Zhao Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h index c0dfc4d..12c6223 100644 --- a/drivers/net/wireless/mwifiex/fw.h +++ b/drivers/net/wireless/mwifiex/fw.h @@ -450,7 +450,7 @@ enum P2P_MODES { (((event_cause) >> 24) & 0x00ff) #define MWIFIEX_MAX_PATTERN_LEN 20 -#define MWIFIEX_MAX_OFFSET_LEN 50 +#define MWIFIEX_MAX_OFFSET_LEN 100 #define STACK_NBYTES 100 #define TYPE_DNUM 1 #define TYPE_BYTESEQ 2 -- cgit v0.10.2 From 562fc5b30f228d4a8c1dad90c82897a5440d7a0b Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Mon, 5 Aug 2013 18:52:00 -0700 Subject: mwifiex: add packet coalesce support Coalesce filters are configured in firmware based on settings received from cfg80211. Packet type which is required by firmware is determined based on provided patterns in a rule: Unicast: if pattern '01' with offset 0 is found Multicast: if pattern '33:33' or '01:00:5e' with offset 0 is found Broadcast: if pattern 'ff:ff:ff:ff' with offset 0 is found Some example coalesce configuration files: 1) Coalesce Rx data packets from 192.168.0.88 mac address of our device is 00:50:43:21:53:7A Source IP address offset comes out as 52 after following calculations: 32 bytes of HW 802.11 header + 8 bytes LLC + 12 bytes in IPV4 header till source IP address Destination mac is at offset 6 in HW header. delay=100 condition=1 patterns=01,6+00:50:43:22,10+53:7A,52+c0:a8:00:58 2) Coalesce all broadcast and multicast packets(Multiple packet types are not allowed in a single rule. Hence created separate rules) delay=400 condition=1 patterns=33:33 delay=400 condition=1 patterns=ff:ff:ff:ff Signed-off-by: Amitkumar Karwar Signed-off-by: Bing Zhao Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index 62c3d98..07d2318 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -2455,6 +2455,119 @@ static void mwifiex_cfg80211_set_wakeup(struct wiphy *wiphy, } #endif +static int mwifiex_get_coalesce_pkt_type(u8 *byte_seq) +{ + const u8 ipv4_mc_mac[] = {0x33, 0x33}; + const u8 ipv6_mc_mac[] = {0x01, 0x00, 0x5e}; + const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff}; + + if ((byte_seq[0] & 0x01) && + (byte_seq[MWIFIEX_COALESCE_MAX_BYTESEQ] == 1)) + return PACKET_TYPE_UNICAST; + else if (!memcmp(byte_seq, bc_mac, 4)) + return PACKET_TYPE_BROADCAST; + else if ((!memcmp(byte_seq, ipv4_mc_mac, 2) && + byte_seq[MWIFIEX_COALESCE_MAX_BYTESEQ] == 2) || + (!memcmp(byte_seq, ipv6_mc_mac, 3) && + byte_seq[MWIFIEX_COALESCE_MAX_BYTESEQ] == 3)) + return PACKET_TYPE_MULTICAST; + + return 0; +} + +static int +mwifiex_fill_coalesce_rule_info(struct mwifiex_private *priv, + struct cfg80211_coalesce_rules *crule, + struct mwifiex_coalesce_rule *mrule) +{ + u8 byte_seq[MWIFIEX_COALESCE_MAX_BYTESEQ + 1]; + struct filt_field_param *param; + int i; + + mrule->max_coalescing_delay = crule->delay; + + param = mrule->params; + + for (i = 0; i < crule->n_patterns; i++) { + memset(byte_seq, 0, sizeof(byte_seq)); + if (!mwifiex_is_pattern_supported(&crule->patterns[i], + byte_seq, + MWIFIEX_COALESCE_MAX_BYTESEQ)) { + dev_err(priv->adapter->dev, "Pattern not supported\n"); + return -EOPNOTSUPP; + } + + if (!crule->patterns[i].pkt_offset) { + u8 pkt_type; + + pkt_type = mwifiex_get_coalesce_pkt_type(byte_seq); + if (pkt_type && mrule->pkt_type) { + dev_err(priv->adapter->dev, + "Multiple packet types not allowed\n"); + return -EOPNOTSUPP; + } else if (pkt_type) { + mrule->pkt_type = pkt_type; + continue; + } 
+ } + + if (crule->condition == NL80211_COALESCE_CONDITION_MATCH) + param->operation = RECV_FILTER_MATCH_TYPE_EQ; + else + param->operation = RECV_FILTER_MATCH_TYPE_NE; + + param->operand_len = byte_seq[MWIFIEX_COALESCE_MAX_BYTESEQ]; + memcpy(param->operand_byte_stream, byte_seq, + param->operand_len); + param->offset = crule->patterns[i].pkt_offset; + param++; + + mrule->num_of_fields++; + } + + if (!mrule->pkt_type) { + dev_err(priv->adapter->dev, + "Packet type can not be determined\n"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int mwifiex_cfg80211_set_coalesce(struct wiphy *wiphy, + struct cfg80211_coalesce *coalesce) +{ + struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy); + int i, ret; + struct mwifiex_ds_coalesce_cfg coalesce_cfg; + struct mwifiex_private *priv = + mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA); + + memset(&coalesce_cfg, 0, sizeof(coalesce_cfg)); + if (!coalesce) { + dev_dbg(adapter->dev, + "Disable coalesce and reset all previous rules\n"); + return mwifiex_send_cmd_sync(priv, HostCmd_CMD_COALESCE_CFG, + HostCmd_ACT_GEN_SET, 0, + &coalesce_cfg); + } + + coalesce_cfg.num_of_rules = coalesce->n_rules; + for (i = 0; i < coalesce->n_rules; i++) { + ret = mwifiex_fill_coalesce_rule_info(priv, &coalesce->rules[i], + &coalesce_cfg.rule[i]); + if (ret) { + dev_err(priv->adapter->dev, + "Recheck the patterns provided for rule %d\n", + i + 1); + return ret; + } + } + + return mwifiex_send_cmd_sync(priv, HostCmd_CMD_COALESCE_CFG, + HostCmd_ACT_GEN_SET, 0, &coalesce_cfg); +} + /* station cfg80211 operations */ static struct cfg80211_ops mwifiex_cfg80211_ops = { .add_virtual_intf = mwifiex_add_virtual_intf, @@ -2488,6 +2601,7 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = { .suspend = mwifiex_cfg80211_suspend, .resume = mwifiex_cfg80211_resume, .set_wakeup = mwifiex_cfg80211_set_wakeup, + .set_coalesce = mwifiex_cfg80211_set_coalesce, #endif }; @@ -2512,6 +2626,15 @@ static bool mwifiex_is_valid_alpha2(const char *alpha2) return false; } +static const struct wiphy_coalesce_support mwifiex_coalesce_support = { + .n_rules = MWIFIEX_COALESCE_MAX_RULES, + .max_delay = MWIFIEX_MAX_COALESCING_DELAY, + .n_patterns = MWIFIEX_COALESCE_MAX_FILTERS, + .pattern_min_len = 1, + .pattern_max_len = MWIFIEX_MAX_PATTERN_LEN, + .max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN, +}; + /* * This function registers the device with CFG802.11 subsystem. 
* @@ -2573,6 +2696,8 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) wiphy->wowlan = &mwifiex_wowlan_support; #endif + wiphy->coalesce = &mwifiex_coalesce_support; + wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P; diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h index 12c6223..c9ad1c0 100644 --- a/drivers/net/wireless/mwifiex/fw.h +++ b/drivers/net/wireless/mwifiex/fw.h @@ -153,6 +153,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define TLV_TYPE_UAP_PS_AO_TIMER (PROPRIETARY_TLV_BASE_ID + 123) #define TLV_TYPE_PWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 145) #define TLV_TYPE_GWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 146) +#define TLV_TYPE_COALESCE_RULE (PROPRIETARY_TLV_BASE_ID + 154) #define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048 @@ -294,6 +295,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define HostCmd_CMD_CAU_REG_ACCESS 0x00ed #define HostCmd_CMD_SET_BSS_MODE 0x00f7 #define HostCmd_CMD_PCIE_DESC_DETAILS 0x00fa +#define HostCmd_CMD_COALESCE_CFG 0x010a #define HostCmd_CMD_MGMT_FRAME_REG 0x010c #define HostCmd_CMD_REMAIN_ON_CHAN 0x010d #define HostCmd_CMD_11AC_CFG 0x0112 @@ -1596,6 +1598,27 @@ struct host_cmd_ds_802_11_cfg_data { __le16 data_len; } __packed; +struct coalesce_filt_field_param { + u8 operation; + u8 operand_len; + __le16 offset; + u8 operand_byte_stream[4]; +}; + +struct coalesce_receive_filt_rule { + struct mwifiex_ie_types_header header; + u8 num_of_fields; + u8 pkt_type; + __le16 max_coalescing_delay; + struct coalesce_filt_field_param params[0]; +} __packed; + +struct host_cmd_ds_coalesce_cfg { + __le16 action; + __le16 num_of_rules; + struct coalesce_receive_filt_rule rule[0]; +} __packed; + struct host_cmd_ds_command { __le16 command; __le16 size; @@ -1656,6 +1679,7 @@ struct host_cmd_ds_command { struct host_cmd_ds_sta_deauth sta_deauth; struct host_cmd_11ac_vht_cfg vht_cfg; struct host_cmd_ds_802_11_cfg_data cfg_data; + struct host_cmd_ds_coalesce_cfg coalesce_cfg; } params; } __packed; diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h index 5ecda45..00a95f4 100644 --- a/drivers/net/wireless/mwifiex/ioctl.h +++ b/drivers/net/wireless/mwifiex/ioctl.h @@ -397,4 +397,39 @@ enum { MWIFIEX_FUNC_SHUTDOWN, }; +enum COALESCE_OPERATION { + RECV_FILTER_MATCH_TYPE_EQ = 0x80, + RECV_FILTER_MATCH_TYPE_NE, +}; + +enum COALESCE_PACKET_TYPE { + PACKET_TYPE_UNICAST = 1, + PACKET_TYPE_MULTICAST = 2, + PACKET_TYPE_BROADCAST = 3 +}; + +#define MWIFIEX_COALESCE_MAX_RULES 8 +#define MWIFIEX_COALESCE_MAX_BYTESEQ 4 /* non-adjustable */ +#define MWIFIEX_COALESCE_MAX_FILTERS 4 +#define MWIFIEX_MAX_COALESCING_DELAY 100 /* in msecs */ + +struct filt_field_param { + u8 operation; + u8 operand_len; + u16 offset; + u8 operand_byte_stream[MWIFIEX_COALESCE_MAX_BYTESEQ]; +}; + +struct mwifiex_coalesce_rule { + u16 max_coalescing_delay; + u8 num_of_fields; + u8 pkt_type; + struct filt_field_param params[MWIFIEX_COALESCE_MAX_FILTERS]; +}; + +struct mwifiex_ds_coalesce_cfg { + u16 num_of_rules; + struct mwifiex_coalesce_rule rule[MWIFIEX_COALESCE_MAX_RULES]; +}; + #endif /* !_MWIFIEX_IOCTL_H_ */ diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c index 448baf1..c0268b5 100644 --- a/drivers/net/wireless/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/mwifiex/sta_cmd.c @@ -1184,6 +1184,70 @@ static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv, return 0; } +static int 
+mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv, + struct host_cmd_ds_command *cmd, + u16 cmd_action, void *data_buf) +{ + struct host_cmd_ds_coalesce_cfg *coalesce_cfg = + &cmd->params.coalesce_cfg; + struct mwifiex_ds_coalesce_cfg *cfg = data_buf; + struct coalesce_filt_field_param *param; + u16 cnt, idx, length; + struct coalesce_receive_filt_rule *rule; + + cmd->command = cpu_to_le16(HostCmd_CMD_COALESCE_CFG); + cmd->size = cpu_to_le16(S_DS_GEN); + + coalesce_cfg->action = cpu_to_le16(cmd_action); + coalesce_cfg->num_of_rules = cpu_to_le16(cfg->num_of_rules); + rule = coalesce_cfg->rule; + + for (cnt = 0; cnt < cfg->num_of_rules; cnt++) { + rule->header.type = cpu_to_le16(TLV_TYPE_COALESCE_RULE); + rule->max_coalescing_delay = + cpu_to_le16(cfg->rule[cnt].max_coalescing_delay); + rule->pkt_type = cfg->rule[cnt].pkt_type; + rule->num_of_fields = cfg->rule[cnt].num_of_fields; + + length = 0; + + param = rule->params; + for (idx = 0; idx < cfg->rule[cnt].num_of_fields; idx++) { + param->operation = cfg->rule[cnt].params[idx].operation; + param->operand_len = + cfg->rule[cnt].params[idx].operand_len; + param->offset = + cpu_to_le16(cfg->rule[cnt].params[idx].offset); + memcpy(param->operand_byte_stream, + cfg->rule[cnt].params[idx].operand_byte_stream, + param->operand_len); + + length += sizeof(struct coalesce_filt_field_param); + + param++; + } + + /* Total rule length is sizeof max_coalescing_delay(u16), + * num_of_fields(u8), pkt_type(u8) and total length of the all + * params + */ + rule->header.len = cpu_to_le16(length + sizeof(u16) + + sizeof(u8) + sizeof(u8)); + + /* Add the rule length to the command size*/ + le16_add_cpu(&cmd->size, le16_to_cpu(rule->header.len) + + sizeof(struct mwifiex_ie_types_header)); + + rule = (void *)((u8 *)rule->params + length); + } + + /* Add sizeof action, num_of_rules to total command length */ + le16_add_cpu(&cmd->size, sizeof(u16) + sizeof(u16)); + + return 0; +} + /* * This function prepares the commands before sending them to the firmware. * @@ -1407,6 +1471,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, case HostCmd_CMD_MEF_CFG: ret = mwifiex_cmd_mef_cfg(priv, cmd_ptr, data_buf); break; + case HostCmd_CMD_COALESCE_CFG: + ret = mwifiex_cmd_coalesce_cfg(priv, cmd_ptr, cmd_action, + data_buf); + break; default: dev_err(priv->adapter->dev, "PREP_CMD: unknown cmd- %#x\n", cmd_no); diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c index d85df15..6a814eb 100644 --- a/drivers/net/wireless/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c @@ -997,6 +997,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no, break; case HostCmd_CMD_MEF_CFG: break; + case HostCmd_CMD_COALESCE_CFG: + break; default: dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n", resp->command); -- cgit v0.10.2 From 36e8825e65c0b1d9511feceb6c464d8925e7c791 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Tue, 6 Aug 2013 12:44:15 +0530 Subject: ath9k: Fix build failure Make sure that CONFIG_ATH9K_BTCOEX_SUPPORT is used for the WLAN/BT RX diversity hooks. Reported by the kernel build testing backend. Signed-off-by: Sujith Manoharan Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c index 456d8b9..1fc1fa9 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c @@ -555,6 +555,8 @@ static void ar9002_hw_antdiv_comb_conf_set(struct ath_hw *ah, REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regval); } +#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT + static void ar9002_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable) { struct ath_btcoex_hw *btcoex = &ah->btcoex_hw; @@ -614,6 +616,8 @@ static void ar9002_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable) REG_WRITE(ah, AR_PHY_CCK_DETECT, regval); } +#endif + static void ar9002_hw_spectral_scan_config(struct ath_hw *ah, struct ath_spec_scan *param) { @@ -689,10 +693,13 @@ void ar9002_hw_attach_phy_ops(struct ath_hw *ah) ops->antdiv_comb_conf_get = ar9002_hw_antdiv_comb_conf_get; ops->antdiv_comb_conf_set = ar9002_hw_antdiv_comb_conf_set; - ops->set_bt_ant_diversity = ar9002_hw_set_bt_ant_diversity; ops->spectral_scan_config = ar9002_hw_spectral_scan_config; ops->spectral_scan_trigger = ar9002_hw_spectral_scan_trigger; ops->spectral_scan_wait = ar9002_hw_spectral_scan_wait; +#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT + ops->set_bt_ant_diversity = ar9002_hw_set_bt_ant_diversity; +#endif + ar9002_hw_set_nf_limits(ah); } diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 4898829..39c3730 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -1412,6 +1412,8 @@ static void ar9003_hw_antdiv_comb_conf_set(struct ath_hw *ah, REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval); } +#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT + static void ar9003_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable) { struct ath9k_hw_capabilities *pCap = &ah->caps; @@ -1513,6 +1515,8 @@ static void ar9003_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable) } } +#endif + static int ar9003_hw_fast_chan_change(struct ath_hw *ah, struct ath9k_channel *chan, u8 *ini_reloaded) @@ -1688,11 +1692,14 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah) ops->antdiv_comb_conf_get = ar9003_hw_antdiv_comb_conf_get; ops->antdiv_comb_conf_set = ar9003_hw_antdiv_comb_conf_set; - ops->set_bt_ant_diversity = ar9003_hw_set_bt_ant_diversity; ops->spectral_scan_config = ar9003_hw_spectral_scan_config; ops->spectral_scan_trigger = ar9003_hw_spectral_scan_trigger; ops->spectral_scan_wait = ar9003_hw_spectral_scan_wait; +#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT + ops->set_bt_ant_diversity = ar9003_hw_set_bt_ant_diversity; +#endif + ar9003_hw_set_nf_limits(ah); ar9003_hw_set_radar_conf(ah); memcpy(ah->nf_regs, ar9300_cca_regs, sizeof(ah->nf_regs)); diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index a155190..414584e 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -270,6 +270,8 @@ static const struct file_operations fops_ani = { .llseek = default_llseek, }; +#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT + static ssize_t read_file_bt_ant_diversity(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -323,6 +325,8 @@ static const struct file_operations fops_bt_ant_diversity = { .llseek = default_llseek, }; +#endif + void ath9k_debug_stat_ant(struct ath_softc *sc, struct ath_hw_antcomb_conf *div_ant_conf, int main_rssi_avg, int alt_rssi_avg) @@ -1935,11 +1939,11 @@ int ath9k_init_debug(struct ath_hw *ah) sc->debug.debugfs_phy, 
&sc->sc_ah->gpio_mask); debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, &sc->sc_ah->gpio_val); - debugfs_create_file("bt_ant_diversity", S_IRUSR | S_IWUSR, - sc->debug.debugfs_phy, sc, &fops_bt_ant_diversity); debugfs_create_file("antenna_diversity", S_IRUSR, sc->debug.debugfs_phy, sc, &fops_antenna_diversity); #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT + debugfs_create_file("bt_ant_diversity", S_IRUSR | S_IWUSR, + sc->debug.debugfs_phy, sc, &fops_bt_ant_diversity); debugfs_create_file("btcoex", S_IRUSR, sc->debug.debugfs_phy, sc, &fops_btcoex); #endif diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h index a78d48c..83f4927 100644 --- a/drivers/net/wireless/ath/ath9k/hw-ops.h +++ b/drivers/net/wireless/ath/ath9k/hw-ops.h @@ -78,12 +78,16 @@ static inline void ath9k_hw_antdiv_comb_conf_set(struct ath_hw *ah, ath9k_hw_ops(ah)->antdiv_comb_conf_set(ah, antconf); } +#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT + static inline void ath9k_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable) { if (ath9k_hw_ops(ah)->set_bt_ant_diversity) ath9k_hw_ops(ah)->set_bt_ant_diversity(ah, enable); } +#endif + /* Private hardware call ops */ /* PHY ops */ diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index 38f461c..64ff8e6 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -719,11 +719,14 @@ struct ath_hw_ops { struct ath_hw_antcomb_conf *antconf); void (*antdiv_comb_conf_set)(struct ath_hw *ah, struct ath_hw_antcomb_conf *antconf); - void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable); void (*spectral_scan_config)(struct ath_hw *ah, struct ath_spec_scan *param); void (*spectral_scan_trigger)(struct ath_hw *ah); void (*spectral_scan_wait)(struct ath_hw *ah); + +#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT + void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable); +#endif }; struct ath_nf_limits { -- cgit v0.10.2 From 6cdfc1de173aa916c820195a040752bb603a21dd Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Tue, 6 Aug 2013 17:36:03 +0900 Subject: net: wireless: rt2x00: Staticize rt2x00queue_pause_queue_nocheck() rt2x00queue_pause_queue_nocheck()is used only in this file. Fix the following sparse warning: drivers/net/wireless/rt2x00/rt2x00queue.c:939:6: warning: symbol 'rt2x00queue_pause_queue_nocheck' was not declared. Should it be static? Signed-off-by: Jingoo Han Acked-by: Gertjan van Wingerde Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c index aa95c6c..6c8a33b 100644 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c @@ -936,7 +936,7 @@ void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index) spin_unlock_irqrestore(&queue->index_lock, irqflags); } -void rt2x00queue_pause_queue_nocheck(struct data_queue *queue) +static void rt2x00queue_pause_queue_nocheck(struct data_queue *queue) { switch (queue->qid) { case QID_AC_VO: -- cgit v0.10.2 From e33354764dff11905f3f36f8da72ae46f177d890 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 6 Aug 2013 11:13:19 +0200 Subject: brcmfmac: use CFG80211_TESTMODE_CMD This is essentially the same, but written shorter. Signed-off-by: Johannes Berg Acked-by: Arend van Spriel Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index 7fa71f7..c3dfea3 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c @@ -4164,9 +4164,7 @@ static struct cfg80211_ops wl_cfg80211_ops = { .stop_p2p_device = brcmf_p2p_stop_device, .crit_proto_start = brcmf_cfg80211_crit_proto_start, .crit_proto_stop = brcmf_cfg80211_crit_proto_stop, -#ifdef CONFIG_NL80211_TESTMODE - .testmode_cmd = brcmf_cfg80211_testmode -#endif + CFG80211_TESTMODE_CMD(brcmf_cfg80211_testmode) }; static s32 brcmf_nl80211_iftype_to_mode(enum nl80211_iftype type) -- cgit v0.10.2 From a7586ee441d50f0379a202d45d8fd0a7952cc918 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 6 Aug 2013 14:18:02 +0200 Subject: ath9k: add utility functions for accessing tid queues Useful for further fixes / cleanups Signed-off-by: Felix Fietkau Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 52cd521..eaa9eaf 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -168,6 +168,16 @@ static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq, } } +static bool ath_tid_has_buffered(struct ath_atx_tid *tid) +{ + return !skb_queue_empty(&tid->buf_q); +} + +static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid) +{ + return __skb_dequeue(&tid->buf_q); +} + static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) { struct ath_txq *txq = tid->ac->txq; @@ -182,7 +192,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) memset(&ts, 0, sizeof(ts)); - while ((skb = __skb_dequeue(&tid->buf_q))) { + while ((skb = ath_tid_dequeue(tid))) { fi = get_frame_info(skb); bf = fi->bf; @@ -266,7 +276,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq, memset(&ts, 0, sizeof(ts)); INIT_LIST_HEAD(&bf_head); - while ((skb = __skb_dequeue(&tid->buf_q))) { + while ((skb = ath_tid_dequeue(tid))) { fi = get_frame_info(skb); bf = fi->bf; @@ -815,7 +825,7 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid, static struct ath_buf * ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq, - struct ath_atx_tid *tid) + struct ath_atx_tid *tid, struct sk_buff_head **q) { struct ath_frame_info *fi; struct sk_buff *skb; @@ -823,7 +833,8 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq, u16 seqno; while (1) { - skb = skb_peek(&tid->buf_q); + *q = &tid->buf_q; + skb = skb_peek(*q); if (!skb) break; @@ -833,7 +844,7 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq, bf = ath_tx_setup_buffer(sc, txq, tid, skb); if (!bf) { - __skb_unlink(skb, &tid->buf_q); + __skb_unlink(skb, *q); ath_txq_skb_done(sc, txq, skb); ieee80211_free_txskb(sc->hw, skb); continue; @@ -852,7 +863,7 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq, INIT_LIST_HEAD(&bf_head); list_add(&bf->list, &bf_head); - __skb_unlink(skb, &tid->buf_q); + __skb_unlink(skb, *q); ath_tx_update_baw(sc, tid, seqno); ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); continue; @@ -881,9 +892,10 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, struct ieee80211_tx_info *tx_info; struct ath_frame_info *fi; struct sk_buff *skb; + struct sk_buff_head *tid_q; do { - bf = ath_tx_get_tid_subframe(sc, txq, tid); + bf = ath_tx_get_tid_subframe(sc, txq, 
tid, &tid_q); if (!bf) { status = ATH_AGGR_BAW_CLOSED; break; @@ -940,14 +952,14 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, ath_tx_addto_baw(sc, tid, bf->bf_state.seqno); bf->bf_state.ndelim = ndelim; - __skb_unlink(skb, &tid->buf_q); + __skb_unlink(skb, tid_q); list_add_tail(&bf->list, bf_q); if (bf_prev) bf_prev->bf_next = bf; bf_prev = bf; - } while (!skb_queue_empty(&tid->buf_q)); + } while (ath_tid_has_buffered(tid)); *aggr_len = al; @@ -1250,7 +1262,7 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, int aggr_len; do { - if (skb_queue_empty(&tid->buf_q)) + if (!ath_tid_has_buffered(tid)) return; INIT_LIST_HEAD(&bf_q); @@ -1354,7 +1366,7 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, ath_txq_lock(sc, txq); - buffered = !skb_queue_empty(&tid->buf_q); + buffered = ath_tid_has_buffered(tid); tid->sched = false; list_del(&tid->list); @@ -1386,7 +1398,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an) ath_txq_lock(sc, txq); ac->clear_ps_filter = true; - if (!skb_queue_empty(&tid->buf_q) && !tid->paused) { + if (!tid->paused && ath_tid_has_buffered(tid)) { ath_tx_queue_tid(txq, tid); ath_txq_schedule(sc, txq); } @@ -1411,7 +1423,7 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor; tid->paused = false; - if (!skb_queue_empty(&tid->buf_q)) { + if (ath_tid_has_buffered(tid)) { ath_tx_queue_tid(txq, tid); ath_txq_schedule(sc, txq); } @@ -1431,6 +1443,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_tx_info *info; struct list_head bf_q; struct ath_buf *bf_tail = NULL, *bf; + struct sk_buff_head *tid_q; int sent = 0; int i; @@ -1446,12 +1459,12 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw, continue; ath_txq_lock(sc, tid->ac->txq); - while (!skb_queue_empty(&tid->buf_q) && nframes > 0) { - bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid); + while (nframes > 0) { + bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q); if (!bf) break; - __skb_unlink(bf->bf_mpdu, &tid->buf_q); + __skb_unlink(bf->bf_mpdu, tid_q); list_add_tail(&bf->list, &bf_q); ath_set_rates(tid->an->vif, tid->an->sta, bf); ath_tx_addto_baw(sc, tid, bf->bf_state.seqno); @@ -1464,7 +1477,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw, sent++; TX_STAT_INC(txq->axq_qnum, a_queued_hw); - if (skb_queue_empty(&tid->buf_q)) + if (!ath_tid_has_buffered(tid)) ieee80211_sta_set_buffered(an->sta, i, false); } ath_txq_unlock_complete(sc, tid->ac->txq); @@ -1750,7 +1763,7 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) * add tid to round-robin queue if more frames * are pending for the tid */ - if (!skb_queue_empty(&tid->buf_q)) + if (ath_tid_has_buffered(tid)) ath_tx_queue_tid(txq, tid); if (tid == last_tid || @@ -1859,7 +1872,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_txq *txq, * - seqno is not within block-ack window * - h/w queue depth exceeds low water mark */ - if ((!skb_queue_empty(&tid->buf_q) || tid->paused || + if ((ath_tid_has_buffered(tid) || tid->paused || !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) || txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) && txq != sc->tx.uapsdq) { -- cgit v0.10.2 From bb195ff61f2e205886ec246465f0809e5c6cd52f Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 6 Aug 2013 14:18:03 +0200 Subject: ath9k: split tid retry packets into a separate queue Improves packet retry order and 
helps with further tx queueing improvements. Signed-off-by: Felix Fietkau Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index c497d52..2ce79c1 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -241,6 +241,7 @@ struct ath_buf { struct ath_atx_tid { struct list_head list; struct sk_buff_head buf_q; + struct sk_buff_head retry_q; struct ath_node *an; struct ath_atx_ac *ac; unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)]; diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index eaa9eaf..c2b6cf0 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -170,12 +170,18 @@ static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq, static bool ath_tid_has_buffered(struct ath_atx_tid *tid) { - return !skb_queue_empty(&tid->buf_q); + return !skb_queue_empty(&tid->buf_q) || !skb_queue_empty(&tid->retry_q); } static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid) { - return __skb_dequeue(&tid->buf_q); + struct sk_buff *skb; + + skb = __skb_dequeue(&tid->retry_q); + if (!skb) + skb = __skb_dequeue(&tid->buf_q); + + return skb; } static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) @@ -593,7 +599,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, if (an->sleeping) ieee80211_sta_set_buffered(sta, tid->tidno, true); - skb_queue_splice(&bf_pending, &tid->buf_q); + skb_queue_splice_tail(&bf_pending, &tid->retry_q); if (!an->sleeping) { ath_tx_queue_tid(txq, tid); @@ -833,7 +839,10 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq, u16 seqno; while (1) { - *q = &tid->buf_q; + *q = &tid->retry_q; + if (skb_queue_empty(*q)) + *q = &tid->buf_q; + skb = skb_peek(*q); if (!skb) break; @@ -2636,6 +2645,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) tid->paused = false; tid->active = false; __skb_queue_head_init(&tid->buf_q); + __skb_queue_head_init(&tid->retry_q); acno = TID_TO_WME_AC(tidno); tid->ac = &an->ac[acno]; } -- cgit v0.10.2 From 1803d02d7a262e451b26e6134786cced02ab2d1e Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 6 Aug 2013 14:18:04 +0200 Subject: ath9k: add function for getting the tx tid for a packet Signed-off-by: Felix Fietkau Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index c2b6cf0..350429b 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -168,6 +168,20 @@ static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq, } } +static struct ath_atx_tid * +ath_get_skb_tid(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr; + u8 tidno = 0; + + hdr = (struct ieee80211_hdr *) skb->data; + if (ieee80211_is_data_qos(hdr->frame_control)) + tidno = ieee80211_get_qos_ctl(hdr)[0]; + + tidno &= IEEE80211_QOS_CTL_TID_MASK; + return ATH_AN_2_TID(an, tidno); +} + static bool ath_tid_has_buffered(struct ath_atx_tid *tid) { return !skb_queue_empty(&tid->buf_q) || !skb_queue_empty(&tid->retry_q); @@ -419,7 +433,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, struct ieee80211_tx_rate rates[4]; struct ath_frame_info *fi; int nframes; - u8 tidno; bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH); int i, retries; int bar_index = -1; @@ -456,8 +469,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, } an = (struct ath_node *)sta->drv_priv; - tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK; - tid = ATH_AN_2_TID(an, tidno); + tid = ath_get_skb_tid(sc, an, skb); seq_first = tid->seq_start; isba = ts->ts_flags & ATH9K_TX_BA; @@ -469,7 +481,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, * Only BlockAcks have a TID and therefore normal Acks cannot be * checked */ - if (isba && tidno != ts->tid) + if (isba && tid->tidno != ts->tid) txok = false; isaggr = bf_isaggr(bf); @@ -2116,7 +2128,6 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, struct ath_txq *txq = txctl->txq; struct ath_atx_tid *tid = NULL; struct ath_buf *bf; - u8 tidno; int q; int ret; @@ -2147,9 +2158,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, } if (txctl->an && ieee80211_is_data_qos(hdr->frame_control)) { - tidno = ieee80211_get_qos_ctl(hdr)[0] & - IEEE80211_QOS_CTL_TID_MASK; - tid = ATH_AN_2_TID(txctl->an, tidno); + tid = ath_get_skb_tid(sc, txctl->an, skb); WARN_ON(tid->ac->txq != txctl->txq); } -- cgit v0.10.2 From 18fcf1c6a6ff4b00fe06e1d0ef05ff76ed2fe86c Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 6 Aug 2013 14:18:05 +0200 Subject: ath9k: add CAB queue info to debugfs Signed-off-by: Felix Fietkau Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index 414584e..c10cec5 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -732,6 +732,28 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf, return retval; } +static ssize_t print_queue(struct ath_softc *sc, struct ath_txq *txq, + char *buf, ssize_t size) +{ + ssize_t len = 0; + + ath_txq_lock(sc, txq); + + len += snprintf(buf + len, size - len, "%s: %d ", + "qnum", txq->axq_qnum); + len += snprintf(buf + len, size - len, "%s: %2d ", + "qdepth", txq->axq_depth); + len += snprintf(buf + len, size - len, "%s: %2d ", + "ampdu-depth", txq->axq_ampdu_depth); + len += snprintf(buf + len, size - len, "%s: %3d ", + "pending", txq->pending_frames); + len += snprintf(buf + len, size - len, "%s: %d\n", + "stopped", txq->stopped); + + ath_txq_unlock(sc, txq); + return len; +} + static ssize_t read_file_queues(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { @@ -749,24 +771,13 @@ static ssize_t read_file_queues(struct file *file, char __user *user_buf, for (i = 0; i < IEEE80211_NUM_ACS; i++) { txq = sc->tx.txq_map[i]; - len += snprintf(buf + len, size - len, "(%s): ", qname[i]); - - ath_txq_lock(sc, txq); - - len += snprintf(buf + len, size - len, "%s: %d ", - "qnum", txq->axq_qnum); - len += snprintf(buf + len, size - len, "%s: %2d ", - "qdepth", txq->axq_depth); - len += snprintf(buf + len, size - len, "%s: %2d ", - "ampdu-depth", txq->axq_ampdu_depth); - len += snprintf(buf + len, size - len, "%s: %3d ", - "pending", txq->pending_frames); - len += snprintf(buf + len, size - len, "%s: %d\n", - "stopped", txq->stopped); - - ath_txq_unlock(sc, txq); + len += snprintf(buf + len, size - len, "(%s): ", qname[i]); + len += print_queue(sc, txq, buf + len, size - len); } + len += snprintf(buf + len, size - len, "(CAB): "); + len += print_queue(sc, sc->beacon.cabq, buf + len, size - len); + if (len > size) len = size; -- cgit v0.10.2 From a1cd94d345a8f68300c8ccd8422434e54199f68d Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 6 Aug 2013 14:18:06 +0200 Subject: ath9k: simplify ath_tx_form_aggr The check for ATH_AMPDU_SUBFRAME_DEFAULT is unnecessary, since it's set to half the maximum BlockAck Window size, which is already the maximum value that h_baw could possibly have. Also remove unnecessary variables. Signed-off-by: Felix Fietkau Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 2ce79c1..725515f 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -137,7 +137,6 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd, #define ATH_AGGR_ENCRYPTDELIM 10 /* minimum h/w qdepth to be sustained to maximize aggregation */ #define ATH_AGGR_MIN_QDEPTH 2 -#define ATH_AMPDU_SUBFRAME_DEFAULT 32 #define IEEE80211_SEQ_SEQ_SHIFT 4 #define IEEE80211_SEQ_MAX 4096 diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 350429b..887f2d4 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -906,9 +906,9 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, { #define PADBYTES(_len) ((4 - ((_len) % 4)) % 4) struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL; - int rl = 0, nframes = 0, ndelim, prev_al = 0; + int nframes = 0, ndelim; u16 aggr_limit = 0, al = 0, bpad = 0, - al_delta, h_baw = tid->baw_size / 2; + al_delta, h_baw = tid->baw_size / 2; enum ATH_AGGR_STATUS status = ATH_AGGR_DONE; struct ieee80211_tx_info *tx_info; struct ath_frame_info *fi; @@ -925,33 +925,24 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, skb = bf->bf_mpdu; fi = get_frame_info(skb); - if (!bf_first) + if (!bf_first) { bf_first = bf; - - if (!rl) { ath_set_rates(tid->an->vif, tid->an->sta, bf); aggr_limit = ath_lookup_rate(sc, bf, tid); - rl = 1; } /* do not exceed aggregation limit */ al_delta = ATH_AGGR_DELIM_SZ + fi->framelen; + if (nframes) { + if (aggr_limit < al + bpad + al_delta || + ath_lookup_legacy(bf) || nframes >= h_baw) { + status = ATH_AGGR_LIMITED; + break; + } - if (nframes && - ((aggr_limit < (al + bpad + al_delta + prev_al)) || - ath_lookup_legacy(bf))) { - status = ATH_AGGR_LIMITED; - break; - } - - tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); - if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) - break; - - /* do not exceed subframe limit */ - if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) { - status = ATH_AGGR_LIMITED; - break; + tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); + if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) + break; } /* add padding for previous frame to aggregation length */ -- cgit v0.10.2 From 8fed14085595c703612b5b712ffe114ef0370929 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 6 Aug 2013 14:18:07 +0200 Subject: ath9k: fix block ack window tracking check When a packet has been tracked as part of the BlockAck window and added to the hardware queue, it can end up back in the TID queue again with fi->retries still set to 0 (e.g. if the frame was filtered). Keep an extra bit for the BAW tracking status to fix this corner case. Signed-off-by: Felix Fietkau Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 725515f..126f980 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -211,8 +211,9 @@ struct ath_frame_info { int framelen; enum ath9k_key_type keytype; u8 keyix; - u8 retries; u8 rtscts_rate; + u8 retries : 7; + u8 baw_tracked : 1; }; struct ath_buf_state { diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 887f2d4..d66e8b8 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -225,7 +225,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) } } - if (fi->retries) { + if (fi->baw_tracked) { list_add_tail(&bf->list, &bf_head); ath_tx_update_baw(sc, tid, bf->bf_state.seqno); ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); @@ -262,13 +262,16 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, } static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid, - u16 seqno) + struct ath_buf *bf) { + struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu); + u16 seqno = bf->bf_state.seqno; int index, cindex; index = ATH_BA_INDEX(tid->seq_start, seqno); cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); __set_bit(cindex, tid->tx_buf); + fi->baw_tracked = 1; if (index >= ((tid->baw_tail - tid->baw_head) & (ATH_TID_MAX_BUFS - 1))) { @@ -960,8 +963,8 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, bf->bf_next = NULL; /* link buffers of this frame to the aggregate */ - if (!fi->retries) - ath_tx_addto_baw(sc, tid, bf->bf_state.seqno); + if (!fi->baw_tracked) + ath_tx_addto_baw(sc, tid, bf); bf->bf_state.ndelim = ndelim; __skb_unlink(skb, tid_q); @@ -1479,7 +1482,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw, __skb_unlink(bf->bf_mpdu, tid_q); list_add_tail(&bf->list, &bf_q); ath_set_rates(tid->an->vif, tid->an->sta, bf); - ath_tx_addto_baw(sc, tid, bf->bf_state.seqno); + ath_tx_addto_baw(sc, tid, bf); bf->bf_state.bf_type &= ~BUF_AGGR; if (bf_tail) bf_tail->bf_next = bf; @@ -1912,7 +1915,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_txq *txq, list_add(&bf->list, &bf_head); /* Add sub-frame to BAW */ - ath_tx_addto_baw(sc, tid, bf->bf_state.seqno); + ath_tx_addto_baw(sc, tid, bf); /* Queue to h/w without aggregation */ TX_STAT_INC(txq->axq_qnum, a_queued_hw); -- cgit v0.10.2 From 73364b0c470a9c0361a389f3460357a7c7ffd75d Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 6 Aug 2013 14:18:08 +0200 Subject: ath9k: prepare queueing code for handling unaggregated traffic - Allow ath_tx_get_tid_subframe to return non-AMPDU subframes. - Reset the tid paused state on aggregation stop - Initialize software queues even when HT is not supported Signed-off-by: Felix Fietkau Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index afeab3c..252497a 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -1371,9 +1371,6 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw, struct ath_softc *sc = hw->priv; struct ath_node *an = (struct ath_node *) sta->drv_priv; - if (!sta->ht_cap.ht_supported) - return; - switch (cmd) { case STA_NOTIFY_SLEEP: an->sleeping = true; diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index d66e8b8..e69f7fe 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -672,7 +672,7 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, } else ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok); - if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && !flush) + if (!flush) ath_txq_schedule(sc, txq); } @@ -848,6 +848,7 @@ static struct ath_buf * ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq, struct ath_atx_tid *tid, struct sk_buff_head **q) { + struct ieee80211_tx_info *tx_info; struct ath_frame_info *fi; struct sk_buff *skb; struct ath_buf *bf; @@ -874,6 +875,16 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq, continue; } + bf->bf_next = NULL; + bf->bf_lastbf = bf; + + tx_info = IEEE80211_SKB_CB(skb); + tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT; + if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) { + bf->bf_state.bf_type = 0; + return bf; + } + bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR; seqno = bf->bf_state.seqno; @@ -893,8 +904,6 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq, continue; } - bf->bf_next = NULL; - bf->bf_lastbf = bf; return bf; } @@ -1356,7 +1365,7 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) ath_txq_lock(sc, txq); txtid->active = false; - txtid->paused = true; + txtid->paused = false; ath_tx_flush_tid(sc, txtid); ath_txq_unlock_complete(sc, txq); } @@ -2425,8 +2434,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) if (list_empty(&txq->axq_q)) { txq->axq_link = NULL; - if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) - ath_txq_schedule(sc, txq); + ath_txq_schedule(sc, txq); break; } bf = list_first_entry(&txq->axq_q, struct ath_buf, list); -- cgit v0.10.2 From 897d7fd9b5f1bee657d000b882c642541bb2ba3e Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 6 Aug 2013 14:18:09 +0200 Subject: ath9k: fix clearing expired A-MPDU subframes in tx completion When the tid aggregation state has been marked as inactive, free completed tx packets immediately. When a new aggregation session has not been initialized yet, the BAW checks do not recognize it as expired. Might fix potential stalls in setting up a new aggregation session. Signed-off-by: Felix Fietkau Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index e69f7fe..3ede3e9 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -520,7 +520,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, tx_info = IEEE80211_SKB_CB(skb); fi = get_frame_info(skb); - if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) { + if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno) || + !tid->active) { /* * Outside of the current BlockAck window, * maybe part of a previous session -- cgit v0.10.2 From 026d5b07c03458f9c0ccd19c3850564a5409c325 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 6 Aug 2013 14:18:10 +0200 Subject: ath9k: always clear ps filter bit on new assoc Otherwise in some cases, EAPOL frames might be filtered during the initial handshake, causing delays and assoc failures. Cc: stable@vger.kernel.org Signed-off-by: Felix Fietkau Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 3ede3e9..dfa85f1 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -2665,6 +2665,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) for (acno = 0, ac = &an->ac[acno]; acno < IEEE80211_NUM_ACS; acno++, ac++) { ac->sched = false; + ac->clear_ps_filter = true; ac->txq = sc->tx.txq_map[acno]; INIT_LIST_HEAD(&ac->tid_q); } -- cgit v0.10.2 From 2800e82bcc3ba8d2a2bbb42557e2f320f8b27da1 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 6 Aug 2013 14:18:11 +0200 Subject: ath9k: use software queues for un-aggregated data packets This is a first step for improving fairness between legacy and 802.11n traffic, and it should also improve reliability of resets and channel changes by keeping the hardware queue depth very short. When an aggregation session is torn down, all packets in the retry queue will be removed from the BAW and freed. For all subframes that have not been transmitted yet, the A-MPDU flag will be cleared, and a sequence number allocated. This ensures that the next A-MPDU session will get the correct initial sequence number. This happens both on aggregation session start and stop. Signed-off-by: Felix Fietkau Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 126f980..41ea3fd 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -137,6 +137,8 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd, #define ATH_AGGR_ENCRYPTDELIM 10 /* minimum h/w qdepth to be sustained to maximize aggregation */ #define ATH_AGGR_MIN_QDEPTH 2 +/* minimum h/w qdepth for non-aggregated traffic */ +#define ATH_NON_AGGR_MIN_QDEPTH 8 #define IEEE80211_SEQ_SEQ_SHIFT 4 #define IEEE80211_SEQ_MAX 4096 @@ -173,12 +175,6 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd, #define ATH_TX_COMPLETE_POLL_INT 1000 -enum ATH_AGGR_STATUS { - ATH_AGGR_DONE, - ATH_AGGR_BAW_CLOSED, - ATH_AGGR_LIMITED, -}; - #define ATH_TXFIFO_DEPTH 8 struct ath_txq { int mac80211_qnum; /* mac80211 queue number, -1 means not mac80211 Q */ diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index dfa85f1..3b66f2b 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -198,6 +198,41 @@ static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid) return skb; } +/* + * ath_tx_tid_change_state: + * - clears a-mpdu flag of previous session + * - force sequence number allocation to fix next BlockAck Window + */ +static void +ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid) +{ + struct ath_txq *txq = tid->ac->txq; + struct ieee80211_tx_info *tx_info; + struct sk_buff *skb, *tskb; + struct ath_buf *bf; + struct ath_frame_info *fi; + + skb_queue_walk_safe(&tid->buf_q, skb, tskb) { + fi = get_frame_info(skb); + bf = fi->bf; + + tx_info = IEEE80211_SKB_CB(skb); + tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU; + + if (bf) + continue; + + bf = ath_tx_setup_buffer(sc, txq, tid, skb); + if (!bf) { + __skb_unlink(skb, &tid->buf_q); + ath_txq_skb_done(sc, txq, skb); + ieee80211_free_txskb(sc->hw, skb); + continue; + } + } + +} + static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) { struct ath_txq *txq = tid->ac->txq; @@ -212,28 +247,22 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) memset(&ts, 0, sizeof(ts)); - while ((skb = ath_tid_dequeue(tid))) { + while ((skb = __skb_dequeue(&tid->retry_q))) { fi = get_frame_info(skb); bf = fi->bf; - if (!bf) { - bf = ath_tx_setup_buffer(sc, txq, tid, skb); - if (!bf) { - ath_txq_skb_done(sc, txq, skb); - ieee80211_free_txskb(sc->hw, skb); - continue; - } + ath_txq_skb_done(sc, txq, skb); + ieee80211_free_txskb(sc->hw, skb); + continue; } if (fi->baw_tracked) { - list_add_tail(&bf->list, &bf_head); ath_tx_update_baw(sc, tid, bf->bf_state.seqno); - ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); sendbar = true; - } else { - ath_set_rates(tid->an->vif, tid->an->sta, bf); - ath_tx_send_normal(sc, txq, NULL, skb); } + + list_add_tail(&bf->list, &bf_head); + ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); } if (sendbar) { @@ -911,50 +940,39 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq, return NULL; } -static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, - struct ath_txq *txq, - struct ath_atx_tid *tid, - struct list_head *bf_q, - int *aggr_len) +static bool +ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq, + struct ath_atx_tid *tid, struct list_head *bf_q, + struct ath_buf *bf_first, struct sk_buff_head *tid_q, + int *aggr_len) { #define PADBYTES(_len) ((4 - ((_len) % 4)) % 4) - struct 
ath_buf *bf, *bf_first = NULL, *bf_prev = NULL; + struct ath_buf *bf = bf_first, *bf_prev = NULL; int nframes = 0, ndelim; u16 aggr_limit = 0, al = 0, bpad = 0, al_delta, h_baw = tid->baw_size / 2; - enum ATH_AGGR_STATUS status = ATH_AGGR_DONE; struct ieee80211_tx_info *tx_info; struct ath_frame_info *fi; struct sk_buff *skb; - struct sk_buff_head *tid_q; + bool closed = false; - do { - bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q); - if (!bf) { - status = ATH_AGGR_BAW_CLOSED; - break; - } + bf = bf_first; + aggr_limit = ath_lookup_rate(sc, bf, tid); + do { skb = bf->bf_mpdu; fi = get_frame_info(skb); - if (!bf_first) { - bf_first = bf; - ath_set_rates(tid->an->vif, tid->an->sta, bf); - aggr_limit = ath_lookup_rate(sc, bf, tid); - } - /* do not exceed aggregation limit */ al_delta = ATH_AGGR_DELIM_SZ + fi->framelen; if (nframes) { if (aggr_limit < al + bpad + al_delta || - ath_lookup_legacy(bf) || nframes >= h_baw) { - status = ATH_AGGR_LIMITED; + ath_lookup_legacy(bf) || nframes >= h_baw) break; - } tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); - if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) + if ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) || + !(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) break; } @@ -984,11 +1002,26 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, bf_prev = bf; + bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q); + if (!bf) { + closed = true; + break; + } } while (ath_tid_has_buffered(tid)); + bf = bf_first; + bf->bf_lastbf = bf_prev; + + if (bf == bf_prev) { + al = get_frame_info(bf->bf_mpdu)->framelen; + bf->bf_state.bf_type = BUF_AMPDU; + } else { + TX_STAT_INC(txq->axq_qnum, a_aggr); + } + *aggr_len = al; - return status; + return closed; #undef PADBYTES } @@ -1277,14 +1310,50 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf, } } +static void +ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq, + struct ath_atx_tid *tid, struct list_head *bf_q, + struct ath_buf *bf_first, struct sk_buff_head *tid_q) +{ + struct ath_buf *bf = bf_first, *bf_prev = NULL; + struct sk_buff *skb; + int nframes = 0; + + do { + struct ieee80211_tx_info *tx_info; + skb = bf->bf_mpdu; + + nframes++; + __skb_unlink(skb, tid_q); + list_add_tail(&bf->list, bf_q); + if (bf_prev) + bf_prev->bf_next = bf; + bf_prev = bf; + + if (nframes >= 2) + break; + + bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q); + if (!bf) + break; + + tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); + if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) + break; + + ath_set_rates(tid->an->vif, tid->an->sta, bf); + } while (1); +} + static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, struct ath_atx_tid *tid) { struct ath_buf *bf; - enum ATH_AGGR_STATUS status; struct ieee80211_tx_info *tx_info; + struct sk_buff_head *tid_q; struct list_head bf_q; - int aggr_len; + int aggr_len = 0; + bool aggr, last = true; do { if (!ath_tid_has_buffered(tid)) @@ -1292,38 +1361,34 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, INIT_LIST_HEAD(&bf_q); - status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len); - - /* - * no frames picked up to be aggregated; - * block-ack window is not open. 
- */ - if (list_empty(&bf_q)) + bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q); + if (!bf) break; - bf = list_first_entry(&bf_q, struct ath_buf, list); - bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list); tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); + aggr = !!(tx_info->flags & IEEE80211_TX_CTL_AMPDU); + if ((aggr && txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) || + (!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) + break; + + ath_set_rates(tid->an->vif, tid->an->sta, bf); + if (aggr) + last = ath_tx_form_aggr(sc, txq, tid, &bf_q, bf, + tid_q, &aggr_len); + else + ath_tx_form_burst(sc, txq, tid, &bf_q, bf, tid_q); + + if (list_empty(&bf_q)) + return; if (tid->ac->clear_ps_filter) { tid->ac->clear_ps_filter = false; tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; - } else { - tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT; - } - - /* if only one frame, send as non-aggregate */ - if (bf == bf->bf_lastbf) { - aggr_len = get_frame_info(bf->bf_mpdu)->framelen; - bf->bf_state.bf_type = BUF_AMPDU; - } else { - TX_STAT_INC(txq->axq_qnum, a_aggr); } ath_tx_fill_desc(sc, bf, txq, aggr_len); ath_tx_txqaddbuf(sc, txq, &bf_q, false); - } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH && - status != ATH_AGGR_BAW_CLOSED); + } while (!last); } int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, @@ -1347,6 +1412,9 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, an->mpdudensity = density; } + /* force sequence number allocation for pending frames */ + ath_tx_tid_change_state(sc, txtid); + txtid->active = true; txtid->paused = true; *ssn = txtid->seq_start = txtid->seq_next; @@ -1368,6 +1436,7 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) txtid->active = false; txtid->paused = false; ath_tx_flush_tid(sc, txtid); + ath_tx_tid_change_state(sc, txtid); ath_txq_unlock_complete(sc, txq); } @@ -1882,58 +1951,6 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, } } -static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_txq *txq, - struct ath_atx_tid *tid, struct sk_buff *skb, - struct ath_tx_control *txctl) -{ - struct ath_frame_info *fi = get_frame_info(skb); - struct list_head bf_head; - struct ath_buf *bf; - - /* - * Do not queue to h/w when any of the following conditions is true: - * - there are pending frames in software queue - * - the TID is currently paused for ADDBA/BAR request - * - seqno is not within block-ack window - * - h/w queue depth exceeds low water mark - */ - if ((ath_tid_has_buffered(tid) || tid->paused || - !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) || - txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) && - txq != sc->tx.uapsdq) { - /* - * Add this frame to software queue for scheduling later - * for aggregation. 
- */ - TX_STAT_INC(txq->axq_qnum, a_queued_sw); - __skb_queue_tail(&tid->buf_q, skb); - if (!txctl->an || !txctl->an->sleeping) - ath_tx_queue_tid(txq, tid); - return; - } - - bf = ath_tx_setup_buffer(sc, txq, tid, skb); - if (!bf) { - ath_txq_skb_done(sc, txq, skb); - ieee80211_free_txskb(sc->hw, skb); - return; - } - - ath_set_rates(tid->an->vif, tid->an->sta, bf); - bf->bf_state.bf_type = BUF_AMPDU; - INIT_LIST_HEAD(&bf_head); - list_add(&bf->list, &bf_head); - - /* Add sub-frame to BAW */ - ath_tx_addto_baw(sc, tid, bf); - - /* Queue to h/w without aggregation */ - TX_STAT_INC(txq->axq_qnum, a_queued_hw); - bf->bf_lastbf = bf; - ath_tx_fill_desc(sc, bf, txq, fi->framelen); - ath_tx_txqaddbuf(sc, txq, &bf_head, false); -} - static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, struct ath_atx_tid *tid, struct sk_buff *skb) { @@ -2159,20 +2176,25 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, ath_txq_unlock(sc, txq); txq = sc->tx.uapsdq; ath_txq_lock(sc, txq); - } - - if (txctl->an && ieee80211_is_data_qos(hdr->frame_control)) { + } else if (txctl->an && + ieee80211_is_data_present(hdr->frame_control)) { tid = ath_get_skb_tid(sc, txctl->an, skb); WARN_ON(tid->ac->txq != txctl->txq); - } - if ((info->flags & IEEE80211_TX_CTL_AMPDU) && tid) { + if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) + tid->ac->clear_ps_filter = true; + /* - * Try aggregation if it's a unicast data frame - * and the destination is HT capable. + * Add this frame to software queue for scheduling later + * for aggregation. */ - ath_tx_send_ampdu(sc, txq, tid, skb, txctl); + TX_STAT_INC(txq->axq_qnum, a_queued_sw); + __skb_queue_tail(&tid->buf_q, skb); + if (!txctl->an->sleeping) + ath_tx_queue_tid(txq, tid); + + ath_txq_schedule(sc, txq); goto out; } -- cgit v0.10.2 From 020f20f693c254a6daa727473d6e855a63c3e502 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 6 Aug 2013 14:18:12 +0200 Subject: ath9k: improve tx scheduling fairness Instead of trying to schedule the same TID multiple times in a loop, iterate over other TIDs/stations first. Signed-off-by: Felix Fietkau Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 3b66f2b..f9e6eb2 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1345,8 +1345,8 @@ ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq, } while (1); } -static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, - struct ath_atx_tid *tid) +static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, + struct ath_atx_tid *tid, bool *stop) { struct ath_buf *bf; struct ieee80211_tx_info *tx_info; @@ -1355,40 +1355,41 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, int aggr_len = 0; bool aggr, last = true; - do { - if (!ath_tid_has_buffered(tid)) - return; + if (!ath_tid_has_buffered(tid)) + return false; - INIT_LIST_HEAD(&bf_q); + INIT_LIST_HEAD(&bf_q); - bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q); - if (!bf) - break; + bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q); + if (!bf) + return false; - tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); - aggr = !!(tx_info->flags & IEEE80211_TX_CTL_AMPDU); - if ((aggr && txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) || - (!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) - break; + tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); + aggr = !!(tx_info->flags & IEEE80211_TX_CTL_AMPDU); + if ((aggr && txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) || + (!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) { + *stop = true; + return false; + } - ath_set_rates(tid->an->vif, tid->an->sta, bf); - if (aggr) - last = ath_tx_form_aggr(sc, txq, tid, &bf_q, bf, - tid_q, &aggr_len); - else - ath_tx_form_burst(sc, txq, tid, &bf_q, bf, tid_q); + ath_set_rates(tid->an->vif, tid->an->sta, bf); + if (aggr) + last = ath_tx_form_aggr(sc, txq, tid, &bf_q, bf, + tid_q, &aggr_len); + else + ath_tx_form_burst(sc, txq, tid, &bf_q, bf, tid_q); - if (list_empty(&bf_q)) - return; + if (list_empty(&bf_q)) + return false; - if (tid->ac->clear_ps_filter) { - tid->ac->clear_ps_filter = false; - tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; - } + if (tid->ac->clear_ps_filter) { + tid->ac->clear_ps_filter = false; + tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; + } - ath_tx_fill_desc(sc, bf, txq, aggr_len); - ath_tx_txqaddbuf(sc, txq, &bf_q, false); - } while (!last); + ath_tx_fill_desc(sc, bf, txq, aggr_len); + ath_tx_txqaddbuf(sc, txq, &bf_q, false); + return true; } int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, @@ -1824,25 +1825,27 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) */ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) { - struct ath_atx_ac *ac, *ac_tmp, *last_ac; + struct ath_atx_ac *ac, *last_ac; struct ath_atx_tid *tid, *last_tid; + bool sent = false; if (test_bit(SC_OP_HW_RESET, &sc->sc_flags) || - list_empty(&txq->axq_acq) || - txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) + list_empty(&txq->axq_acq)) return; rcu_read_lock(); - ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list); last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list); + while (!list_empty(&txq->axq_acq)) { + bool stop = false; - list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) { + ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list); last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list); list_del(&ac->list); ac->sched = false; while (!list_empty(&ac->tid_q)) { + tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list); list_del(&tid->list); @@ -1851,7 +1854,8 @@ void 
ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) if (tid->paused) continue; - ath_tx_sched_aggr(sc, txq, tid); + if (ath_tx_sched_aggr(sc, txq, tid, &stop)) + sent = true; /* * add tid to round-robin queue if more frames @@ -1860,8 +1864,7 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) if (ath_tid_has_buffered(tid)) ath_tx_queue_tid(txq, tid); - if (tid == last_tid || - txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) + if (stop || tid == last_tid) break; } @@ -1870,9 +1873,17 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) list_add_tail(&ac->list, &txq->axq_acq); } - if (ac == last_ac || - txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) + if (stop) break; + + if (ac == last_ac) { + if (!sent) + break; + + sent = false; + last_ac = list_entry(txq->axq_acq.prev, + struct ath_atx_ac, list); + } } rcu_read_unlock(); -- cgit v0.10.2 From f89d1bc4271153ac7864051778c0f2443e5cf2cd Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 6 Aug 2013 14:18:13 +0200 Subject: ath9k: use software queueing for multicast traffic Create a per-vif dummy node entry for keeping the multicast software queues. This helps in setups with a lot of mulitcast traffic that could otherwise potentially drown out unicast traffic to stations. Signed-off-by: Felix Fietkau Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 41ea3fd..505c615 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -265,6 +265,7 @@ struct ath_node { u8 mpdudensity; bool sleeping; + bool no_ps_filter; #if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS) struct dentry *node_stat; @@ -364,6 +365,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw, /********/ struct ath_vif { + struct ath_node mcast_node; int av_bslot; bool primary_sta_vif; __le64 tsf_adjust; /* TSF adjustment for staggered beacons */ diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 252497a..911744f 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -963,6 +963,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw, struct ath_softc *sc = hw->priv; struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); + struct ath_vif *avp = (void *)vif->drv_priv; + struct ath_node *an = &avp->mcast_node; mutex_lock(&sc->mutex); @@ -976,6 +978,12 @@ static int ath9k_add_interface(struct ieee80211_hw *hw, if (ath9k_uses_beacons(vif->type)) ath9k_beacon_assign_slot(sc, vif); + an->sc = sc; + an->sta = NULL; + an->vif = vif; + an->no_ps_filter = true; + ath_tx_node_init(sc, an); + mutex_unlock(&sc->mutex); return 0; } @@ -1013,6 +1021,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw, { struct ath_softc *sc = hw->priv; struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ath_vif *avp = (void *)vif->drv_priv; ath_dbg(common, CONFIG, "Detach Interface\n"); @@ -1027,6 +1036,8 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw, ath9k_calculate_summary_state(hw, NULL); ath9k_ps_restore(sc); + ath_tx_node_cleanup(sc, &avp->mcast_node); + mutex_unlock(&sc->mutex); } diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index f9e6eb2..d8dfb3e 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -135,6 +135,9 @@ static struct ath_frame_info *get_frame_info(struct sk_buff 
*skb) static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno) { + if (!tid->an->sta) + return; + ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno, seqno << IEEE80211_SEQ_SEQ_SHIFT); } @@ -1382,7 +1385,7 @@ static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, if (list_empty(&bf_q)) return false; - if (tid->ac->clear_ps_filter) { + if (tid->ac->clear_ps_filter || tid->an->no_ps_filter) { tid->ac->clear_ps_filter = false; tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; } @@ -1572,7 +1575,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw, sent++; TX_STAT_INC(txq->axq_qnum, a_queued_hw); - if (!ath_tid_has_buffered(tid)) + if (an->sta && !ath_tid_has_buffered(tid)) ieee80211_sta_set_buffered(an->sta, i, false); } ath_txq_unlock_complete(sc, tid->ac->txq); @@ -2104,6 +2107,7 @@ static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb, struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_sta *sta = txctl->sta; struct ieee80211_vif *vif = info->control.vif; + struct ath_vif *avp; struct ath_softc *sc = hw->priv; int frmlen = skb->len + FCS_LEN; int padpos, padsize; @@ -2111,6 +2115,10 @@ static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb, /* NOTE: sta can be NULL according to net/mac80211.h */ if (sta) txctl->an = (struct ath_node *)sta->drv_priv; + else if (vif && ieee80211_is_data(hdr->frame_control)) { + avp = (void *)vif->drv_priv; + txctl->an = &avp->mcast_node; + } if (info->control.hw_key) frmlen += info->control.hw_key->icv_len; -- cgit v0.10.2 From 7d845871ffcafee22889c286ff3f1fb1f64be0c0 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Wed, 7 Aug 2013 12:29:27 +0530 Subject: ath9k: Fix BTCOEX usage for RX diversity BTCOEX has to be *disabled* for WLAN RX diversity to work on combo cards. Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 4afe30e..3b56c2e 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -645,11 +645,11 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, /* * Enable WLAN/BT RX Antenna diversity only when: * - * - BTCOEX is enabled + * - BTCOEX is disabled. * - the user manually requests the feature. * - the HW cap is set using the platform data. */ - if (common->btcoex_enabled && ath9k_bt_ant_diversity && + if (!common->btcoex_enabled && ath9k_bt_ant_diversity && (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV)) common->bt_ant_diversity = 1; -- cgit v0.10.2 From 16fe28e9b4a7a5def2b20f79488546b6e2e3f2e7 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Wed, 7 Aug 2013 12:54:30 +0530 Subject: ath9k: Run the LNA combining algorithm properly The LNA combining algorithm has to be run for cards that support the required diversity features, make sure that that correct conditions are met before enabing this algorithm. Signed-off-by: Sujith Manoharan Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 865e043..62dff97 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -1157,6 +1157,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb; struct ieee80211_rx_status *rxs; struct ath_hw *ah = sc->sc_ah; + struct ath9k_hw_capabilities *pCap = &ah->caps; struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_hw *hw = sc->hw; struct ieee80211_hdr *hdr; @@ -1328,11 +1329,30 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) skb = hdr_skb; } + if (rxs->flag & RX_FLAG_MMIC_STRIPPED) + skb_trim(skb, skb->len - 8); - if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) { + spin_lock_irqsave(&sc->sc_pm_lock, flags); + if ((sc->ps_flags & (PS_WAIT_FOR_BEACON | + PS_WAIT_FOR_CAB | + PS_WAIT_FOR_PSPOLL_DATA)) || + ath9k_check_auto_sleep(sc)) + ath_rx_ps(sc, skb, rs.is_mybeacon); + spin_unlock_irqrestore(&sc->sc_pm_lock, flags); + /* + * Run the LNA combining algorithm only in these cases: + * + * Standalone WLAN cards with both LNA/Antenna diversity + * enabled in the EEPROM. + * + * WLAN+BT cards which are in the supported card list + * in ath_pci_id_table and the user has loaded the + * driver with "bt_ant_diversity" set to true. + */ + if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) { /* - * change the default rx antenna if rx diversity + * Change the default rx antenna if rx diversity * chooses the other antenna 3 times in a row. */ if (sc->rx.defant != rs.rs_antenna) { @@ -1342,22 +1362,14 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) sc->rx.rxotherant = 0; } + if (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV) { + if (common->bt_ant_diversity) + ath_ant_comb_scan(sc, &rs); + } else { + ath_ant_comb_scan(sc, &rs); + } } - if (rxs->flag & RX_FLAG_MMIC_STRIPPED) - skb_trim(skb, skb->len - 8); - - spin_lock_irqsave(&sc->sc_pm_lock, flags); - if ((sc->ps_flags & (PS_WAIT_FOR_BEACON | - PS_WAIT_FOR_CAB | - PS_WAIT_FOR_PSPOLL_DATA)) || - ath9k_check_auto_sleep(sc)) - ath_rx_ps(sc, skb, rs.is_mybeacon); - spin_unlock_irqrestore(&sc->sc_pm_lock, flags); - - if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx == 3) - ath_ant_comb_scan(sc, &rs); - ath9k_apply_ampdu_details(sc, &rs, rxs); ieee80211_rx(hw, skb); -- cgit v0.10.2 From 71acc0ddd499cc323199fb1ae350ce9ea0744352 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 9 Aug 2013 13:09:41 -0700 Subject: Revert "net: sctp: convert sctp_checksum_disable module param into sctp sysctl" This reverts commit cda5f98e36576596b9230483ec52bff3cc97eb21. As per Vlad's request. Signed-off-by: David S. Miller diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 1b27b0b..36be26b 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -1507,14 +1507,6 @@ sack_timeout - INTEGER Default: 200 -checksum_disable - BOOLEAN - Disable SCTP checksum computing and verification for debugging purpose. - - 1: Disable checksumming - 0: Enable checksumming - - Default: 0 - valid_cookie_life - INTEGER The default lifetime of the SCTP cookie (in milliseconds). The cookie is used during association establishment. 
diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h index ebfdf1e..3573a81 100644 --- a/include/net/netns/sctp.h +++ b/include/net/netns/sctp.h @@ -129,9 +129,6 @@ struct netns_sctp { /* Threshold for autoclose timeout, in seconds. */ unsigned long max_autoclose; - - /* Flag to disable SCTP checksumming. */ - int checksum_disable; }; #endif /* __NETNS_SCTP_H__ */ diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 78eedf0..422db6c 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -135,6 +135,10 @@ extern struct sctp_globals { /* This is the sctp port control hash. */ int port_hashsize; struct sctp_bind_hashbucket *port_hashtable; + + /* Flag to indicate whether computing and verifying checksum + * is disabled. */ + bool checksum_disable; } sctp_globals; #define sctp_max_instreams (sctp_globals.max_instreams) @@ -146,6 +150,7 @@ extern struct sctp_globals { #define sctp_assoc_hashtable (sctp_globals.assoc_hashtable) #define sctp_port_hashsize (sctp_globals.port_hashsize) #define sctp_port_hashtable (sctp_globals.port_hashtable) +#define sctp_checksum_disable (sctp_globals.checksum_disable) /* SCTP Socket type: UDP or TCP style. */ typedef enum { diff --git a/net/sctp/input.c b/net/sctp/input.c index 2873e22..5f20686 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -134,8 +134,8 @@ int sctp_rcv(struct sk_buff *skb) __skb_pull(skb, skb_transport_offset(skb)); if (skb->len < sizeof(struct sctphdr)) goto discard_it; - if (!net->sctp.checksum_disable && !skb_csum_unnecessary(skb) && - sctp_rcv_checksum(net, skb) < 0) + if (!sctp_checksum_disable && !skb_csum_unnecessary(skb) && + sctp_rcv_checksum(net, skb) < 0) goto discard_it; skb_pull(skb, sizeof(struct sctphdr)); diff --git a/net/sctp/output.c b/net/sctp/output.c index e35b84c..0ac3a65 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -389,7 +389,6 @@ int sctp_packet_transmit(struct sctp_packet *packet) int padding; /* How much padding do we need? */ __u8 has_data = 0; struct dst_entry *dst = tp->dst; - struct net *net; unsigned char *auth = NULL; /* pointer to auth in skb data */ __u32 cksum_buf_len = sizeof(struct sctphdr); @@ -536,9 +535,7 @@ int sctp_packet_transmit(struct sctp_packet *packet) * Note: Adler-32 is no longer applicable, as has been replaced * by CRC32-C as described in . 
*/ - net = dev_net(dst->dev); - - if (!net->sctp.checksum_disable) { + if (!sctp_checksum_disable) { if (!(dst->dev->features & NETIF_F_SCTP_CSUM)) { __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len); diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 5448297..5e17092 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -1187,9 +1187,6 @@ static int __net_init sctp_net_init(struct net *net) /* Whether Cookie Preservative is enabled(1) or not(0) */ net->sctp.cookie_preserve_enable = 1; - /* Whether SCTP checksumming is disabled(1) or not(0) */ - net->sctp.checksum_disable = 0; - /* Default sctp sockets to use md5 as their hmac alg */ #if defined (CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5) net->sctp.sctp_hmac_alg = "md5"; @@ -1546,4 +1543,6 @@ MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-132"); MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-132"); MODULE_AUTHOR("Linux Kernel SCTP developers "); MODULE_DESCRIPTION("Support for the SCTP protocol (RFC2960)"); +module_param_named(no_checksums, sctp_checksum_disable, bool, 0644); +MODULE_PARM_DESC(no_checksums, "Disable checksums computing and verification"); MODULE_LICENSE("GPL"); diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index 1b1ee76..6b36561 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c @@ -290,15 +290,7 @@ static struct ctl_table sctp_net_table[] = { .extra1 = &max_autoclose_min, .extra2 = &max_autoclose_max, }, - { - .procname = "checksum_disable", - .data = &init_net.sctp.checksum_disable, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &zero, - .extra2 = &one, - }, + { /* sentinel */ } }; -- cgit v0.10.2 From 62a8370676e66f11cda78c63ace0f011b8a5234d Mon Sep 17 00:00:00 2001 From: Sachin Kamat Date: Wed, 7 Aug 2013 16:08:15 +0530 Subject: net: wan: sbni: Fix incorrect placement of __initdata __initdata should be placed between the variable name and equal sign for the variable to be placed in the intended section. Signed-off-by: Sachin Kamat Signed-off-by: David S. Miller diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c index d43f4ef..5bbcb5e 100644 --- a/drivers/net/wan/sbni.c +++ b/drivers/net/wan/sbni.c @@ -176,7 +176,7 @@ static u32 mac[ SBNI_MAX_NUM_CARDS ] __initdata; #ifndef MODULE typedef u32 iarr[]; -static iarr __initdata *dest[5] = { &io, &irq, &baud, &rxl, &mac }; +static iarr *dest[5] __initdata = { &io, &irq, &baud, &rxl, &mac }; #endif /* A zero-terminated list of I/O addresses to be probed on ISA bus */ -- cgit v0.10.2 From 77273eaa576fd99315d2830e6826678ec85b5c04 Mon Sep 17 00:00:00 2001 From: Sachin Kamat Date: Wed, 7 Aug 2013 16:08:16 +0530 Subject: net: via-rhine: Fix incorrect placement of __initdata __initdata should be placed between the variable name and equal sign for the variable to be placed in the intended section. Signed-off-by: Sachin Kamat Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index b75eb9e..c8f088a 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -2407,7 +2407,7 @@ static struct pci_driver rhine_driver = { .driver.pm = RHINE_PM_OPS, }; -static struct dmi_system_id __initdata rhine_dmi_table[] = { +static struct dmi_system_id rhine_dmi_table[] __initdata = { { .ident = "EPIA-M", .matches = { -- cgit v0.10.2 From 555a842811cb5df0774ea7ff59e9c73f30ca0c11 Mon Sep 17 00:00:00 2001 From: Yijing Wang Date: Thu, 8 Aug 2013 21:02:22 +0800 Subject: bnx2: clean up unnecessary MSI/MSI-X capability find PCI core will initialize device MSI/MSI-X capability in pci_msi_init_pci_dev(). So device driver should use pci_dev->msi_cap/msix_cap to determine whether the device support MSI/MSI-X instead of using pci_find_capability(pci_dev, PCI_CAP_ID_MSI/MSIX). Access to PCIe device config space again will consume more time. Signed-off-by: Yijing Wang Cc: Michael Chan Cc: netdev@vger.kernel.org Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 6fdfc18..4148058 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -8139,13 +8139,13 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) if (BNX2_CHIP(bp) == BNX2_CHIP_5709 && BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) { - if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) + if (pdev->msix_cap) bp->flags |= BNX2_FLAG_MSIX_CAP; } if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 && BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) { - if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) + if (pdev->msi_cap) bp->flags |= BNX2_FLAG_MSI_CAP; } -- cgit v0.10.2 From ae2104be25a5698bb24d436855f6af9b65ea26fe Mon Sep 17 00:00:00 2001 From: Yijing Wang Date: Thu, 8 Aug 2013 21:02:36 +0800 Subject: bnx2x: clean up unnecessary MSI/MSI-X capability find PCI core will initialize device MSI/MSI-X capability in pci_msi_init_pci_dev(). So device driver should use pci_dev->msi_cap/msix_cap to determine whether the device support MSI/MSI-X instead of using pci_find_capability(pci_dev, PCI_CAP_ID_MSI/MSIX). Access to PCIe device config space again will consume more time. Signed-off-by: Yijing Wang Cc: Eilon Greenstein Cc: netdev@vger.kernel.org Acked-by: Ariel Elior Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index ab5bd6c..39d7db5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -12544,16 +12544,14 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp) static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt, bool is_vf) { - int pos, index; + int index; u16 control = 0; - pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); - /* * If MSI-X is not supported - return number of SBs needed to support * one fast path queue: one FP queue + SB for CNIC */ - if (!pos) { + if (!pdev->msix_cap) { dev_info(&pdev->dev, "no msix capability found\n"); return 1 + cnic_cnt; } @@ -12566,7 +12564,7 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, * without the default SB. * For VFs there is no default SB, then we return (index+1). 
*/ - pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control); + pci_read_config_word(pdev, pdev->msix_cap + PCI_MSI_FLAGS, &control); index = control & PCI_MSIX_FLAGS_QSIZE; -- cgit v0.10.2 From 40b295625053954f69d6840d59f2b4d8be251ed3 Mon Sep 17 00:00:00 2001 From: Yijing Wang Date: Thu, 8 Aug 2013 21:02:44 +0800 Subject: myri10ge: clean up unnecessary MSI/MSI-X capability find PCI core will initialize device MSI/MSI-X capability in pci_msi_init_pci_dev(). So device driver should use pci_dev->msi_cap/msix_cap to determine whether the device support MSI/MSI-X instead of using pci_find_capability(pci_dev, PCI_CAP_ID_MSI/MSIX). Access to PCIe device config space again will consume more time. Signed-off-by: Yijing Wang Cc: Andrew Gallatin Cc: netdev@vger.kernel.org Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index d4cdf4d..66c0400 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -3625,13 +3625,12 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp) struct pci_dev *pdev = mgp->pdev; char *old_fw; bool old_allocated; - int i, status, ncpus, msix_cap; + int i, status, ncpus; mgp->num_slices = 1; - msix_cap = pci_find_capability(pdev, PCI_CAP_ID_MSIX); ncpus = netif_get_num_default_rss_queues(); - if (myri10ge_max_slices == 1 || msix_cap == 0 || + if (myri10ge_max_slices == 1 || !pdev->msix_cap || (myri10ge_max_slices == -1 && ncpus < 2)) return; -- cgit v0.10.2 From b83082e95ecb95b5da9e6c54d4714ea73c49b58d Mon Sep 17 00:00:00 2001 From: Yijing Wang Date: Thu, 8 Aug 2013 21:02:56 +0800 Subject: netxen: clean up unnecessary MSI/MSI-X capability find PCI core will initialize device MSI/MSI-X capability in pci_msi_init_pci_dev(). So device driver should use pci_dev->msi_cap/msix_cap to determine whether the device support MSI/MSI-X instead of using pci_find_capability(pci_dev, PCI_CAP_ID_MSI/MSIX). Access to PCIe device config space again will consume more time. Signed-off-by: Yijing Wang Cc: Manish Chopra Cc: Sony Chacko Cc: Rajesh Borundia Cc: netdev@vger.kernel.org Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index c401b0b..1046e94 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -459,16 +459,14 @@ static void netxen_pcie_strap_init(struct netxen_adapter *adapter) static void netxen_set_msix_bit(struct pci_dev *pdev, int enable) { u32 control; - int pos; - pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); - if (pos) { - pci_read_config_dword(pdev, pos, &control); + if (pdev->msix_cap) { + pci_read_config_dword(pdev, pdev->msix_cap, &control); if (enable) control |= PCI_MSIX_FLAGS_ENABLE; else control = 0; - pci_write_config_dword(pdev, pos, control); + pci_write_config_dword(pdev, pdev->msix_cap, control); } } -- cgit v0.10.2 From 0f847584a84cfe2bb24d82df516d745db840bb5f Mon Sep 17 00:00:00 2001 From: Yijing Wang Date: Thu, 8 Aug 2013 21:03:12 +0800 Subject: tg3: clean up unnecessary MSI/MSI-X capability find PCI core will initialize device MSI/MSI-X capability in pci_msi_init_pci_dev(). So device driver should use pci_dev->msi_cap/msix_cap to determine whether the device support MSI/MSI-X instead of using pci_find_capability(pci_dev, PCI_CAP_ID_MSI/MSIX). Access to PCIe device config space again will consume more time. 
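The change common to these MSI/MSI-X cleanups (bnx2, bnx2x, myri10ge, netxen, tg3) is that the PCI core already records the capability offsets at enumeration time, so a driver can test pdev->msi_cap / pdev->msix_cap directly instead of re-scanning config space with pci_find_capability(). The helper below is only an illustrative sketch of that pattern, not code from any of the drivers touched in this series; it assumes nothing beyond the struct pci_dev fields and PCI constants that already appear in the diffs above.

#include <linux/pci.h>

/* Sketch: report how many MSI-X vectors a device advertises, using the
 * capability offset cached by the PCI core instead of walking config
 * space again with pci_find_capability(pdev, PCI_CAP_ID_MSIX).
 */
static int example_msix_vec_count(struct pci_dev *pdev)
{
	u16 control;

	if (!pdev->msix_cap)		/* no MSI-X capability present */
		return 0;

	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSI_FLAGS, &control);

	/* The table-size field encodes N-1, as in the bnx2x hunk above. */
	return (control & PCI_MSIX_FLAGS_QSIZE) + 1;
}
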
Signed-off-by: Yijing Wang Cc: Nithin Nayak Sujir Cc: Michael Chan Cc: netdev@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 51bf9ca..ff28081 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -15967,7 +15967,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) */ if (tg3_flag(tp, 5780_CLASS)) { tg3_flag_set(tp, 40BIT_DMA_BUG); - tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI); + tp->msi_cap = tp->pdev->msi_cap; } else { struct pci_dev *bridge = NULL; -- cgit v0.10.2 From 469230d118dc0822f6bf46c75ab147fa9f00741f Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Thu, 8 Aug 2013 10:27:14 -0700 Subject: pptp: fix sparse pointer warning callid_sock array is referenced via rcu_dereference and sparse rcu checks complains about address space mismatch. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index 162464f..9785a3a 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -47,7 +47,7 @@ #define MAX_CALLID 65535 static DECLARE_BITMAP(callid_bitmap, MAX_CALLID + 1); -static struct pppox_sock **callid_sock; +static struct pppox_sock __rcu **callid_sock; static DEFINE_SPINLOCK(chan_lock); -- cgit v0.10.2 From 149479d019e06df5a7f4096f95c00cfb1380309c Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Thu, 8 Aug 2013 14:06:22 -0700 Subject: tcp: add server ip to encrypt cookie in fast open Encrypt the cookie with both server and client IPv4 addresses, such that multi-homed server will grant different cookies based on both the source and destination IPs. No client change is needed since cookie is opaque to the client. Signed-off-by: Yuchung Cheng Reviewed-by: Eric Dumazet Acked-by: Neal Cardwell Signed-off-by: David S. Miller diff --git a/include/net/tcp.h b/include/net/tcp.h index 27b652f..09cb5c1 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1300,7 +1300,8 @@ void tcp_free_fastopen_req(struct tcp_sock *tp); extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx; int tcp_fastopen_reset_cipher(void *key, unsigned int len); -void tcp_fastopen_cookie_gen(__be32 addr, struct tcp_fastopen_cookie *foc); +extern void tcp_fastopen_cookie_gen(__be32 src, __be32 dst, + struct tcp_fastopen_cookie *foc); #define TCP_FASTOPEN_KEY_LENGTH 16 diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index 8f7ef0a..ab7bd35 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -58,23 +58,22 @@ error: kfree(ctx); return err; } -/* Computes the fastopen cookie for the peer. - * The peer address is a 128 bits long (pad with zeros for IPv4). +/* Computes the fastopen cookie for the IP path. + * The path is a 128 bits long (pad with zeros for IPv4). * * The caller must check foc->len to determine if a valid cookie * has been generated successfully. 
*/ -void tcp_fastopen_cookie_gen(__be32 addr, struct tcp_fastopen_cookie *foc) +void tcp_fastopen_cookie_gen(__be32 src, __be32 dst, + struct tcp_fastopen_cookie *foc) { - __be32 peer_addr[4] = { addr, 0, 0, 0 }; + __be32 path[4] = { src, dst, 0, 0 }; struct tcp_fastopen_context *ctx; rcu_read_lock(); ctx = rcu_dereference(tcp_fastopen_ctx); if (ctx) { - crypto_cipher_encrypt_one(ctx->tfm, - foc->val, - (__u8 *)peer_addr); + crypto_cipher_encrypt_one(ctx->tfm, foc->val, (__u8 *)path); foc->len = TCP_FASTOPEN_COOKIE_SIZE; } rcu_read_unlock(); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 280efe5..ec27028 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1316,9 +1316,11 @@ static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb, tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq; return true; } + if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) { if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) { - tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc); + tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, valid_foc); if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) || memcmp(&foc->val[0], &valid_foc->val[0], TCP_FASTOPEN_COOKIE_SIZE) != 0) @@ -1329,14 +1331,16 @@ static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb, tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq; return true; } else if (foc->len == 0) { /* Client requesting a cookie */ - tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc); + tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, valid_foc); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD); } else { /* Client sent a cookie with wrong size. Treat it * the same as invalid and return a valid one. */ - tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc); + tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, valid_foc); } return false; } -- cgit v0.10.2 From e370a7236321773245c5522d8bb299380830d3b2 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 8 Aug 2013 14:37:32 -0700 Subject: af_unix: improve STREAM behavior with fragmented memory unix_stream_sendmsg() currently uses order-2 allocations, and we had numerous reports this can fail. The __GFP_REPEAT flag present in sock_alloc_send_pskb() is not helping. This patch extends the work done in commit eb6a24816b247c ("af_unix: reduce high order page allocations) for datagram sockets. This opens the possibility of zero copy IO (splice() and friends) The trick is to not use skb_pull() anymore in recvmsg() path, and instead add a @consumed field in UNIXCB() to track amount of already read payload in the skb. There is a performance regression for large sends because of extra page allocations that will be addressed in a follow-up patch, allowing sock_alloc_send_pskb() to attempt high order page allocations. Signed-off-by: Eric Dumazet Cc: David Rientjes Signed-off-by: David S. Miller diff --git a/include/net/af_unix.h b/include/net/af_unix.h index 05442df..a175ba4 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -35,6 +35,7 @@ struct unix_skb_parms { #ifdef CONFIG_SECURITY_NETWORK u32 secid; /* Security ID */ #endif + u32 consumed; }; #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb)) diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index c4ce243..99dc760 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1596,6 +1596,10 @@ out: return err; } +/* We use paged skbs for stream sockets, and limit occupancy to 32768 + * bytes, and a minimun of a full page. 
+ */ +#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768)) static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, struct msghdr *msg, size_t len) @@ -1609,6 +1613,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, struct scm_cookie tmp_scm; bool fds_sent = false; int max_level; + int data_len; if (NULL == siocb->scm) siocb->scm = &tmp_scm; @@ -1635,40 +1640,21 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, goto pipe_err; while (sent < len) { - /* - * Optimisation for the fact that under 0.01% of X - * messages typically need breaking up. - */ - - size = len-sent; + size = len - sent; /* Keep two messages in the pipe so it schedules better */ - if (size > ((sk->sk_sndbuf >> 1) - 64)) - size = (sk->sk_sndbuf >> 1) - 64; + size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64); - if (size > SKB_MAX_ALLOC) - size = SKB_MAX_ALLOC; - - /* - * Grab a buffer - */ + /* allow fallback to order-0 allocations */ + size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ); - skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT, - &err); + data_len = max_t(int, 0, size - SKB_MAX_HEAD(0)); - if (skb == NULL) + skb = sock_alloc_send_pskb(sk, size - data_len, data_len, + msg->msg_flags & MSG_DONTWAIT, &err); + if (!skb) goto out_err; - /* - * If you pass two values to the sock_alloc_send_skb - * it tries to grab the large buffer with GFP_NOFS - * (which can fail easily), and if it fails grab the - * fallback size buffer which is under a page and will - * succeed. [Alan] - */ - size = min_t(int, size, skb_tailroom(skb)); - - /* Only send the fds in the first buffer */ err = unix_scm_to_skb(siocb->scm, skb, !fds_sent); if (err < 0) { @@ -1678,7 +1664,10 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, max_level = err + 1; fds_sent = true; - err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size); + skb_put(skb, size - data_len); + skb->data_len = data_len; + skb->len = size; + err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, 0, size); if (err) { kfree_skb(skb); goto out_err; @@ -1890,6 +1879,11 @@ static long unix_stream_data_wait(struct sock *sk, long timeo, return timeo; } +static unsigned int unix_skb_len(const struct sk_buff *skb) +{ + return skb->len - UNIXCB(skb).consumed; +} + static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) @@ -1977,8 +1971,8 @@ again: } skip = sk_peek_offset(sk, flags); - while (skip >= skb->len) { - skip -= skb->len; + while (skip >= unix_skb_len(skb)) { + skip -= unix_skb_len(skb); last = skb; skb = skb_peek_next(skb, &sk->sk_receive_queue); if (!skb) @@ -2005,8 +1999,9 @@ again: sunaddr = NULL; } - chunk = min_t(unsigned int, skb->len - skip, size); - if (memcpy_toiovec(msg->msg_iov, skb->data + skip, chunk)) { + chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size); + if (skb_copy_datagram_iovec(skb, UNIXCB(skb).consumed + skip, + msg->msg_iov, chunk)) { if (copied == 0) copied = -EFAULT; break; @@ -2016,14 +2011,14 @@ again: /* Mark read part of skb as used */ if (!(flags & MSG_PEEK)) { - skb_pull(skb, chunk); + UNIXCB(skb).consumed += chunk; sk_peek_offset_bwd(sk, chunk); if (UNIXCB(skb).fp) unix_detach_fds(siocb->scm, skb); - if (skb->len) + if (unix_skb_len(skb)) break; skb_unlink(skb, &sk->sk_receive_queue); @@ -2107,7 +2102,7 @@ long unix_inq_len(struct sock *sk) if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) { skb_queue_walk(&sk->sk_receive_queue, skb) - 
amount += skb->len; + amount += unix_skb_len(skb); } else { skb = skb_peek(&sk->sk_receive_queue); if (skb) -- cgit v0.10.2 From 28d6427109d13b0f447cba5761f88d3548e83605 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 8 Aug 2013 14:38:47 -0700 Subject: net: attempt high order allocations in sock_alloc_send_pskb() Adding paged frags skbs to af_unix sockets introduced a performance regression on large sends because of additional page allocations, even if each skb could carry at least 100% more payload than before. We can instruct sock_alloc_send_pskb() to attempt high order allocations. Most of the time, it does a single page allocation instead of 8. I added an additional parameter to sock_alloc_send_pskb() to let other users to opt-in for this new feature on followup patches. Tested: Before patch : $ netperf -t STREAM_STREAM STREAM STREAM TEST Recv Send Send Socket Socket Message Elapsed Size Size Size Time Throughput bytes bytes bytes secs. 10^6bits/sec 2304 212992 212992 10.00 46861.15 After patch : $ netperf -t STREAM_STREAM STREAM STREAM TEST Recv Send Send Socket Socket Message Elapsed Size Size Size Time Throughput bytes bytes bytes secs. 10^6bits/sec 2304 212992 212992 10.00 57981.11 Signed-off-by: Eric Dumazet Cc: David Rientjes Signed-off-by: David S. Miller diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 182364a..8f6056d 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -524,7 +524,7 @@ static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad, linear = len; skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, - err); + err, 0); if (!skb) return NULL; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index b163047..978d865 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -949,7 +949,7 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile, linear = len; skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, - &err); + &err, 0); if (!skb) return ERR_PTR(err); diff --git a/include/net/sock.h b/include/net/sock.h index ab6a8b7..e4bbcbf 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1539,7 +1539,8 @@ extern struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, unsigned long data_len, int noblock, - int *errcode); + int *errcode, + int max_page_order); extern void *sock_kmalloc(struct sock *sk, int size, gfp_t priority); extern void sock_kfree_s(struct sock *sk, void *mem, int size); diff --git a/net/core/sock.c b/net/core/sock.c index 83667de..5b6beba 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1741,24 +1741,23 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo) struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, unsigned long data_len, int noblock, - int *errcode) + int *errcode, int max_page_order) { - struct sk_buff *skb; + struct sk_buff *skb = NULL; + unsigned long chunk; gfp_t gfp_mask; long timeo; int err; int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT; + struct page *page; + int i; err = -EMSGSIZE; if (npages > MAX_SKB_FRAGS) goto failure; - gfp_mask = sk->sk_allocation; - if (gfp_mask & __GFP_WAIT) - gfp_mask |= __GFP_REPEAT; - timeo = sock_sndtimeo(sk, noblock); - while (1) { + while (!skb) { err = sock_error(sk); if (err != 0) goto failure; @@ -1767,50 +1766,52 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, if (sk->sk_shutdown & SEND_SHUTDOWN) goto failure; - if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) { - skb = 
alloc_skb(header_len, gfp_mask); - if (skb) { - int i; - - /* No pages, we're done... */ - if (!data_len) - break; - - skb->truesize += data_len; - skb_shinfo(skb)->nr_frags = npages; - for (i = 0; i < npages; i++) { - struct page *page; - - page = alloc_pages(sk->sk_allocation, 0); - if (!page) { - err = -ENOBUFS; - skb_shinfo(skb)->nr_frags = i; - kfree_skb(skb); - goto failure; - } - - __skb_fill_page_desc(skb, i, - page, 0, - (data_len >= PAGE_SIZE ? - PAGE_SIZE : - data_len)); - data_len -= PAGE_SIZE; - } + if (atomic_read(&sk->sk_wmem_alloc) >= sk->sk_sndbuf) { + set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); + set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); + err = -EAGAIN; + if (!timeo) + goto failure; + if (signal_pending(current)) + goto interrupted; + timeo = sock_wait_for_wmem(sk, timeo); + continue; + } - /* Full success... */ - break; - } - err = -ENOBUFS; + err = -ENOBUFS; + gfp_mask = sk->sk_allocation; + if (gfp_mask & __GFP_WAIT) + gfp_mask |= __GFP_REPEAT; + + skb = alloc_skb(header_len, gfp_mask); + if (!skb) goto failure; + + skb->truesize += data_len; + + for (i = 0; npages > 0; i++) { + int order = max_page_order; + + while (order) { + if (npages >= 1 << order) { + page = alloc_pages(sk->sk_allocation | + __GFP_COMP | __GFP_NOWARN, + order); + if (page) + goto fill_page; + } + order--; + } + page = alloc_page(sk->sk_allocation); + if (!page) + goto failure; +fill_page: + chunk = min_t(unsigned long, data_len, + PAGE_SIZE << order); + skb_fill_page_desc(skb, i, page, 0, chunk); + data_len -= chunk; + npages -= 1 << order; } - set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); - set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); - err = -EAGAIN; - if (!timeo) - goto failure; - if (signal_pending(current)) - goto interrupted; - timeo = sock_wait_for_wmem(sk, timeo); } skb_set_owner_w(skb, sk); @@ -1819,6 +1820,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, interrupted: err = sock_intr_errno(timeo); failure: + kfree_skb(skb); *errcode = err; return NULL; } @@ -1827,7 +1829,7 @@ EXPORT_SYMBOL(sock_alloc_send_pskb); struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size, int noblock, int *errcode) { - return sock_alloc_send_pskb(sk, size, 0, noblock, errcode); + return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0); } EXPORT_SYMBOL(sock_alloc_send_skb); diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 4cb28a7..6c53dd9 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2181,7 +2181,7 @@ static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad, linear = len; skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, - err); + err, 0); if (!skb) return NULL; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 99dc760..fee9e33 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1479,7 +1479,8 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, MAX_SKB_FRAGS * PAGE_SIZE); skb = sock_alloc_send_pskb(sk, len - data_len, data_len, - msg->msg_flags & MSG_DONTWAIT, &err); + msg->msg_flags & MSG_DONTWAIT, &err, + PAGE_ALLOC_COSTLY_ORDER); if (skb == NULL) goto out; @@ -1651,7 +1652,8 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, data_len = max_t(int, 0, size - SKB_MAX_HEAD(0)); skb = sock_alloc_send_pskb(sk, size - data_len, data_len, - msg->msg_flags & MSG_DONTWAIT, &err); + msg->msg_flags & MSG_DONTWAIT, &err, + get_order(UNIX_SKB_FRAGS_SZ)); if (!skb) goto out_err; -- cgit v0.10.2 From 
6c821bd9edc9563b34c7920b4a99fe64992de530 Mon Sep 17 00:00:00 2001 From: Jonas Jensen Date: Thu, 8 Aug 2013 13:34:54 +0200 Subject: net: Add MOXA ART SoCs ethernet driver The MOXA UC-711X hardware(s) has an ethernet controller that seem to be developed internally. The IC used is "RTL8201CP". Since there is no public documentation, this driver is mostly the one published by MOXA that has been heavily cleaned up / ported from linux 2.6.9. Signed-off-by: Jonas Jensen Signed-off-by: David S. Miller diff --git a/Documentation/devicetree/bindings/net/moxa,moxart-mac.txt b/Documentation/devicetree/bindings/net/moxa,moxart-mac.txt new file mode 100644 index 0000000..583418b --- /dev/null +++ b/Documentation/devicetree/bindings/net/moxa,moxart-mac.txt @@ -0,0 +1,21 @@ +MOXA ART Ethernet Controller + +Required properties: + +- compatible : Must be "moxa,moxart-mac" +- reg : Should contain register location and length +- interrupts : Should contain the mac interrupt number + +Example: + + mac0: mac@90900000 { + compatible = "moxa,moxart-mac"; + reg = <0x90900000 0x100>; + interrupts = <25 0>; + }; + + mac1: mac@92000000 { + compatible = "moxa,moxart-mac"; + reg = <0x92000000 0x100>; + interrupts = <27 0>; + }; diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 2037080..506b024 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -90,6 +90,7 @@ source "drivers/net/ethernet/marvell/Kconfig" source "drivers/net/ethernet/mellanox/Kconfig" source "drivers/net/ethernet/micrel/Kconfig" source "drivers/net/ethernet/microchip/Kconfig" +source "drivers/net/ethernet/moxa/Kconfig" source "drivers/net/ethernet/myricom/Kconfig" config FEALNX diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 390bd0b..c0b8789 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -42,6 +42,7 @@ obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/ obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/ obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/ obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/ +obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/ obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/ obj-$(CONFIG_FEALNX) += fealnx.o obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/ diff --git a/drivers/net/ethernet/moxa/Kconfig b/drivers/net/ethernet/moxa/Kconfig new file mode 100644 index 0000000..1731e05 --- /dev/null +++ b/drivers/net/ethernet/moxa/Kconfig @@ -0,0 +1,30 @@ +# +# MOXART device configuration +# + +config NET_VENDOR_MOXART + bool "MOXA ART devices" + default y + depends on (ARM && ARCH_MOXART) + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about MOXA ART devices. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_MOXART + +config ARM_MOXART_ETHER + tristate "MOXART Ethernet support" + depends on ARM && ARCH_MOXART + select NET_CORE + ---help--- + If you wish to compile a kernel for a hardware with MOXA ART SoC and + want to use the internal ethernet then you should answer Y to this. + + +endif # NET_VENDOR_MOXART diff --git a/drivers/net/ethernet/moxa/Makefile b/drivers/net/ethernet/moxa/Makefile new file mode 100644 index 0000000..aa3c73e9 --- /dev/null +++ b/drivers/net/ethernet/moxa/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the MOXART network device drivers. 
+# + +obj-$(CONFIG_ARM_MOXART_ETHER) += moxart_ether.o diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c new file mode 100644 index 0000000..abd2c54 --- /dev/null +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -0,0 +1,558 @@ +/* MOXA ART Ethernet (RTL8201CP) driver. + * + * Copyright (C) 2013 Jonas Jensen + * + * Jonas Jensen + * + * Based on code from + * Moxa Technology Co., Ltd. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "moxart_ether.h" + +static inline void moxart_emac_write(struct net_device *ndev, + unsigned int reg, unsigned long value) +{ + struct moxart_mac_priv_t *priv = netdev_priv(ndev); + + writel(value, priv->base + reg); +} + +static void moxart_update_mac_address(struct net_device *ndev) +{ + moxart_emac_write(ndev, REG_MAC_MS_ADDRESS, + ((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1]))); + moxart_emac_write(ndev, REG_MAC_MS_ADDRESS + 4, + ((ndev->dev_addr[2] << 24) | + (ndev->dev_addr[3] << 16) | + (ndev->dev_addr[4] << 8) | + (ndev->dev_addr[5]))); +} + +static int moxart_set_mac_address(struct net_device *ndev, void *addr) +{ + struct sockaddr *address = addr; + + if (!is_valid_ether_addr(address->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len); + moxart_update_mac_address(ndev); + + return 0; +} + +static void moxart_mac_free_memory(struct net_device *ndev) +{ + struct moxart_mac_priv_t *priv = netdev_priv(ndev); + int i; + + for (i = 0; i < RX_DESC_NUM; i++) + dma_unmap_single(&ndev->dev, priv->rx_mapping[i], + priv->rx_buf_size, DMA_FROM_DEVICE); + + if (priv->tx_desc_base) + dma_free_coherent(NULL, TX_REG_DESC_SIZE * TX_DESC_NUM, + priv->tx_desc_base, priv->tx_base); + + if (priv->rx_desc_base) + dma_free_coherent(NULL, RX_REG_DESC_SIZE * RX_DESC_NUM, + priv->rx_desc_base, priv->rx_base); + + kfree(priv->tx_buf_base); + kfree(priv->rx_buf_base); +} + +static void moxart_mac_reset(struct net_device *ndev) +{ + struct moxart_mac_priv_t *priv = netdev_priv(ndev); + + writel(SW_RST, priv->base + REG_MAC_CTRL); + while (readl(priv->base + REG_MAC_CTRL) & SW_RST) + mdelay(10); + + writel(0, priv->base + REG_INTERRUPT_MASK); + + priv->reg_maccr = RX_BROADPKT | FULLDUP | CRC_APD | RX_FTL; +} + +static void moxart_mac_enable(struct net_device *ndev) +{ + struct moxart_mac_priv_t *priv = netdev_priv(ndev); + + writel(0x00001010, priv->base + REG_INT_TIMER_CTRL); + writel(0x00000001, priv->base + REG_APOLL_TIMER_CTRL); + writel(0x00000390, priv->base + REG_DMA_BLEN_CTRL); + + priv->reg_imr |= (RPKT_FINISH_M | XPKT_FINISH_M); + writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK); + + priv->reg_maccr |= (RCV_EN | XMT_EN | RDMA_EN | XDMA_EN); + writel(priv->reg_maccr, priv->base + REG_MAC_CTRL); +} + +static void moxart_mac_setup_desc_ring(struct net_device *ndev) +{ + struct moxart_mac_priv_t *priv = netdev_priv(ndev); + void __iomem *desc; + int i; + + for (i = 0; i < TX_DESC_NUM; i++) { + desc = priv->tx_desc_base + i * TX_REG_DESC_SIZE; + memset(desc, 0, TX_REG_DESC_SIZE); + + priv->tx_buf[i] = priv->tx_buf_base + priv->tx_buf_size * i; + } + writel(TX_DESC1_END, desc + TX_REG_OFFSET_DESC1); + + priv->tx_head = 0; + priv->tx_tail = 0; + + for (i = 0; i < 
RX_DESC_NUM; i++) { + desc = priv->rx_desc_base + i * RX_REG_DESC_SIZE; + memset(desc, 0, RX_REG_DESC_SIZE); + writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0); + writel(RX_BUF_SIZE & RX_DESC1_BUF_SIZE_MASK, + desc + RX_REG_OFFSET_DESC1); + + priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i; + priv->rx_mapping[i] = dma_map_single(&ndev->dev, + priv->rx_buf[i], + priv->rx_buf_size, + DMA_FROM_DEVICE); + if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i])) + netdev_err(ndev, "DMA mapping error\n"); + + writel(priv->rx_mapping[i], + desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_PHYS); + writel(priv->rx_buf[i], + desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_VIRT); + } + writel(RX_DESC1_END, desc + RX_REG_OFFSET_DESC1); + + priv->rx_head = 0; + + /* reset the MAC controler TX/RX desciptor base address */ + writel(priv->tx_base, priv->base + REG_TXR_BASE_ADDRESS); + writel(priv->rx_base, priv->base + REG_RXR_BASE_ADDRESS); +} + +static int moxart_mac_open(struct net_device *ndev) +{ + struct moxart_mac_priv_t *priv = netdev_priv(ndev); + + if (!is_valid_ether_addr(ndev->dev_addr)) + return -EADDRNOTAVAIL; + + napi_enable(&priv->napi); + + moxart_mac_reset(ndev); + moxart_update_mac_address(ndev); + moxart_mac_setup_desc_ring(ndev); + moxart_mac_enable(ndev); + netif_start_queue(ndev); + + netdev_dbg(ndev, "%s: IMR=0x%x, MACCR=0x%x\n", + __func__, readl(priv->base + REG_INTERRUPT_MASK), + readl(priv->base + REG_MAC_CTRL)); + + return 0; +} + +static int moxart_mac_stop(struct net_device *ndev) +{ + struct moxart_mac_priv_t *priv = netdev_priv(ndev); + + napi_disable(&priv->napi); + + netif_stop_queue(ndev); + + /* disable all interrupts */ + writel(0, priv->base + REG_INTERRUPT_MASK); + + /* disable all functions */ + writel(0, priv->base + REG_MAC_CTRL); + + return 0; +} + +static int moxart_rx_poll(struct napi_struct *napi, int budget) +{ + struct moxart_mac_priv_t *priv = container_of(napi, + struct moxart_mac_priv_t, + napi); + struct net_device *ndev = priv->ndev; + struct sk_buff *skb; + void __iomem *desc; + unsigned int desc0, len; + int rx_head = priv->rx_head; + int rx = 0; + + while (1) { + desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head); + desc0 = readl(desc + RX_REG_OFFSET_DESC0); + + if (desc0 & RX_DESC0_DMA_OWN) + break; + + if (desc0 & (RX_DESC0_ERR | RX_DESC0_CRC_ERR | RX_DESC0_FTL | + RX_DESC0_RUNT | RX_DESC0_ODD_NB)) { + net_dbg_ratelimited("packet error\n"); + priv->stats.rx_dropped++; + priv->stats.rx_errors++; + continue; + } + + len = desc0 & RX_DESC0_FRAME_LEN_MASK; + + if (len > RX_BUF_SIZE) + len = RX_BUF_SIZE; + + skb = build_skb(priv->rx_buf[rx_head], priv->rx_buf_size); + if (unlikely(!skb)) { + net_dbg_ratelimited("build_skb failed\n"); + priv->stats.rx_dropped++; + priv->stats.rx_errors++; + } + + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, ndev); + napi_gro_receive(&priv->napi, skb); + rx++; + + ndev->last_rx = jiffies; + priv->stats.rx_packets++; + priv->stats.rx_bytes += len; + if (desc0 & RX_DESC0_MULTICAST) + priv->stats.multicast++; + + writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0); + + rx_head = RX_NEXT(rx_head); + priv->rx_head = rx_head; + + if (rx >= budget) + break; + } + + if (rx < budget) { + napi_gro_flush(napi, false); + __napi_complete(napi); + } + + priv->reg_imr |= RPKT_FINISH_M; + writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK); + + return rx; +} + +static void moxart_tx_finished(struct net_device *ndev) +{ + struct moxart_mac_priv_t *priv = netdev_priv(ndev); + unsigned tx_head = 
priv->tx_head; + unsigned tx_tail = priv->tx_tail; + + while (tx_tail != tx_head) { + dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail], + priv->tx_len[tx_tail], DMA_TO_DEVICE); + + priv->stats.tx_packets++; + priv->stats.tx_bytes += priv->tx_skb[tx_tail]->len; + + dev_kfree_skb_irq(priv->tx_skb[tx_tail]); + priv->tx_skb[tx_tail] = NULL; + + tx_tail = TX_NEXT(tx_tail); + } + priv->tx_tail = tx_tail; +} + +static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id) +{ + struct net_device *ndev = (struct net_device *) dev_id; + struct moxart_mac_priv_t *priv = netdev_priv(ndev); + unsigned int ists = readl(priv->base + REG_INTERRUPT_STATUS); + + if (ists & XPKT_OK_INT_STS) + moxart_tx_finished(ndev); + + if (ists & RPKT_FINISH) { + if (napi_schedule_prep(&priv->napi)) { + priv->reg_imr &= ~RPKT_FINISH_M; + writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK); + __napi_schedule(&priv->napi); + } + } + + return IRQ_HANDLED; +} + +static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct moxart_mac_priv_t *priv = netdev_priv(ndev); + void __iomem *desc; + unsigned int len; + unsigned int tx_head = priv->tx_head; + u32 txdes1; + + desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head); + + spin_lock_irq(&priv->txlock); + if (readl(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) { + net_dbg_ratelimited("no TX space for packet\n"); + priv->stats.tx_dropped++; + return NETDEV_TX_BUSY; + } + + len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len; + + priv->tx_mapping[tx_head] = dma_map_single(&ndev->dev, skb->data, + len, DMA_TO_DEVICE); + if (dma_mapping_error(&ndev->dev, priv->tx_mapping[tx_head])) { + netdev_err(ndev, "DMA mapping error\n"); + return NETDEV_TX_BUSY; + } + + priv->tx_len[tx_head] = len; + priv->tx_skb[tx_head] = skb; + + writel(priv->tx_mapping[tx_head], + desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_PHYS); + writel(skb->data, + desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_VIRT); + + if (skb->len < ETH_ZLEN) { + memset(&skb->data[skb->len], + 0, ETH_ZLEN - skb->len); + len = ETH_ZLEN; + } + + txdes1 = readl(desc + TX_REG_OFFSET_DESC1); + txdes1 |= TX_DESC1_LTS | TX_DESC1_FTS; + txdes1 &= ~(TX_DESC1_FIFO_COMPLETE | TX_DESC1_INTR_COMPLETE); + txdes1 |= (len & TX_DESC1_BUF_SIZE_MASK); + writel(txdes1, desc + TX_REG_OFFSET_DESC1); + writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0); + + /* start to send packet */ + writel(0xffffffff, priv->base + REG_TX_POLL_DEMAND); + + priv->tx_head = TX_NEXT(tx_head); + + ndev->trans_start = jiffies; + + spin_unlock_irq(&priv->txlock); + + return NETDEV_TX_OK; +} + +static struct net_device_stats *moxart_mac_get_stats(struct net_device *ndev) +{ + struct moxart_mac_priv_t *priv = netdev_priv(ndev); + + return &priv->stats; +} + +static void moxart_mac_setmulticast(struct net_device *ndev) +{ + struct moxart_mac_priv_t *priv = netdev_priv(ndev); + struct netdev_hw_addr *ha; + int crc_val; + + netdev_for_each_mc_addr(ha, ndev) { + crc_val = crc32_le(~0, ha->addr, ETH_ALEN); + crc_val = (crc_val >> 26) & 0x3f; + if (crc_val >= 32) { + writel(readl(priv->base + REG_MCAST_HASH_TABLE1) | + (1UL << (crc_val - 32)), + priv->base + REG_MCAST_HASH_TABLE1); + } else { + writel(readl(priv->base + REG_MCAST_HASH_TABLE0) | + (1UL << crc_val), + priv->base + REG_MCAST_HASH_TABLE0); + } + } +} + +static void moxart_mac_set_rx_mode(struct net_device *ndev) +{ + struct moxart_mac_priv_t *priv = netdev_priv(ndev); + + spin_lock_irq(&priv->txlock); + + (ndev->flags & IFF_PROMISC) ? 
(priv->reg_maccr |= RCV_ALL) : + (priv->reg_maccr &= ~RCV_ALL); + + (ndev->flags & IFF_ALLMULTI) ? (priv->reg_maccr |= RX_MULTIPKT) : + (priv->reg_maccr &= ~RX_MULTIPKT); + + if ((ndev->flags & IFF_MULTICAST) && netdev_mc_count(ndev)) { + priv->reg_maccr |= HT_MULTI_EN; + moxart_mac_setmulticast(ndev); + } else { + priv->reg_maccr &= ~HT_MULTI_EN; + } + + writel(priv->reg_maccr, priv->base + REG_MAC_CTRL); + + spin_unlock_irq(&priv->txlock); +} + +static struct net_device_ops moxart_netdev_ops = { + .ndo_open = moxart_mac_open, + .ndo_stop = moxart_mac_stop, + .ndo_start_xmit = moxart_mac_start_xmit, + .ndo_get_stats = moxart_mac_get_stats, + .ndo_set_rx_mode = moxart_mac_set_rx_mode, + .ndo_set_mac_address = moxart_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +}; + +static int moxart_mac_probe(struct platform_device *pdev) +{ + struct device *p_dev = &pdev->dev; + struct device_node *node = p_dev->of_node; + struct net_device *ndev; + struct moxart_mac_priv_t *priv; + struct resource *res; + unsigned int irq; + int ret; + + ndev = alloc_etherdev(sizeof(struct moxart_mac_priv_t)); + if (!ndev) + return -ENOMEM; + + irq = irq_of_parse_and_map(node, 0); + if (irq <= 0) { + netdev_err(ndev, "irq_of_parse_and_map failed\n"); + return -EINVAL; + } + + priv = netdev_priv(ndev); + priv->ndev = ndev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ndev->base_addr = res->start; + priv->base = devm_ioremap_resource(p_dev, res); + ret = IS_ERR(priv->base); + if (ret) { + dev_err(p_dev, "devm_ioremap_resource failed\n"); + goto init_fail; + } + + spin_lock_init(&priv->txlock); + + priv->tx_buf_size = TX_BUF_SIZE; + priv->rx_buf_size = RX_BUF_SIZE + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + + priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE * + TX_DESC_NUM, &priv->tx_base, + GFP_DMA | GFP_KERNEL); + if (priv->tx_desc_base == NULL) + goto init_fail; + + priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE * + RX_DESC_NUM, &priv->rx_base, + GFP_DMA | GFP_KERNEL); + if (priv->rx_desc_base == NULL) + goto init_fail; + + priv->tx_buf_base = kmalloc(priv->tx_buf_size * TX_DESC_NUM, + GFP_ATOMIC); + if (!priv->tx_buf_base) + goto init_fail; + + priv->rx_buf_base = kmalloc(priv->rx_buf_size * RX_DESC_NUM, + GFP_ATOMIC); + if (!priv->rx_buf_base) + goto init_fail; + + platform_set_drvdata(pdev, ndev); + + ret = devm_request_irq(p_dev, irq, moxart_mac_interrupt, 0, + pdev->name, ndev); + if (ret) { + netdev_err(ndev, "devm_request_irq failed\n"); + goto init_fail; + } + + ether_setup(ndev); + ndev->netdev_ops = &moxart_netdev_ops; + netif_napi_add(ndev, &priv->napi, moxart_rx_poll, RX_DESC_NUM); + ndev->priv_flags |= IFF_UNICAST_FLT; + ndev->irq = irq; + + SET_NETDEV_DEV(ndev, &pdev->dev); + + ret = register_netdev(ndev); + if (ret) { + free_netdev(ndev); + goto init_fail; + } + + netdev_dbg(ndev, "%s: IRQ=%d address=%pM\n", + __func__, ndev->irq, ndev->dev_addr); + + return 0; + +init_fail: + netdev_err(ndev, "init failed\n"); + moxart_mac_free_memory(ndev); + + return ret; +} + +static int moxart_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + + unregister_netdev(ndev); + free_irq(ndev->irq, ndev); + moxart_mac_free_memory(ndev); + platform_set_drvdata(pdev, NULL); + free_netdev(ndev); + + return 0; +} + +static const struct of_device_id moxart_mac_match[] = { + { .compatible = "moxa,moxart-mac" }, + { } +}; + +struct __initdata platform_driver moxart_mac_driver = { 
+ .probe = moxart_mac_probe, + .remove = moxart_remove, + .driver = { + .name = "moxart-ethernet", + .owner = THIS_MODULE, + .of_match_table = moxart_mac_match, + }, +}; +module_platform_driver(moxart_mac_driver); + +MODULE_DESCRIPTION("MOXART RTL8201CP Ethernet driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Jonas Jensen "); diff --git a/drivers/net/ethernet/moxa/moxart_ether.h b/drivers/net/ethernet/moxa/moxart_ether.h new file mode 100644 index 0000000..2be9280 --- /dev/null +++ b/drivers/net/ethernet/moxa/moxart_ether.h @@ -0,0 +1,330 @@ +/* MOXA ART Ethernet (RTL8201CP) driver. + * + * Copyright (C) 2013 Jonas Jensen + * + * Jonas Jensen + * + * Based on code from + * Moxa Technology Co., Ltd. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef _MOXART_ETHERNET_H +#define _MOXART_ETHERNET_H + +#define TX_REG_OFFSET_DESC0 0 +#define TX_REG_OFFSET_DESC1 4 +#define TX_REG_OFFSET_DESC2 8 +#define TX_REG_DESC_SIZE 16 + +#define RX_REG_OFFSET_DESC0 0 +#define RX_REG_OFFSET_DESC1 4 +#define RX_REG_OFFSET_DESC2 8 +#define RX_REG_DESC_SIZE 16 + +#define TX_DESC0_PKT_LATE_COL 0x1 /* abort, late collision */ +#define TX_DESC0_RX_PKT_EXS_COL 0x2 /* abort, >16 collisions */ +#define TX_DESC0_DMA_OWN 0x80000000 /* owned by controller */ +#define TX_DESC1_BUF_SIZE_MASK 0x7ff +#define TX_DESC1_LTS 0x8000000 /* last TX packet */ +#define TX_DESC1_FTS 0x10000000 /* first TX packet */ +#define TX_DESC1_FIFO_COMPLETE 0x20000000 +#define TX_DESC1_INTR_COMPLETE 0x40000000 +#define TX_DESC1_END 0x80000000 +#define TX_DESC2_ADDRESS_PHYS 0 +#define TX_DESC2_ADDRESS_VIRT 4 + +#define RX_DESC0_FRAME_LEN 0 +#define RX_DESC0_FRAME_LEN_MASK 0x7FF +#define RX_DESC0_MULTICAST 0x10000 +#define RX_DESC0_BROADCAST 0x20000 +#define RX_DESC0_ERR 0x40000 +#define RX_DESC0_CRC_ERR 0x80000 +#define RX_DESC0_FTL 0x100000 +#define RX_DESC0_RUNT 0x200000 /* packet less than 64 bytes */ +#define RX_DESC0_ODD_NB 0x400000 /* receive odd nibbles */ +#define RX_DESC0_LRS 0x10000000 /* last receive segment */ +#define RX_DESC0_FRS 0x20000000 /* first receive segment */ +#define RX_DESC0_DMA_OWN 0x80000000 +#define RX_DESC1_BUF_SIZE_MASK 0x7FF +#define RX_DESC1_END 0x80000000 +#define RX_DESC2_ADDRESS_PHYS 0 +#define RX_DESC2_ADDRESS_VIRT 4 + +#define TX_DESC_NUM 64 +#define TX_DESC_NUM_MASK (TX_DESC_NUM-1) +#define TX_NEXT(N) (((N) + 1) & (TX_DESC_NUM_MASK)) +#define TX_BUF_SIZE 1600 +#define TX_BUF_SIZE_MAX (TX_DESC1_BUF_SIZE_MASK+1) + +#define RX_DESC_NUM 64 +#define RX_DESC_NUM_MASK (RX_DESC_NUM-1) +#define RX_NEXT(N) (((N) + 1) & (RX_DESC_NUM_MASK)) +#define RX_BUF_SIZE 1600 +#define RX_BUF_SIZE_MAX (RX_DESC1_BUF_SIZE_MASK+1) + +#define REG_INTERRUPT_STATUS 0 +#define REG_INTERRUPT_MASK 4 +#define REG_MAC_MS_ADDRESS 8 +#define REG_MAC_LS_ADDRESS 12 +#define REG_MCAST_HASH_TABLE0 16 +#define REG_MCAST_HASH_TABLE1 20 +#define REG_TX_POLL_DEMAND 24 +#define REG_RX_POLL_DEMAND 28 +#define REG_TXR_BASE_ADDRESS 32 +#define REG_RXR_BASE_ADDRESS 36 +#define REG_INT_TIMER_CTRL 40 +#define REG_APOLL_TIMER_CTRL 44 +#define REG_DMA_BLEN_CTRL 48 +#define REG_RESERVED1 52 +#define REG_MAC_CTRL 136 +#define REG_MAC_STATUS 140 +#define REG_PHY_CTRL 144 +#define REG_PHY_WRITE_DATA 148 +#define REG_FLOW_CTRL 152 +#define REG_BACK_PRESSURE 156 +#define REG_RESERVED2 160 +#define REG_TEST_SEED 196 +#define REG_DMA_FIFO_STATE 200 +#define REG_TEST_MODE 204 +#define REG_RESERVED3 208 
+#define REG_TX_COL_COUNTER 212 +#define REG_RPF_AEP_COUNTER 216 +#define REG_XM_PG_COUNTER 220 +#define REG_RUNT_TLC_COUNTER 224 +#define REG_CRC_FTL_COUNTER 228 +#define REG_RLC_RCC_COUNTER 232 +#define REG_BROC_COUNTER 236 +#define REG_MULCA_COUNTER 240 +#define REG_RP_COUNTER 244 +#define REG_XP_COUNTER 248 + +#define REG_PHY_CTRL_OFFSET 0x0 +#define REG_PHY_STATUS 0x1 +#define REG_PHY_ID1 0x2 +#define REG_PHY_ID2 0x3 +#define REG_PHY_ANA 0x4 +#define REG_PHY_ANLPAR 0x5 +#define REG_PHY_ANE 0x6 +#define REG_PHY_ECTRL1 0x10 +#define REG_PHY_QPDS 0x11 +#define REG_PHY_10BOP 0x12 +#define REG_PHY_ECTRL2 0x13 +#define REG_PHY_FTMAC100_WRITE 0x8000000 +#define REG_PHY_FTMAC100_READ 0x4000000 + +/* REG_INTERRUPT_STATUS */ +#define RPKT_FINISH BIT(0) /* DMA data received */ +#define NORXBUF BIT(1) /* receive buffer unavailable */ +#define XPKT_FINISH BIT(2) /* DMA moved data to TX FIFO */ +#define NOTXBUF BIT(3) /* transmit buffer unavailable */ +#define XPKT_OK_INT_STS BIT(4) /* transmit to ethernet success */ +#define XPKT_LOST_INT_STS BIT(5) /* transmit ethernet lost (collision) */ +#define RPKT_SAV BIT(6) /* FIFO receive success */ +#define RPKT_LOST_INT_STS BIT(7) /* FIFO full, receive failed */ +#define AHB_ERR BIT(8) /* AHB error */ +#define PHYSTS_CHG BIT(9) /* PHY link status change */ + +/* REG_INTERRUPT_MASK */ +#define RPKT_FINISH_M BIT(0) +#define NORXBUF_M BIT(1) +#define XPKT_FINISH_M BIT(2) +#define NOTXBUF_M BIT(3) +#define XPKT_OK_M BIT(4) +#define XPKT_LOST_M BIT(5) +#define RPKT_SAV_M BIT(6) +#define RPKT_LOST_M BIT(7) +#define AHB_ERR_M BIT(8) +#define PHYSTS_CHG_M BIT(9) + +/* REG_MAC_MS_ADDRESS */ +#define MAC_MADR_MASK 0xffff /* 2 MSB MAC address */ + +/* REG_INT_TIMER_CTRL */ +#define TXINT_TIME_SEL BIT(15) /* TX cycle time period */ +#define TXINT_THR_MASK 0x7000 +#define TXINT_CNT_MASK 0xf00 +#define RXINT_TIME_SEL BIT(7) /* RX cycle time period */ +#define RXINT_THR_MASK 0x70 +#define RXINT_CNT_MASK 0xF + +/* REG_APOLL_TIMER_CTRL */ +#define TXPOLL_TIME_SEL BIT(12) /* TX poll time period */ +#define TXPOLL_CNT_MASK 0xf00 +#define TXPOLL_CNT_SHIFT_BIT 8 +#define RXPOLL_TIME_SEL BIT(4) /* RX poll time period */ +#define RXPOLL_CNT_MASK 0xF +#define RXPOLL_CNT_SHIFT_BIT 0 + +/* REG_DMA_BLEN_CTRL */ +#define RX_THR_EN BIT(9) /* RX FIFO threshold arbitration */ +#define RXFIFO_HTHR_MASK 0x1c0 +#define RXFIFO_LTHR_MASK 0x38 +#define INCR16_EN BIT(2) /* AHB bus INCR16 burst command */ +#define INCR8_EN BIT(1) /* AHB bus INCR8 burst command */ +#define INCR4_EN BIT(0) /* AHB bus INCR4 burst command */ + +/* REG_MAC_CTRL */ +#define RX_BROADPKT BIT(17) /* receive broadcast packets */ +#define RX_MULTIPKT BIT(16) /* receive all multicast packets */ +#define FULLDUP BIT(15) /* full duplex */ +#define CRC_APD BIT(14) /* append CRC to transmitted packet */ +#define RCV_ALL BIT(12) /* ignore incoming packet destination */ +#define RX_FTL BIT(11) /* accept packets larger than 1518 B */ +#define RX_RUNT BIT(10) /* accept packets smaller than 64 B */ +#define HT_MULTI_EN BIT(9) /* accept on hash and mcast pass */ +#define RCV_EN BIT(8) /* receiver enable */ +#define ENRX_IN_HALFTX BIT(6) /* enable receive in half duplex mode */ +#define XMT_EN BIT(5) /* transmit enable */ +#define CRC_DIS BIT(4) /* disable CRC check when receiving */ +#define LOOP_EN BIT(3) /* internal loop-back */ +#define SW_RST BIT(2) /* software reset, last 64 AHB clocks */ +#define RDMA_EN BIT(1) /* enable receive DMA chan */ +#define XDMA_EN BIT(0) /* enable transmit DMA chan */ + +/* REG_MAC_STATUS */ 
+#define COL_EXCEED BIT(11) /* more than 16 collisions */ +#define LATE_COL BIT(10) /* transmit late collision detected */ +#define XPKT_LOST BIT(9) /* transmit to ethernet lost */ +#define XPKT_OK BIT(8) /* transmit to ethernet success */ +#define RUNT_MAC_STS BIT(7) /* receive runt detected */ +#define FTL_MAC_STS BIT(6) /* receive frame too long detected */ +#define CRC_ERR_MAC_STS BIT(5) +#define RPKT_LOST BIT(4) /* RX FIFO full, receive failed */ +#define RPKT_SAVE BIT(3) /* RX FIFO receive success */ +#define COL BIT(2) /* collision, incoming packet dropped */ +#define MCPU_BROADCAST BIT(1) +#define MCPU_MULTICAST BIT(0) + +/* REG_PHY_CTRL */ +#define MIIWR BIT(27) /* init write sequence (auto cleared)*/ +#define MIIRD BIT(26) +#define REGAD_MASK 0x3e00000 +#define PHYAD_MASK 0x1f0000 +#define MIIRDATA_MASK 0xffff + +/* REG_PHY_WRITE_DATA */ +#define MIIWDATA_MASK 0xffff + +/* REG_FLOW_CTRL */ +#define PAUSE_TIME_MASK 0xffff0000 +#define FC_HIGH_MASK 0xf000 +#define FC_LOW_MASK 0xf00 +#define RX_PAUSE BIT(4) /* receive pause frame */ +#define TX_PAUSED BIT(3) /* transmit pause due to receive */ +#define FCTHR_EN BIT(2) /* enable threshold mode. */ +#define TX_PAUSE BIT(1) /* transmit pause frame */ +#define FC_EN BIT(0) /* flow control mode enable */ + +/* REG_BACK_PRESSURE */ +#define BACKP_LOW_MASK 0xf00 +#define BACKP_JAM_LEN_MASK 0xf0 +#define BACKP_MODE BIT(1) /* address mode */ +#define BACKP_ENABLE BIT(0) + +/* REG_TEST_SEED */ +#define TEST_SEED_MASK 0x3fff + +/* REG_DMA_FIFO_STATE */ +#define TX_DMA_REQUEST BIT(31) +#define RX_DMA_REQUEST BIT(30) +#define TX_DMA_GRANT BIT(29) +#define RX_DMA_GRANT BIT(28) +#define TX_FIFO_EMPTY BIT(27) +#define RX_FIFO_EMPTY BIT(26) +#define TX_DMA2_SM_MASK 0x7000 +#define TX_DMA1_SM_MASK 0xf00 +#define RX_DMA2_SM_MASK 0x70 +#define RX_DMA1_SM_MASK 0xF + +/* REG_TEST_MODE */ +#define SINGLE_PKT BIT(26) /* single packet mode */ +#define PTIMER_TEST BIT(25) /* automatic polling timer test mode */ +#define ITIMER_TEST BIT(24) /* interrupt timer test mode */ +#define TEST_SEED_SELECT BIT(22) +#define SEED_SELECT BIT(21) +#define TEST_MODE BIT(20) +#define TEST_TIME_MASK 0xffc00 +#define TEST_EXCEL_MASK 0x3e0 + +/* REG_TX_COL_COUNTER */ +#define TX_MCOL_MASK 0xffff0000 +#define TX_MCOL_SHIFT_BIT 16 +#define TX_SCOL_MASK 0xffff +#define TX_SCOL_SHIFT_BIT 0 + +/* REG_RPF_AEP_COUNTER */ +#define RPF_MASK 0xffff0000 +#define RPF_SHIFT_BIT 16 +#define AEP_MASK 0xffff +#define AEP_SHIFT_BIT 0 + +/* REG_XM_PG_COUNTER */ +#define XM_MASK 0xffff0000 +#define XM_SHIFT_BIT 16 +#define PG_MASK 0xffff +#define PG_SHIFT_BIT 0 + +/* REG_RUNT_TLC_COUNTER */ +#define RUNT_CNT_MASK 0xffff0000 +#define RUNT_CNT_SHIFT_BIT 16 +#define TLCC_MASK 0xffff +#define TLCC_SHIFT_BIT 0 + +/* REG_CRC_FTL_COUNTER */ +#define CRCER_CNT_MASK 0xffff0000 +#define CRCER_CNT_SHIFT_BIT 16 +#define FTL_CNT_MASK 0xffff +#define FTL_CNT_SHIFT_BIT 0 + +/* REG_RLC_RCC_COUNTER */ +#define RLC_MASK 0xffff0000 +#define RLC_SHIFT_BIT 16 +#define RCC_MASK 0xffff +#define RCC_SHIFT_BIT 0 + +/* REG_PHY_STATUS */ +#define AN_COMPLETE 0x20 +#define LINK_STATUS 0x4 + +struct moxart_mac_priv_t { + void __iomem *base; + struct net_device_stats stats; + unsigned int reg_maccr; + unsigned int reg_imr; + struct napi_struct napi; + struct net_device *ndev; + + dma_addr_t rx_base; + dma_addr_t rx_mapping[RX_DESC_NUM]; + void __iomem *rx_desc_base; + unsigned char *rx_buf_base; + unsigned char *rx_buf[RX_DESC_NUM]; + unsigned int rx_head; + unsigned int rx_buf_size; + + dma_addr_t tx_base; + dma_addr_t 
tx_mapping[TX_DESC_NUM]; + void __iomem *tx_desc_base; + unsigned char *tx_buf_base; + unsigned char *tx_buf[RX_DESC_NUM]; + unsigned int tx_head; + unsigned int tx_buf_size; + + spinlock_t txlock; + unsigned int tx_len[TX_DESC_NUM]; + struct sk_buff *tx_skb[TX_DESC_NUM]; + unsigned int tx_tail; +}; + +#if TX_BUF_SIZE >= TX_BUF_SIZE_MAX +#error MOXA ART Ethernet device driver TX buffer is too large! +#endif +#if RX_BUF_SIZE >= RX_BUF_SIZE_MAX +#error MOXA ART Ethernet device driver RX buffer is too large! +#endif + +#endif -- cgit v0.10.2 From f3dfd20860db3d0c400dd83a378176a28d3662db Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sun, 11 Aug 2013 21:54:48 -0700 Subject: af_unix: fix bug on large send() commit e370a723632 ("af_unix: improve STREAM behavior with fragmented memory") added a bug on large send() because the skb_copy_datagram_from_iovec() call always start from the beginning of iovec. We must instead use the @sent variable to properly skip the already processed part. Reported-by: Hannes Frederic Sowa Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index fee9e33..86de99a 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1669,7 +1669,8 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, skb_put(skb, size - data_len); skb->data_len = data_len; skb->len = size; - err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, 0, size); + err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, + sent, size); if (err) { kfree_skb(skb); goto out_err; -- cgit v0.10.2 From 1da5fcc86d71040c5b294ca5611ae6c86bfa815c Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 6 Aug 2013 14:10:48 +0200 Subject: nl80211: clean up CQM settings code Clean up the CQM settings code a bit and while at it enforce that when setting the threshold to 0 (disable) the hysteresis is also set to 0 to avoid confusion. As we haven't enforce it, simply override userspace. 
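For illustration, a userspace request that disables RSSI monitoring would look roughly like the following libnl-3 sketch. Only the nl80211 command and attributes are taken from this patch; the "state" struct, socket setup and error handling are assumptions for the example:

	/* Disable RSSI CQM: threshold 0. After this patch the kernel forces
	 * hysteresis to 0 as well, so whatever is sent there is ignored. */
	struct nl_msg *msg = nlmsg_alloc();
	struct nlattr *cqm;

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, state->nl80211_id,
		    0, 0, NL80211_CMD_SET_CQM, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, state->ifindex);

	cqm = nla_nest_start(msg, NL80211_ATTR_CQM);
	nla_put_s32(msg, NL80211_ATTR_CQM_RSSI_THOLD, 0);
	nla_put_u32(msg, NL80211_ATTR_CQM_RSSI_HYST, 2); /* overridden to 0 */
	nla_nest_end(msg, cqm);

	nl_send_auto(state->sock, msg);
	nlmsg_free(msg);
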
Signed-off-by: Johannes Berg diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index f7cb121..c2a40a2 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -7562,14 +7562,12 @@ static int nl80211_set_cqm_txe(struct genl_info *info, u32 rate, u32 pkts, u32 intvl) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; - struct wireless_dev *wdev; struct net_device *dev = info->user_ptr[1]; + struct wireless_dev *wdev = dev->ieee80211_ptr; if (rate > 100 || intvl > NL80211_CQM_TXE_MAX_INTVL) return -EINVAL; - wdev = dev->ieee80211_ptr; - if (!rdev->ops->set_cqm_txe_config) return -EOPNOTSUPP; @@ -7584,13 +7582,15 @@ static int nl80211_set_cqm_rssi(struct genl_info *info, s32 threshold, u32 hysteresis) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; - struct wireless_dev *wdev; struct net_device *dev = info->user_ptr[1]; + struct wireless_dev *wdev = dev->ieee80211_ptr; if (threshold > 0) return -EINVAL; - wdev = dev->ieee80211_ptr; + /* disabling - hysteresis should also be zero then */ + if (threshold == 0) + hysteresis = 0; if (!rdev->ops->set_cqm_rssi_config) return -EOPNOTSUPP; @@ -7609,36 +7609,33 @@ static int nl80211_set_cqm(struct sk_buff *skb, struct genl_info *info) int err; cqm = info->attrs[NL80211_ATTR_CQM]; - if (!cqm) { - err = -EINVAL; - goto out; - } + if (!cqm) + return -EINVAL; err = nla_parse_nested(attrs, NL80211_ATTR_CQM_MAX, cqm, nl80211_attr_cqm_policy); if (err) - goto out; + return err; if (attrs[NL80211_ATTR_CQM_RSSI_THOLD] && attrs[NL80211_ATTR_CQM_RSSI_HYST]) { - s32 threshold; - u32 hysteresis; - threshold = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_THOLD]); - hysteresis = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_HYST]); - err = nl80211_set_cqm_rssi(info, threshold, hysteresis); - } else if (attrs[NL80211_ATTR_CQM_TXE_RATE] && - attrs[NL80211_ATTR_CQM_TXE_PKTS] && - attrs[NL80211_ATTR_CQM_TXE_INTVL]) { - u32 rate, pkts, intvl; - rate = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_RATE]); - pkts = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_PKTS]); - intvl = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_INTVL]); - err = nl80211_set_cqm_txe(info, rate, pkts, intvl); - } else - err = -EINVAL; + s32 threshold = nla_get_s32(attrs[NL80211_ATTR_CQM_RSSI_THOLD]); + u32 hysteresis = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_HYST]); -out: - return err; + return nl80211_set_cqm_rssi(info, threshold, hysteresis); + } + + if (attrs[NL80211_ATTR_CQM_TXE_RATE] && + attrs[NL80211_ATTR_CQM_TXE_PKTS] && + attrs[NL80211_ATTR_CQM_TXE_INTVL]) { + u32 rate = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_RATE]); + u32 pkts = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_PKTS]); + u32 intvl = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_INTVL]); + + return nl80211_set_cqm_txe(info, rate, pkts, intvl); + } + + return -EINVAL; } static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info) -- cgit v0.10.2 From af61a165187bb94b1dc7628ef815c23d0eacf40b Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 2 Jul 2013 18:09:12 +0200 Subject: mac80211: add control port protocol TX control flag A lot of drivers check the frame protocol for ETH_P_PAE, for various reasons (like making those more reliable). Add a new flags bitmap to the TX control info and a new flag indicating the control port protocol is in use to let all drivers also apply such logic to other control port protocols, should they be configured. Also use the new flag in the iwlwifi drivers. 
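For illustration, the driver-side check this enables would look roughly like the sketch below. The helper itself is hypothetical (and assumes <net/mac80211.h>); the flag, macro and old-style comparison are the ones touched by this patch:

	static bool frame_is_control_port(struct sk_buff *skb)
	{
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		/* Replaces driver checks such as
		 * skb->protocol == cpu_to_be16(ETH_P_PAE), which only catch
		 * EAPOL and miss other configured control port protocols. */
		return !!(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO);
	}
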
Signed-off-by: Johannes Berg diff --git a/Documentation/DocBook/80211.tmpl b/Documentation/DocBook/80211.tmpl index 49267ea..f403ec3 100644 --- a/Documentation/DocBook/80211.tmpl +++ b/Documentation/DocBook/80211.tmpl @@ -325,6 +325,7 @@ functions/definitions !Finclude/net/mac80211.h ieee80211_rx_status !Finclude/net/mac80211.h mac80211_rx_flags +!Finclude/net/mac80211.h mac80211_tx_info_flags !Finclude/net/mac80211.h mac80211_tx_control_flags !Finclude/net/mac80211.h mac80211_rate_control_flags !Finclude/net/mac80211.h ieee80211_tx_rate diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c index 5ee983f..f364583 100644 --- a/drivers/net/wireless/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/iwlwifi/dvm/tx.c @@ -87,7 +87,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv, priv->lib->bt_params->advanced_bt_coexist && (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc) || - skb->protocol == cpu_to_be16(ETH_P_PAE))) + info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)) tx_flags |= TX_CMD_FLG_IGNORE_BT; diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h index 4491c1c..684c416 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h +++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h @@ -33,10 +33,11 @@ static inline bool iwl_trace_data(struct sk_buff *skb) { struct ieee80211_hdr *hdr = (void *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - if (ieee80211_is_data(hdr->frame_control)) - return skb->protocol != cpu_to_be16(ETH_P_PAE); - return false; + if (!ieee80211_is_data(hdr->frame_control)) + return false; + return !(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO); } static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans, diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index f0e96a9..d62a6d3 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -91,11 +91,10 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR; /* High prio packet (wrt. BT coex) if it is EAPOL, MCAST or MGMT */ - if (info->band == IEEE80211_BAND_2GHZ && - (skb->protocol == cpu_to_be16(ETH_P_PAE) || - is_multicast_ether_addr(hdr->addr1) || - ieee80211_is_back_req(fc) || - ieee80211_is_mgmt(fc))) + if (info->band == IEEE80211_BAND_2GHZ && + (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO || + is_multicast_ether_addr(hdr->addr1) || + ieee80211_is_back_req(fc) || ieee80211_is_mgmt(fc))) tx_flags |= TX_CMD_FLG_BT_DIS; if (ieee80211_has_morefrags(fc)) diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 9cda372..b70c001 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -375,7 +375,7 @@ struct ieee80211_bss_conf { }; /** - * enum mac80211_tx_control_flags - flags to describe transmission information/status + * enum mac80211_tx_info_flags - flags to describe transmission information/status * * These flags are used with the @flags member of &ieee80211_tx_info. * @@ -471,7 +471,7 @@ struct ieee80211_bss_conf { * Note: If you have to add new flags to the enumeration, then don't * forget to update %IEEE80211_TX_TEMPORARY_FLAGS when necessary. 
*/ -enum mac80211_tx_control_flags { +enum mac80211_tx_info_flags { IEEE80211_TX_CTL_REQ_TX_STATUS = BIT(0), IEEE80211_TX_CTL_ASSIGN_SEQ = BIT(1), IEEE80211_TX_CTL_NO_ACK = BIT(2), @@ -507,6 +507,18 @@ enum mac80211_tx_control_flags { #define IEEE80211_TX_CTL_STBC_SHIFT 23 +/** + * enum mac80211_tx_control_flags - flags to describe transmit control + * + * @IEEE80211_TX_CTRL_PORT_CTRL_PROTO: this frame is a port control + * protocol frame (e.g. EAP) + * + * These flags are used in tx_info->control.flags. + */ +enum mac80211_tx_control_flags { + IEEE80211_TX_CTRL_PORT_CTRL_PROTO = BIT(0), +}; + /* * This definition is used as a mask to clear all temporary flags, which are * set by the tx handlers for each transmission attempt by the mac80211 stack. @@ -680,7 +692,8 @@ struct ieee80211_tx_info { /* NB: vif can be NULL for injected frames */ struct ieee80211_vif *vif; struct ieee80211_key_conf *hw_key; - /* 8 bytes free */ + u32 flags; + /* 4 bytes free */ } control; struct { struct ieee80211_tx_rate rates[IEEE80211_TX_MAX_RATES]; diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 7475a7a..9eff382 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -439,12 +439,13 @@ minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct sta_info *sta = container_of(pubsta, struct sta_info, sta); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); u16 tid; if (unlikely(!ieee80211_is_data_qos(hdr->frame_control))) return; - if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))) + if (unlikely(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)) return; tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; @@ -776,7 +777,7 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, /* Don't use EAPOL frames for sampling on non-mrr hw */ if (mp->hw->max_rates == 1 && - txrc->skb->protocol == cpu_to_be16(ETH_P_PAE)) + (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)) sample_idx = -1; else sample_idx = minstrel_get_sample_rate(mp, mi); diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 0e42322..098ae85 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -539,9 +539,11 @@ ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); - if (unlikely(tx->sdata->control_port_protocol == tx->skb->protocol && - tx->sdata->control_port_no_encrypt)) - info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; + if (unlikely(tx->sdata->control_port_protocol == tx->skb->protocol)) { + if (tx->sdata->control_port_no_encrypt) + info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; + info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO; + } return TX_CONTINUE; } -- cgit v0.10.2 From fc73f11f5fa230f8c687d51b0fddb00433092ce0 Mon Sep 17 00:00:00 2001 From: David Spinadel Date: Wed, 31 Jul 2013 18:04:15 +0300 Subject: cfg80211: add wdev to testmode cmd To allow drivers to implement per-interface testmode operations more easily, pass a wdev pointer if any identification for one was given from userspace. Clean up the code a bit while at it. 
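For illustration, a driver converted to the new callback would look roughly like this sketch; the "mydrv" names are made up, only the prototype follows the cfg80211_ops change below:

	static int mydrv_testmode_cmd(struct wiphy *wiphy,
				      struct wireless_dev *wdev,
				      void *data, int len)
	{
		struct mydrv_priv *priv = wiphy_priv(wiphy);

		/* wdev may be NULL when userspace addressed the command to
		 * the wiphy only and not to a specific interface. */
		if (!wdev)
			return mydrv_run_global_test(priv, data, len);

		return mydrv_run_iface_test(priv, wdev, data, len);
	}
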
Signed-off-by: David Spinadel Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/ath/ath6kl/testmode.c b/drivers/net/wireless/ath/ath6kl/testmode.c index acc9aa8..d67170e 100644 --- a/drivers/net/wireless/ath/ath6kl/testmode.c +++ b/drivers/net/wireless/ath/ath6kl/testmode.c @@ -66,7 +66,8 @@ nla_put_failure: ath6kl_warn("nla_put failed on testmode rx skb!\n"); } -int ath6kl_tm_cmd(struct wiphy *wiphy, void *data, int len) +int ath6kl_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev, + void *data, int len) { struct ath6kl *ar = wiphy_priv(wiphy); struct nlattr *tb[ATH6KL_TM_ATTR_MAX + 1]; diff --git a/drivers/net/wireless/ath/ath6kl/testmode.h b/drivers/net/wireless/ath/ath6kl/testmode.h index fe651d6..9fbcdec3 100644 --- a/drivers/net/wireless/ath/ath6kl/testmode.h +++ b/drivers/net/wireless/ath/ath6kl/testmode.h @@ -20,7 +20,8 @@ #ifdef CONFIG_NL80211_TESTMODE void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf, size_t buf_len); -int ath6kl_tm_cmd(struct wiphy *wiphy, void *data, int len); +int ath6kl_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev, + void *data, int len); #else @@ -29,7 +30,9 @@ static inline void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf, { } -static inline int ath6kl_tm_cmd(struct wiphy *wiphy, void *data, int len) +static inline int ath6kl_tm_cmd(struct wiphy *wiphy, + struct wireless_dev *wdev, + void *data, int len) { return 0; } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index 277b37a..b7d8850 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c @@ -3152,7 +3152,9 @@ static int brcmf_cfg80211_sched_scan_stop(struct wiphy *wiphy, } #ifdef CONFIG_NL80211_TESTMODE -static int brcmf_cfg80211_testmode(struct wiphy *wiphy, void *data, int len) +static int brcmf_cfg80211_testmode(struct wiphy *wiphy, + struct wireless_dev *wdev, + void *data, int len) { struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct net_device *ndev = cfg_to_ndev(cfg); diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index b7495c7..9ab7a06 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -2081,7 +2081,7 @@ struct cfg80211_update_ft_ies_params { * @mgmt_tx_cancel_wait: Cancel the wait time from transmitting a management * frame on another channel * - * @testmode_cmd: run a test mode command + * @testmode_cmd: run a test mode command; @wdev may be %NULL * @testmode_dump: Implement a test mode dump. The cb->args[2] and up may be * used by the function, but 0 and 1 must not be touched. 
Additionally, * return error codes other than -ENOBUFS and -ENOENT will terminate the @@ -2290,7 +2290,8 @@ struct cfg80211_ops { void (*rfkill_poll)(struct wiphy *wiphy); #ifdef CONFIG_NL80211_TESTMODE - int (*testmode_cmd)(struct wiphy *wiphy, void *data, int len); + int (*testmode_cmd)(struct wiphy *wiphy, struct wireless_dev *wdev, + void *data, int len); int (*testmode_dump)(struct wiphy *wiphy, struct sk_buff *skb, struct netlink_callback *cb, void *data, int len); diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 44449ce..c77916f 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2300,7 +2300,9 @@ static void ieee80211_rfkill_poll(struct wiphy *wiphy) } #ifdef CONFIG_NL80211_TESTMODE -static int ieee80211_testmode_cmd(struct wiphy *wiphy, void *data, int len) +static int ieee80211_testmode_cmd(struct wiphy *wiphy, + struct wireless_dev *wdev, + void *data, int len) { struct ieee80211_local *local = wiphy_priv(wiphy); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index c2a40a2..334697d 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -6591,19 +6591,30 @@ static struct genl_multicast_group nl80211_testmode_mcgrp = { static int nl80211_testmode_do(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; + struct wireless_dev *wdev = + __cfg80211_wdev_from_attrs(genl_info_net(info), info->attrs); int err; + if (!rdev->ops->testmode_cmd) + return -EOPNOTSUPP; + + if (IS_ERR(wdev)) { + err = PTR_ERR(wdev); + if (err != -EINVAL) + return err; + wdev = NULL; + } else if (wdev->wiphy != &rdev->wiphy) { + return -EINVAL; + } + if (!info->attrs[NL80211_ATTR_TESTDATA]) return -EINVAL; - err = -EOPNOTSUPP; - if (rdev->ops->testmode_cmd) { - rdev->testmode_info = info; - err = rdev_testmode_cmd(rdev, + rdev->testmode_info = info; + err = rdev_testmode_cmd(rdev, wdev, nla_data(info->attrs[NL80211_ATTR_TESTDATA]), nla_len(info->attrs[NL80211_ATTR_TESTDATA])); - rdev->testmode_info = NULL; - } + rdev->testmode_info = NULL; return err; } diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h index de870d4..37ce9fd 100644 --- a/net/wireless/rdev-ops.h +++ b/net/wireless/rdev-ops.h @@ -516,11 +516,12 @@ static inline void rdev_rfkill_poll(struct cfg80211_registered_device *rdev) #ifdef CONFIG_NL80211_TESTMODE static inline int rdev_testmode_cmd(struct cfg80211_registered_device *rdev, + struct wireless_dev *wdev, void *data, int len) { int ret; - trace_rdev_testmode_cmd(&rdev->wiphy); - ret = rdev->ops->testmode_cmd(&rdev->wiphy, data, len); + trace_rdev_testmode_cmd(&rdev->wiphy, wdev); + ret = rdev->ops->testmode_cmd(&rdev->wiphy, wdev, data, len); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } diff --git a/net/wireless/trace.h b/net/wireless/trace.h index f0ebdcd..ba5f0d6 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -1293,15 +1293,17 @@ TRACE_EVENT(rdev_return_int_int, #ifdef CONFIG_NL80211_TESTMODE TRACE_EVENT(rdev_testmode_cmd, - TP_PROTO(struct wiphy *wiphy), - TP_ARGS(wiphy), + TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), + TP_ARGS(wiphy, wdev), TP_STRUCT__entry( WIPHY_ENTRY + WDEV_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; + WDEV_ASSIGN; ), - TP_printk(WIPHY_PR_FMT, WIPHY_PR_ARG) + TP_printk(WIPHY_PR_FMT WDEV_PR_FMT, WIPHY_PR_ARG, WDEV_PR_ARG) ); TRACE_EVENT(rdev_testmode_dump, -- cgit v0.10.2 From 52981cd79461e47fe683febfcbd3d380c72b1c6c Mon Sep 17 00:00:00 2001 From: David Spinadel Date: Wed, 31 Jul 2013 18:06:22 +0300 Subject: mac80211: add vif 
to testmode cmd Pass the wdev from cfg80211 on to the driver as the vif if given and it's valid for the driver. Signed-off-by: David Spinadel Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 7b2a622..a0d2aac 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -1364,6 +1364,7 @@ static const struct nla_policy hwsim_testmode_policy[HWSIM_TM_ATTR_MAX + 1] = { static int hwsim_fops_ps_write(void *dat, u64 val); static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, void *data, int len) { struct mac80211_hwsim_data *hwsim = hw->priv; diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c index f344276..527590f 100644 --- a/drivers/net/wireless/ti/wlcore/testmode.c +++ b/drivers/net/wireless/ti/wlcore/testmode.c @@ -356,7 +356,8 @@ out: return ret; } -int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len) +int wl1271_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + void *data, int len) { struct wl1271 *wl = hw->priv; struct nlattr *tb[WL1271_TM_ATTR_MAX + 1]; diff --git a/drivers/net/wireless/ti/wlcore/testmode.h b/drivers/net/wireless/ti/wlcore/testmode.h index 8071654..61d8434 100644 --- a/drivers/net/wireless/ti/wlcore/testmode.h +++ b/drivers/net/wireless/ti/wlcore/testmode.h @@ -26,6 +26,7 @@ #include -int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len); +int wl1271_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + void *data, int len); #endif /* __WL1271_TESTMODE_H__ */ diff --git a/include/net/mac80211.h b/include/net/mac80211.h index b70c001..df93c77 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -2516,8 +2516,8 @@ enum ieee80211_roc_type { * in IEEE 802.11-2007 section 17.3.8.6 and modify ACK timeout * accordingly. This callback is not required and may sleep. * - * @testmode_cmd: Implement a cfg80211 test mode command. - * The callback can sleep. + * @testmode_cmd: Implement a cfg80211 test mode command. The passed @vif may + * be %NULL. The callback can sleep. * @testmode_dump: Implement a cfg80211 test mode dump. The callback can sleep. 
* * @flush: Flush all pending frames from the hardware queue, making sure @@ -2778,7 +2778,8 @@ struct ieee80211_ops { void (*rfkill_poll)(struct ieee80211_hw *hw); void (*set_coverage_class)(struct ieee80211_hw *hw, u8 coverage_class); #ifdef CONFIG_NL80211_TESTMODE - int (*testmode_cmd)(struct ieee80211_hw *hw, void *data, int len); + int (*testmode_cmd)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + void *data, int len); int (*testmode_dump)(struct ieee80211_hw *hw, struct sk_buff *skb, struct netlink_callback *cb, void *data, int len); diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index c77916f..7aa38ce 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2305,11 +2305,20 @@ static int ieee80211_testmode_cmd(struct wiphy *wiphy, void *data, int len) { struct ieee80211_local *local = wiphy_priv(wiphy); + struct ieee80211_vif *vif = NULL; if (!local->ops->testmode_cmd) return -EOPNOTSUPP; - return local->ops->testmode_cmd(&local->hw, data, len); + if (wdev) { + struct ieee80211_sub_if_data *sdata; + + sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); + if (sdata->flags & IEEE80211_SDATA_IN_DRIVER) + vif = &sdata->vif; + } + + return local->ops->testmode_cmd(&local->hw, vif, data, len); } static int ieee80211_testmode_dump(struct wiphy *wiphy, -- cgit v0.10.2 From f8f03c3edc39f179457a4a5c6095f45a8300db9b Mon Sep 17 00:00:00 2001 From: Eytan Lifshitz Date: Wed, 7 Aug 2013 19:36:42 +0300 Subject: iwlwifi: mvm: add support to the new FW time event API The time event firmware API will change, add the support for that. Use the new API throughout and convert to the old where needed. Signed-off-by: Eytan Lifshitz Reviewed-by: Guy Cohen Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h index bd335f0..1705d24 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw.h @@ -76,6 +76,7 @@ * @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS * @IWL_UCODE_TLV_FLAGS_UAPSD: This uCode image supports uAPSD * @IWL_UCODE_TLV_FLAGS_RX_ENERGY_API: supports rx signal strength api + * @IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2: using the new time event API. * @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six * (rather than two) IPv6 addresses * @IWL_UCODE_TLV_FLAGS_BF_UPDATED: new beacon filtering API @@ -88,6 +89,7 @@ enum iwl_ucode_tlv_flag { IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4), IWL_UCODE_TLV_FLAGS_UAPSD = BIT(6), IWL_UCODE_TLV_FLAGS_RX_ENERGY_API = BIT(8), + IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2 = BIT(9), IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10), IWL_UCODE_TLV_FLAGS_BF_UPDATED = BIT(11), }; diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h index 55854a3..b104710 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h @@ -499,71 +499,199 @@ enum iwl_time_event_type { TE_MAX }; /* MAC_EVENT_TYPE_API_E_VER_1 */ + + +/* Time event - defines for command API v1 */ + +/* + * @TE_V1_FRAG_NONE: fragmentation of the time event is NOT allowed. + * @TE_V1_FRAG_SINGLE: fragmentation of the time event is allowed, but only + * the first fragment is scheduled. + * @TE_V1_FRAG_DUAL: fragmentation of the time event is allowed, but only + * the first 2 fragments are scheduled. + * @TE_V1_FRAG_ENDLESS: fragmentation of the time event is allowed, and any + * number of fragments are valid. 
+ * + * Other than the constant defined above, specifying a fragmentation value 'x' + * means that the event can be fragmented but only the first 'x' will be + * scheduled. + */ +enum { + TE_V1_FRAG_NONE = 0, + TE_V1_FRAG_SINGLE = 1, + TE_V1_FRAG_DUAL = 2, + TE_V1_FRAG_ENDLESS = 0xffffffff +}; + +/* If a Time Event can be fragmented, this is the max number of fragments */ +#define TE_V1_FRAG_MAX_MSK 0x0fffffff +/* Repeat the time event endlessly (until removed) */ +#define TE_V1_REPEAT_ENDLESS 0xffffffff +/* If a Time Event has bounded repetitions, this is the maximal value */ +#define TE_V1_REPEAT_MAX_MSK_V1 0x0fffffff + /* Time Event dependencies: none, on another TE, or in a specific time */ enum { - TE_INDEPENDENT = 0, - TE_DEP_OTHER = 1, - TE_DEP_TSF = 2, - TE_EVENT_SOCIOPATHIC = 4, + TE_V1_INDEPENDENT = 0, + TE_V1_DEP_OTHER = BIT(0), + TE_V1_DEP_TSF = BIT(1), + TE_V1_EVENT_SOCIOPATHIC = BIT(2), }; /* MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */ + /* + * @TE_V1_NOTIF_NONE: no notifications + * @TE_V1_NOTIF_HOST_EVENT_START: request/receive notification on event start + * @TE_V1_NOTIF_HOST_EVENT_END:request/receive notification on event end + * @TE_V1_NOTIF_INTERNAL_EVENT_START: internal FW use + * @TE_V1_NOTIF_INTERNAL_EVENT_END: internal FW use. + * @TE_V1_NOTIF_HOST_FRAG_START: request/receive notification on frag start + * @TE_V1_NOTIF_HOST_FRAG_END:request/receive notification on frag end + * @TE_V1_NOTIF_INTERNAL_FRAG_START: internal FW use. + * @TE_V1_NOTIF_INTERNAL_FRAG_END: internal FW use. + * * Supported Time event notifications configuration. * A notification (both event and fragment) includes a status indicating weather * the FW was able to schedule the event or not. For fragment start/end * notification the status is always success. There is no start/end fragment * notification for monolithic events. - * - * @TE_NOTIF_NONE: no notifications - * @TE_NOTIF_HOST_EVENT_START: request/receive notification on event start - * @TE_NOTIF_HOST_EVENT_END:request/receive notification on event end - * @TE_NOTIF_INTERNAL_EVENT_START: internal FW use - * @TE_NOTIF_INTERNAL_EVENT_END: internal FW use. - * @TE_NOTIF_HOST_FRAG_START: request/receive notification on frag start - * @TE_NOTIF_HOST_FRAG_END:request/receive notification on frag end - * @TE_NOTIF_INTERNAL_FRAG_START: internal FW use. - * @TE_NOTIF_INTERNAL_FRAG_END: internal FW use. */ enum { - TE_NOTIF_NONE = 0, - TE_NOTIF_HOST_EVENT_START = 0x1, - TE_NOTIF_HOST_EVENT_END = 0x2, - TE_NOTIF_INTERNAL_EVENT_START = 0x4, - TE_NOTIF_INTERNAL_EVENT_END = 0x8, - TE_NOTIF_HOST_FRAG_START = 0x10, - TE_NOTIF_HOST_FRAG_END = 0x20, - TE_NOTIF_INTERNAL_FRAG_START = 0x40, - TE_NOTIF_INTERNAL_FRAG_END = 0x80 + TE_V1_NOTIF_NONE = 0, + TE_V1_NOTIF_HOST_EVENT_START = BIT(0), + TE_V1_NOTIF_HOST_EVENT_END = BIT(1), + TE_V1_NOTIF_INTERNAL_EVENT_START = BIT(2), + TE_V1_NOTIF_INTERNAL_EVENT_END = BIT(3), + TE_V1_NOTIF_HOST_FRAG_START = BIT(4), + TE_V1_NOTIF_HOST_FRAG_END = BIT(5), + TE_V1_NOTIF_INTERNAL_FRAG_START = BIT(6), + TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7), }; /* MAC_EVENT_ACTION_API_E_VER_2 */ + +/** + * struct iwl_time_event_cmd_api_v1 - configuring Time Events + * with struct MAC_TIME_EVENT_DATA_API_S_VER_1 (see also + * with version 2. determined by IWL_UCODE_TLV_FLAGS) + * ( TIME_EVENT_CMD = 0x29 ) + * @id_and_color: ID and color of the relevant MAC + * @action: action to perform, one of FW_CTXT_ACTION_* + * @id: this field has two meanings, depending on the action: + * If the action is ADD, then it means the type of event to add. 
+ * For all other actions it is the unique event ID assigned when the + * event was added by the FW. + * @apply_time: When to start the Time Event (in GP2) + * @max_delay: maximum delay to event's start (apply time), in TU + * @depends_on: the unique ID of the event we depend on (if any) + * @interval: interval between repetitions, in TU + * @interval_reciprocal: 2^32 / interval + * @duration: duration of event in TU + * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS + * @dep_policy: one of TE_V1_INDEPENDENT, TE_V1_DEP_OTHER, TE_V1_DEP_TSF + * and TE_V1_EVENT_SOCIOPATHIC + * @is_present: 0 or 1, are we present or absent during the Time Event + * @max_frags: maximal number of fragments the Time Event can be divided to + * @notify: notifications using TE_V1_NOTIF_* (whom to notify when) + */ +struct iwl_time_event_cmd_v1 { + /* COMMON_INDEX_HDR_API_S_VER_1 */ + __le32 id_and_color; + __le32 action; + __le32 id; + /* MAC_TIME_EVENT_DATA_API_S_VER_1 */ + __le32 apply_time; + __le32 max_delay; + __le32 dep_policy; + __le32 depends_on; + __le32 is_present; + __le32 max_frags; + __le32 interval; + __le32 interval_reciprocal; + __le32 duration; + __le32 repeat; + __le32 notify; +} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_1 */ + + +/* Time event - defines for command API v2 */ + /* - * @TE_FRAG_NONE: fragmentation of the time event is NOT allowed. - * @TE_FRAG_SINGLE: fragmentation of the time event is allowed, but only + * @TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed. + * @TE_V2_FRAG_SINGLE: fragmentation of the time event is allowed, but only * the first fragment is scheduled. - * @TE_FRAG_DUAL: fragmentation of the time event is allowed, but only + * @TE_V2_FRAG_DUAL: fragmentation of the time event is allowed, but only * the first 2 fragments are scheduled. - * @TE_FRAG_ENDLESS: fragmentation of the time event is allowed, and any number - * of fragments are valid. + * @TE_V2_FRAG_ENDLESS: fragmentation of the time event is allowed, and any + * number of fragments are valid. * * Other than the constant defined above, specifying a fragmentation value 'x' * means that the event can be fragmented but only the first 'x' will be * scheduled. */ enum { - TE_FRAG_NONE = 0, - TE_FRAG_SINGLE = 1, - TE_FRAG_DUAL = 2, - TE_FRAG_ENDLESS = 0xffffffff + TE_V2_FRAG_NONE = 0, + TE_V2_FRAG_SINGLE = 1, + TE_V2_FRAG_DUAL = 2, + TE_V2_FRAG_MAX = 0xfe, + TE_V2_FRAG_ENDLESS = 0xff }; /* Repeat the time event endlessly (until removed) */ -#define TE_REPEAT_ENDLESS (0xffffffff) +#define TE_V2_REPEAT_ENDLESS 0xff /* If a Time Event has bounded repetitions, this is the maximal value */ -#define TE_REPEAT_MAX_MSK (0x0fffffff) -/* If a Time Event can be fragmented, this is the max number of fragments */ -#define TE_FRAG_MAX_MSK (0x0fffffff) +#define TE_V2_REPEAT_MAX 0xfe + +#define TE_V2_PLACEMENT_POS 12 +#define TE_V2_ABSENCE_POS 15 + +/* Time event policy values (for time event cmd api v2) + * A notification (both event and fragment) includes a status indicating weather + * the FW was able to schedule the event or not. For fragment start/end + * notification the status is always success. There is no start/end fragment + * notification for monolithic events. + * + * @TE_V2_DEFAULT_POLICY: independent, social, present, unoticable + * @TE_V2_NOTIF_HOST_EVENT_START: request/receive notification on event start + * @TE_V2_NOTIF_HOST_EVENT_END:request/receive notification on event end + * @TE_V2_NOTIF_INTERNAL_EVENT_START: internal FW use + * @TE_V2_NOTIF_INTERNAL_EVENT_END: internal FW use. 
+ * @TE_V2_NOTIF_HOST_FRAG_START: request/receive notification on frag start + * @TE_V2_NOTIF_HOST_FRAG_END:request/receive notification on frag end + * @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use. + * @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use. + * @TE_V2_DEP_OTHER: depends on another time event + * @TE_V2_DEP_TSF: depends on a specific time + * @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of tha same MAC + * @TE_V2_ABSENCE: are we present or absent during the Time Event. + */ +enum { + TE_V2_DEFAULT_POLICY = 0x0, + + /* notifications (event start/stop, fragment start/stop) */ + TE_V2_NOTIF_HOST_EVENT_START = BIT(0), + TE_V2_NOTIF_HOST_EVENT_END = BIT(1), + TE_V2_NOTIF_INTERNAL_EVENT_START = BIT(2), + TE_V2_NOTIF_INTERNAL_EVENT_END = BIT(3), + + TE_V2_NOTIF_HOST_FRAG_START = BIT(4), + TE_V2_NOTIF_HOST_FRAG_END = BIT(5), + TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6), + TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7), + + TE_V2_NOTIF_MSK = 0xff, + + /* placement characteristics */ + TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS), + TE_V2_DEP_TSF = BIT(TE_V2_PLACEMENT_POS + 1), + TE_V2_EVENT_SOCIOPATHIC = BIT(TE_V2_PLACEMENT_POS + 2), + + /* are we present or absent during the Time Event. */ + TE_V2_ABSENCE = BIT(TE_V2_ABSENCE_POS), +}; /** - * struct iwl_time_event_cmd - configuring Time Events + * struct iwl_time_event_cmd_api_v2 - configuring Time Events + * with struct MAC_TIME_EVENT_DATA_API_S_VER_2 (see also + * with version 1. determined by IWL_UCODE_TLV_FLAGS) * ( TIME_EVENT_CMD = 0x29 ) * @id_and_color: ID and color of the relevant MAC * @action: action to perform, one of FW_CTXT_ACTION_* @@ -575,32 +703,30 @@ enum { * @max_delay: maximum delay to event's start (apply time), in TU * @depends_on: the unique ID of the event we depend on (if any) * @interval: interval between repetitions, in TU - * @interval_reciprocal: 2^32 / interval * @duration: duration of event in TU * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS - * @dep_policy: one of TE_INDEPENDENT, TE_DEP_OTHER, TE_DEP_TSF - * @is_present: 0 or 1, are we present or absent during the Time Event * @max_frags: maximal number of fragments the Time Event can be divided to - * @notify: notifications using TE_NOTIF_* (whom to notify when) + * @policy: defines whether uCode shall notify the host or other uCode modules + * on event and/or fragment start and/or end + * using one of TE_INDEPENDENT, TE_DEP_OTHER, TE_DEP_TSF + * TE_EVENT_SOCIOPATHIC + * using TE_ABSENCE and using TE_NOTIF_* */ -struct iwl_time_event_cmd { +struct iwl_time_event_cmd_v2 { /* COMMON_INDEX_HDR_API_S_VER_1 */ __le32 id_and_color; __le32 action; __le32 id; - /* MAC_TIME_EVENT_DATA_API_S_VER_1 */ + /* MAC_TIME_EVENT_DATA_API_S_VER_2 */ __le32 apply_time; __le32 max_delay; - __le32 dep_policy; __le32 depends_on; - __le32 is_present; - __le32 max_frags; __le32 interval; - __le32 interval_reciprocal; __le32 duration; - __le32 repeat; - __le32 notify; -} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_1 */ + u8 repeat; + u8 max_frags; + __le16 policy; +} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_2 */ /** * struct iwl_time_event_resp - response structure to iwl_time_event_cmd diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c index ad9bbca..7ed94a0 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c @@ -166,7 +166,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, WARN_ONCE(!le32_to_cpu(notif->status), "Failed to schedule 
time event\n"); - if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_END) { + if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) { IWL_DEBUG_TE(mvm, "TE ended - current time %lu, estimated end %lu\n", jiffies, te_data->end_jiffies); @@ -189,7 +189,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, } iwl_mvm_te_clear_data(mvm, te_data); - } else if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_START) { + } else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) { te_data->running = true; te_data->end_jiffies = jiffies + TU_TO_JIFFIES(te_data->duration); @@ -257,10 +257,67 @@ static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait, return true; } +/* used to convert from time event API v2 to v1 */ +#define TE_V2_DEP_POLICY_MSK (TE_V2_DEP_OTHER | TE_V2_DEP_TSF |\ + TE_V2_EVENT_SOCIOPATHIC) +static inline u16 te_v2_get_notify(__le16 policy) +{ + return le16_to_cpu(policy) & TE_V2_NOTIF_MSK; +} + +static inline u16 te_v2_get_dep_policy(__le16 policy) +{ + return (le16_to_cpu(policy) & TE_V2_DEP_POLICY_MSK) >> + TE_V2_PLACEMENT_POS; +} + +static inline u16 te_v2_get_absence(__le16 policy) +{ + return (le16_to_cpu(policy) & TE_V2_ABSENCE) >> TE_V2_ABSENCE_POS; +} + +static void iwl_mvm_te_v2_to_v1(const struct iwl_time_event_cmd_v2 *cmd_v2, + struct iwl_time_event_cmd_v1 *cmd_v1) +{ + cmd_v1->id_and_color = cmd_v2->id_and_color; + cmd_v1->action = cmd_v2->action; + cmd_v1->id = cmd_v2->id; + cmd_v1->apply_time = cmd_v2->apply_time; + cmd_v1->max_delay = cmd_v2->max_delay; + cmd_v1->depends_on = cmd_v2->depends_on; + cmd_v1->interval = cmd_v2->interval; + cmd_v1->duration = cmd_v2->duration; + if (cmd_v2->repeat == TE_V2_REPEAT_ENDLESS) + cmd_v1->repeat = cpu_to_le32(TE_V1_REPEAT_ENDLESS); + else + cmd_v1->repeat = cpu_to_le32(cmd_v2->repeat); + cmd_v1->max_frags = cpu_to_le32(cmd_v2->max_frags); + cmd_v1->interval_reciprocal = 0; /* unused */ + + cmd_v1->dep_policy = cpu_to_le32(te_v2_get_dep_policy(cmd_v2->policy)); + cmd_v1->is_present = cpu_to_le32(!te_v2_get_absence(cmd_v2->policy)); + cmd_v1->notify = cpu_to_le32(te_v2_get_notify(cmd_v2->policy)); +} + +static int iwl_mvm_send_time_event_cmd(struct iwl_mvm *mvm, + const struct iwl_time_event_cmd_v2 *cmd) +{ + struct iwl_time_event_cmd_v1 cmd_v1; + + if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2) + return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC, + sizeof(*cmd), cmd); + + iwl_mvm_te_v2_to_v1(cmd, &cmd_v1); + return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC, + sizeof(cmd_v1), &cmd_v1); +} + + static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_time_event_data *te_data, - struct iwl_time_event_cmd *te_cmd) + struct iwl_time_event_cmd_v2 *te_cmd) { static const u8 time_event_response[] = { TIME_EVENT_CMD }; struct iwl_notification_wait wait_time_event; @@ -296,8 +353,7 @@ static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm, ARRAY_SIZE(time_event_response), iwl_mvm_time_event_response, te_data); - ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC, - sizeof(*te_cmd), te_cmd); + ret = iwl_mvm_send_time_event_cmd(mvm, te_cmd); if (ret) { IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret); iwl_remove_notification(&mvm->notif_wait, &wait_time_event); @@ -324,7 +380,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; - struct iwl_time_event_cmd 
time_cmd = {}; + struct iwl_time_event_cmd_v2 time_cmd = {}; lockdep_assert_held(&mvm->mutex); @@ -359,17 +415,14 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm, time_cmd.apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG)); - time_cmd.dep_policy = TE_INDEPENDENT; - time_cmd.is_present = cpu_to_le32(1); - time_cmd.max_frags = cpu_to_le32(TE_FRAG_NONE); + time_cmd.max_frags = TE_V2_FRAG_NONE; time_cmd.max_delay = cpu_to_le32(500); /* TODO: why do we need to interval = bi if it is not periodic? */ time_cmd.interval = cpu_to_le32(1); - time_cmd.interval_reciprocal = cpu_to_le32(iwl_mvm_reciprocal(1)); time_cmd.duration = cpu_to_le32(duration); - time_cmd.repeat = cpu_to_le32(1); - time_cmd.notify = cpu_to_le32(TE_NOTIF_HOST_EVENT_START | - TE_NOTIF_HOST_EVENT_END); + time_cmd.repeat = 1; + time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | + TE_V2_NOTIF_HOST_EVENT_END); iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); } @@ -383,7 +436,7 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif, struct iwl_mvm_time_event_data *te_data) { - struct iwl_time_event_cmd time_cmd = {}; + struct iwl_time_event_cmd_v2 time_cmd = {}; u32 id, uid; int ret; @@ -420,8 +473,7 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm, cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id)); - ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC, - sizeof(time_cmd), &time_cmd); + ret = iwl_mvm_send_time_event_cmd(mvm, &time_cmd); if (WARN_ON(ret)) return; } @@ -441,7 +493,7 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; - struct iwl_time_event_cmd time_cmd = {}; + struct iwl_time_event_cmd_v2 time_cmd = {}; lockdep_assert_held(&mvm->mutex); if (te_data->running) { @@ -472,8 +524,6 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, } time_cmd.apply_time = cpu_to_le32(0); - time_cmd.dep_policy = cpu_to_le32(TE_INDEPENDENT); - time_cmd.is_present = cpu_to_le32(1); time_cmd.interval = cpu_to_le32(1); /* @@ -482,12 +532,12 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, * scheduled. To improve the chances of it being scheduled, allow them * to be fragmented, and in addition allow them to be delayed. */ - time_cmd.max_frags = cpu_to_le32(MSEC_TO_TU(duration)/20); + time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS); time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2)); time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration)); - time_cmd.repeat = cpu_to_le32(1); - time_cmd.notify = cpu_to_le32(TE_NOTIF_HOST_EVENT_START | - TE_NOTIF_HOST_EVENT_END); + time_cmd.repeat = 1; + time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | + TE_V2_NOTIF_HOST_EVENT_END); return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); } -- cgit v0.10.2 From 6965a3540a4b45ee5b6fa91276a8134e25e17b63 Mon Sep 17 00:00:00 2001 From: Luciano Coelho Date: Sat, 10 Aug 2013 16:35:45 +0300 Subject: iwlwifi: pcie: don't swallow error codes in iwl_trans_pcie_alloc() The iwl_trans_pcie_alloc() function doesn't pass up error codes returned from functions it calls, swallowing them and returning NULL in all failure cases. The caller checks if the return value is NULL and returns -ENOMEM. 
This is not correct, because in certain cases the failure was not due to an OOM situation. To fix this, modify the iwl_trans_pcie_alloc() function to use ERR_PTR() to return error codes and clean up the error handling code a bit. Signed-off-by: Luciano Coelho Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index 9ec8dfe..e179efe 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -324,8 +324,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) int ret; iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg); - if (iwl_trans == NULL) - return -ENOMEM; + if (IS_ERR(iwl_trans)) + return PTR_ERR(iwl_trans); pci_set_drvdata(pdev, iwl_trans); diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index d88c0c7..ff9787f 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -1381,9 +1381,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, trans = kzalloc(sizeof(struct iwl_trans) + sizeof(struct iwl_trans_pcie), GFP_KERNEL); - - if (!trans) - return NULL; + if (!trans) { + err = -ENOMEM; + goto out; + } trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -1406,10 +1407,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, PCIE_LINK_STATE_CLKPM); } - if (pci_enable_device(pdev)) { - err = -ENODEV; + err = pci_enable_device(pdev); + if (err) goto out_no_pci; - } pci_set_master(pdev); @@ -1478,17 +1478,20 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, SLAB_HWCACHE_ALIGN, NULL); - if (!trans->dev_cmd_pool) + if (!trans->dev_cmd_pool) { + err = -ENOMEM; goto out_pci_disable_msi; + } trans_pcie->inta_mask = CSR_INI_SET_MASK; if (iwl_pcie_alloc_ict(trans)) goto out_free_cmd_pool; - if (request_threaded_irq(pdev->irq, iwl_pcie_isr_ict, - iwl_pcie_irq_handler, - IRQF_SHARED, DRV_NAME, trans)) { + err = request_threaded_irq(pdev->irq, iwl_pcie_isr_ict, + iwl_pcie_irq_handler, + IRQF_SHARED, DRV_NAME, trans); + if (err) { IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); goto out_free_ict; } @@ -1507,5 +1510,6 @@ out_pci_disable_device: pci_disable_device(pdev); out_no_pci: kfree(trans); - return NULL; +out: + return ERR_PTR(err); } -- cgit v0.10.2 From ab3077009f3e5237aaddaf0d5d139b1fd59d3b32 Mon Sep 17 00:00:00 2001 From: Eyal Shapira Date: Sun, 4 Aug 2013 12:40:37 +0300 Subject: iwlwifi: mvm: remove rate_scale_data debugfs entry This isn't very informative and can be deduced from rate_scale_table. Remove as a preparation for dropping iwl_rs_rate_info.ieee. 
Signed-off-by: Eyal Shapira Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index d680c89..199f0cf 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@ -3009,32 +3009,6 @@ static const struct file_operations rs_sta_dbgfs_stats_table_ops = { .llseek = default_llseek, }; -static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file, - char __user *user_buf, size_t count, loff_t *ppos) -{ - struct iwl_lq_sta *lq_sta = file->private_data; - struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl]; - char buff[120]; - int desc = 0; - - if (is_Ht(tbl->lq_type)) - desc += sprintf(buff+desc, - "Bit Rate= %d Mb/s\n", - tbl->expected_tpt[lq_sta->last_txrate_idx]); - else - desc += sprintf(buff+desc, - "Bit Rate= %d Mb/s\n", - iwl_rates[lq_sta->last_txrate_idx].ieee >> 1); - - return simple_read_from_buffer(user_buf, count, ppos, buff, desc); -} - -static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = { - .read = rs_sta_dbgfs_rate_scale_data_read, - .open = simple_open, - .llseek = default_llseek, -}; - static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir) { struct iwl_lq_sta *lq_sta = mvm_sta; @@ -3044,9 +3018,6 @@ static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir) lq_sta->rs_sta_dbgfs_stats_table_file = debugfs_create_file("rate_stats_table", S_IRUSR, dir, lq_sta, &rs_sta_dbgfs_stats_table_ops); - lq_sta->rs_sta_dbgfs_rate_scale_data_file = - debugfs_create_file("rate_scale_data", S_IRUSR, dir, - lq_sta, &rs_sta_dbgfs_rate_scale_data_ops); lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file = debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir, &lq_sta->tx_agg_tid_en); @@ -3057,7 +3028,6 @@ static void rs_remove_debugfs(void *mvm, void *mvm_sta) struct iwl_lq_sta *lq_sta = mvm_sta; debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file); debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file); - debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file); debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file); } #endif diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h index 4a99a4d..917a48f 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/iwlwifi/mvm/rs.h @@ -330,7 +330,6 @@ struct iwl_lq_sta { #ifdef CONFIG_MAC80211_DEBUGFS struct dentry *rs_sta_dbgfs_scale_table_file; struct dentry *rs_sta_dbgfs_stats_table_file; - struct dentry *rs_sta_dbgfs_rate_scale_data_file; struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file; u32 dbg_fixed_rate; #endif -- cgit v0.10.2 From 2ab9ba0fdfa20e81c4454c57f534585a264cb238 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Sun, 11 Aug 2013 02:03:21 +0300 Subject: iwlwifi: pcie: returning positive instead of negative There is a missing '-' character here so we return positive 'ENOMEM' instead of negative. The caller doesn't care. All non-zero returns are translated to '-ENOMEM' in iwl_pcie_nic_init(). This is just a cleanup. 
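For context only (not part of the patch): a minimal sketch of the errno convention the one-character fix restores. example_alloc() is a made-up helper; the pattern shown is simply the standard kernel one of returning a negative errno from a failure path.

	static int example_alloc(size_t n)
	{
		void *buf;
		int ret = 0;

		buf = kzalloc(n, GFP_KERNEL);
		if (!buf) {
			/* negative errno; a bare ENOMEM here is a positive value
			 * that a caller propagating ret would hand up unchanged */
			ret = -ENOMEM;
			goto error;
		}
		/* ... use buf ... */
		kfree(buf);
		return 0;

	error:
		return ret;
	}
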
Signed-off-by: Dan Carpenter Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 12c9c00..f45eb29 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -838,7 +838,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) sizeof(struct iwl_txq), GFP_KERNEL); if (!trans_pcie->txq) { IWL_ERR(trans, "Not enough memory for txq\n"); - ret = ENOMEM; + ret = -ENOMEM; goto error; } -- cgit v0.10.2 From ab055ce9f5a9ff7dddf73f58bf1164e96b8af713 Mon Sep 17 00:00:00 2001 From: Eyal Shapira Date: Sun, 4 Aug 2013 13:10:31 +0300 Subject: iwlwifi: mvm: remove unused fields of iwl_rs_rate_info Some more cleanups of unused fields and their initializations. Signed-off-by: Eyal Shapira Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index 199f0cf..a69bd4c 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@ -82,41 +82,36 @@ static const u8 ant_toggle_lookup[] = { [ANT_ABC] = ANT_ABC, }; -#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \ +#define IWL_DECLARE_RATE_INFO(r, s, rp, rn) \ [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ IWL_RATE_SISO_##s##M_PLCP, \ IWL_RATE_MIMO2_##s##M_PLCP,\ IWL_RATE_MIMO3_##s##M_PLCP,\ - IWL_RATE_##r##M_IEEE, \ - IWL_RATE_##ip##M_INDEX, \ - IWL_RATE_##in##M_INDEX, \ IWL_RATE_##rp##M_INDEX, \ - IWL_RATE_##rn##M_INDEX, \ - IWL_RATE_##pp##M_INDEX, \ - IWL_RATE_##np##M_INDEX } + IWL_RATE_##rn##M_INDEX } /* * Parameter order: - * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate + * rate, ht rate, prev rate, next rate * * If there isn't a valid next or previous rate then INV is used which * maps to IWL_RATE_INVALID * */ static const struct iwl_rs_rate_info iwl_rates[IWL_RATE_COUNT] = { - IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */ - IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */ - IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */ - IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */ - IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */ - IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */ - IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */ - IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */ - IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */ - IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */ - IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */ - IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */ - IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */ + IWL_DECLARE_RATE_INFO(1, INV, INV, 2), /* 1mbps */ + IWL_DECLARE_RATE_INFO(2, INV, 1, 5), /* 2mbps */ + IWL_DECLARE_RATE_INFO(5, INV, 2, 11), /*5.5mbps */ + IWL_DECLARE_RATE_INFO(11, INV, 9, 12), /* 11mbps */ + IWL_DECLARE_RATE_INFO(6, 6, 5, 11), /* 6mbps */ + IWL_DECLARE_RATE_INFO(9, 6, 6, 11), /* 9mbps */ + IWL_DECLARE_RATE_INFO(12, 12, 11, 18), /* 12mbps */ + IWL_DECLARE_RATE_INFO(18, 18, 12, 24), /* 18mbps */ + IWL_DECLARE_RATE_INFO(24, 24, 18, 36), /* 24mbps */ + IWL_DECLARE_RATE_INFO(36, 36, 24, 48), /* 36mbps */ + IWL_DECLARE_RATE_INFO(48, 48, 36, 54), /* 48mbps */ + IWL_DECLARE_RATE_INFO(54, 54, 48, INV), /* 54mbps */ + IWL_DECLARE_RATE_INFO(60, 60, 48, INV), /* 60mbps */ /* FIXME:RS: ^^ should be INV (legacy) */ }; diff --git 
a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h index 917a48f..9ac7c0b 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/iwlwifi/mvm/rs.h @@ -39,13 +39,8 @@ struct iwl_rs_rate_info { u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */ u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */ u8 plcp_mimo3; /* uCode API: IWL_RATE_MIMO3_6M_PLCP, etc. */ - u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */ - u8 prev_ieee; /* previous rate in IEEE speeds */ - u8 next_ieee; /* next rate in IEEE speeds */ u8 prev_rs; /* previous rate used in rs algo */ u8 next_rs; /* next rate used in rs algo */ - u8 prev_rs_tgg; /* previous rate used in TGG rs algo */ - u8 next_rs_tgg; /* next rate used in TGG rs algo */ }; #define IWL_RATE_60M_PLCP 3 @@ -120,23 +115,6 @@ enum { IWL_RATE_MIMO3_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP, }; -/* MAC header values for bit rates */ -enum { - IWL_RATE_6M_IEEE = 12, - IWL_RATE_9M_IEEE = 18, - IWL_RATE_12M_IEEE = 24, - IWL_RATE_18M_IEEE = 36, - IWL_RATE_24M_IEEE = 48, - IWL_RATE_36M_IEEE = 72, - IWL_RATE_48M_IEEE = 96, - IWL_RATE_54M_IEEE = 108, - IWL_RATE_60M_IEEE = 120, - IWL_RATE_1M_IEEE = 2, - IWL_RATE_2M_IEEE = 4, - IWL_RATE_5M_IEEE = 11, - IWL_RATE_11M_IEEE = 22, -}; - #define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1) #define IWL_INVALID_VALUE -1 -- cgit v0.10.2 From faec6f91f52838ead8dabb8545b1312bdd32da4b Mon Sep 17 00:00:00 2001 From: Eliad Peller Date: Wed, 17 Jul 2013 09:16:30 +0300 Subject: iwlwifi: mvm: don't clear tbl->win mistakenly rs_get_tbl_info_from_mcs() mistakenly clears the rate histories window, overriding its initialization values (i.e. filling it with 0, instead of -1). Signed-off-by: Eliad Peller Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index a69bd4c..a587b21 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@ -492,7 +492,7 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags, u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags); u8 mcs; - memset(tbl, 0, sizeof(struct iwl_scale_tbl_info)); + memset(tbl, 0, offsetof(struct iwl_scale_tbl_info, win)); *rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags); if (*rate_idx == IWL_RATE_INVALID) { -- cgit v0.10.2 From e3c588ec0d9ef4e52caf0704a007440fb381d97f Mon Sep 17 00:00:00 2001 From: Alexander Bondar Date: Sun, 7 Apr 2013 14:08:59 +0300 Subject: iwlwifi: mvm: Add basic uAPSD client support Implement basic uAPSD client support adding the following: - Advertise uAPSD support in HW capabilities - Set all ACs trigger- and delivery-enabled - Set max SP length to 2 buffered frames - Assign QNDP with the highest TID with no mandatory admission control required - Set uAPSD related parameters in Power Table command Signed-off-by: Alexander Bondar Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h index 64656e0..33f98fc 100644 --- a/drivers/net/wireless/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/iwlwifi/mvm/constants.h @@ -67,5 +67,11 @@ #define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC) #define IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT (10 * USEC_PER_MSEC) #define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC) +#define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC) +#define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC) +#define IWL_MVM_PS_HEAVY_TX_THLD_PACKETS 20 +#define 
IWL_MVM_PS_HEAVY_RX_THLD_PACKETS 20 +#define IWL_MVM_PS_HEAVY_TX_THLD_PERCENT 50 +#define IWL_MVM_PS_HEAVY_RX_THLD_PERCENT 50 #endif /* __MVM_CONSTANTS_H */ diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c index 3cdc005..14811a5 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c @@ -424,7 +424,7 @@ static ssize_t iwl_dbgfs_pm_params_read(struct file *file, struct ieee80211_vif *vif = file->private_data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->dbgfs_data; - char buf[256]; + char buf[512]; int bufsz = sizeof(buf); int pos; diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h index 060e630..bb010b3 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h @@ -164,10 +164,10 @@ struct iwl_powertable_cmd { * Use IEEE80211_WMM_IE_STA_QOSINFO_AC* for correct values. * @uapsd_max_sp: Use IEEE80211_WMM_IE_STA_QOSINFO_SP_* for correct * values. - * @heavy_traffic_thr_tx_pkts: TX threshold measured in number of packets - * @heavy_traffic_thr_rx_pkts: RX threshold measured in number of packets - * @heavy_traffic_thr_tx_load: TX threshold measured in load's percentage - * @heavy_traffic_thr_rx_load: RX threshold measured in load's percentage + * @heavy_tx_thld_packets: TX threshold measured in number of packets + * @heavy_rx_thld_packets: RX threshold measured in number of packets + * @heavy_tx_thld_percentage: TX threshold measured in load's percentage + * @heavy_rx_thld_percentage: RX threshold measured in load's percentage * @limited_ps_threshold: */ struct iwl_mac_power_cmd { @@ -189,10 +189,10 @@ struct iwl_mac_power_cmd { u8 qndp_tid; u8 uapsd_ac_flags; u8 uapsd_max_sp; - u8 heavy_traffic_threshold_tx_packets; - u8 heavy_traffic_threshold_rx_packets; - u8 heavy_traffic_threshold_tx_percentage; - u8 heavy_traffic_threshold_rx_percentage; + u8 heavy_tx_thld_packets; + u8 heavy_rx_thld_packets; + u8 heavy_tx_thld_percentage; + u8 heavy_rx_thld_percentage; u8 limited_ps_threshold; u8 reserved; } __packed; diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 05daa90..66803b9 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -155,7 +155,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) IEEE80211_HW_TIMING_BEACON_ONLY | IEEE80211_HW_CONNECTION_MONITOR | IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | - IEEE80211_HW_SUPPORTS_STATIC_SMPS; + IEEE80211_HW_SUPPORTS_STATIC_SMPS | + IEEE80211_HW_SUPPORTS_UAPSD; hw->queues = IWL_MVM_FIRST_AGG_QUEUE; hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE; @@ -190,6 +191,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->max_remain_on_channel_duration = 10000; hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; + hw->uapsd_queues = IWL_UAPSD_AC_INFO; + hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; /* Extract MAC address */ memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN); @@ -812,7 +815,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, */ iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); - } else if (changes & BSS_CHANGED_PS) { + } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_QOS)) { ret = iwl_mvm_power_update_mode(mvm, vif); if (ret) IWL_ERR(mvm, "failed to update power mode\n"); diff --git 
a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index 76f6a1f..014d779 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -153,6 +153,11 @@ enum iwl_power_scheme { }; #define IWL_CONN_MAX_LISTEN_INTERVAL 70 +#define IWL_UAPSD_AC_INFO (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\ + IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\ + IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\ + IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) +#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_2 struct iwl_mvm_power_ops { int (*power_update_mode)(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c index 4e7c9f2..9c9b5ba 100644 --- a/drivers/net/wireless/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/iwlwifi/mvm/power.c @@ -140,17 +140,30 @@ static void iwl_mvm_power_log(struct iwl_mvm *mvm, IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n", le16_to_cpu(cmd->keep_alive_seconds)); - if (cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) { - IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n", - le32_to_cpu(cmd->rx_data_timeout)); - IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n", - le32_to_cpu(cmd->tx_data_timeout)); - if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) - IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n", - cmd->skip_dtim_periods); - if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK)) - IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n", - cmd->lprx_rssi_threshold); + if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) { + IWL_DEBUG_POWER(mvm, "Disable power management\n"); + return; + } + + IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n", + le32_to_cpu(cmd->rx_data_timeout)); + IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n", + le32_to_cpu(cmd->tx_data_timeout)); + if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) + IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n", + cmd->skip_dtim_periods); + if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK)) + IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n", + cmd->lprx_rssi_threshold); + if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) { + IWL_DEBUG_POWER(mvm, "uAPSD enabled\n"); + IWL_DEBUG_POWER(mvm, "Rx timeout (uAPSD) = %u usec\n", + le32_to_cpu(cmd->rx_data_timeout_uapsd)); + IWL_DEBUG_POWER(mvm, "Tx timeout (uAPSD) = %u usec\n", + le32_to_cpu(cmd->tx_data_timeout_uapsd)); + IWL_DEBUG_POWER(mvm, "QNDP TID = %d\n", cmd->qndp_tid); + IWL_DEBUG_POWER(mvm, "ACs flags = 0x%x\n", cmd->uapsd_ac_flags); + IWL_DEBUG_POWER(mvm, "Max SP = %d\n", cmd->uapsd_max_sp); } } @@ -166,6 +179,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, bool radar_detect = false; struct iwl_mvm_vif *mvmvif __maybe_unused = iwl_mvm_vif_from_mac80211(vif); + enum ieee80211_ac_numbers ac; + bool tid_found = false; cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); @@ -235,6 +250,49 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT); } + for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) { + if (!mvmvif->queue_params[ac].uapsd) + continue; + + cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK); + cmd->uapsd_ac_flags |= BIT(ac); + + /* QNDP TID - the highest TID with no admission control */ + if (!tid_found && !mvmvif->queue_params[ac].acm) { + tid_found = true; + switch (ac) { + case IEEE80211_AC_VO: + cmd->qndp_tid = 6; + break; + case IEEE80211_AC_VI: + cmd->qndp_tid = 5; + break; + case 
IEEE80211_AC_BE: + cmd->qndp_tid = 0; + break; + case IEEE80211_AC_BK: + cmd->qndp_tid = 1; + break; + } + } + } + + if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) { + cmd->rx_data_timeout_uapsd = + cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT); + cmd->tx_data_timeout_uapsd = + cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT); + cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP; + cmd->heavy_tx_thld_packets = + IWL_MVM_PS_HEAVY_TX_THLD_PACKETS; + cmd->heavy_rx_thld_packets = + IWL_MVM_PS_HEAVY_RX_THLD_PACKETS; + cmd->heavy_tx_thld_percentage = + IWL_MVM_PS_HEAVY_TX_THLD_PERCENT; + cmd->heavy_rx_thld_percentage = + IWL_MVM_PS_HEAVY_RX_THLD_PERCENT; + } + #ifdef CONFIG_IWLWIFI_DEBUGFS if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE) cmd->keep_alive_seconds = @@ -342,8 +400,6 @@ static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ? 0 : 1); - pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n", - cmd.skip_dtim_periods); pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n", iwlmvm_mod_params.power_scheme); pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n", @@ -356,14 +412,51 @@ static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, (cmd.flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ? 1 : 0); - pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n", - le32_to_cpu(cmd.rx_data_timeout)); - pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n", - le32_to_cpu(cmd.tx_data_timeout)); + pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n", + cmd.skip_dtim_periods); + if (!(cmd.flags & + cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) { + pos += scnprintf(buf+pos, bufsz-pos, + "rx_data_timeout = %d\n", + le32_to_cpu(cmd.rx_data_timeout)); + pos += scnprintf(buf+pos, bufsz-pos, + "tx_data_timeout = %d\n", + le32_to_cpu(cmd.tx_data_timeout)); + } if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK)) pos += scnprintf(buf+pos, bufsz-pos, "lprx_rssi_threshold = %d\n", cmd.lprx_rssi_threshold); + if (cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) { + pos += + scnprintf(buf+pos, bufsz-pos, + "rx_data_timeout_uapsd = %d\n", + le32_to_cpu(cmd.rx_data_timeout_uapsd)); + pos += + scnprintf(buf+pos, bufsz-pos, + "tx_data_timeout_uapsd = %d\n", + le32_to_cpu(cmd.tx_data_timeout_uapsd)); + pos += scnprintf(buf+pos, bufsz-pos, "qndp_tid = %d\n", + cmd.qndp_tid); + pos += scnprintf(buf+pos, bufsz-pos, + "uapsd_ac_flags = 0x%x\n", + cmd.uapsd_ac_flags); + pos += scnprintf(buf+pos, bufsz-pos, + "uapsd_max_sp = %d\n", + cmd.uapsd_max_sp); + pos += scnprintf(buf+pos, bufsz-pos, + "heavy_tx_thld_packets = %d\n", + cmd.heavy_tx_thld_packets); + pos += scnprintf(buf+pos, bufsz-pos, + "heavy_rx_thld_packets = %d\n", + cmd.heavy_rx_thld_packets); + pos += scnprintf(buf+pos, bufsz-pos, + "heavy_tx_thld_percentage = %d\n", + cmd.heavy_tx_thld_percentage); + pos += scnprintf(buf+pos, bufsz-pos, + "heavy_rx_thld_percentage = %d\n", + cmd.heavy_rx_thld_percentage); + } } return pos; } -- cgit v0.10.2 From d972ab31b90cf1c61f6f77a1968244280930c000 Mon Sep 17 00:00:00 2001 From: Eyal Shapira Date: Sun, 28 Jul 2013 23:02:45 +0000 Subject: iwlwifi: mvm: remove MIMO3 from rate scale code Current and future chips supported by mvm will only have a maximum of 2 antennas so all the MIMO3 related code and states can be dropped. 
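The conversion below follows one pattern throughout rs.c: each three-way SISO/MIMO2/MIMO3 branch collapses to a two-way one, and the now-impossible third case becomes a warning. A condensed illustration, paraphrasing the hunks that follow rather than quoting any single one of them:

	if (is_siso(tbl->lq_type))
		rate_n_flags |= iwl_rates[index].plcp_siso;
	else if (is_mimo2(tbl->lq_type))
		rate_n_flags |= iwl_rates[index].plcp_mimo2;
	else
		WARN_ON_ONCE(1);	/* MIMO3: unreachable with at most two TX antennas */
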
Signed-off-by: Eyal Shapira Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index a587b21..5cb3132 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@ -86,7 +86,6 @@ static const u8 ant_toggle_lookup[] = { [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ IWL_RATE_SISO_##s##M_PLCP, \ IWL_RATE_MIMO2_##s##M_PLCP,\ - IWL_RATE_MIMO3_##s##M_PLCP,\ IWL_RATE_##rp##M_INDEX, \ IWL_RATE_##rn##M_INDEX } @@ -129,9 +128,8 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags) if (rate_n_flags & RATE_MCS_HT_MSK) { idx = rs_extract_rate(rate_n_flags); - if (idx >= IWL_RATE_MIMO3_6M_PLCP) - idx = idx - IWL_RATE_MIMO3_6M_PLCP; - else if (idx >= IWL_RATE_MIMO2_6M_PLCP) + WARN_ON_ONCE(idx >= IWL_RATE_MIMO3_6M_PLCP); + if (idx >= IWL_RATE_MIMO2_6M_PLCP) idx = idx - IWL_RATE_MIMO2_6M_PLCP; idx += IWL_FIRST_OFDM_RATE; @@ -213,20 +211,6 @@ static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = { {0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */ }; -static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = { - {0, 0, 0, 0, 99, 0, 153, 186, 208, 239, 256, 263, 268}, /* Norm */ - {0, 0, 0, 0, 106, 0, 162, 194, 215, 246, 262, 268, 273}, /* SGI */ - {0, 0, 0, 0, 134, 0, 249, 346, 431, 574, 685, 732, 775}, /* AGG */ - {0, 0, 0, 0, 148, 0, 272, 376, 465, 614, 727, 775, 818}, /* AGG+SGI */ -}; - -static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = { - {0, 0, 0, 0, 152, 0, 211, 239, 255, 279, 290, 294, 297}, /* Norm */ - {0, 0, 0, 0, 160, 0, 219, 245, 261, 284, 294, 297, 300}, /* SGI */ - {0, 0, 0, 0, 254, 0, 443, 584, 695, 868, 984, 1030, 1070}, /* AGG */ - {0, 0, 0, 0, 277, 0, 478, 624, 737, 911, 1026, 1070, 1109}, /* AGG+SGI */ -}; - /* mbps, mcs */ static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = { { "1", "BPSK DSSS"}, @@ -274,7 +258,6 @@ static void rs_program_fix_rate(struct iwl_mvm *mvm, lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */ lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ - lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ IWL_DEBUG_RATE(mvm, "sta_id %d rate 0x%X\n", lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate); @@ -454,7 +437,7 @@ static u32 rate_n_flags_from_tbl(struct iwl_mvm *mvm, else if (is_mimo2(tbl->lq_type)) rate_n_flags |= iwl_rates[index].plcp_mimo2; else - rate_n_flags |= iwl_rates[index].plcp_mimo3; + WARN_ON_ONCE(1); } else { IWL_ERR(mvm, "Invalid tbl->lq_type %d\n", tbl->lq_type); } @@ -531,12 +514,8 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags, } else if (mcs <= IWL_RATE_MIMO2_60M_PLCP) { if (num_of_ant == 2) tbl->lq_type = LQ_MIMO2; - /* MIMO3 */ } else { - if (num_of_ant == 3) { - tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH; - tbl->lq_type = LQ_MIMO3; - } + WARN_ON_ONCE(num_of_ant == 3); } } return 0; @@ -602,10 +581,10 @@ static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta, } else { if (is_siso(rate_type)) return lq_sta->active_siso_rate; - else if (is_mimo2(rate_type)) + else { + WARN_ON_ONCE(!is_mimo2(rate_type)); return lq_sta->active_mimo2_rate; - else - return lq_sta->active_mimo3_rate; + } } } @@ -980,7 +959,7 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta, } /* Choose among many HT tables depending on number of streams - * (SISO/MIMO2/MIMO3), channel width (20/40), SGI, and aggregation + * (SISO/MIMO2), channel width (20/40), SGI, and aggregation 
* status */ if (is_siso(tbl->lq_type) && !tbl->is_ht40) ht_tbl_pointer = expected_tpt_siso20MHz; @@ -988,12 +967,10 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta, ht_tbl_pointer = expected_tpt_siso40MHz; else if (is_mimo2(tbl->lq_type) && !tbl->is_ht40) ht_tbl_pointer = expected_tpt_mimo2_20MHz; - else if (is_mimo2(tbl->lq_type)) + else { + WARN_ON_ONCE(!is_mimo2(tbl->lq_type)); ht_tbl_pointer = expected_tpt_mimo2_40MHz; - else if (is_mimo3(tbl->lq_type) && !tbl->is_ht40) - ht_tbl_pointer = expected_tpt_mimo3_20MHz; - else /* if (is_mimo3(tbl->lq_type)) <-- must be true */ - ht_tbl_pointer = expected_tpt_mimo3_40MHz; + } if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */ tbl->expected_tpt = ht_tbl_pointer[0]; @@ -1165,58 +1142,6 @@ static int rs_switch_to_mimo2(struct iwl_mvm *mvm, } /* - * Set up search table for MIMO3 - */ -static int rs_switch_to_mimo3(struct iwl_mvm *mvm, - struct iwl_lq_sta *lq_sta, - struct ieee80211_sta *sta, - struct iwl_scale_tbl_info *tbl, int index) -{ - u16 rate_mask; - s32 rate; - s8 is_green = lq_sta->is_green; - - if (!sta->ht_cap.ht_supported) - return -1; - - if (sta->smps_mode == IEEE80211_SMPS_STATIC) - return -1; - - /* Need both Tx chains/antennas to support MIMO */ - if (num_of_ant(iwl_fw_valid_tx_ant(mvm->fw)) < 3) - return -1; - - IWL_DEBUG_RATE(mvm, "LQ: try to switch to MIMO3\n"); - - tbl->lq_type = LQ_MIMO3; - tbl->action = 0; - tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH; - rate_mask = lq_sta->active_mimo3_rate; - - if (iwl_is_ht40_tx_allowed(sta)) - tbl->is_ht40 = 1; - else - tbl->is_ht40 = 0; - - rs_set_expected_tpt_table(lq_sta, tbl); - - rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index); - - IWL_DEBUG_RATE(mvm, "LQ: MIMO3 best rate %d mask %X\n", - rate, rate_mask); - if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) { - IWL_DEBUG_RATE(mvm, "Can't switch with index %d rate mask %x\n", - rate, rate_mask); - return -1; - } - tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate, is_green); - - IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index is green %X\n", - tbl->current_rate, is_green); - return 0; -} - -/* * Set up search table for SISO */ static int rs_switch_to_siso(struct iwl_mvm *mvm, @@ -1325,21 +1250,14 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm, } break; - case IWL_LEGACY_SWITCH_MIMO2_AB: - case IWL_LEGACY_SWITCH_MIMO2_AC: - case IWL_LEGACY_SWITCH_MIMO2_BC: + case IWL_LEGACY_SWITCH_MIMO2: IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to MIMO2\n"); /* Set up search table to try MIMO */ memcpy(search_tbl, tbl, sz); search_tbl->is_SGI = 0; - if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB) - search_tbl->ant_type = ANT_AB; - else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC) - search_tbl->ant_type = ANT_AC; - else - search_tbl->ant_type = ANT_BC; + search_tbl->ant_type = ANT_AB; if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type)) @@ -1352,30 +1270,11 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm, goto out; } break; - - case IWL_LEGACY_SWITCH_MIMO3_ABC: - IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to MIMO3\n"); - - /* Set up search table to try MIMO3 */ - memcpy(search_tbl, tbl, sz); - search_tbl->is_SGI = 0; - - search_tbl->ant_type = ANT_ABC; - - if (!rs_is_valid_ant(valid_tx_ant, - search_tbl->ant_type)) - break; - - ret = rs_switch_to_mimo3(mvm, lq_sta, sta, - search_tbl, index); - if (!ret) { - lq_sta->action_counter = 0; - goto out; - } - break; + default: + WARN_ON_ONCE(1); } tbl->action++; - if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC) + if (tbl->action > 
IWL_LEGACY_SWITCH_MIMO2) tbl->action = IWL_LEGACY_SWITCH_ANTENNA1; if (tbl->action == start_action) @@ -1387,7 +1286,7 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm, out: lq_sta->search_better_tbl = 1; tbl->action++; - if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC) + if (tbl->action > IWL_LEGACY_SWITCH_MIMO2) tbl->action = IWL_LEGACY_SWITCH_ANTENNA1; if (update_search_tbl_counter) search_tbl->action = tbl->action; @@ -1422,7 +1321,7 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm, case IWL_BT_COEX_TRAFFIC_LOAD_LOW: /* avoid antenna B unless MIMO */ if (tbl->action == IWL_SISO_SWITCH_ANTENNA2) - tbl->action = IWL_SISO_SWITCH_MIMO2_AB; + tbl->action = IWL_SISO_SWITCH_MIMO2; break; case IWL_BT_COEX_TRAFFIC_LOAD_HIGH: case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: @@ -1464,19 +1363,12 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm, goto out; } break; - case IWL_SISO_SWITCH_MIMO2_AB: - case IWL_SISO_SWITCH_MIMO2_AC: - case IWL_SISO_SWITCH_MIMO2_BC: + case IWL_SISO_SWITCH_MIMO2: IWL_DEBUG_RATE(mvm, "LQ: SISO switch to MIMO2\n"); memcpy(search_tbl, tbl, sz); search_tbl->is_SGI = 0; - if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB) - search_tbl->ant_type = ANT_AB; - else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC) - search_tbl->ant_type = ANT_AC; - else - search_tbl->ant_type = ANT_BC; + search_tbl->ant_type = ANT_AB; if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type)) @@ -1517,24 +1409,11 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm, index, is_green); update_search_tbl_counter = 1; goto out; - case IWL_SISO_SWITCH_MIMO3_ABC: - IWL_DEBUG_RATE(mvm, "LQ: SISO switch to MIMO3\n"); - memcpy(search_tbl, tbl, sz); - search_tbl->is_SGI = 0; - search_tbl->ant_type = ANT_ABC; - - if (!rs_is_valid_ant(valid_tx_ant, - search_tbl->ant_type)) - break; - - ret = rs_switch_to_mimo3(mvm, lq_sta, sta, - search_tbl, index); - if (!ret) - goto out; - break; + default: + WARN_ON_ONCE(1); } tbl->action++; - if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC) + if (tbl->action > IWL_SISO_SWITCH_GI) tbl->action = IWL_SISO_SWITCH_ANTENNA1; if (tbl->action == start_action) @@ -1546,7 +1425,7 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm, out: lq_sta->search_better_tbl = 1; tbl->action++; - if (tbl->action > IWL_SISO_SWITCH_MIMO3_ABC) + if (tbl->action > IWL_SISO_SWITCH_GI) tbl->action = IWL_SISO_SWITCH_ANTENNA1; if (update_search_tbl_counter) search_tbl->action = tbl->action; @@ -1587,8 +1466,7 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm, break; case IWL_BT_COEX_TRAFFIC_LOAD_LOW: /* avoid antenna B unless MIMO */ - if (tbl->action == IWL_MIMO2_SWITCH_SISO_B || - tbl->action == IWL_MIMO2_SWITCH_SISO_C) + if (tbl->action == IWL_MIMO2_SWITCH_SISO_B) tbl->action = IWL_MIMO2_SWITCH_SISO_A; break; default: @@ -1621,7 +1499,6 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm, break; case IWL_MIMO2_SWITCH_SISO_A: case IWL_MIMO2_SWITCH_SISO_B: - case IWL_MIMO2_SWITCH_SISO_C: IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to SISO\n"); /* Set up new search table for SISO */ @@ -1629,10 +1506,8 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm, if (tbl->action == IWL_MIMO2_SWITCH_SISO_A) search_tbl->ant_type = ANT_A; - else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B) + else /* tbl->action == IWL_MIMO2_SWITCH_SISO_B */ search_tbl->ant_type = ANT_B; - else - search_tbl->ant_type = ANT_C; if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type)) @@ -1675,26 +1550,11 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm, index, is_green); update_search_tbl_counter = 1; goto 
out; - - case IWL_MIMO2_SWITCH_MIMO3_ABC: - IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to MIMO3\n"); - memcpy(search_tbl, tbl, sz); - search_tbl->is_SGI = 0; - search_tbl->ant_type = ANT_ABC; - - if (!rs_is_valid_ant(valid_tx_ant, - search_tbl->ant_type)) - break; - - ret = rs_switch_to_mimo3(mvm, lq_sta, sta, - search_tbl, index); - if (!ret) - goto out; - - break; + default: + WARN_ON_ONCE(1); } tbl->action++; - if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC) + if (tbl->action > IWL_MIMO2_SWITCH_GI) tbl->action = IWL_MIMO2_SWITCH_ANTENNA1; if (tbl->action == start_action) @@ -1705,7 +1565,7 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm, out: lq_sta->search_better_tbl = 1; tbl->action++; - if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC) + if (tbl->action > IWL_MIMO2_SWITCH_GI) tbl->action = IWL_MIMO2_SWITCH_ANTENNA1; if (update_search_tbl_counter) search_tbl->action = tbl->action; @@ -1714,171 +1574,6 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm, } /* - * Try to switch to new modulation mode from MIMO3 - */ -static int rs_move_mimo3_to_other(struct iwl_mvm *mvm, - struct iwl_lq_sta *lq_sta, - struct ieee80211_sta *sta, int index) -{ - s8 is_green = lq_sta->is_green; - struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); - struct iwl_scale_tbl_info *search_tbl = - &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); - struct iwl_rate_scale_data *window = &(tbl->win[index]); - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - u32 sz = (sizeof(struct iwl_scale_tbl_info) - - (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); - u8 start_action; - u8 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw); - u8 tx_chains_num = num_of_ant(valid_tx_ant); - int ret; - u8 update_search_tbl_counter = 0; - - switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) { - case IWL_BT_COEX_TRAFFIC_LOAD_NONE: - /* nothing */ - break; - case IWL_BT_COEX_TRAFFIC_LOAD_HIGH: - case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: - /* avoid antenna B and MIMO */ - if (tbl->action != IWL_MIMO3_SWITCH_SISO_A) - tbl->action = IWL_MIMO3_SWITCH_SISO_A; - break; - case IWL_BT_COEX_TRAFFIC_LOAD_LOW: - /* avoid antenna B unless MIMO */ - if (tbl->action == IWL_MIMO3_SWITCH_SISO_B || - tbl->action == IWL_MIMO3_SWITCH_SISO_C) - tbl->action = IWL_MIMO3_SWITCH_SISO_A; - break; - default: - IWL_ERR(mvm, "Invalid BT load %d", - BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)); - break; - } - - start_action = tbl->action; - while (1) { - lq_sta->action_counter++; - switch (tbl->action) { - case IWL_MIMO3_SWITCH_ANTENNA1: - case IWL_MIMO3_SWITCH_ANTENNA2: - IWL_DEBUG_RATE(mvm, "LQ: MIMO3 toggle Antennas\n"); - - if (tx_chains_num <= 3) - break; - - if (window->success_ratio >= IWL_RS_GOOD_RATIO) - break; - - memcpy(search_tbl, tbl, sz); - if (rs_toggle_antenna(valid_tx_ant, - &search_tbl->current_rate, - search_tbl)) - goto out; - break; - case IWL_MIMO3_SWITCH_SISO_A: - case IWL_MIMO3_SWITCH_SISO_B: - case IWL_MIMO3_SWITCH_SISO_C: - IWL_DEBUG_RATE(mvm, "LQ: MIMO3 switch to SISO\n"); - - /* Set up new search table for SISO */ - memcpy(search_tbl, tbl, sz); - - if (tbl->action == IWL_MIMO3_SWITCH_SISO_A) - search_tbl->ant_type = ANT_A; - else if (tbl->action == IWL_MIMO3_SWITCH_SISO_B) - search_tbl->ant_type = ANT_B; - else - search_tbl->ant_type = ANT_C; - - if (!rs_is_valid_ant(valid_tx_ant, - search_tbl->ant_type)) - break; - - ret = rs_switch_to_siso(mvm, lq_sta, sta, - search_tbl, index); - if (!ret) - goto out; - - break; - - case IWL_MIMO3_SWITCH_MIMO2_AB: - case IWL_MIMO3_SWITCH_MIMO2_AC: - case 
IWL_MIMO3_SWITCH_MIMO2_BC: - IWL_DEBUG_RATE(mvm, "LQ: MIMO3 switch to MIMO2\n"); - - memcpy(search_tbl, tbl, sz); - search_tbl->is_SGI = 0; - if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AB) - search_tbl->ant_type = ANT_AB; - else if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AC) - search_tbl->ant_type = ANT_AC; - else - search_tbl->ant_type = ANT_BC; - - if (!rs_is_valid_ant(valid_tx_ant, - search_tbl->ant_type)) - break; - - ret = rs_switch_to_mimo2(mvm, lq_sta, sta, - search_tbl, index); - if (!ret) - goto out; - - break; - - case IWL_MIMO3_SWITCH_GI: - if (!tbl->is_ht40 && !(ht_cap->cap & - IEEE80211_HT_CAP_SGI_20)) - break; - if (tbl->is_ht40 && !(ht_cap->cap & - IEEE80211_HT_CAP_SGI_40)) - break; - - IWL_DEBUG_RATE(mvm, "LQ: MIMO3 toggle SGI/NGI\n"); - - /* Set up new search table for MIMO */ - memcpy(search_tbl, tbl, sz); - search_tbl->is_SGI = !tbl->is_SGI; - rs_set_expected_tpt_table(lq_sta, search_tbl); - /* - * If active table already uses the fastest possible - * modulation (dual stream with short guard interval), - * and it's working well, there's no need to look - * for a better type of modulation! - */ - if (tbl->is_SGI) { - s32 tpt = lq_sta->last_tpt / 100; - if (tpt >= search_tbl->expected_tpt[index]) - break; - } - search_tbl->current_rate = - rate_n_flags_from_tbl(mvm, search_tbl, - index, is_green); - update_search_tbl_counter = 1; - goto out; - } - tbl->action++; - if (tbl->action > IWL_MIMO3_SWITCH_GI) - tbl->action = IWL_MIMO3_SWITCH_ANTENNA1; - - if (tbl->action == start_action) - break; - } - search_tbl->lq_type = LQ_NONE; - return 0; - out: - lq_sta->search_better_tbl = 1; - tbl->action++; - if (tbl->action > IWL_MIMO3_SWITCH_GI) - tbl->action = IWL_MIMO3_SWITCH_ANTENNA1; - if (update_search_tbl_counter) - search_tbl->action = tbl->action; - - return 0; -} - -/* * Check whether we should continue using same modulation mode, or * begin search for a new mode, based on: * 1) # tx successes or failures while using this mode @@ -2284,8 +1979,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm, scale_action = 0; if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= - IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && - (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) { + IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && (is_mimo(tbl->lq_type))) { if (lq_sta->last_bt_traffic > BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) { /* @@ -2302,8 +1996,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm, BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD); if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= - IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && - (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) { + IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && is_mimo(tbl->lq_type)) { /* search for a new modulation */ rs_stay_in_table(lq_sta, true); goto lq_update; @@ -2363,7 +2056,7 @@ lq_update: else if (is_mimo2(tbl->lq_type)) rs_move_mimo2_to_other(mvm, lq_sta, sta, index); else - rs_move_mimo3_to_other(mvm, lq_sta, sta, index); + WARN_ON_ONCE(1); /* If new "search" mode was selected, set up in uCode table */ if (lq_sta->search_better_tbl) { @@ -2528,11 +2221,10 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta, rate_idx -= IWL_FIRST_OFDM_RATE; /* 6M and 9M shared same MCS index */ rate_idx = (rate_idx > 0) ? 
(rate_idx - 1) : 0; + WARN_ON_ONCE(rs_extract_rate(lq_sta->last_rate_n_flags) >= + IWL_RATE_MIMO3_6M_PLCP); if (rs_extract_rate(lq_sta->last_rate_n_flags) >= - IWL_RATE_MIMO3_6M_PLCP) - rate_idx = rate_idx + (2 * MCS_INDEX_PER_STREAM); - else if (rs_extract_rate(lq_sta->last_rate_n_flags) >= - IWL_RATE_MIMO2_6M_PLCP) + IWL_RATE_MIMO2_6M_PLCP) rate_idx = rate_idx + MCS_INDEX_PER_STREAM; info->control.rates[0].flags = IEEE80211_TX_RC_MCS; if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK) @@ -2631,16 +2323,10 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, lq_sta->active_mimo2_rate &= ~((u16)0x2); lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE; - lq_sta->active_mimo3_rate = ht_cap->mcs.rx_mask[2] << 1; - lq_sta->active_mimo3_rate |= ht_cap->mcs.rx_mask[2] & 0x1; - lq_sta->active_mimo3_rate &= ~((u16)0x2); - lq_sta->active_mimo3_rate <<= IWL_FIRST_OFDM_RATE; - IWL_DEBUG_RATE(mvm, - "SISO-RATE=%X MIMO2-RATE=%X MIMO3-RATE=%X\n", + "SISO-RATE=%X MIMO2-RATE=%X\n", lq_sta->active_siso_rate, - lq_sta->active_mimo2_rate, - lq_sta->active_mimo3_rate); + lq_sta->active_mimo2_rate); /* These values will be overridden later */ lq_sta->lq.single_stream_ant_msk = @@ -2903,8 +2589,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file, (is_legacy(tbl->lq_type)) ? "legacy" : "HT"); if (is_Ht(tbl->lq_type)) { desc += sprintf(buff+desc, " %s", - (is_siso(tbl->lq_type)) ? "SISO" : - ((is_mimo2(tbl->lq_type)) ? "MIMO2" : "MIMO3")); + (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2"); desc += sprintf(buff+desc, " %s", (tbl->is_ht40) ? "40MHz" : "20MHz"); desc += sprintf(buff+desc, " %s %s %s\n", diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h index 9ac7c0b..335cf16 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/iwlwifi/mvm/rs.h @@ -38,7 +38,6 @@ struct iwl_rs_rate_info { u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */ u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */ - u8 plcp_mimo3; /* uCode API: IWL_RATE_MIMO3_6M_PLCP, etc. 
*/ u8 prev_rs; /* previous rate used in rs algo */ u8 next_rs; /* next rate used in rs algo */ }; @@ -143,47 +142,22 @@ enum { #define IWL_LEGACY_SWITCH_ANTENNA1 0 #define IWL_LEGACY_SWITCH_ANTENNA2 1 #define IWL_LEGACY_SWITCH_SISO 2 -#define IWL_LEGACY_SWITCH_MIMO2_AB 3 -#define IWL_LEGACY_SWITCH_MIMO2_AC 4 -#define IWL_LEGACY_SWITCH_MIMO2_BC 5 -#define IWL_LEGACY_SWITCH_MIMO3_ABC 6 +#define IWL_LEGACY_SWITCH_MIMO2 3 /* possible actions when in siso mode */ #define IWL_SISO_SWITCH_ANTENNA1 0 #define IWL_SISO_SWITCH_ANTENNA2 1 -#define IWL_SISO_SWITCH_MIMO2_AB 2 -#define IWL_SISO_SWITCH_MIMO2_AC 3 -#define IWL_SISO_SWITCH_MIMO2_BC 4 -#define IWL_SISO_SWITCH_GI 5 -#define IWL_SISO_SWITCH_MIMO3_ABC 6 - +#define IWL_SISO_SWITCH_MIMO2 2 +#define IWL_SISO_SWITCH_GI 3 /* possible actions when in mimo mode */ #define IWL_MIMO2_SWITCH_ANTENNA1 0 #define IWL_MIMO2_SWITCH_ANTENNA2 1 #define IWL_MIMO2_SWITCH_SISO_A 2 #define IWL_MIMO2_SWITCH_SISO_B 3 -#define IWL_MIMO2_SWITCH_SISO_C 4 -#define IWL_MIMO2_SWITCH_GI 5 -#define IWL_MIMO2_SWITCH_MIMO3_ABC 6 - - -/* possible actions when in mimo3 mode */ -#define IWL_MIMO3_SWITCH_ANTENNA1 0 -#define IWL_MIMO3_SWITCH_ANTENNA2 1 -#define IWL_MIMO3_SWITCH_SISO_A 2 -#define IWL_MIMO3_SWITCH_SISO_B 3 -#define IWL_MIMO3_SWITCH_SISO_C 4 -#define IWL_MIMO3_SWITCH_MIMO2_AB 5 -#define IWL_MIMO3_SWITCH_MIMO2_AC 6 -#define IWL_MIMO3_SWITCH_MIMO2_BC 7 -#define IWL_MIMO3_SWITCH_GI 8 - - -#define IWL_MAX_11N_MIMO3_SEARCH IWL_MIMO3_SWITCH_GI -#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_MIMO3_ABC +#define IWL_MIMO2_SWITCH_GI 4 -/*FIXME:RS:add possible actions for MIMO3*/ +#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI #define IWL_ACTION_LIMIT 3 /* # possible actions */ @@ -218,15 +192,13 @@ enum iwl_table_type { LQ_A, LQ_SISO, /* high-throughput types */ LQ_MIMO2, - LQ_MIMO3, LQ_MAX, }; #define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A)) #define is_siso(tbl) ((tbl) == LQ_SISO) #define is_mimo2(tbl) ((tbl) == LQ_MIMO2) -#define is_mimo3(tbl) ((tbl) == LQ_MIMO3) -#define is_mimo(tbl) (is_mimo2(tbl) || is_mimo3(tbl)) +#define is_mimo(tbl) is_mimo2(tbl) #define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl)) #define is_a_band(tbl) ((tbl) == LQ_A) #define is_g_and(tbl) ((tbl) == LQ_G) @@ -298,7 +270,6 @@ struct iwl_lq_sta { u16 active_legacy_rate; u16 active_siso_rate; u16 active_mimo2_rate; - u16 active_mimo3_rate; s8 max_rate_idx; /* Max rate set by user */ u8 missed_rate_counter; -- cgit v0.10.2 From d8eb741eb374804e864751c7f3919ae50321d831 Mon Sep 17 00:00:00 2001 From: Antonio Quartulli Date: Fri, 9 Aug 2013 18:58:32 +0200 Subject: mac80211: ibss - do not scan if not needed when creating an IBSS In some cases mac80211 will scan before creating an IBSS even if bssid and frequency have been forced by the user. This is not needed and leads only to a delay in the IBSS establishment phase. Immediately create the cell if both bssid and frequency (and fixed_freq is set) have been specified. 
Signed-off-by: Antonio Quartulli Signed-off-by: Johannes Berg diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index e08387c..79e294e 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -891,6 +891,17 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata) return; } + /* if a fixed bssid and a fixed freq have been provided create the IBSS + * directly and do not waste time scanning + */ + if (ifibss->fixed_bssid && ifibss->fixed_channel) { + sdata_info(sdata, "Created IBSS using preconfigured BSSID %pM\n", + bssid); + ieee80211_sta_create_ibss(sdata); + return; + } + + ibss_dbg(sdata, "sta_find_ibss: did not try to join ibss\n"); /* Selected IBSS not found in current scan results - try to scan */ -- cgit v0.10.2 From d1e2586f484dfc36eee2b2d3a6c6c77be67ca492 Mon Sep 17 00:00:00 2001 From: Bing Zhao Date: Fri, 9 Aug 2013 21:09:06 -0700 Subject: mwifiex: fix build error when CONFIG_PM is not set config: make ARCH=m68k allmodconfig All error/warnings: drivers/net/wireless/mwifiex/cfg80211.c: In function 'mwifiex_fill_coalesce_rule_info': >> drivers/net/wireless/mwifiex/cfg80211.c:2493:3: error: implicit declaration of function 'mwifiex_is_pattern_supported' [-Werror=implicit-function-declaration] drivers/net/wireless/mwifiex/cfg80211.c: At top level: drivers/net/wireless/mwifiex/cfg80211.c:2537:12: warning: 'mwifiex_cfg80211_set_coalesce' defined but not used [-Wunused-function] cc1: some warnings being treated as errors Reported-by: kbuild test robot Signed-off-by: Bing Zhao Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index 07d2318..ca149ae 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -2307,7 +2307,6 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) } EXPORT_SYMBOL_GPL(mwifiex_del_virtual_intf); -#ifdef CONFIG_PM static bool mwifiex_is_pattern_supported(struct cfg80211_pkt_pattern *pat, s8 *byte_seq, u8 max_byte_seq) @@ -2338,6 +2337,7 @@ mwifiex_is_pattern_supported(struct cfg80211_pkt_pattern *pat, s8 *byte_seq, return true; } +#ifdef CONFIG_PM static int mwifiex_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wowlan) { @@ -2601,8 +2601,8 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = { .suspend = mwifiex_cfg80211_suspend, .resume = mwifiex_cfg80211_resume, .set_wakeup = mwifiex_cfg80211_set_wakeup, - .set_coalesce = mwifiex_cfg80211_set_coalesce, #endif + .set_coalesce = mwifiex_cfg80211_set_coalesce, }; #ifdef CONFIG_PM -- cgit v0.10.2 From 0ef71ee1a5b92c038abefd8991d5368e6031d7de Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Wed, 7 Aug 2013 19:12:34 +0200 Subject: netfilter: ctnetlink: refactor ctnetlink_create_expect This patch refactors ctnetlink_create_expect by spliting it in two chunks. As a result, we have a new function ctnetlink_alloc_expect to allocate and to setup the expectation from ctnetlink. 
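The new ctnetlink_alloc_expect follows the usual kernel convention for allocation helpers: failure is reported through the returned pointer rather than a separate status variable. A minimal sketch of that idiom, with hypothetical demo_* names standing in for the real expectation code:

    #include <linux/err.h>
    #include <linux/errno.h>
    #include <linux/slab.h>

    struct demo_exp {
            int class;
    };

    /* allocation helper: encode the errno in the returned pointer */
    static struct demo_exp *demo_alloc_exp(int class, int class_max)
    {
            struct demo_exp *exp;

            if (class > class_max)
                    return ERR_PTR(-EINVAL);

            exp = kzalloc(sizeof(*exp), GFP_KERNEL);
            if (!exp)
                    return ERR_PTR(-ENOMEM);

            exp->class = class;
            return exp;
    }

    /* caller: recover the errno with IS_ERR()/PTR_ERR() */
    static int demo_create_exp(int class, int class_max)
    {
            struct demo_exp *exp = demo_alloc_exp(class, class_max);

            if (IS_ERR(exp))
                    return PTR_ERR(exp);

            /* ... registration of the expectation would go here ... */
            kfree(exp);
            return 0;
    }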
Signed-off-by: Pablo Neira Ayuso diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index e842c0d..9aaa68b 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -2735,76 +2735,26 @@ ctnetlink_parse_expect_nat(const struct nlattr *attr, #endif } -static int -ctnetlink_create_expect(struct net *net, u16 zone, - const struct nlattr * const cda[], - u_int8_t u3, - u32 portid, int report) +static struct nf_conntrack_expect * +ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct, + struct nf_conntrack_helper *helper, + struct nf_conntrack_tuple *tuple, + struct nf_conntrack_tuple *mask) { - struct nf_conntrack_tuple tuple, mask, master_tuple; - struct nf_conntrack_tuple_hash *h = NULL; + u_int32_t class = 0; struct nf_conntrack_expect *exp; - struct nf_conn *ct; struct nf_conn_help *help; - struct nf_conntrack_helper *helper = NULL; - u_int32_t class = 0; - int err = 0; - - /* caller guarantees that those three CTA_EXPECT_* exist */ - err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3); - if (err < 0) - return err; - err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3); - if (err < 0) - return err; - err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3); - if (err < 0) - return err; - - /* Look for master conntrack of this expectation */ - h = nf_conntrack_find_get(net, zone, &master_tuple); - if (!h) - return -ENOENT; - ct = nf_ct_tuplehash_to_ctrack(h); - - /* Look for helper of this expectation */ - if (cda[CTA_EXPECT_HELP_NAME]) { - const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]); - - helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct), - nf_ct_protonum(ct)); - if (helper == NULL) { -#ifdef CONFIG_MODULES - if (request_module("nfct-helper-%s", helpname) < 0) { - err = -EOPNOTSUPP; - goto out; - } - - helper = __nf_conntrack_helper_find(helpname, - nf_ct_l3num(ct), - nf_ct_protonum(ct)); - if (helper) { - err = -EAGAIN; - goto out; - } -#endif - err = -EOPNOTSUPP; - goto out; - } - } + int err; if (cda[CTA_EXPECT_CLASS] && helper) { class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS])); - if (class > helper->expect_class_max) { - err = -EINVAL; - goto out; - } + if (class > helper->expect_class_max) + return ERR_PTR(-EINVAL); } exp = nf_ct_expect_alloc(ct); - if (!exp) { - err = -ENOMEM; - goto out; - } + if (!exp) + return ERR_PTR(-ENOMEM); + help = nfct_help(ct); if (!help) { if (!cda[CTA_EXPECT_TIMEOUT]) { @@ -2842,21 +2792,89 @@ ctnetlink_create_expect(struct net *net, u16 zone, exp->class = class; exp->master = ct; exp->helper = helper; - memcpy(&exp->tuple, &tuple, sizeof(struct nf_conntrack_tuple)); - memcpy(&exp->mask.src.u3, &mask.src.u3, sizeof(exp->mask.src.u3)); - exp->mask.src.u.all = mask.src.u.all; + exp->tuple = *tuple; + exp->mask.src.u3 = mask->src.u3; + exp->mask.src.u.all = mask->src.u.all; if (cda[CTA_EXPECT_NAT]) { err = ctnetlink_parse_expect_nat(cda[CTA_EXPECT_NAT], - exp, u3); + exp, nf_ct_l3num(ct)); if (err < 0) goto err_out; } - err = nf_ct_expect_related_report(exp, portid, report); + return exp; err_out: nf_ct_expect_put(exp); -out: - nf_ct_put(nf_ct_tuplehash_to_ctrack(h)); + return ERR_PTR(err); +} + +static int +ctnetlink_create_expect(struct net *net, u16 zone, + const struct nlattr * const cda[], + u_int8_t u3, u32 portid, int report) +{ + struct nf_conntrack_tuple tuple, mask, master_tuple; + struct nf_conntrack_tuple_hash *h = NULL; + struct nf_conntrack_helper *helper = NULL; + struct 
nf_conntrack_expect *exp; + struct nf_conn *ct; + int err; + + /* caller guarantees that those three CTA_EXPECT_* exist */ + err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3); + if (err < 0) + return err; + err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3); + if (err < 0) + return err; + err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3); + if (err < 0) + return err; + + /* Look for master conntrack of this expectation */ + h = nf_conntrack_find_get(net, zone, &master_tuple); + if (!h) + return -ENOENT; + ct = nf_ct_tuplehash_to_ctrack(h); + + if (cda[CTA_EXPECT_HELP_NAME]) { + const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]); + + helper = __nf_conntrack_helper_find(helpname, u3, + nf_ct_protonum(ct)); + if (helper == NULL) { +#ifdef CONFIG_MODULES + if (request_module("nfct-helper-%s", helpname) < 0) { + err = -EOPNOTSUPP; + goto err_ct; + } + helper = __nf_conntrack_helper_find(helpname, u3, + nf_ct_protonum(ct)); + if (helper) { + err = -EAGAIN; + goto err_ct; + } +#endif + err = -EOPNOTSUPP; + goto err_ct; + } + } + + exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask); + if (IS_ERR(exp)) { + err = PTR_ERR(exp); + goto err_ct; + } + + err = nf_ct_expect_related_report(exp, portid, report); + if (err < 0) + goto err_exp; + + return 0; +err_exp: + nf_ct_expect_put(exp); +err_ct: + nf_ct_put(ct); return err; } -- cgit v0.10.2 From bd0779370588386e4a67ba5d0b176cfded8e6a53 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Wed, 7 Aug 2013 18:13:20 +0200 Subject: netfilter: nfnetlink_queue: allow to attach expectations to conntracks This patch adds the capability to attach expectations via nfnetlink_queue. This is required by conntrack helpers that trigger expectations based on the first packet seen like the TFTP and the DHCPv6 user-space helpers. 
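From userspace the expectation rides on the verdict: next to NFQA_CT, the verdict message may now carry a nested NFQA_EXP attribute using the familiar CTA_EXPECT_* layout (at minimum CTA_EXPECT_TUPLE and CTA_EXPECT_MASK; per the patch, NFQA_EXP is only acted on when NFQA_CT is present as well). A rough libmnl sketch for the TFTP case; the demo_put_expect helper, its parameters and the omitted mask and error handling are illustrative assumptions, not part of the patch:

    #include <stdint.h>
    #include <netinet/in.h>
    #include <libmnl/libmnl.h>
    #include <linux/netfilter/nfnetlink_queue.h>
    #include <linux/netfilter/nfnetlink_conntrack.h>

    /* Append an NFQA_EXP nest describing the expected TFTP data flow.
     * Addresses and the client port are already in network byte order. */
    static void demo_put_expect(struct nlmsghdr *nlh, uint32_t srv_ip,
                                uint32_t cli_ip, uint16_t cli_port)
    {
            struct nlattr *exp, *tuple, *ip, *proto;

            exp = mnl_attr_nest_start(nlh, NFQA_EXP);

            tuple = mnl_attr_nest_start(nlh, CTA_EXPECT_TUPLE);
            ip = mnl_attr_nest_start(nlh, CTA_TUPLE_IP);
            mnl_attr_put_u32(nlh, CTA_IP_V4_SRC, srv_ip);
            mnl_attr_put_u32(nlh, CTA_IP_V4_DST, cli_ip);
            mnl_attr_nest_end(nlh, ip);
            proto = mnl_attr_nest_start(nlh, CTA_TUPLE_PROTO);
            mnl_attr_put_u8(nlh, CTA_PROTO_NUM, IPPROTO_UDP);
            mnl_attr_put_u16(nlh, CTA_PROTO_DST_PORT, cli_port);
            mnl_attr_nest_end(nlh, proto);
            mnl_attr_nest_end(nlh, tuple);

            /* a CTA_EXPECT_MASK nest with the same layout would follow here */

            mnl_attr_nest_end(nlh, exp);
    }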
Signed-off-by: Pablo Neira Ayuso diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 655d5d1..e2cf786be 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -325,6 +325,8 @@ struct nfq_ct_hook { size_t (*build_size)(const struct nf_conn *ct); int (*build)(struct sk_buff *skb, struct nf_conn *ct); int (*parse)(const struct nlattr *attr, struct nf_conn *ct); + int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct, + u32 portid, u32 report); }; extern struct nfq_ct_hook __rcu *nfq_ct_hook; diff --git a/include/net/netfilter/nfnetlink_queue.h b/include/net/netfilter/nfnetlink_queue.h index 86267a5..aff88ba 100644 --- a/include/net/netfilter/nfnetlink_queue.h +++ b/include/net/netfilter/nfnetlink_queue.h @@ -15,6 +15,8 @@ int nfqnl_ct_put(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo); void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, int diff); +int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr, + u32 portid, u32 report); #else inline struct nf_conn * nfqnl_ct_get(struct sk_buff *entskb, size_t *size, enum ip_conntrack_info *ctinfo) @@ -39,5 +41,11 @@ inline void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, int diff) { } + +inline int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr, + u32 portid, u32 report) +{ + return 0; +} #endif /* NF_CONNTRACK */ #endif diff --git a/include/uapi/linux/netfilter/nfnetlink_queue.h b/include/uapi/linux/netfilter/nfnetlink_queue.h index 3a9b921..0132bad 100644 --- a/include/uapi/linux/netfilter/nfnetlink_queue.h +++ b/include/uapi/linux/netfilter/nfnetlink_queue.h @@ -46,6 +46,7 @@ enum nfqnl_attr_type { NFQA_CT_INFO, /* enum ip_conntrack_info */ NFQA_CAP_LEN, /* __u32 length of captured packet */ NFQA_SKB_INFO, /* __u32 skb meta information */ + NFQA_EXP, /* nf_conntrack_netlink.h */ __NFQA_MAX }; diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 9aaa68b..fa61fea 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -1987,6 +1987,27 @@ out: return err == -EAGAIN ? 
-ENOBUFS : err; } +static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = { + [CTA_EXPECT_MASTER] = { .type = NLA_NESTED }, + [CTA_EXPECT_TUPLE] = { .type = NLA_NESTED }, + [CTA_EXPECT_MASK] = { .type = NLA_NESTED }, + [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 }, + [CTA_EXPECT_ID] = { .type = NLA_U32 }, + [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING, + .len = NF_CT_HELPER_NAME_LEN - 1 }, + [CTA_EXPECT_ZONE] = { .type = NLA_U16 }, + [CTA_EXPECT_FLAGS] = { .type = NLA_U32 }, + [CTA_EXPECT_CLASS] = { .type = NLA_U32 }, + [CTA_EXPECT_NAT] = { .type = NLA_NESTED }, + [CTA_EXPECT_FN] = { .type = NLA_NUL_STRING }, +}; + +static struct nf_conntrack_expect * +ctnetlink_alloc_expect(const struct nlattr *const cda[], struct nf_conn *ct, + struct nf_conntrack_helper *helper, + struct nf_conntrack_tuple *tuple, + struct nf_conntrack_tuple *mask); + #ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT static size_t ctnetlink_nfqueue_build_size(const struct nf_conn *ct) @@ -2127,10 +2148,69 @@ ctnetlink_nfqueue_parse(const struct nlattr *attr, struct nf_conn *ct) return ret; } +static int ctnetlink_nfqueue_exp_parse(const struct nlattr * const *cda, + const struct nf_conn *ct, + struct nf_conntrack_tuple *tuple, + struct nf_conntrack_tuple *mask) +{ + int err; + + err = ctnetlink_parse_tuple(cda, tuple, CTA_EXPECT_TUPLE, + nf_ct_l3num(ct)); + if (err < 0) + return err; + + return ctnetlink_parse_tuple(cda, mask, CTA_EXPECT_MASK, + nf_ct_l3num(ct)); +} + +static int +ctnetlink_nfqueue_attach_expect(const struct nlattr *attr, struct nf_conn *ct, + u32 portid, u32 report) +{ + struct nlattr *cda[CTA_EXPECT_MAX+1]; + struct nf_conntrack_tuple tuple, mask; + struct nf_conntrack_helper *helper; + struct nf_conntrack_expect *exp; + int err; + + err = nla_parse_nested(cda, CTA_EXPECT_MAX, attr, exp_nla_policy); + if (err < 0) + return err; + + err = ctnetlink_nfqueue_exp_parse((const struct nlattr * const *)cda, + ct, &tuple, &mask); + if (err < 0) + return err; + + if (cda[CTA_EXPECT_HELP_NAME]) { + const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]); + + helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct), + nf_ct_protonum(ct)); + if (helper == NULL) + return -EOPNOTSUPP; + } + + exp = ctnetlink_alloc_expect((const struct nlattr * const *)cda, ct, + helper, &tuple, &mask); + if (IS_ERR(exp)) + return PTR_ERR(exp); + + err = nf_ct_expect_related_report(exp, portid, report); + if (err < 0) { + nf_ct_expect_put(exp); + return err; + } + + return 0; +} + static struct nfq_ct_hook ctnetlink_nfqueue_hook = { .build_size = ctnetlink_nfqueue_build_size, .build = ctnetlink_nfqueue_build, .parse = ctnetlink_nfqueue_parse, + .attach_expect = ctnetlink_nfqueue_attach_expect, }; #endif /* CONFIG_NETFILTER_NETLINK_QUEUE_CT */ @@ -2498,21 +2578,6 @@ static int ctnetlink_dump_exp_ct(struct sock *ctnl, struct sk_buff *skb, return err; } -static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = { - [CTA_EXPECT_MASTER] = { .type = NLA_NESTED }, - [CTA_EXPECT_TUPLE] = { .type = NLA_NESTED }, - [CTA_EXPECT_MASK] = { .type = NLA_NESTED }, - [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 }, - [CTA_EXPECT_ID] = { .type = NLA_U32 }, - [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING, - .len = NF_CT_HELPER_NAME_LEN - 1 }, - [CTA_EXPECT_ZONE] = { .type = NLA_U16 }, - [CTA_EXPECT_FLAGS] = { .type = NLA_U32 }, - [CTA_EXPECT_CLASS] = { .type = NLA_U32 }, - [CTA_EXPECT_NAT] = { .type = NLA_NESTED }, - [CTA_EXPECT_FN] = { .type = NLA_NUL_STRING }, -}; - static int ctnetlink_get_expect(struct sock *ctnl, struct sk_buff 
*skb, const struct nlmsghdr *nlh, diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c index ec9de12..e8c9f3b 100644 --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c @@ -859,6 +859,7 @@ static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = { [NFQA_MARK] = { .type = NLA_U32 }, [NFQA_PAYLOAD] = { .type = NLA_UNSPEC }, [NFQA_CT] = { .type = NLA_UNSPEC }, + [NFQA_EXP] = { .type = NLA_UNSPEC }, }; static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = { @@ -987,8 +988,14 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb, if (entry == NULL) return -ENOENT; - if (nfqa[NFQA_CT]) + if (nfqa[NFQA_CT]) { ct = nfqnl_ct_parse(entry->skb, nfqa[NFQA_CT], &ctinfo); + if (ct && nfqa[NFQA_EXP]) { + nfqnl_attach_expect(ct, nfqa[NFQA_EXP], + NETLINK_CB(skb).portid, + nlmsg_report(nlh)); + } + } if (nfqa[NFQA_PAYLOAD]) { u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]); diff --git a/net/netfilter/nfnetlink_queue_ct.c b/net/netfilter/nfnetlink_queue_ct.c index ab61d66..be89303 100644 --- a/net/netfilter/nfnetlink_queue_ct.c +++ b/net/netfilter/nfnetlink_queue_ct.c @@ -96,3 +96,18 @@ void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct, if ((ct->status & IPS_NAT_MASK) && diff) nfq_nat_ct->seq_adjust(skb, ct, ctinfo, diff); } + +int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr, + u32 portid, u32 report) +{ + struct nfq_ct_hook *nfq_ct; + + if (nf_ct_is_untracked(ct)) + return 0; + + nfq_ct = rcu_dereference(nfq_ct_hook); + if (nfq_ct == NULL) + return -EOPNOTSUPP; + + return nfq_ct->attach_expect(attr, ct, portid, report); +} -- cgit v0.10.2 From cbeee8c65f39045983f7ce0980254c9d66501af0 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Fri, 9 Aug 2013 18:31:21 +0100 Subject: net: asix: Staticise non-exported symbols Make functions that are only referenced from ops structures static, they do not need to be in the global namespace and sparse complains about this. Signed-off-by: Mark Brown Signed-off-by: David S. 
Miller diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c index d012203..723b387 100644 --- a/drivers/net/usb/ax88172a.c +++ b/drivers/net/usb/ax88172a.c @@ -161,7 +161,8 @@ static const struct net_device_ops ax88172a_netdev_ops = { .ndo_set_rx_mode = asix_set_multicast, }; -int ax88172a_get_settings(struct net_device *net, struct ethtool_cmd *cmd) +static int ax88172a_get_settings(struct net_device *net, + struct ethtool_cmd *cmd) { if (!net->phydev) return -ENODEV; @@ -169,7 +170,8 @@ int ax88172a_get_settings(struct net_device *net, struct ethtool_cmd *cmd) return phy_ethtool_gset(net->phydev, cmd); } -int ax88172a_set_settings(struct net_device *net, struct ethtool_cmd *cmd) +static int ax88172a_set_settings(struct net_device *net, + struct ethtool_cmd *cmd) { if (!net->phydev) return -ENODEV; @@ -177,7 +179,7 @@ int ax88172a_set_settings(struct net_device *net, struct ethtool_cmd *cmd) return phy_ethtool_sset(net->phydev, cmd); } -int ax88172a_nway_reset(struct net_device *net) +static int ax88172a_nway_reset(struct net_device *net) { if (!net->phydev) return -ENODEV; -- cgit v0.10.2 From 55d10a11aeeca48149d14fcc0b955017f3618882 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Fri, 9 Aug 2013 18:31:22 +0100 Subject: net: asix: Move declaration of ax88172a_info to shared header Ensure that the definition of ax88172a_info matches the declaration seen by users and silence sparse warnings about symbols without declarations in the global namespace by moving the declaration into the shared header asix.h. Signed-off-by: Mark Brown Signed-off-by: David S. Miller diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h index 346c032..bdaa12d 100644 --- a/drivers/net/usb/asix.h +++ b/drivers/net/usb/asix.h @@ -178,6 +178,8 @@ struct asix_common_private { struct asix_rx_fixup_info rx_fixup_info; }; +extern const struct driver_info ax88172a_info; + /* ASIX specific flags */ #define FLAG_EEPROM_MAC (1UL << 0) /* init device MAC from eeprom */ diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index b96ad4f..386a3df 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -946,8 +946,6 @@ static const struct driver_info hg20f9_info = { .data = FLAG_EEPROM_MAC, }; -extern const struct driver_info ax88172a_info; - static const struct usb_device_id products [] = { { // Linksys USB200M -- cgit v0.10.2 From 072017b41e49e2a8e8a4e0258837a614bb5daa8d Mon Sep 17 00:00:00 2001 From: Vlad Yasevich Date: Fri, 9 Aug 2013 22:05:36 -0400 Subject: net: sctp: Add rudimentary infrastructure to account for control chunks This patch adds a base infrastructure that allows SCTP to do memory accounting for control chunks. Real accounting code will follow. This patch alos fixes the following triggered bug ... [ 553.109742] kernel BUG at include/linux/skbuff.h:1813! [ 553.109766] invalid opcode: 0000 [#1] SMP [ 553.109789] Modules linked in: sctp libcrc32c rfcomm [...] 
[ 553.110259] uinput i915 i2c_algo_bit drm_kms_helper e1000e drm ptp pps_core i2c_core wmi video sunrpc [ 553.110320] CPU: 0 PID: 1636 Comm: lt-test_1_to_1_ Not tainted 3.11.0-rc3+ #2 [ 553.110350] Hardware name: LENOVO 74597D6/74597D6, BIOS 6DET60WW (3.10 ) 09/17/2009 [ 553.110381] task: ffff88020a01dd40 ti: ffff880204ed0000 task.ti: ffff880204ed0000 [ 553.110411] RIP: 0010:[] [] skb_orphan.part.9+0x4/0x6 [sctp] [ 553.110459] RSP: 0018:ffff880204ed1bb8 EFLAGS: 00010286 [ 553.110483] RAX: ffff8802086f5a40 RBX: ffff880204303300 RCX: 0000000000000000 [ 553.110487] RDX: ffff880204303c28 RSI: ffff8802086f5a40 RDI: ffff880202158000 [ 553.110487] RBP: ffff880204ed1bb8 R08: 0000000000000000 R09: 0000000000000000 [ 553.110487] R10: ffff88022f2d9a04 R11: ffff880233001600 R12: 0000000000000000 [ 553.110487] R13: ffff880204303c00 R14: ffff8802293d0000 R15: ffff880202158000 [ 553.110487] FS: 00007f31b31fe740(0000) GS:ffff88023bc00000(0000) knlGS:0000000000000000 [ 553.110487] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b [ 553.110487] CR2: 000000379980e3e0 CR3: 000000020d225000 CR4: 00000000000407f0 [ 553.110487] Stack: [ 553.110487] ffff880204ed1ca8 ffffffffa068d7fc 0000000000000000 0000000000000000 [ 553.110487] 0000000000000000 ffff8802293d0000 ffff880202158000 ffffffff81cb7900 [ 553.110487] 0000000000000000 0000400000001c68 ffff8802086f5a40 000000000000000f [ 553.110487] Call Trace: [ 553.110487] [] sctp_sendmsg+0x6bc/0xc80 [sctp] [ 553.110487] [] ? sock_has_perm+0x75/0x90 [ 553.110487] [] inet_sendmsg+0x63/0xb0 [ 553.110487] [] ? selinux_socket_sendmsg+0x23/0x30 [ 553.110487] [] sock_sendmsg+0xa6/0xd0 [ 553.110487] [] ? _raw_spin_unlock_bh+0x15/0x20 [ 553.110487] [] SYSC_sendto+0x128/0x180 [ 553.110487] [] ? SYSC_connect+0xdb/0x100 [ 553.110487] [] ? sctp_inet_listen+0x71/0x1f0 [sctp] [ 553.110487] [] SyS_sendto+0xe/0x10 [ 553.110487] [] system_call_fastpath+0x16/0x1b [ 553.110487] Code: e0 48 c7 c7 00 22 6a a0 e8 67 a3 f0 e0 48 c7 [...] [ 553.110487] RIP [] skb_orphan.part.9+0x4/0x6 [sctp] [ 553.110487] RSP [ 553.121578] ---[ end trace 46c20c5903ef5be2 ]--- The approach taken here is to split data and control chunks creation a bit. Data chunks already have memory accounting so noting needs to happen. For control chunks, add stubs handlers. Signed-off-by: Vlad Yasevich Signed-off-by: David S. 
Miller diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 9c13133..01e9783 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -62,8 +62,12 @@ #include #include -static struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc, - __u8 type, __u8 flags, int paylen); +static struct sctp_chunk *sctp_make_control(const struct sctp_association *asoc, + __u8 type, __u8 flags, int paylen); +static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc, + __u8 flags, int paylen); +static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc, + __u8 type, __u8 flags, int paylen); static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep, const struct sctp_association *asoc, const struct sctp_chunk *init_chunk, @@ -76,6 +80,28 @@ static int sctp_process_param(struct sctp_association *asoc, static void *sctp_addto_param(struct sctp_chunk *chunk, int len, const void *data); +/* Control chunk destructor */ +static void sctp_control_release_owner(struct sk_buff *skb) +{ + /*TODO: do memory release */ +} + +static void sctp_control_set_owner_w(struct sctp_chunk *chunk) +{ + struct sctp_association *asoc = chunk->asoc; + struct sk_buff *skb = chunk->skb; + + /* TODO: properly account for control chunks. + * To do it right we'll need: + * 1) endpoint if association isn't known. + * 2) proper memory accounting. + * + * For now don't do anything for now. + */ + skb->sk = asoc ? asoc->base.sk : NULL; + skb->destructor = sctp_control_release_owner; +} + /* What was the inbound interface for this chunk? */ int sctp_chunk_iif(const struct sctp_chunk *chunk) { @@ -290,7 +316,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, * PLEASE DO NOT FIXME [This version does not support Host Name.] */ - retval = sctp_make_chunk(asoc, SCTP_CID_INIT, 0, chunksize); + retval = sctp_make_control(asoc, SCTP_CID_INIT, 0, chunksize); if (!retval) goto nodata; @@ -437,7 +463,7 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, num_ext); /* Now allocate and fill out the chunk. */ - retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize); + retval = sctp_make_control(asoc, SCTP_CID_INIT_ACK, 0, chunksize); if (!retval) goto nomem_chunk; @@ -542,7 +568,7 @@ struct sctp_chunk *sctp_make_cookie_echo(const struct sctp_association *asoc, cookie_len = asoc->peer.cookie_len; /* Build a cookie echo chunk. 
*/ - retval = sctp_make_chunk(asoc, SCTP_CID_COOKIE_ECHO, 0, cookie_len); + retval = sctp_make_control(asoc, SCTP_CID_COOKIE_ECHO, 0, cookie_len); if (!retval) goto nodata; retval->subh.cookie_hdr = @@ -587,7 +613,7 @@ struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc, { struct sctp_chunk *retval; - retval = sctp_make_chunk(asoc, SCTP_CID_COOKIE_ACK, 0, 0); + retval = sctp_make_control(asoc, SCTP_CID_COOKIE_ACK, 0, 0); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * @@ -635,8 +661,8 @@ struct sctp_chunk *sctp_make_cwr(const struct sctp_association *asoc, sctp_cwrhdr_t cwr; cwr.lowest_tsn = htonl(lowest_tsn); - retval = sctp_make_chunk(asoc, SCTP_CID_ECN_CWR, 0, - sizeof(sctp_cwrhdr_t)); + retval = sctp_make_control(asoc, SCTP_CID_ECN_CWR, 0, + sizeof(sctp_cwrhdr_t)); if (!retval) goto nodata; @@ -669,8 +695,8 @@ struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc, sctp_ecnehdr_t ecne; ecne.lowest_tsn = htonl(lowest_tsn); - retval = sctp_make_chunk(asoc, SCTP_CID_ECN_ECNE, 0, - sizeof(sctp_ecnehdr_t)); + retval = sctp_make_control(asoc, SCTP_CID_ECN_ECNE, 0, + sizeof(sctp_ecnehdr_t)); if (!retval) goto nodata; retval->subh.ecne_hdr = @@ -706,7 +732,7 @@ struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc, dp.ssn = htons(ssn); chunk_len = sizeof(dp) + data_len; - retval = sctp_make_chunk(asoc, SCTP_CID_DATA, flags, chunk_len); + retval = sctp_make_data(asoc, flags, chunk_len); if (!retval) goto nodata; @@ -753,7 +779,7 @@ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc) + sizeof(__u32) * num_dup_tsns; /* Create the chunk. */ - retval = sctp_make_chunk(asoc, SCTP_CID_SACK, 0, len); + retval = sctp_make_control(asoc, SCTP_CID_SACK, 0, len); if (!retval) goto nodata; @@ -832,8 +858,8 @@ struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc, ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map); shut.cum_tsn_ack = htonl(ctsn); - retval = sctp_make_chunk(asoc, SCTP_CID_SHUTDOWN, 0, - sizeof(sctp_shutdownhdr_t)); + retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN, 0, + sizeof(sctp_shutdownhdr_t)); if (!retval) goto nodata; @@ -851,7 +877,7 @@ struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc, { struct sctp_chunk *retval; - retval = sctp_make_chunk(asoc, SCTP_CID_SHUTDOWN_ACK, 0, 0); + retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN_ACK, 0, 0); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * @@ -880,7 +906,7 @@ struct sctp_chunk *sctp_make_shutdown_complete( */ flags |= asoc ? 
0 : SCTP_CHUNK_FLAG_T; - retval = sctp_make_chunk(asoc, SCTP_CID_SHUTDOWN_COMPLETE, flags, 0); + retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN_COMPLETE, flags, 0); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * @@ -919,7 +945,7 @@ struct sctp_chunk *sctp_make_abort(const struct sctp_association *asoc, flags = SCTP_CHUNK_FLAG_T; } - retval = sctp_make_chunk(asoc, SCTP_CID_ABORT, flags, hint); + retval = sctp_make_control(asoc, SCTP_CID_ABORT, flags, hint); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * @@ -1111,7 +1137,7 @@ struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc, struct sctp_chunk *retval; sctp_sender_hb_info_t hbinfo; - retval = sctp_make_chunk(asoc, SCTP_CID_HEARTBEAT, 0, sizeof(hbinfo)); + retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT, 0, sizeof(hbinfo)); if (!retval) goto nodata; @@ -1139,7 +1165,7 @@ struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *asoc, { struct sctp_chunk *retval; - retval = sctp_make_chunk(asoc, SCTP_CID_HEARTBEAT_ACK, 0, paylen); + retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT_ACK, 0, paylen); if (!retval) goto nodata; @@ -1171,8 +1197,8 @@ static struct sctp_chunk *sctp_make_op_error_space( { struct sctp_chunk *retval; - retval = sctp_make_chunk(asoc, SCTP_CID_ERROR, 0, - sizeof(sctp_errhdr_t) + size); + retval = sctp_make_control(asoc, SCTP_CID_ERROR, 0, + sizeof(sctp_errhdr_t) + size); if (!retval) goto nodata; @@ -1242,7 +1268,7 @@ struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc) if (unlikely(!hmac_desc)) return NULL; - retval = sctp_make_chunk(asoc, SCTP_CID_AUTH, 0, + retval = sctp_make_control(asoc, SCTP_CID_AUTH, 0, hmac_desc->hmac_len + sizeof(sctp_authhdr_t)); if (!retval) return NULL; @@ -1345,8 +1371,8 @@ const union sctp_addr *sctp_source(const struct sctp_chunk *chunk) /* Create a new chunk, setting the type and flags headers from the * arguments, reserving enough space for a 'paylen' byte payload. */ -static struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc, - __u8 type, __u8 flags, int paylen) +static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc, + __u8 type, __u8 flags, int paylen) { struct sctp_chunk *retval; sctp_chunkhdr_t *chunk_hdr; @@ -1379,14 +1405,27 @@ static struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc, if (sctp_auth_send_cid(type, asoc)) retval->auth = 1; - /* Set the skb to the belonging sock for accounting. */ - skb->sk = sk; - return retval; nodata: return NULL; } +static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc, + __u8 flags, int paylen) +{ + return _sctp_make_chunk(asoc, SCTP_CID_DATA, flags, paylen); +} + +static struct sctp_chunk *sctp_make_control(const struct sctp_association *asoc, + __u8 type, __u8 flags, int paylen) +{ + struct sctp_chunk *chunk = _sctp_make_chunk(asoc, type, flags, paylen); + + if (chunk) + sctp_control_set_owner_w(chunk); + + return chunk; +} /* Release the memory occupied by a chunk. */ static void sctp_chunk_destroy(struct sctp_chunk *chunk) @@ -2727,7 +2766,7 @@ static struct sctp_chunk *sctp_make_asconf(struct sctp_association *asoc, length += addrlen; /* Create the chunk. */ - retval = sctp_make_chunk(asoc, SCTP_CID_ASCONF, 0, length); + retval = sctp_make_control(asoc, SCTP_CID_ASCONF, 0, length); if (!retval) return NULL; @@ -2911,7 +2950,7 @@ static struct sctp_chunk *sctp_make_asconf_ack(const struct sctp_association *as int length = sizeof(asconf) + vparam_len; /* Create the chunk. 
*/ - retval = sctp_make_chunk(asoc, SCTP_CID_ASCONF_ACK, 0, length); + retval = sctp_make_control(asoc, SCTP_CID_ASCONF_ACK, 0, length); if (!retval) return NULL; @@ -3442,7 +3481,7 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc, hint = (nstreams + 1) * sizeof(__u32); - retval = sctp_make_chunk(asoc, SCTP_CID_FWD_TSN, 0, hint); + retval = sctp_make_control(asoc, SCTP_CID_FWD_TSN, 0, hint); if (!retval) return NULL; -- cgit v0.10.2 From ebd8b934e23f45ad3fc8a5a28bc5a96741a6a106 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Sat, 10 Aug 2013 15:22:58 -0700 Subject: pptp: fix byte order warnings Pptp driver has lots of byte order warnings from sparse. This was because the on-the-wire header is in network byte order (obviously) but the definition did not reflect that. Also, the address structure to user space actually put the call id in host order. Rather than break ABI compatibility, just acknowledge the existing design. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index 9785a3a..6fa5ae0 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -83,11 +83,11 @@ static const struct proto_ops pptp_ops; struct pptp_gre_header { u8 flags; u8 ver; - u16 protocol; - u16 payload_len; - u16 call_id; - u32 seq; - u32 ack; + __be16 protocol; + __be16 payload_len; + __be16 call_id; + __be32 seq; + __be32 ack; } __packed; static struct pppox_sock *lookup_chan(u16 call_id, __be32 s_addr) diff --git a/include/uapi/linux/if_pppox.h b/include/uapi/linux/if_pppox.h index e36a4ae..e128769 100644 --- a/include/uapi/linux/if_pppox.h +++ b/include/uapi/linux/if_pppox.h @@ -46,7 +46,7 @@ struct pppoe_addr { * PPTP addressing definition */ struct pptp_addr { - __be16 call_id; + __u16 call_id; struct in_addr sin_addr; }; -- cgit v0.10.2 From 40dac370efea8d5f2e4de61badf377637053fda7 Mon Sep 17 00:00:00 2001 From: Thierry Escande Date: Mon, 24 Jun 2013 12:02:28 +0200 Subject: NFC: Fix missing static declarations This patch fixes 3 sparse warnings: nfcsim.c:63:25: sparse: symbol 'wq' was not declared. nfcsim.c:484:12: sparse: symbol 'nfcsim_init' was not declared. nfcsim.c:525:13: sparse: symbol 'nfcsim_exit' was not declared. 
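Those are the standard sparse complaints for file-scope definitions that have external linkage but no declaration in any header; giving the symbols internal linkage is the whole fix. A tiny hypothetical module showing the resulting shape (the demo_* names are made up):

    #include <linux/module.h>
    #include <linux/errno.h>
    #include <linux/workqueue.h>

    /* static = internal linkage, so `make C=1` (sparse) has nothing to flag */
    static struct workqueue_struct *demo_wq;

    static int __init demo_init(void)
    {
            demo_wq = alloc_ordered_workqueue("demo", 0);
            return demo_wq ? 0 : -ENOMEM;
    }

    static void __exit demo_exit(void)
    {
            destroy_workqueue(demo_wq);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");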
Reported-by: Fengguang Wu Signed-off-by: Thierry Escande Signed-off-by: Samuel Ortiz diff --git a/drivers/nfc/nfcsim.c b/drivers/nfc/nfcsim.c index c5c30fb..9a53f13 100644 --- a/drivers/nfc/nfcsim.c +++ b/drivers/nfc/nfcsim.c @@ -60,7 +60,7 @@ struct nfcsim { static struct nfcsim *dev0; static struct nfcsim *dev1; -struct workqueue_struct *wq; +static struct workqueue_struct *wq; static void nfcsim_cleanup_dev(struct nfcsim *dev, u8 shutdown) { @@ -481,7 +481,7 @@ static void nfcsim_free_device(struct nfcsim *dev) kfree(dev); } -int __init nfcsim_init(void) +static int __init nfcsim_init(void) { int rc; @@ -522,7 +522,7 @@ exit: return rc; } -void __exit nfcsim_exit(void) +static void __exit nfcsim_exit(void) { nfcsim_cleanup_dev(dev0, 1); nfcsim_cleanup_dev(dev1, 1); -- cgit v0.10.2 From 0293ba201ddd9256552704efef5c2bbf18a78574 Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Mon, 24 Jun 2013 14:39:35 +0200 Subject: MAINTAINERS: Change the NFC subsystem status to Supported Signed-off-by: Samuel Ortiz diff --git a/MAINTAINERS b/MAINTAINERS index 1ea2d04..2116b9a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5791,7 +5791,7 @@ M: Aloisio Almeida Jr M: Samuel Ortiz L: linux-wireless@vger.kernel.org L: linux-nfc@lists.01.org (moderated for non-subscribers) -S: Maintained +S: Supported F: net/nfc/ F: include/net/nfc/ F: include/uapi/linux/nfc.h -- cgit v0.10.2 From 23402bddf9e56eecb27bbd1e5467b3b79b3dbe58 Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Mon, 12 Aug 2013 13:53:26 +0300 Subject: gianfar: Add flow control support eTSEC has Rx and Tx flow control capabilities that may be enabled through MACCFG1[Rx_Flow, Tx_Flow] bits. These bits must not be set however when eTSEC is operated in Half-Duplex mode. Unfortunately, the driver currently sets these bits unconditionally. This patch adds the proper handling of the PAUSE frame capability register bits by implementing the ethtool -A interface. When pause autoneg is enabled, the controller uses the phy's capability to negotiate PAUSE frame settings with the link partner and reconfigures its Rx_Flow and Tx_Flow settings to match the capabilities of the link partner. If pause autoneg is off, the PAUSE frame generation may be forced manually (ethtool -A). Flow control is disabled by default now. This implementation is inspired by the tg3 driver. Signed-off-by: Lutz Jaenicke Signed-off-by: Claudiu Manoil Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 3cb4647..b2c91dc 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -1016,7 +1016,14 @@ static int gfar_probe(struct platform_device *ofdev) /* We need to delay at least 3 TX clocks */ udelay(2); - tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); + tempval = 0; + if (!priv->pause_aneg_en && priv->tx_pause_en) + tempval |= MACCFG1_TX_FLOW; + if (!priv->pause_aneg_en && priv->rx_pause_en) + tempval |= MACCFG1_RX_FLOW; + /* the soft reset bit is not self-resetting, so we need to + * clear it before resuming normal operation + */ gfar_write(®s->maccfg1, tempval); /* Initialize MACCFG2. */ @@ -1460,7 +1467,7 @@ static int init_phy(struct net_device *dev) struct gfar_private *priv = netdev_priv(dev); uint gigabit_support = priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ? 
- SUPPORTED_1000baseT_Full : 0; + GFAR_SUPPORTED_GBIT : 0; phy_interface_t interface; priv->oldlink = 0; @@ -3023,6 +3030,41 @@ static irqreturn_t gfar_interrupt(int irq, void *grp_id) return IRQ_HANDLED; } +static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) +{ + struct phy_device *phydev = priv->phydev; + u32 val = 0; + + if (!phydev->duplex) + return val; + + if (!priv->pause_aneg_en) { + if (priv->tx_pause_en) + val |= MACCFG1_TX_FLOW; + if (priv->rx_pause_en) + val |= MACCFG1_RX_FLOW; + } else { + u16 lcl_adv, rmt_adv; + u8 flowctrl; + /* get link partner capabilities */ + rmt_adv = 0; + if (phydev->pause) + rmt_adv = LPA_PAUSE_CAP; + if (phydev->asym_pause) + rmt_adv |= LPA_PAUSE_ASYM; + + lcl_adv = mii_advertise_flowctrl(phydev->advertising); + + flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); + if (flowctrl & FLOW_CTRL_TX) + val |= MACCFG1_TX_FLOW; + if (flowctrl & FLOW_CTRL_RX) + val |= MACCFG1_RX_FLOW; + } + + return val; +} + /* Called every time the controller might need to be made * aware of new link state. The PHY code conveys this * information through variables in the phydev structure, and this @@ -3041,6 +3083,7 @@ static void adjust_link(struct net_device *dev) lock_tx_qs(priv); if (phydev->link) { + u32 tempval1 = gfar_read(®s->maccfg1); u32 tempval = gfar_read(®s->maccfg2); u32 ecntrl = gfar_read(®s->ecntrl); @@ -3089,6 +3132,10 @@ static void adjust_link(struct net_device *dev) priv->oldspeed = phydev->speed; } + tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); + tempval1 |= gfar_get_flowctrl_cfg(priv); + + gfar_write(®s->maccfg1, tempval1); gfar_write(®s->maccfg2, tempval); gfar_write(®s->ecntrl, ecntrl); diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index ee19f2c..46f56f3 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -146,6 +146,10 @@ extern const char gfar_driver_version[]; | SUPPORTED_Autoneg \ | SUPPORTED_MII) +#define GFAR_SUPPORTED_GBIT (SUPPORTED_1000baseT_Full \ + | SUPPORTED_Pause \ + | SUPPORTED_Asym_Pause) + /* TBI register addresses */ #define MII_TBICON 0x11 @@ -1100,7 +1104,11 @@ struct gfar_private { /* Wake-on-LAN enabled */ wol_en:1, /* Enable priorty based Tx scheduling in Hw */ - prio_sched_en:1; + prio_sched_en:1, + /* Flow control flags */ + pause_aneg_en:1, + tx_pause_en:1, + rx_pause_en:1; /* The total tx and rx ring size for the enabled queues */ unsigned int total_tx_ring_size; diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 21cd881..d3d7ede 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -535,6 +535,78 @@ static int gfar_sringparam(struct net_device *dev, return err; } +static void gfar_gpauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct gfar_private *priv = netdev_priv(dev); + + epause->autoneg = !!priv->pause_aneg_en; + epause->rx_pause = !!priv->rx_pause_en; + epause->tx_pause = !!priv->tx_pause_en; +} + +static int gfar_spauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct gfar_private *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phydev; + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 oldadv, newadv; + + if (!(phydev->supported & SUPPORTED_Pause) || + (!(phydev->supported & SUPPORTED_Asym_Pause) && + (epause->rx_pause != epause->tx_pause))) + return -EINVAL; + + priv->rx_pause_en = 
priv->tx_pause_en = 0; + if (epause->rx_pause) { + priv->rx_pause_en = 1; + + if (epause->tx_pause) { + priv->tx_pause_en = 1; + /* FLOW_CTRL_RX & TX */ + newadv = ADVERTISED_Pause; + } else /* FLOW_CTLR_RX */ + newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause; + } else if (epause->tx_pause) { + priv->tx_pause_en = 1; + /* FLOW_CTLR_TX */ + newadv = ADVERTISED_Asym_Pause; + } else + newadv = 0; + + if (epause->autoneg) + priv->pause_aneg_en = 1; + else + priv->pause_aneg_en = 0; + + oldadv = phydev->advertising & + (ADVERTISED_Pause | ADVERTISED_Asym_Pause); + if (oldadv != newadv) { + phydev->advertising &= + ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); + phydev->advertising |= newadv; + if (phydev->autoneg) + /* inform link partner of our + * new flow ctrl settings + */ + return phy_start_aneg(phydev); + + if (!epause->autoneg) { + u32 tempval; + tempval = gfar_read(®s->maccfg1); + tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); + if (priv->tx_pause_en) + tempval |= MACCFG1_TX_FLOW; + if (priv->rx_pause_en) + tempval |= MACCFG1_RX_FLOW; + gfar_write(®s->maccfg1, tempval); + } + } + + return 0; +} + int gfar_set_features(struct net_device *dev, netdev_features_t features) { struct gfar_private *priv = netdev_priv(dev); @@ -1806,6 +1878,8 @@ const struct ethtool_ops gfar_ethtool_ops = { .set_coalesce = gfar_scoalesce, .get_ringparam = gfar_gringparam, .set_ringparam = gfar_sringparam, + .get_pauseparam = gfar_gpauseparam, + .set_pauseparam = gfar_spauseparam, .get_strings = gfar_gstrings, .get_sset_count = gfar_sset_count, .get_ethtool_stats = gfar_fill_stats, -- cgit v0.10.2 From 1972b5b3a66f1b6a9df04cc91faf401354919262 Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Tue, 25 Jun 2013 12:42:54 +0200 Subject: NFC: Document secure element addition/removal netlink events Signed-off-by: Samuel Ortiz diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h index 8137dd8..40ada98 100644 --- a/include/uapi/linux/nfc.h +++ b/include/uapi/linux/nfc.h @@ -71,6 +71,11 @@ * @NFC_CMD_DISABLE_SE: Disable the physical link to a specific secure element. * @NFC_CMD_FW_DOWNLOAD: Request to Load/flash firmware, or event to inform * that some firmware was loaded + * @NFC_EVENT_SE_ADDED: Event emitted when a new secure element is discovered. + * This typically will be sent whenever a new NFC controller with either + * an embedded SE or an UICC one connected to it through SWP. + * @NFC_EVENT_SE_REMOVED: Event emitted when a secure element is removed from + * the system, as a consequence of e.g. an NFC controller being unplugged. */ enum nfc_commands { NFC_CMD_UNSPEC, -- cgit v0.10.2 From 91a32269e31282405db405b9ec9ce1a30568e4b0 Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Tue, 25 Jun 2013 16:22:08 +0200 Subject: NFC: Define secure element connectivity and transaction events The SE_CONNECTIVITY event is for an SE to request connection to e.g. a modem. The SE_TRANSACTION one is sent when an application running on a specific SE wants to notify the host CPU about the end of a transaction. Those events respectively map to the EVT_CONNECTIVITY and the EVT_TRANSACTION HCI events. Signed-off-by: Samuel Ortiz diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h index 40ada98..539d604 100644 --- a/include/uapi/linux/nfc.h +++ b/include/uapi/linux/nfc.h @@ -76,6 +76,14 @@ * an embedded SE or an UICC one connected to it through SWP. * @NFC_EVENT_SE_REMOVED: Event emitted when a secure element is removed from * the system, as a consequence of e.g. an NFC controller being unplugged. 
+ * @NFC_EVENT_SE_CONNECTIVITY: This event is emitted whenever a secure element + * is requesting connectivity access. For example a UICC SE may need to + * talk with a sleeping modem and will notify this need by sending this + * event. It is then up to userspace to decide if it will wake the modem + * up or not. + * @NFC_EVENT_SE_TRANSACTION: This event is sent when an application running on + * a specific SE notifies us about the end of a transaction. The parameter + * for this event is the application ID (AID). */ enum nfc_commands { NFC_CMD_UNSPEC, @@ -102,6 +110,8 @@ enum nfc_commands { NFC_CMD_FW_DOWNLOAD, NFC_EVENT_SE_ADDED, NFC_EVENT_SE_REMOVED, + NFC_EVENT_SE_CONNECTIVITY, + NFC_EVENT_SE_TRANSACTION, /* private: internal use only */ __NFC_CMD_AFTER_LAST }; @@ -159,6 +169,7 @@ enum nfc_attrs { NFC_ATTR_FIRMWARE_NAME, NFC_ATTR_SE_INDEX, NFC_ATTR_SE_TYPE, + NFC_ATTR_SE_AID, /* private: internal use only */ __NFC_ATTR_AFTER_LAST }; -- cgit v0.10.2 From f1abed171fa565448d229afb814f66ab6d104d44 Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Wed, 26 Jun 2013 17:53:09 +0200 Subject: NFC: pn533: Fix hardware busy loop when establishing the LLCP link By using the standard setting for the regular pn533 dongles, we no longer wait for ever for an ATR_RES. Without this, a failing ATR_REQ will put the hardware into a busy loop, constantly waiting for an ATR_RES. Signed-off-by: Samuel Ortiz diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c index daf92ac..3c169e3 100644 --- a/drivers/nfc/pn533.c +++ b/drivers/nfc/pn533.c @@ -2605,17 +2605,6 @@ static int pn533_setup(struct pn533 *dev) switch (dev->device_type) { case PN533_DEVICE_STD: - max_retries.mx_rty_atr = PN533_CONFIG_MAX_RETRIES_ENDLESS; - max_retries.mx_rty_psl = 2; - max_retries.mx_rty_passive_act = - PN533_CONFIG_MAX_RETRIES_NO_RETRY; - - timing.rfu = PN533_CONFIG_TIMING_102; - timing.atr_res_timeout = PN533_CONFIG_TIMING_204; - timing.dep_timeout = PN533_CONFIG_TIMING_409; - - break; - case PN533_DEVICE_PASORI: case PN533_DEVICE_ACR122U: max_retries.mx_rty_atr = 0x2; -- cgit v0.10.2 From 17e9d9d437d1bb52077e562fae9457236dbaa76f Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Wed, 26 Jun 2013 18:16:21 +0200 Subject: NFC: pn533: Fix the pn533 polling loop By turning the radio off after each failed polling try, we dramatically improve the pn533 polling loop efficiency. Without this fix, all Android phones running the broadcom NFC stack are almost never detected. 
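The field is not dropped inline from the poll completion path; a dedicated work item sends the RF-field-off frame and the next poll round is started once that command completes. A stripped-down sketch of the deferral pattern, with hypothetical demo_* names in place of the driver's structures:

    #include <linux/kernel.h>
    #include <linux/errno.h>
    #include <linux/workqueue.h>

    struct demo_dev {
            struct workqueue_struct *wq;
            struct work_struct rf_work;     /* sends the "RF field off" frame */
            struct work_struct poll_work;   /* starts the next poll round */
    };

    static void demo_wq_poll(struct work_struct *work)
    {
            /* build and send the next poll request here */
    }

    static void demo_wq_rf(struct work_struct *work)
    {
            struct demo_dev *dev = container_of(work, struct demo_dev, rf_work);

            /* send RF_CONFIGURATION (field off) here, then restart polling */
            queue_work(dev->wq, &dev->poll_work);
    }

    static int demo_setup(struct demo_dev *dev)
    {
            dev->wq = alloc_ordered_workqueue("demo", 0);
            if (!dev->wq)
                    return -ENOMEM;

            INIT_WORK(&dev->rf_work, demo_wq_rf);
            INIT_WORK(&dev->poll_work, demo_wq_poll);
            return 0;
    }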
Signed-off-by: Samuel Ortiz diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c index 3c169e3..fe9d4b7 100644 --- a/drivers/nfc/pn533.c +++ b/drivers/nfc/pn533.c @@ -359,6 +359,7 @@ struct pn533 { struct work_struct poll_work; struct work_struct mi_work; struct work_struct tg_work; + struct work_struct rf_work; struct list_head cmd_queue; struct pn533_cmd *cmd; @@ -1660,6 +1661,53 @@ static void pn533_listen_mode_timer(unsigned long data) queue_work(dev->wq, &dev->poll_work); } +static int pn533_rf_complete(struct pn533 *dev, void *arg, + struct sk_buff *resp) +{ + int rc = 0; + + nfc_dev_dbg(&dev->interface->dev, "%s", __func__); + + if (IS_ERR(resp)) { + rc = PTR_ERR(resp); + + nfc_dev_err(&dev->interface->dev, "%s RF setting error %d", + __func__, rc); + + return rc; + } + + queue_work(dev->wq, &dev->poll_work); + + dev_kfree_skb(resp); + return rc; +} + +static void pn533_wq_rf(struct work_struct *work) +{ + struct pn533 *dev = container_of(work, struct pn533, rf_work); + struct sk_buff *skb; + int rc; + + nfc_dev_dbg(&dev->interface->dev, "%s", __func__); + + skb = pn533_alloc_skb(dev, 2); + if (!skb) + return; + + *skb_put(skb, 1) = PN533_CFGITEM_RF_FIELD; + *skb_put(skb, 1) = 0; + + rc = pn533_send_cmd_async(dev, PN533_CMD_RF_CONFIGURATION, skb, + pn533_rf_complete, NULL); + if (rc < 0) { + dev_kfree_skb(skb); + nfc_dev_err(&dev->interface->dev, "RF setting error %d", rc); + } + + return; +} + static int pn533_poll_complete(struct pn533 *dev, void *arg, struct sk_buff *resp) { @@ -1705,7 +1753,8 @@ static int pn533_poll_complete(struct pn533 *dev, void *arg, } pn533_poll_next_mod(dev); - queue_work(dev->wq, &dev->poll_work); + /* Not target found, turn radio off */ + queue_work(dev->wq, &dev->rf_work); done: dev_kfree_skb(resp); @@ -2051,6 +2100,7 @@ static int pn533_mod_to_baud(struct pn533 *dev) } } +static int pn533_rf_field(struct nfc_dev *nfc_dev, u8 rf); #define PASSIVE_DATA_LEN 5 static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, u8 comm_mode, u8 *gb, size_t gb_len) @@ -2127,6 +2177,8 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, *arg = !comm_mode; + pn533_rf_field(dev->nfc_dev, 0); + rc = pn533_send_cmd_async(dev, PN533_CMD_IN_JUMP_FOR_DEP, skb, pn533_in_dep_link_up_complete, arg); @@ -2721,6 +2773,7 @@ static int pn533_probe(struct usb_interface *interface, INIT_WORK(&dev->mi_work, pn533_wq_mi_recv); INIT_WORK(&dev->tg_work, pn533_wq_tg_get_data); INIT_WORK(&dev->poll_work, pn533_wq_poll); + INIT_WORK(&dev->rf_work, pn533_wq_rf); dev->wq = alloc_ordered_workqueue("pn533", 0); if (dev->wq == NULL) goto error; -- cgit v0.10.2 From a94e10f7d7ce8e22d3d1cfebdfb7fe0451714799 Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Fri, 28 Jun 2013 15:43:19 +0200 Subject: NFC: pn533: Request System code from SENSF_REQ Some devices are getting confused when not being asked for their system code with type F. 
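Concretely, the type F initiator data sent with the poll is a 5-byte FeliCa SENSF_REQ. A small sketch of the request implied by this change follows; the numeric values (opcode 0x00, system code 0xffff for "all", RC 0x01 to request the system code, TSN 0x03) are taken from the FeliCa SENSF_REQ definition and are assumptions here, since only the symbolic names appear in the patch:

#include <stdio.h>

int main(void)
{
	/* opcode, SC (0xffff = any system), RC, TSN */
	unsigned char sensf_req[5] = { 0x00, 0xff, 0xff, 0x01, 0x03 };
	int i;

	/* RC was 0x00 (no system code requested) before this change */
	for (i = 0; i < 5; i++)
		printf("%02x ", sensf_req[i]);
	printf("\n");
	return 0;
}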
Signed-off-by: Samuel Ortiz diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c index fe9d4b7..fa12f59 100644 --- a/drivers/nfc/pn533.c +++ b/drivers/nfc/pn533.c @@ -257,7 +257,7 @@ static const struct pn533_poll_modulations poll_mod[] = { .initiator_data.felica = { .opcode = PN533_FELICA_OPC_SENSF_REQ, .sc = PN533_FELICA_SENSF_SC_ALL, - .rc = PN533_FELICA_SENSF_RC_NO_SYSTEM_CODE, + .rc = PN533_FELICA_SENSF_RC_SYSTEM_CODE, .tsn = 0x03, }, }, @@ -270,7 +270,7 @@ static const struct pn533_poll_modulations poll_mod[] = { .initiator_data.felica = { .opcode = PN533_FELICA_OPC_SENSF_REQ, .sc = PN533_FELICA_SENSF_SC_ALL, - .rc = PN533_FELICA_SENSF_RC_NO_SYSTEM_CODE, + .rc = PN533_FELICA_SENSF_RC_SYSTEM_CODE, .tsn = 0x03, }, }, -- cgit v0.10.2 From 5eef4845619b88957349415b7b1498e00220fa2b Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Mon, 1 Jul 2013 10:58:12 +0200 Subject: NFC: pn533: Unconditionaly select the highest p2p bit rate p2p devices must be able to support 424 kbps, so we should always select that bitrate in initiator mode. Signed-off-by: Samuel Ortiz diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c index fa12f59..ff3e19d 100644 --- a/drivers/nfc/pn533.c +++ b/drivers/nfc/pn533.c @@ -2086,20 +2086,6 @@ error: return rc; } -static int pn533_mod_to_baud(struct pn533 *dev) -{ - switch (dev->poll_mod_curr) { - case PN533_POLL_MOD_106KBPS_A: - return 0; - case PN533_POLL_MOD_212KBPS_FELICA: - return 1; - case PN533_POLL_MOD_424KBPS_FELICA: - return 2; - default: - return -EINVAL; - } -} - static int pn533_rf_field(struct nfc_dev *nfc_dev, u8 rf); #define PASSIVE_DATA_LEN 5 static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, @@ -2107,8 +2093,8 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, { struct pn533 *dev = nfc_get_drvdata(nfc_dev); struct sk_buff *skb; - int rc, baud, skb_len; - u8 *next, *arg; + int rc, skb_len; + u8 *next, *arg, nfcid3[NFC_NFCID3_MAXSIZE]; u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3}; @@ -2126,41 +2112,39 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, return -EBUSY; } - baud = pn533_mod_to_baud(dev); - if (baud < 0) { - nfc_dev_err(&dev->interface->dev, - "Invalid curr modulation %d", dev->poll_mod_curr); - return baud; - } - skb_len = 3 + gb_len; /* ActPass + BR + Next */ - if (comm_mode == NFC_COMM_PASSIVE) - skb_len += PASSIVE_DATA_LEN; + skb_len += PASSIVE_DATA_LEN; - if (target && target->nfcid2_len) - skb_len += NFC_NFCID3_MAXSIZE; + /* NFCID3 */ + skb_len += NFC_NFCID3_MAXSIZE; + if (target && !target->nfcid2_len) { + nfcid3[0] = 0x1; + nfcid3[1] = 0xfe; + get_random_bytes(nfcid3 + 2, 6); + } skb = pn533_alloc_skb(dev, skb_len); if (!skb) return -ENOMEM; *skb_put(skb, 1) = !comm_mode; /* ActPass */ - *skb_put(skb, 1) = baud; /* Baud rate */ + *skb_put(skb, 1) = 0x02; /* 424 kbps */ next = skb_put(skb, 1); /* Next */ *next = 0; - if (comm_mode == NFC_COMM_PASSIVE && baud > 0) { - memcpy(skb_put(skb, PASSIVE_DATA_LEN), passive_data, - PASSIVE_DATA_LEN); - *next |= 1; - } + /* Copy passive data */ + memcpy(skb_put(skb, PASSIVE_DATA_LEN), passive_data, PASSIVE_DATA_LEN); + *next |= 1; - if (target && target->nfcid2_len) { + /* Copy NFCID3 (which is NFCID2 from SENSF_RES) */ + if (target && target->nfcid2_len) memcpy(skb_put(skb, NFC_NFCID3_MAXSIZE), target->nfcid2, target->nfcid2_len); - *next |= 2; - } + else + memcpy(skb_put(skb, NFC_NFCID3_MAXSIZE), nfcid3, + NFC_NFCID3_MAXSIZE); + *next |= 2; if (gb != NULL && gb_len > 0) { 
memcpy(skb_put(skb, gb_len), gb, gb_len); -- cgit v0.10.2 From 3a8eab39ac53f2d35d663634e16b486e8a5a65a9 Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Mon, 1 Jul 2013 17:26:58 +0200 Subject: NFC: pn533: Enable AUTO RFCA The AUTO RFCA bit prevents the pn533 chipset from turning its radio on whenever an external field is present. Without this bit set, some devices seem to get flooded by the pn533 RF field and thus become hard to detect. Signed-off-by: Samuel Ortiz diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c index ff3e19d..125d995 100644 --- a/drivers/nfc/pn533.c +++ b/drivers/nfc/pn533.c @@ -168,8 +168,9 @@ struct pn533_fw_version { #define PN533_CFGITEM_MAX_RETRIES 0x05 #define PN533_CFGITEM_PASORI 0x82 -#define PN533_CFGITEM_RF_FIELD_ON 0x1 -#define PN533_CFGITEM_RF_FIELD_OFF 0x0 +#define PN533_CFGITEM_RF_FIELD_AUTO_RFCA 0x2 +#define PN533_CFGITEM_RF_FIELD_ON 0x1 +#define PN533_CFGITEM_RF_FIELD_OFF 0x0 #define PN533_CONFIG_TIMING_102 0xb #define PN533_CONFIG_TIMING_204 0xc @@ -1696,7 +1697,7 @@ static void pn533_wq_rf(struct work_struct *work) return; *skb_put(skb, 1) = PN533_CFGITEM_RF_FIELD; - *skb_put(skb, 1) = 0; + *skb_put(skb, 1) = PN533_CFGITEM_RF_FIELD_AUTO_RFCA; rc = pn533_send_cmd_async(dev, PN533_CMD_RF_CONFIGURATION, skb, pn533_rf_complete, NULL); @@ -2598,6 +2599,8 @@ static int pn533_rf_field(struct nfc_dev *nfc_dev, u8 rf) u8 rf_field = !!rf; int rc; + rf_field |= PN533_CFGITEM_RF_FIELD_AUTO_RFCA; + rc = pn533_set_configuration(dev, PN533_CFGITEM_RF_FIELD, (u8 *)&rf_field, 1); if (rc) { -- cgit v0.10.2 From 1575b9d8668f4ecf2648a08751313c826fb6a6e9 Mon Sep 17 00:00:00 2001 From: Olivier Guiter Date: Thu, 13 Jun 2013 15:43:27 +0200 Subject: NFC: pn533: Add extended information frame decoding support Extended Information frames are slightly different from standard frames, as they can (theoretically) handle data up to 64kB. The PN533 firmware only supports packet data up to 265 bytes (incl. TFI byte). This kind of frame is used when the pn533 wants to exchange more than 255 bytes, and this patch handles the reception of such frames. Signed-off-by: Olivier Guiter Signed-off-by: Samuel Ortiz diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c index 125d995..ae0fa9e 100644 --- a/drivers/nfc/pn533.c +++ b/drivers/nfc/pn533.c @@ -84,11 +84,17 @@ MODULE_DEVICE_TABLE(usb, pn533_table); /* How much time we spend listening for initiators */ #define PN533_LISTEN_TIME 2 -/* Standard pn533 frame definitions */ +/* Standard pn533 frame definitions (standard and extended)*/ #define PN533_STD_FRAME_HEADER_LEN (sizeof(struct pn533_std_frame) \ + 2) /* data[0] TFI, data[1] CC */ #define PN533_STD_FRAME_TAIL_LEN 2 /* data[len] DCS, data[len + 1] postamble*/ +#define PN533_EXT_FRAME_HEADER_LEN (sizeof(struct pn533_ext_frame) \ + + 2) /* data[0] TFI, data[1] CC */ + +#define PN533_CMD_DATAEXCH_DATA_MAXLEN 262 +#define PN533_CMD_DATAFRAME_MAXLEN 240 /* max data length (send) */ + /* * Max extended frame payload len, excluding TFI and CC * which are already in PN533_FRAME_HEADER_LEN.
@@ -99,6 +105,10 @@ MODULE_DEVICE_TABLE(usb, pn533_table); Postamble (1) */ #define PN533_STD_FRAME_CHECKSUM(f) (f->data[f->datalen]) #define PN533_STD_FRAME_POSTAMBLE(f) (f->data[f->datalen + 1]) +/* Half start code (3), LEN (4) should be 0xffff for extended frame */ +#define PN533_STD_IS_EXTENDED(hdr) ((hdr)->datalen == 0xFF \ + && (hdr)->datalen_checksum == 0xFF) +#define PN533_EXT_FRAME_CHECKSUM(f) (f->data[be16_to_cpu(f->datalen)]) /* start of frame */ #define PN533_STD_FRAME_SOF 0x00FF @@ -124,7 +134,7 @@ MODULE_DEVICE_TABLE(usb, pn533_table); #define PN533_ACR122_RDR_TO_PC_ESCAPE 0x83 /* PN533 Commands */ -#define PN533_STD_FRAME_CMD(f) (f->data[1]) +#define PN533_FRAME_CMD(f) (f->data[1]) #define PN533_CMD_GET_FIRMWARE_VERSION 0x02 #define PN533_CMD_RF_CONFIGURATION 0x32 @@ -406,6 +416,15 @@ struct pn533_std_frame { u8 data[]; } __packed; +struct pn533_ext_frame { /* Extended Information frame */ + u8 preamble; + __be16 start_frame; + __be16 eif_flag; /* fixed to 0xFFFF */ + __be16 datalen; + u8 datalen_checksum; + u8 data[]; +} __packed; + struct pn533_frame_ops { void (*tx_frame_init)(void *frame, u8 cmd_code); void (*tx_frame_finish)(void *frame); @@ -513,7 +532,7 @@ static u8 pn533_acr122_get_cmd_code(void *frame) { struct pn533_acr122_rx_frame *f = frame; - return PN533_STD_FRAME_CMD(f); + return PN533_FRAME_CMD(f); } static struct pn533_frame_ops pn533_acr122_frame_ops = { @@ -532,6 +551,12 @@ static struct pn533_frame_ops pn533_acr122_frame_ops = { .get_cmd_code = pn533_acr122_get_cmd_code, }; +/* The rule: value(high byte) + value(low byte) + checksum = 0 */ +static inline u8 pn533_ext_checksum(u16 value) +{ + return ~(u8)(((value & 0xFF00) >> 8) + (u8)(value & 0xFF)) + 1; +} + /* The rule: value + checksum = 0 */ static inline u8 pn533_std_checksum(u8 value) { @@ -557,7 +582,7 @@ static void pn533_std_tx_frame_init(void *_frame, u8 cmd_code) frame->preamble = 0; frame->start_frame = cpu_to_be16(PN533_STD_FRAME_SOF); PN533_STD_FRAME_IDENTIFIER(frame) = PN533_STD_FRAME_DIR_OUT; - PN533_STD_FRAME_CMD(frame) = cmd_code; + PN533_FRAME_CMD(frame) = cmd_code; frame->datalen = 2; } @@ -583,18 +608,35 @@ static void pn533_std_tx_update_payload_len(void *_frame, int len) static bool pn533_std_rx_frame_is_valid(void *_frame) { u8 checksum; - struct pn533_std_frame *frame = _frame; + struct pn533_std_frame *stdf = _frame; - if (frame->start_frame != cpu_to_be16(PN533_STD_FRAME_SOF)) + if (stdf->start_frame != cpu_to_be16(PN533_STD_FRAME_SOF)) return false; - checksum = pn533_std_checksum(frame->datalen); - if (checksum != frame->datalen_checksum) - return false; + if (likely(!PN533_STD_IS_EXTENDED(stdf))) { + /* Standard frame code */ - checksum = pn533_std_data_checksum(frame->data, frame->datalen); - if (checksum != PN533_STD_FRAME_CHECKSUM(frame)) - return false; + checksum = pn533_std_checksum(stdf->datalen); + if (checksum != stdf->datalen_checksum) + return false; + + checksum = pn533_std_data_checksum(stdf->data, stdf->datalen); + if (checksum != PN533_STD_FRAME_CHECKSUM(stdf)) + return false; + } else { + /* Extended */ + struct pn533_ext_frame *eif = _frame; + + checksum = pn533_ext_checksum(be16_to_cpu(eif->datalen)); + if (checksum != eif->datalen_checksum) + return false; + + /* check data checksum */ + checksum = pn533_std_data_checksum(eif->data, + be16_to_cpu(eif->datalen)); + if (checksum != PN533_EXT_FRAME_CHECKSUM(eif)) + return false; + } return true; } @@ -614,6 +656,14 @@ static inline int pn533_std_rx_frame_size(void *frame) { struct pn533_std_frame *f = frame; + 
/* check for Extended Information frame */ + if (PN533_STD_IS_EXTENDED(f)) { + struct pn533_ext_frame *eif = frame; + + return sizeof(struct pn533_ext_frame) + + be16_to_cpu(eif->datalen) + PN533_STD_FRAME_TAIL_LEN; + } + return sizeof(struct pn533_std_frame) + f->datalen + PN533_STD_FRAME_TAIL_LEN; } @@ -621,8 +671,12 @@ static inline int pn533_std_rx_frame_size(void *frame) static u8 pn533_std_get_cmd_code(void *frame) { struct pn533_std_frame *f = frame; + struct pn533_ext_frame *eif = frame; - return PN533_STD_FRAME_CMD(f); + if (PN533_STD_IS_EXTENDED(f)) + return PN533_FRAME_CMD(eif); + else + return PN533_FRAME_CMD(f); } static struct pn533_frame_ops pn533_std_frame_ops = { @@ -690,6 +744,11 @@ static void pn533_recv_response(struct urb *urb) goto sched_wq; } + if (PN533_STD_IS_EXTENDED((struct pn533_std_frame *)in_frame)) + dev->ops->rx_header_len = PN533_EXT_FRAME_HEADER_LEN; + else + dev->ops->rx_header_len = PN533_STD_FRAME_HEADER_LEN; + sched_wq: queue_work(dev->wq, &dev->cmd_complete_work); } -- cgit v0.10.2 From 963a82e07d4e1f95fc423d53912ac0a7fe643b1c Mon Sep 17 00:00:00 2001 From: Olivier Guiter Date: Thu, 13 Jun 2013 13:43:28 +0000 Subject: NFC: pn533: Split large Tx frames in chunks On sending large frames (size > 262), we split it in multiple chunks and send them asynchronously with MI bit. Signed-off-by: Olivier Guiter Signed-off-by: Samuel Ortiz diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c index ae0fa9e..f06ef7c 100644 --- a/drivers/nfc/pn533.c +++ b/drivers/nfc/pn533.c @@ -363,12 +363,14 @@ struct pn533 { struct urb *in_urb; struct sk_buff_head resp_q; + struct sk_buff_head fragment_skb; struct workqueue_struct *wq; struct work_struct cmd_work; struct work_struct cmd_complete_work; struct work_struct poll_work; - struct work_struct mi_work; + struct work_struct mi_rx_work; + struct work_struct mi_tx_work; struct work_struct tg_work; struct work_struct rf_work; @@ -378,6 +380,7 @@ struct pn533 { struct mutex cmd_lock; /* protects cmd queue */ void *cmd_complete_mi_arg; + void *cmd_complete_dep_arg; struct pn533_poll_modulations *poll_mod_active[PN533_POLL_MOD_MAX + 1]; u8 poll_mod_count; @@ -2328,7 +2331,15 @@ static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg, if (mi) { dev->cmd_complete_mi_arg = arg; - queue_work(dev->wq, &dev->mi_work); + queue_work(dev->wq, &dev->mi_rx_work); + return -EINPROGRESS; + } + + /* Prepare for the next round */ + if (skb_queue_len(&dev->fragment_skb) > 0) { + dev->cmd_complete_dep_arg = arg; + queue_work(dev->wq, &dev->mi_tx_work); + return -EINPROGRESS; } @@ -2349,6 +2360,50 @@ _error: return rc; } +/* Split the Tx skb into small chunks */ +static int pn533_fill_fragment_skbs(struct pn533 *dev, struct sk_buff *skb) +{ + struct sk_buff *frag; + int frag_size; + + do { + /* Remaining size */ + if (skb->len > PN533_CMD_DATAFRAME_MAXLEN) + frag_size = PN533_CMD_DATAFRAME_MAXLEN; + else + frag_size = skb->len; + + /* Allocate and reserve */ + frag = pn533_alloc_skb(dev, frag_size); + if (!frag) { + skb_queue_purge(&dev->fragment_skb); + break; + } + + /* Reserve the TG/MI byte */ + skb_reserve(frag, 1); + + /* MI + TG */ + if (frag_size == PN533_CMD_DATAFRAME_MAXLEN) + *skb_push(frag, sizeof(u8)) = (PN533_CMD_MI_MASK | 1); + else + *skb_push(frag, sizeof(u8)) = 1; /* TG */ + + memcpy(skb_put(frag, frag_size), skb->data, frag_size); + + /* Reduce the size of incoming buffer */ + skb_pull(skb, frag_size); + + /* Add this to skb_queue */ + skb_queue_tail(&dev->fragment_skb, frag); + + } while (skb->len > 0); + + 
dev_kfree_skb(skb); + + return skb_queue_len(&dev->fragment_skb); +} + static int pn533_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target, struct sk_buff *skb, data_exchange_cb_t cb, void *cb_context) @@ -2359,15 +2414,6 @@ static int pn533_transceive(struct nfc_dev *nfc_dev, nfc_dev_dbg(&dev->interface->dev, "%s", __func__); - if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) { - /* TODO: Implement support to multi-part data exchange */ - nfc_dev_err(&dev->interface->dev, - "Data length greater than the max allowed: %d", - PN533_CMD_DATAEXCH_DATA_MAXLEN); - rc = -ENOSYS; - goto error; - } - if (!dev->tgt_active_prot) { nfc_dev_err(&dev->interface->dev, "Can't exchange data if there is no active target"); @@ -2395,7 +2441,20 @@ static int pn533_transceive(struct nfc_dev *nfc_dev, break; } default: - *skb_push(skb, sizeof(u8)) = 1; /*TG*/ + /* jumbo frame ? */ + if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) { + rc = pn533_fill_fragment_skbs(dev, skb); + if (rc <= 0) + goto error; + + skb = skb_dequeue(&dev->fragment_skb); + if (!skb) { + rc = -EIO; + goto error; + } + } else { + *skb_push(skb, sizeof(u8)) = 1; /* TG */ + } rc = pn533_send_data_async(dev, PN533_CMD_IN_DATA_EXCHANGE, skb, pn533_data_exchange_complete, @@ -2466,7 +2525,7 @@ static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb) static void pn533_wq_mi_recv(struct work_struct *work) { - struct pn533 *dev = container_of(work, struct pn533, mi_work); + struct pn533 *dev = container_of(work, struct pn533, mi_rx_work); struct sk_buff *skb; int rc; @@ -2514,6 +2573,61 @@ error: queue_work(dev->wq, &dev->cmd_work); } +static void pn533_wq_mi_send(struct work_struct *work) +{ + struct pn533 *dev = container_of(work, struct pn533, mi_tx_work); + struct sk_buff *skb; + int rc; + + nfc_dev_dbg(&dev->interface->dev, "%s", __func__); + + /* Grab the first skb in the queue */ + skb = skb_dequeue(&dev->fragment_skb); + + if (skb == NULL) { /* No more data */ + /* Reset the queue for future use */ + skb_queue_head_init(&dev->fragment_skb); + goto error; + } + + switch (dev->device_type) { + case PN533_DEVICE_PASORI: + if (dev->tgt_active_prot != NFC_PROTO_FELICA) { + rc = -EIO; + break; + } + + rc = pn533_send_cmd_direct_async(dev, PN533_CMD_IN_COMM_THRU, + skb, + pn533_data_exchange_complete, + dev->cmd_complete_dep_arg); + + break; + + default: + /* Still some fragments? 
*/ + rc = pn533_send_cmd_direct_async(dev,PN533_CMD_IN_DATA_EXCHANGE, + skb, + pn533_data_exchange_complete, + dev->cmd_complete_dep_arg); + + break; + } + + if (rc == 0) /* success */ + return; + + nfc_dev_err(&dev->interface->dev, + "Error %d when trying to perform data_exchange", rc); + + dev_kfree_skb(skb); + kfree(dev->cmd_complete_dep_arg); + +error: + pn533_send_ack(dev, GFP_KERNEL); + queue_work(dev->wq, &dev->cmd_work); +} + static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata, u8 cfgdata_len) { @@ -2816,7 +2930,8 @@ static int pn533_probe(struct usb_interface *interface, INIT_WORK(&dev->cmd_work, pn533_wq_cmd); INIT_WORK(&dev->cmd_complete_work, pn533_wq_cmd_complete); - INIT_WORK(&dev->mi_work, pn533_wq_mi_recv); + INIT_WORK(&dev->mi_rx_work, pn533_wq_mi_recv); + INIT_WORK(&dev->mi_tx_work, pn533_wq_mi_send); INIT_WORK(&dev->tg_work, pn533_wq_tg_get_data); INIT_WORK(&dev->poll_work, pn533_wq_poll); INIT_WORK(&dev->rf_work, pn533_wq_rf); @@ -2829,6 +2944,7 @@ static int pn533_probe(struct usb_interface *interface, dev->listen_timer.function = pn533_listen_mode_timer; skb_queue_head_init(&dev->resp_q); + skb_queue_head_init(&dev->fragment_skb); INIT_LIST_HEAD(&dev->cmd_queue); -- cgit v0.10.2 From 56a63c82cf82eb491af05759d9e9f9b97ca36bc2 Mon Sep 17 00:00:00 2001 From: Olivier Guiter Date: Thu, 13 Jun 2013 13:43:29 +0000 Subject: NFC: pn533: Store the correct frame size (normal vs ext) Extended information frames are sent by the PN533 to exchange frames larger than 255 bytes. These extended frames are very close to the standard ones, except for the header length. On each incoming frame, we set the correct header length, and we do that only for the standard pn533 chipsets as the acr122 does not seem to support extended frames properly.
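To make the size difference concrete, the user-space sketch below mirrors the two frame layouts introduced by the decoding patch above (struct pn533_std_frame and struct pn533_ext_frame, plus the two TFI/CC bytes counted by the *_FRAME_HEADER_LEN macros) and the extended length checksum rule; the struct layout is copied from the patch and the sample length is arbitrary:

#include <stdio.h>
#include <stdint.h>

struct std_frame {
	uint8_t preamble;
	uint16_t start_frame;
	uint8_t datalen;
	uint8_t datalen_checksum;
	uint8_t data[];
} __attribute__((packed));

struct ext_frame {
	uint8_t preamble;
	uint16_t start_frame;
	uint16_t eif_flag;	/* fixed to 0xffff */
	uint16_t datalen;
	uint8_t datalen_checksum;
	uint8_t data[];
} __attribute__((packed));

/* "value(high byte) + value(low byte) + checksum = 0" (mod 256) */
static uint8_t ext_checksum(uint16_t value)
{
	return ~(uint8_t)((value >> 8) + (value & 0xff)) + 1;
}

int main(void)
{
	uint16_t len = 0x0105;	/* arbitrary 261-byte payload */

	printf("std header: %zu bytes, ext header: %zu bytes\n",
	       sizeof(struct std_frame) + 2, sizeof(struct ext_frame) + 2);
	printf("LEN 0x%04x -> LCS 0x%02x (sum mod 256 = 0x%02x)\n", len,
	       ext_checksum(len),
	       (uint8_t)((len >> 8) + (len & 0xff) + ext_checksum(len)));
	return 0;
}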
Signed-off-by: Olivier Guiter Signed-off-by: Samuel Ortiz diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c index f06ef7c..2f1ebbd 100644 --- a/drivers/nfc/pn533.c +++ b/drivers/nfc/pn533.c @@ -435,7 +435,7 @@ struct pn533_frame_ops { int tx_header_len; int tx_tail_len; - bool (*rx_is_frame_valid)(void *frame); + bool (*rx_is_frame_valid)(void *frame, struct pn533 *dev); int (*rx_frame_size)(void *frame); int rx_header_len; int rx_tail_len; @@ -510,7 +510,7 @@ static void pn533_acr122_tx_update_payload_len(void *_frame, int len) frame->datalen += len; } -static bool pn533_acr122_is_rx_frame_valid(void *_frame) +static bool pn533_acr122_is_rx_frame_valid(void *_frame, struct pn533 *dev) { struct pn533_acr122_rx_frame *frame = _frame; @@ -608,7 +608,7 @@ static void pn533_std_tx_update_payload_len(void *_frame, int len) frame->datalen += len; } -static bool pn533_std_rx_frame_is_valid(void *_frame) +static bool pn533_std_rx_frame_is_valid(void *_frame, struct pn533 *dev) { u8 checksum; struct pn533_std_frame *stdf = _frame; @@ -618,6 +618,7 @@ static bool pn533_std_rx_frame_is_valid(void *_frame) if (likely(!PN533_STD_IS_EXTENDED(stdf))) { /* Standard frame code */ + dev->ops->rx_header_len = PN533_STD_FRAME_HEADER_LEN; checksum = pn533_std_checksum(stdf->datalen); if (checksum != stdf->datalen_checksum) @@ -630,6 +631,8 @@ static bool pn533_std_rx_frame_is_valid(void *_frame) /* Extended */ struct pn533_ext_frame *eif = _frame; + dev->ops->rx_header_len = PN533_EXT_FRAME_HEADER_LEN; + checksum = pn533_ext_checksum(be16_to_cpu(eif->datalen)); if (checksum != eif->datalen_checksum) return false; @@ -734,7 +737,7 @@ static void pn533_recv_response(struct urb *urb) print_hex_dump_debug("PN533 RX: ", DUMP_PREFIX_NONE, 16, 1, in_frame, dev->ops->rx_frame_size(in_frame), false); - if (!dev->ops->rx_is_frame_valid(in_frame)) { + if (!dev->ops->rx_is_frame_valid(in_frame, dev)) { nfc_dev_err(&dev->interface->dev, "Received an invalid frame"); cmd->status = -EIO; goto sched_wq; @@ -747,11 +750,6 @@ static void pn533_recv_response(struct urb *urb) goto sched_wq; } - if (PN533_STD_IS_EXTENDED((struct pn533_std_frame *)in_frame)) - dev->ops->rx_header_len = PN533_EXT_FRAME_HEADER_LEN; - else - dev->ops->rx_header_len = PN533_STD_FRAME_HEADER_LEN; - sched_wq: queue_work(dev->wq, &dev->cmd_complete_work); } -- cgit v0.10.2 From 369f4d503ac12363f5d11b91f849377875d57598 Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Wed, 24 Jul 2013 14:49:22 +0200 Subject: NFC: Fix SE discovery failure warning condition This is a typo coming from the initial implementation. se_discover fails when it returns something different than zero and we should only display a warning in that case. Signed-off-by: Samuel Ortiz diff --git a/net/nfc/core.c b/net/nfc/core.c index 1d074dd..aad7f8f 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -129,7 +129,7 @@ int nfc_dev_up(struct nfc_dev *dev) /* We have to enable the device before discovering SEs */ if (dev->ops->discover_se) { rc = dev->ops->discover_se(dev); - if (!rc) + if (rc) pr_warn("SE discovery failed\n"); } -- cgit v0.10.2 From ac22ac466a659f1b2e02a2e2ee23fc5c42da2c95 Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Wed, 24 Jul 2013 18:10:50 +0200 Subject: NFC: Add a GET_SE netlink API In order to fetch the discovered secure elements from an NFC controller, we need to send a netlink command that will dump the list of available SEs from NFC. 
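For illustration, a minimal user-space sketch of such a dump using libnl-3 could look like the following; the generic netlink family name "nfc" and version 1 are assumptions not shown in this excerpt, UAPI headers containing NFC_CMD_GET_SE are assumed to be installed, and most error handling is omitted:

#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nfc.h>

static int se_cb(struct nl_msg *msg, void *arg)
{
	struct nlattr *attrs[NFC_ATTR_MAX + 1];

	if (genlmsg_parse(nlmsg_hdr(msg), 0, attrs, NFC_ATTR_MAX, NULL) < 0)
		return NL_SKIP;

	if (attrs[NFC_ATTR_DEVICE_INDEX] && attrs[NFC_ATTR_SE_INDEX] &&
	    attrs[NFC_ATTR_SE_TYPE])
		printf("dev %u: SE %u type %u\n",
		       nla_get_u32(attrs[NFC_ATTR_DEVICE_INDEX]),
		       nla_get_u32(attrs[NFC_ATTR_SE_INDEX]),
		       nla_get_u8(attrs[NFC_ATTR_SE_TYPE]));
	return NL_OK;
}

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family;

	if (!sk || genl_connect(sk))
		return 1;

	family = genl_ctrl_resolve(sk, "nfc");	/* assumed family name */
	if (family < 0)
		return 1;

	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, se_cb, NULL);

	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0,
		    NLM_F_DUMP, NFC_CMD_GET_SE, 1 /* assumed version */);
	nl_send_auto(sk, msg);
	nlmsg_free(msg);

	nl_recvmsgs_default(sk);
	nl_socket_free(sk);
	return 0;
}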
Signed-off-by: Samuel Ortiz diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h index 539d604..029921b 100644 --- a/include/uapi/linux/nfc.h +++ b/include/uapi/linux/nfc.h @@ -84,6 +84,7 @@ * @NFC_EVENT_SE_TRANSACTION: This event is sent when an application running on * a specific SE notifies us about the end of a transaction. The parameter * for this event is the application ID (AID). + * @NFC_CMD_GET_SE: Dump all discovered secure elements from an NFC controller. */ enum nfc_commands { NFC_CMD_UNSPEC, @@ -112,6 +113,7 @@ enum nfc_commands { NFC_EVENT_SE_REMOVED, NFC_EVENT_SE_CONNECTIVITY, NFC_EVENT_SE_TRANSACTION, + NFC_CMD_GET_SE, /* private: internal use only */ __NFC_CMD_AFTER_LAST }; diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index f16fd59..3b08ef9 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -1191,6 +1191,91 @@ static int nfc_genl_disable_se(struct sk_buff *skb, struct genl_info *info) return rc; } +static int nfc_genl_send_se(struct sk_buff *msg, struct nfc_dev *dev, + u32 portid, u32 seq, + struct netlink_callback *cb, + int flags) +{ + void *hdr; + struct nfc_se *se, *n; + + list_for_each_entry_safe(se, n, &dev->secure_elements, list) { + hdr = genlmsg_put(msg, portid, seq, &nfc_genl_family, flags, + NFC_CMD_GET_SE); + if (!hdr) + goto nla_put_failure; + + if (cb) + genl_dump_check_consistent(cb, hdr, &nfc_genl_family); + + if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || + nla_put_u32(msg, NFC_ATTR_SE_INDEX, se->idx) || + nla_put_u8(msg, NFC_ATTR_SE_TYPE, se->type)) + goto nla_put_failure; + + if (genlmsg_end(msg, hdr) < 0) + goto nla_put_failure; + } + + return 0; + +nla_put_failure: + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; +} + +static int nfc_genl_dump_ses(struct sk_buff *skb, + struct netlink_callback *cb) +{ + struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0]; + struct nfc_dev *dev = (struct nfc_dev *) cb->args[1]; + bool first_call = false; + + if (!iter) { + first_call = true; + iter = kmalloc(sizeof(struct class_dev_iter), GFP_KERNEL); + if (!iter) + return -ENOMEM; + cb->args[0] = (long) iter; + } + + mutex_lock(&nfc_devlist_mutex); + + cb->seq = nfc_devlist_generation; + + if (first_call) { + nfc_device_iter_init(iter); + dev = nfc_device_iter_next(iter); + } + + while (dev) { + int rc; + + rc = nfc_genl_send_se(skb, dev, NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, cb, NLM_F_MULTI); + if (rc < 0) + break; + + dev = nfc_device_iter_next(iter); + } + + mutex_unlock(&nfc_devlist_mutex); + + cb->args[1] = (long) dev; + + return skb->len; +} + +static int nfc_genl_dump_ses_done(struct netlink_callback *cb) +{ + struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0]; + + nfc_device_iter_exit(iter); + kfree(iter); + + return 0; +} + static struct genl_ops nfc_genl_ops[] = { { .cmd = NFC_CMD_GET_DEVICE, @@ -1265,6 +1350,12 @@ static struct genl_ops nfc_genl_ops[] = { .doit = nfc_genl_disable_se, .policy = nfc_genl_policy, }, + { + .cmd = NFC_CMD_GET_SE, + .dumpit = nfc_genl_dump_ses, + .done = nfc_genl_dump_ses_done, + .policy = nfc_genl_policy, + }, }; -- cgit v0.10.2 From 46f793b0413cfb234ca4faf7e598f24967e1fd3b Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Wed, 3 Jul 2013 14:50:36 +0200 Subject: NFC: pn533: Add delay between each poll frame It seems that some pn533 firmwares go belly up when being asked to send poll frames too frequently. Adding a 10ms delay between each of them calm the chip down and prevent it from crashing. 
Signed-off-by: Samuel Ortiz diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c index 2f1ebbd..9f3868b 100644 --- a/drivers/nfc/pn533.c +++ b/drivers/nfc/pn533.c @@ -83,6 +83,8 @@ MODULE_DEVICE_TABLE(usb, pn533_table); /* How much time we spend listening for initiators */ #define PN533_LISTEN_TIME 2 +/* Delay between each poll frame (ms) */ +#define PN533_POLL_INTERVAL 10 /* Standard pn533 frame definitions (standard and extended)*/ #define PN533_STD_FRAME_HEADER_LEN (sizeof(struct pn533_std_frame) \ @@ -368,7 +370,7 @@ struct pn533 { struct workqueue_struct *wq; struct work_struct cmd_work; struct work_struct cmd_complete_work; - struct work_struct poll_work; + struct delayed_work poll_work; struct work_struct mi_rx_work; struct work_struct mi_tx_work; struct work_struct tg_work; @@ -1719,7 +1721,8 @@ static void pn533_listen_mode_timer(unsigned long data) pn533_poll_next_mod(dev); - queue_work(dev->wq, &dev->poll_work); + queue_delayed_work(dev->wq, &dev->poll_work, + msecs_to_jiffies(PN533_POLL_INTERVAL)); } static int pn533_rf_complete(struct pn533 *dev, void *arg, @@ -1738,7 +1741,8 @@ static int pn533_rf_complete(struct pn533 *dev, void *arg, return rc; } - queue_work(dev->wq, &dev->poll_work); + queue_delayed_work(dev->wq, &dev->poll_work, + msecs_to_jiffies(PN533_POLL_INTERVAL)); dev_kfree_skb(resp); return rc; @@ -1880,7 +1884,7 @@ static int pn533_send_poll_frame(struct pn533 *dev) static void pn533_wq_poll(struct work_struct *work) { - struct pn533 *dev = container_of(work, struct pn533, poll_work); + struct pn533 *dev = container_of(work, struct pn533, poll_work.work); struct pn533_poll_modulations *cur_mod; int rc; @@ -1955,6 +1959,7 @@ static void pn533_stop_poll(struct nfc_dev *nfc_dev) } pn533_abort_cmd(dev, GFP_KERNEL); + flush_delayed_work(&dev->poll_work); pn533_poll_reset_mod_list(dev); } @@ -2931,7 +2936,7 @@ static int pn533_probe(struct usb_interface *interface, INIT_WORK(&dev->mi_rx_work, pn533_wq_mi_recv); INIT_WORK(&dev->mi_tx_work, pn533_wq_mi_send); INIT_WORK(&dev->tg_work, pn533_wq_tg_get_data); - INIT_WORK(&dev->poll_work, pn533_wq_poll); + INIT_DELAYED_WORK(&dev->poll_work, pn533_wq_poll); INIT_WORK(&dev->rf_work, pn533_wq_rf); dev->wq = alloc_ordered_workqueue("pn533", 0); if (dev->wq == NULL) @@ -3044,6 +3049,7 @@ static void pn533_disconnect(struct usb_interface *interface) usb_kill_urb(dev->in_urb); usb_kill_urb(dev->out_urb); + flush_delayed_work(&dev->poll_work); destroy_workqueue(dev->wq); skb_queue_purge(&dev->resp_q); -- cgit v0.10.2 From dfccd0f580445d176acea174175b3e6518cc91f7 Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Wed, 3 Jul 2013 15:14:17 +0200 Subject: NFC: pn533: Add some polling entropy By not always starting the polling loop from the same modulation, we avoid entering infinite loops where devices exporting 2 targets (on 2 different modulations) get the same target activated over and over. If this target is not readable (e.g. a wallet emulating a tag), we will stay in an error loop for ever. 
Signed-off-by: Samuel Ortiz diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c index 9f3868b..5df730b 100644 --- a/drivers/nfc/pn533.c +++ b/drivers/nfc/pn533.c @@ -1913,6 +1913,7 @@ static int pn533_start_poll(struct nfc_dev *nfc_dev, u32 im_protocols, u32 tm_protocols) { struct pn533 *dev = nfc_get_drvdata(nfc_dev); + u8 rand_mod; nfc_dev_dbg(&dev->interface->dev, "%s: im protocols 0x%x tm protocols 0x%x", @@ -1936,11 +1937,15 @@ static int pn533_start_poll(struct nfc_dev *nfc_dev, tm_protocols = 0; } - dev->poll_mod_curr = 0; pn533_poll_create_mod_list(dev, im_protocols, tm_protocols); dev->poll_protocols = im_protocols; dev->listen_protocols = tm_protocols; + /* Do not always start polling from the same modulation */ + get_random_bytes(&rand_mod, sizeof(rand_mod)); + rand_mod %= dev->poll_mod_count; + dev->poll_mod_curr = rand_mod; + return pn533_send_poll_frame(dev); } -- cgit v0.10.2 From 926489be1d2b030c17d38fa10b5921bf3409d91d Mon Sep 17 00:00:00 2001 From: Mugunthan V N Date: Mon, 12 Aug 2013 17:11:15 +0530 Subject: drivers: net: cpsw: Add support for new CPSW IP version present in AM43xx SoC The new IP version which is present in AM43xx SoC has a minor changes and the offsets are same as the previous version, so adding new IP version support in the driver. Signed-off-by: Mugunthan V N Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index cd95671..0fcf212 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -83,6 +83,7 @@ do { \ #define CPSW_VERSION_1 0x19010a #define CPSW_VERSION_2 0x19010c #define CPSW_VERSION_3 0x19010f +#define CPSW_VERSION_4 0x190112 #define HOST_PORT_NUM 0 #define SLIVER_SIZE 0x40 @@ -993,6 +994,7 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) break; case CPSW_VERSION_2: case CPSW_VERSION_3: + case CPSW_VERSION_4: slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP); break; } @@ -2018,6 +2020,7 @@ static int cpsw_probe(struct platform_device *pdev) break; case CPSW_VERSION_2: case CPSW_VERSION_3: + case CPSW_VERSION_4: priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET; priv->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET; priv->hw_stats = ss_regs + CPSW2_HW_STATS; -- cgit v0.10.2 From ef04158e13e827315680cf8449d9af3bd8dc6280 Mon Sep 17 00:00:00 2001 From: Eric Lapuyade Date: Fri, 19 Jul 2013 14:56:08 +0200 Subject: NFC: Move nfc_fw_download_done() definition from private to public This API must be called by NFC drivers, and its prototype was incorrectly placed. 
Signed-off-by: Eric Lapuyade Signed-off-by: Samuel Ortiz diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h index 5f286b7..1005955 100644 --- a/include/net/nfc/nfc.h +++ b/include/net/nfc/nfc.h @@ -224,6 +224,8 @@ int nfc_set_remote_general_bytes(struct nfc_dev *dev, u8 *gt, u8 gt_len); u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, size_t *gb_len); +int nfc_fw_download_done(struct nfc_dev *dev, const char *firmware_name); + int nfc_targets_found(struct nfc_dev *dev, struct nfc_target *targets, int ntargets); int nfc_target_lost(struct nfc_dev *dev, u32 target_idx); diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h index 820a785..4e2e5a7 100644 --- a/net/nfc/nfc.h +++ b/net/nfc/nfc.h @@ -126,8 +126,6 @@ static inline void nfc_device_iter_exit(struct class_dev_iter *iter) int nfc_fw_download(struct nfc_dev *dev, const char *firmware_name); int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name); -int nfc_fw_download_done(struct nfc_dev *dev, const char *firmware_name); - int nfc_dev_up(struct nfc_dev *dev); int nfc_dev_down(struct nfc_dev *dev); -- cgit v0.10.2 From eab10b71a7d62d7cc6db631dba448f1d84df9b53 Mon Sep 17 00:00:00 2001 From: Eric Lapuyade Date: Fri, 19 Jul 2013 14:57:13 +0200 Subject: NFC: pn544: i2c: Add firmware download mode power-on support This is in preparation for pn544-i2c firmware download feature, where we need to know if we're in regular or firmware upload mode. Signed-off-by: Eric Lapuyade Signed-off-by: Samuel Ortiz diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c index 8cf64c1..e09c898 100644 --- a/drivers/nfc/pn544/i2c.c +++ b/drivers/nfc/pn544/i2c.c @@ -65,6 +65,7 @@ struct pn544_i2c_phy { unsigned int en_polarity; int powered; + int run_mode; int hard_fault; /* * < 0 if hardware error occured (e.g. i2c err) @@ -122,15 +123,22 @@ out: gpio_set_value(phy->gpio_en, !phy->en_polarity); } +static void pn544_hci_i2c_enable_mode(struct pn544_i2c_phy *phy, int run_mode) +{ + gpio_set_value(phy->gpio_fw, run_mode == PN544_FW_MODE ? 1 : 0); + gpio_set_value(phy->gpio_en, phy->en_polarity); + usleep_range(10000, 15000); + + phy->run_mode = run_mode; +} + static int pn544_hci_i2c_enable(void *phy_id) { struct pn544_i2c_phy *phy = phy_id; pr_info(DRIVER_DESC ": %s\n", __func__); - gpio_set_value(phy->gpio_fw, 0); - gpio_set_value(phy->gpio_en, phy->en_polarity); - usleep_range(10000, 15000); + pn544_hci_i2c_enable_mode(phy, PN544_HCI_MODE); phy->powered = 1; diff --git a/drivers/nfc/pn544/pn544.c b/drivers/nfc/pn544/pn544.c index 0d17da7..1d4b38c 100644 --- a/drivers/nfc/pn544/pn544.c +++ b/drivers/nfc/pn544/pn544.c @@ -31,9 +31,6 @@ /* Timing restrictions (ms) */ #define PN544_HCI_RESETVEN_TIME 30 -#define HCI_MODE 0 -#define FW_MODE 1 - enum pn544_state { PN544_ST_COLD, PN544_ST_FW_READY, diff --git a/drivers/nfc/pn544/pn544.h b/drivers/nfc/pn544/pn544.h index f47c645..d689f0a 100644 --- a/drivers/nfc/pn544/pn544.h +++ b/drivers/nfc/pn544/pn544.h @@ -24,6 +24,9 @@ #define DRIVER_DESC "HCI NFC driver for PN544" +#define PN544_HCI_MODE 0 +#define PN544_FW_MODE 1 + int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name, int phy_headroom, int phy_tailroom, int phy_payload, struct nfc_hci_dev **hdev); -- cgit v0.10.2 From 74c181d528bd8b5989f424a489262d0742ca31ae Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Mon, 12 Aug 2013 16:41:25 -0700 Subject: tcp: reset reordering est. selectively on timeout On timeout the TCP sender unconditionally resets the estimated degree of network reordering (tp->reordering). 
The idea behind this is that the estimate is too large to trigger fast recovery (e.g., due to an IP path change). But for example if the sender only had 2 packets outstanding, then a timeout doesn't tell much about reordering. A sender that learns about reordering on big writes and loses packets on small writes will end up falsely retransmitting again and again, especially when reordering is more likely on big writes. Therefore the sender should only suspect that tp->reordering is too high if it could have gone into fast recovery with the (lower) default estimate. Signed-off-by: Yuchung Cheng Acked-by: Neal Cardwell Signed-off-by: David S. Miller diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index b61274b..e965cc7 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1877,8 +1877,13 @@ void tcp_enter_loss(struct sock *sk, int how) } tcp_verify_left_out(tp); - tp->reordering = min_t(unsigned int, tp->reordering, - sysctl_tcp_reordering); + /* Timeout in disordered state after receiving substantial DUPACKs + * suggests that the degree of reordering is over-estimated. + */ + if (icsk->icsk_ca_state <= TCP_CA_Disorder && + tp->sacked_out >= sysctl_tcp_reordering) + tp->reordering = min_t(unsigned int, tp->reordering, + sysctl_tcp_reordering); tcp_set_ca_state(sk, TCP_CA_Loss); tp->high_seq = tp->snd_nxt; TCP_ECN_queue_cwr(tp); -- cgit v0.10.2 From 352a5f5fb3ad8f829cfd4248fe6119895bda881f Mon Sep 17 00:00:00 2001 From: Eric Lapuyade Date: Fri, 19 Jul 2013 14:57:55 +0200 Subject: NFC: netlink: Add result of firmware operation to completion event The result is added as an NFC_ATTR_FIRMWARE_DOWNLOAD_STATUS attribute containing the positive value of a standard errno code describing the completion result. This event will be sent when the firmware download operation is done and will contain the operation result.
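For illustration, a user-space listener for this completion event could look roughly like the sketch below; the "nfc" family and "events" multicast group names are assumptions not shown in this excerpt, and error handling is omitted:

#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nfc.h>

static int ev_cb(struct nl_msg *msg, void *arg)
{
	struct genlmsghdr *gnlh = nlmsg_data(nlmsg_hdr(msg));
	struct nlattr *attrs[NFC_ATTR_MAX + 1];

	if (gnlh->cmd != NFC_CMD_FW_DOWNLOAD)
		return NL_SKIP;

	genlmsg_parse(nlmsg_hdr(msg), 0, attrs, NFC_ATTR_MAX, NULL);
	if (attrs[NFC_ATTR_FIRMWARE_DOWNLOAD_STATUS])
		/* 0 on success, otherwise the positive errno value */
		printf("firmware download status: %u\n",
		       nla_get_u32(attrs[NFC_ATTR_FIRMWARE_DOWNLOAD_STATUS]));
	return NL_OK;
}

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	int grp;

	genl_connect(sk);
	grp = genl_ctrl_resolve_grp(sk, "nfc", "events"); /* assumed names */
	nl_socket_add_membership(sk, grp);
	nl_socket_disable_seq_check(sk);
	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, ev_cb, NULL);

	for (;;)
		nl_recvmsgs_default(sk);
	return 0;
}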
Signed-off-by: Eric Lapuyade Signed-off-by: Samuel Ortiz diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h index 1005955..f68ee68 100644 --- a/include/net/nfc/nfc.h +++ b/include/net/nfc/nfc.h @@ -224,7 +224,8 @@ int nfc_set_remote_general_bytes(struct nfc_dev *dev, u8 *gt, u8 gt_len); u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, size_t *gb_len); -int nfc_fw_download_done(struct nfc_dev *dev, const char *firmware_name); +int nfc_fw_download_done(struct nfc_dev *dev, const char *firmware_name, + u32 result); int nfc_targets_found(struct nfc_dev *dev, struct nfc_target *targets, int ntargets); diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h index 029921b..29bed72 100644 --- a/include/uapi/linux/nfc.h +++ b/include/uapi/linux/nfc.h @@ -146,6 +146,7 @@ enum nfc_commands { * @NFC_ATTR_FIRMWARE_NAME: Free format firmware version * @NFC_ATTR_SE_INDEX: Secure element index * @NFC_ATTR_SE_TYPE: Secure element type (UICC or EMBEDDED) + * @NFC_ATTR_FIRMWARE_DOWNLOAD_STATUS: Firmware download operation status */ enum nfc_attrs { NFC_ATTR_UNSPEC, @@ -172,6 +173,7 @@ enum nfc_attrs { NFC_ATTR_SE_INDEX, NFC_ATTR_SE_TYPE, NFC_ATTR_SE_AID, + NFC_ATTR_FIRMWARE_DOWNLOAD_STATUS, /* private: internal use only */ __NFC_ATTR_AFTER_LAST }; diff --git a/net/nfc/core.c b/net/nfc/core.c index aad7f8f..d252912 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -77,11 +77,19 @@ error: return rc; } -int nfc_fw_download_done(struct nfc_dev *dev, const char *firmware_name) +/** + * nfc_fw_download_done - inform that a firmware download was completed + * + * @dev: The nfc device to which firmware was downloaded + * @firmware_name: The firmware filename + * @result: The positive value of a standard errno value + */ +int nfc_fw_download_done(struct nfc_dev *dev, const char *firmware_name, + u32 result) { dev->fw_download_in_progress = false; - return nfc_genl_fw_download_done(dev, firmware_name); + return nfc_genl_fw_download_done(dev, firmware_name, result); } EXPORT_SYMBOL(nfc_fw_download_done); diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index 3b08ef9..68063b2 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -1114,7 +1114,8 @@ static int nfc_genl_fw_download(struct sk_buff *skb, struct genl_info *info) return rc; } -int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name) +int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name, + u32 result) { struct sk_buff *msg; void *hdr; @@ -1129,6 +1130,7 @@ int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name) goto free_msg; if (nla_put_string(msg, NFC_ATTR_FIRMWARE_NAME, firmware_name) || + nla_put_u32(msg, NFC_ATTR_FIRMWARE_DOWNLOAD_STATUS, result) || nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) goto nla_put_failure; diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h index 4e2e5a7..aaf606f 100644 --- a/net/nfc/nfc.h +++ b/net/nfc/nfc.h @@ -124,7 +124,8 @@ static inline void nfc_device_iter_exit(struct class_dev_iter *iter) } int nfc_fw_download(struct nfc_dev *dev, const char *firmware_name); -int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name); +int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name, + u32 result); int nfc_dev_up(struct nfc_dev *dev); -- cgit v0.10.2 From 8bd7fc89958c2f23a5c5d0113ff65713683041ea Mon Sep 17 00:00:00 2001 From: Eric Lapuyade Date: Fri, 19 Jul 2013 14:58:39 +0200 Subject: NFC: pn544: Add firmware operations hci ops The firmware operation callback is passed by the physical layer to the 
hci driver during probe. All the driver does is to store it and call it when the fw_upload hci ops is invoked. Signed-off-by: Eric Lapuyade Signed-off-by: Samuel Ortiz diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c index e09c898..017dc61 100644 --- a/drivers/nfc/pn544/i2c.c +++ b/drivers/nfc/pn544/i2c.c @@ -428,7 +428,7 @@ static int pn544_hci_i2c_probe(struct i2c_client *client, r = pn544_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME, PN544_I2C_FRAME_HEADROOM, PN544_I2C_FRAME_TAILROOM, - PN544_HCI_I2C_LLC_MAX_PAYLOAD, &phy->hdev); + PN544_HCI_I2C_LLC_MAX_PAYLOAD, NULL, &phy->hdev); if (r < 0) goto err_hci; diff --git a/drivers/nfc/pn544/mei.c b/drivers/nfc/pn544/mei.c index b5d3d18..ee67de5 100644 --- a/drivers/nfc/pn544/mei.c +++ b/drivers/nfc/pn544/mei.c @@ -45,7 +45,7 @@ static int pn544_mei_probe(struct mei_cl_device *device, r = pn544_hci_probe(phy, &mei_phy_ops, LLC_NOP_NAME, MEI_NFC_HEADER_SIZE, 0, MEI_NFC_MAX_HCI_PAYLOAD, - &phy->hdev); + NULL, &phy->hdev); if (r < 0) { nfc_mei_phy_free(phy); diff --git a/drivers/nfc/pn544/pn544.c b/drivers/nfc/pn544/pn544.c index 1d4b38c..078e62f 100644 --- a/drivers/nfc/pn544/pn544.c +++ b/drivers/nfc/pn544/pn544.c @@ -127,6 +127,8 @@ struct pn544_hci_info { int async_cb_type; data_exchange_cb_t async_cb; void *async_cb_context; + + fw_download_t fw_download; }; static int pn544_hci_open(struct nfc_hci_dev *hdev) @@ -779,6 +781,17 @@ exit: return r; } +static int pn544_hci_fw_download(struct nfc_hci_dev *hdev, + const char *firmware_name) +{ + struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev); + + if (info->fw_download == NULL) + return -ENOTSUPP; + + return info->fw_download(info->phy_id, firmware_name); +} + static struct nfc_hci_ops pn544_hci_ops = { .open = pn544_hci_open, .close = pn544_hci_close, @@ -793,11 +806,12 @@ static struct nfc_hci_ops pn544_hci_ops = { .tm_send = pn544_hci_tm_send, .check_presence = pn544_hci_check_presence, .event_received = pn544_hci_event_received, + .fw_download = pn544_hci_fw_download, }; int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name, int phy_headroom, int phy_tailroom, int phy_payload, - struct nfc_hci_dev **hdev) + fw_download_t fw_download, struct nfc_hci_dev **hdev) { struct pn544_hci_info *info; u32 protocols; @@ -813,6 +827,7 @@ int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name, info->phy_ops = phy_ops; info->phy_id = phy_id; + info->fw_download = fw_download; info->state = PN544_ST_COLD; mutex_init(&info->info_lock); diff --git a/drivers/nfc/pn544/pn544.h b/drivers/nfc/pn544/pn544.h index d689f0a..01020e5 100644 --- a/drivers/nfc/pn544/pn544.h +++ b/drivers/nfc/pn544/pn544.h @@ -27,9 +27,11 @@ #define PN544_HCI_MODE 0 #define PN544_FW_MODE 1 +typedef int (*fw_download_t)(void *context, const char *firmware_name); + int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name, int phy_headroom, int phy_tailroom, int phy_payload, - struct nfc_hci_dev **hdev); + fw_download_t fw_download, struct nfc_hci_dev **hdev); void pn544_hci_remove(struct nfc_hci_dev *hdev); #endif /* __LOCAL_PN544_H_ */ -- cgit v0.10.2 From 06c660340f1e142b607541ece3520fff3f5d2c39 Mon Sep 17 00:00:00 2001 From: Eric Lapuyade Date: Fri, 19 Jul 2013 14:59:45 +0200 Subject: NFC: pn544: i2c: Add firmware download implementation for pn544 The pn544 can enter a firmware update mode where firmware blobs can be pushed through the i2c line and flashed on the target. 
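The firmware image itself is treated as a sequence of blobs, each starting with a big-endian size and destination address and apparently terminated by a zero size, which is the layout the i2c code below walks through struct pn544_i2c_fw_blob. A rough user-space sketch of walking such an image, with a hypothetical file name, could be:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <arpa/inet.h>	/* ntohl */

int main(void)
{
	/* "pn544_fw.bin" is a hypothetical file name for this sketch only */
	FILE *f = fopen("pn544_fw.bin", "rb");
	uint8_t *buf;
	long size, off = 0;

	if (!f)
		return 1;
	fseek(f, 0, SEEK_END);
	size = ftell(f);
	rewind(f);
	buf = malloc(size);
	if (!buf || fread(buf, 1, size, f) != (size_t)size)
		return 1;

	while (off + 8 <= size) {
		uint32_t blob_size, dest;

		memcpy(&blob_size, buf + off, 4);
		memcpy(&dest, buf + off + 4, 4);
		blob_size = ntohl(blob_size);	/* fields are stored big-endian */
		dest = ntohl(dest);
		if (blob_size == 0)
			break;
		printf("blob of %u bytes for address 0x%06x\n", blob_size, dest);
		off += 8 + blob_size;
	}

	fclose(f);
	free(buf);
	return 0;
}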
A special command allows to verify that blobs are correctly flashed and this is what we do for every downloaded firmware blob. Signed-off-by: Eric Lapuyade Signed-off-by: Samuel Ortiz diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c index 017dc61..01e27d4 100644 --- a/drivers/nfc/pn544/i2c.c +++ b/drivers/nfc/pn544/i2c.c @@ -25,11 +25,14 @@ #include #include #include - +#include +#include +#include #include #include #include +#include #include "pn544.h" @@ -55,6 +58,58 @@ MODULE_DEVICE_TABLE(i2c, pn544_hci_i2c_id_table); #define PN544_HCI_I2C_DRIVER_NAME "pn544_hci_i2c" +#define PN544_FW_CMD_WRITE 0x08 +#define PN544_FW_CMD_CHECK 0x06 + +struct pn544_i2c_fw_frame_write { + u8 cmd; + u16 be_length; + u8 be_dest_addr[3]; + u16 be_datalen; + u8 data[]; +} __packed; + +struct pn544_i2c_fw_frame_check { + u8 cmd; + u16 be_length; + u8 be_start_addr[3]; + u16 be_datalen; + u16 be_crc; +} __packed; + +struct pn544_i2c_fw_frame_response { + u8 status; + u16 be_length; +} __packed; + +struct pn544_i2c_fw_blob { + u32 be_size; + u32 be_destaddr; + u8 data[]; +}; + +#define PN544_FW_CMD_RESULT_TIMEOUT 0x01 +#define PN544_FW_CMD_RESULT_BAD_CRC 0x02 +#define PN544_FW_CMD_RESULT_ACCESS_DENIED 0x08 +#define PN544_FW_CMD_RESULT_PROTOCOL_ERROR 0x0B +#define PN544_FW_CMD_RESULT_INVALID_PARAMETER 0x11 +#define PN544_FW_CMD_RESULT_INVALID_LENGTH 0x18 +#define PN544_FW_CMD_RESULT_WRITE_FAILED 0x74 + +#define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) + +#define PN544_FW_WRITE_BUFFER_MAX_LEN 0x9f7 +#define PN544_FW_I2C_MAX_PAYLOAD PN544_HCI_I2C_LLC_MAX_SIZE +#define PN544_FW_I2C_WRITE_FRAME_HEADER_LEN 8 +#define PN544_FW_I2C_WRITE_DATA_MAX_LEN MIN((PN544_FW_I2C_MAX_PAYLOAD -\ + PN544_FW_I2C_WRITE_FRAME_HEADER_LEN),\ + PN544_FW_WRITE_BUFFER_MAX_LEN) + +#define FW_WORK_STATE_IDLE 1 +#define FW_WORK_STATE_START 2 +#define FW_WORK_STATE_WAIT_WRITE_ANSWER 3 +#define FW_WORK_STATE_WAIT_CHECK_ANSWER 4 + struct pn544_i2c_phy { struct i2c_client *i2c_dev; struct nfc_hci_dev *hdev; @@ -64,6 +119,16 @@ struct pn544_i2c_phy { unsigned int gpio_fw; unsigned int en_polarity; + struct work_struct fw_work; + int fw_work_state; + char firmware_name[NFC_FIRMWARE_NAME_MAXSIZE + 1]; + const struct firmware *fw; + u32 fw_blob_dest_addr; + size_t fw_blob_size; + const u8 *fw_blob_data; + size_t fw_written; + int fw_cmd_result; + int powered; int run_mode; @@ -313,6 +378,42 @@ flush: return r; } +static int pn544_hci_i2c_fw_read_status(struct pn544_i2c_phy *phy) +{ + int r; + struct pn544_i2c_fw_frame_response response; + struct i2c_client *client = phy->i2c_dev; + + r = i2c_master_recv(client, (char *) &response, sizeof(response)); + if (r != sizeof(response)) { + dev_err(&client->dev, "cannot read fw status\n"); + return -EIO; + } + + usleep_range(3000, 6000); + + switch (response.status) { + case 0: + return 0; + case PN544_FW_CMD_RESULT_TIMEOUT: + return -ETIMEDOUT; + case PN544_FW_CMD_RESULT_BAD_CRC: + return -ENODATA; + case PN544_FW_CMD_RESULT_ACCESS_DENIED: + return -EACCES; + case PN544_FW_CMD_RESULT_PROTOCOL_ERROR: + return -EPROTO; + case PN544_FW_CMD_RESULT_INVALID_PARAMETER: + return -EINVAL; + case PN544_FW_CMD_RESULT_INVALID_LENGTH: + return -EBADMSG; + case PN544_FW_CMD_RESULT_WRITE_FAILED: + return -EIO; + default: + return -EIO; + } +} + /* * Reads an shdlc frame from the chip. This is not as straightforward as it * seems. There are cases where we could loose the frame start synchronization. 
@@ -347,19 +448,23 @@ static irqreturn_t pn544_hci_i2c_irq_thread_fn(int irq, void *phy_id) if (phy->hard_fault != 0) return IRQ_HANDLED; - r = pn544_hci_i2c_read(phy, &skb); - if (r == -EREMOTEIO) { - phy->hard_fault = r; - - nfc_hci_recv_frame(phy->hdev, NULL); + if (phy->run_mode == PN544_FW_MODE) { + phy->fw_cmd_result = pn544_hci_i2c_fw_read_status(phy); + schedule_work(&phy->fw_work); + } else { + r = pn544_hci_i2c_read(phy, &skb); + if (r == -EREMOTEIO) { + phy->hard_fault = r; - return IRQ_HANDLED; - } else if ((r == -ENOMEM) || (r == -EBADMSG)) { - return IRQ_HANDLED; - } + nfc_hci_recv_frame(phy->hdev, NULL); - nfc_hci_recv_frame(phy->hdev, skb); + return IRQ_HANDLED; + } else if ((r == -ENOMEM) || (r == -EBADMSG)) { + return IRQ_HANDLED; + } + nfc_hci_recv_frame(phy->hdev, skb); + } return IRQ_HANDLED; } @@ -369,6 +474,215 @@ static struct nfc_phy_ops i2c_phy_ops = { .disable = pn544_hci_i2c_disable, }; +static int pn544_hci_i2c_fw_download(void *phy_id, const char *firmware_name) +{ + struct pn544_i2c_phy *phy = phy_id; + + pr_info(DRIVER_DESC ": Starting Firmware Download (%s)\n", + firmware_name); + + strcpy(phy->firmware_name, firmware_name); + + phy->fw_work_state = FW_WORK_STATE_START; + + schedule_work(&phy->fw_work); + + return 0; +} + +static void pn544_hci_i2c_fw_work_complete(struct pn544_i2c_phy *phy, + int result) +{ + pr_info(DRIVER_DESC ": Firmware Download Complete, result=%d\n", result); + + pn544_hci_i2c_disable(phy); + + phy->fw_work_state = FW_WORK_STATE_IDLE; + + if (phy->fw) { + release_firmware(phy->fw); + phy->fw = NULL; + } + + nfc_fw_download_done(phy->hdev->ndev, phy->firmware_name, (u32) -result); +} + +static int pn544_hci_i2c_fw_write_cmd(struct i2c_client *client, u32 dest_addr, + const u8 *data, u16 datalen) +{ + u8 frame[PN544_FW_I2C_MAX_PAYLOAD]; + struct pn544_i2c_fw_frame_write *framep; + u16 params_len; + int framelen; + int r; + + if (datalen > PN544_FW_I2C_WRITE_DATA_MAX_LEN) + datalen = PN544_FW_I2C_WRITE_DATA_MAX_LEN; + + framep = (struct pn544_i2c_fw_frame_write *) frame; + + params_len = sizeof(framep->be_dest_addr) + + sizeof(framep->be_datalen) + datalen; + framelen = params_len + sizeof(framep->cmd) + + sizeof(framep->be_length); + + framep->cmd = PN544_FW_CMD_WRITE; + + put_unaligned_be16(params_len, &framep->be_length); + + framep->be_dest_addr[0] = (dest_addr & 0xff0000) >> 16; + framep->be_dest_addr[1] = (dest_addr & 0xff00) >> 8; + framep->be_dest_addr[2] = dest_addr & 0xff; + + put_unaligned_be16(datalen, &framep->be_datalen); + + memcpy(framep->data, data, datalen); + + r = i2c_master_send(client, frame, framelen); + + if (r == framelen) + return datalen; + else if (r < 0) + return r; + else + return -EIO; +} + +static int pn544_hci_i2c_fw_check_cmd(struct i2c_client *client, u32 start_addr, + const u8 *data, u16 datalen) +{ + struct pn544_i2c_fw_frame_check frame; + int r; + u16 crc; + + /* calculate local crc for the data we want to check */ + crc = crc_ccitt(0xffff, data, datalen); + + frame.cmd = PN544_FW_CMD_CHECK; + + put_unaligned_be16(sizeof(frame.be_start_addr) + + sizeof(frame.be_datalen) + sizeof(frame.be_crc), + &frame.be_length); + + /* tell the chip the memory region to which our crc applies */ + frame.be_start_addr[0] = (start_addr & 0xff0000) >> 16; + frame.be_start_addr[1] = (start_addr & 0xff00) >> 8; + frame.be_start_addr[2] = start_addr & 0xff; + + put_unaligned_be16(datalen, &frame.be_datalen); + + /* + * and give our local crc. Chip will calculate its own crc for the + * region and compare with ours. 
+ */ + put_unaligned_be16(crc, &frame.be_crc); + + r = i2c_master_send(client, (const char *) &frame, sizeof(frame)); + + if (r == sizeof(frame)) + return 0; + else if (r < 0) + return r; + else + return -EIO; +} + +static int pn544_hci_i2c_fw_write_chunk(struct pn544_i2c_phy *phy) +{ + int r; + + r = pn544_hci_i2c_fw_write_cmd(phy->i2c_dev, + phy->fw_blob_dest_addr + phy->fw_written, + phy->fw_blob_data + phy->fw_written, + phy->fw_blob_size - phy->fw_written); + if (r < 0) + return r; + + phy->fw_written += r; + phy->fw_work_state = FW_WORK_STATE_WAIT_WRITE_ANSWER; + + return 0; +} + +static void pn544_hci_i2c_fw_work(struct work_struct *work) +{ + struct pn544_i2c_phy *phy = container_of(work, struct pn544_i2c_phy, + fw_work); + int r; + struct pn544_i2c_fw_blob *blob; + + switch (phy->fw_work_state) { + case FW_WORK_STATE_START: + pn544_hci_i2c_enable_mode(phy, PN544_FW_MODE); + + r = request_firmware(&phy->fw, phy->firmware_name, + &phy->i2c_dev->dev); + if (r < 0) + goto exit_state_start; + + blob = (struct pn544_i2c_fw_blob *) phy->fw->data; + phy->fw_blob_size = get_unaligned_be32(&blob->be_size); + phy->fw_blob_dest_addr = get_unaligned_be32(&blob->be_destaddr); + phy->fw_blob_data = blob->data; + + phy->fw_written = 0; + r = pn544_hci_i2c_fw_write_chunk(phy); + +exit_state_start: + if (r < 0) + pn544_hci_i2c_fw_work_complete(phy, r); + break; + + case FW_WORK_STATE_WAIT_WRITE_ANSWER: + r = phy->fw_cmd_result; + if (r < 0) + goto exit_state_wait_write_answer; + + if (phy->fw_written == phy->fw_blob_size) { + r = pn544_hci_i2c_fw_check_cmd(phy->i2c_dev, + phy->fw_blob_dest_addr, + phy->fw_blob_data, + phy->fw_blob_size); + if (r < 0) + goto exit_state_wait_write_answer; + phy->fw_work_state = FW_WORK_STATE_WAIT_CHECK_ANSWER; + break; + } + + r = pn544_hci_i2c_fw_write_chunk(phy); + +exit_state_wait_write_answer: + if (r < 0) + pn544_hci_i2c_fw_work_complete(phy, r); + break; + + case FW_WORK_STATE_WAIT_CHECK_ANSWER: + r = phy->fw_cmd_result; + if (r < 0) + goto exit_state_wait_check_answer; + + blob = (struct pn544_i2c_fw_blob *) (phy->fw_blob_data + + phy->fw_blob_size); + phy->fw_blob_size = get_unaligned_be32(&blob->be_size); + if (phy->fw_blob_size != 0) { + phy->fw_blob_dest_addr = + get_unaligned_be32(&blob->be_destaddr); + phy->fw_blob_data = blob->data; + + phy->fw_written = 0; + r = pn544_hci_i2c_fw_write_chunk(phy); + } + +exit_state_wait_check_answer: + if (r < 0 || phy->fw_blob_size == 0) + pn544_hci_i2c_fw_work_complete(phy, r); + break; + + default: + break; + } +} + static int pn544_hci_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -392,6 +706,9 @@ static int pn544_hci_i2c_probe(struct i2c_client *client, return -ENOMEM; } + INIT_WORK(&phy->fw_work, pn544_hci_i2c_fw_work); + phy->fw_work_state = FW_WORK_STATE_IDLE; + phy->i2c_dev = client; i2c_set_clientdata(client, phy); @@ -428,7 +745,8 @@ static int pn544_hci_i2c_probe(struct i2c_client *client, r = pn544_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME, PN544_I2C_FRAME_HEADROOM, PN544_I2C_FRAME_TAILROOM, - PN544_HCI_I2C_LLC_MAX_PAYLOAD, NULL, &phy->hdev); + PN544_HCI_I2C_LLC_MAX_PAYLOAD, + pn544_hci_i2c_fw_download, &phy->hdev); if (r < 0) goto err_hci; @@ -451,6 +769,10 @@ static int pn544_hci_i2c_remove(struct i2c_client *client) dev_dbg(&client->dev, "%s\n", __func__); + cancel_work_sync(&phy->fw_work); + if (phy->fw_work_state != FW_WORK_STATE_IDLE) + pn544_hci_i2c_fw_work_complete(phy, -ENODEV); + pn544_hci_remove(phy->hdev); if (phy->powered) -- cgit v0.10.2 From 
4eba11e82a0365117be92453c5c91a263500fd1a Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 18 Jun 2013 01:48:26 +0300 Subject: NFC: hci: Fix enable/disable confusion There is a cut and paste bug so we enable a second time instead of disabling. Signed-off-by: Dan Carpenter Signed-off-by: Samuel Ortiz diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c index fe66908..d07ca4c 100644 --- a/net/nfc/hci/core.c +++ b/net/nfc/hci/core.c @@ -717,7 +717,7 @@ static int hci_disable_se(struct nfc_dev *nfc_dev, u32 se_idx) struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); if (hdev->ops->disable_se) - return hdev->ops->enable_se(hdev, se_idx); + return hdev->ops->disable_se(hdev, se_idx); return 0; } -- cgit v0.10.2 From 2c3832834b95e0226da1d13229472978f78462c5 Mon Sep 17 00:00:00 2001 From: Arron Wang Date: Tue, 30 Jul 2013 14:35:35 +0200 Subject: NFC: Fix secure element state check Another typo from the initial commit where we check for the secure element type field instead of its state when enabling or disabling it. Signed-off-by: Arron Wang Signed-off-by: Samuel Ortiz diff --git a/net/nfc/core.c b/net/nfc/core.c index d252912..ee1fe66 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -583,7 +583,7 @@ int nfc_enable_se(struct nfc_dev *dev, u32 se_idx) goto error; } - if (se->type == NFC_SE_ENABLED) { + if (se->state == NFC_SE_ENABLED) { rc = -EALREADY; goto error; } @@ -626,7 +626,7 @@ int nfc_disable_se(struct nfc_dev *dev, u32 se_idx) goto error; } - if (se->type == NFC_SE_DISABLED) { + if (se->state == NFC_SE_DISABLED) { rc = -EALREADY; goto error; } -- cgit v0.10.2 From 39525ee1dc78ca1f5f2fb1f764f7a141005fe440 Mon Sep 17 00:00:00 2001 From: Arron Wang Date: Tue, 30 Jul 2013 14:40:05 +0200 Subject: NFC: Update secure element state The secure element state was not updated from the enable/disable ops, leaving the SE state to disabled for ever. Signed-off-by: Arron Wang Signed-off-by: Samuel Ortiz diff --git a/net/nfc/core.c b/net/nfc/core.c index ee1fe66..e92923c 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -589,6 +589,8 @@ int nfc_enable_se(struct nfc_dev *dev, u32 se_idx) } rc = dev->ops->enable_se(dev, se_idx); + if (rc >= 0) + se->state = NFC_SE_ENABLED; error: device_unlock(&dev->dev); @@ -632,6 +634,8 @@ int nfc_disable_se(struct nfc_dev *dev, u32 se_idx) } rc = dev->ops->disable_se(dev, se_idx); + if (rc >= 0) + se->state = NFC_SE_DISABLED; error: device_unlock(&dev->dev); -- cgit v0.10.2 From fc4eba58b4c1462ff3d6247b66fb47d6928db6d2 Mon Sep 17 00:00:00 2001 From: Hannes Frederic Sowa Date: Wed, 14 Aug 2013 01:03:46 +0200 Subject: ipv6: make unsolicited report intervals configurable for mld Commit cab70040dfd95ee32144f02fade64f0cb94f31a0 ("net: igmp: Reduce Unsolicited report interval to 1s when using IGMPv3") and 2690048c01f32bf45d1c1e1ab3079bc10ad2aea7 ("net: igmp: Allow user-space configuration of igmp unsolicited report interval") by William Manley made igmp unsolicited report intervals configurable per interface and corrected the interval of unsolicited igmpv3 report messages resendings to 1s. Same needs to be done for IPv6: MLDv1 (RFC2710 7.10.): 10 seconds MLDv2 (RFC3810 9.11.): 1 second Both intervals are configurable via new procfs knobs mldv1_unsolicited_report_interval and mldv2_unsolicited_report_interval. (also added .force_mld_version to ipv6_devconf_dflt to bring structs in line without semantic changes) v2: a) Joined documentation update for IPv4 and IPv6 MLD/IGMP unsolicited_report_interval procfs knobs. 
b) incorporate stylistic feedback from William Manley v3: a) add new DEVCONF_* values to the end of the enum (thanks to David Miller) Cc: Cong Wang Cc: William Manley Cc: Benjamin LaHaise Cc: YOSHIFUJI Hideaki Signed-off-by: Hannes Frederic Sowa Signed-off-by: David S. Miller diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 36be26b..debfe85 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -1039,7 +1039,15 @@ disable_policy - BOOLEAN disable_xfrm - BOOLEAN Disable IPSEC encryption on this interface, whatever the policy +igmpv2_unsolicited_report_interval - INTEGER + The interval in milliseconds in which the next unsolicited + IGMPv1 or IGMPv2 report retransmit will take place. + Default: 10000 (10 seconds) +igmpv3_unsolicited_report_interval - INTEGER + The interval in milliseconds in which the next unsolicited + IGMPv3 report retransmit will take place. + Default: 1000 (1 seconds) tag - INTEGER Allows you to write a number, which can be used as required. @@ -1331,6 +1339,16 @@ ndisc_notify - BOOLEAN 1 - Generate unsolicited neighbour advertisements when device is brought up or hardware address changes. +mldv1_unsolicited_report_interval - INTEGER + The interval in milliseconds in which the next unsolicited + MLDv1 report retransmit will take place. + Default: 10000 (10 seconds) + +mldv2_unsolicited_report_interval - INTEGER + The interval in milliseconds in which the next unsolicited + MLDv2 report retransmit will take place. + Default: 1000 (1 second) + icmp/*: ratelimit - INTEGER Limit the maximal rates for sending ICMPv6 packets. diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 850e95b..77a4784 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -19,6 +19,8 @@ struct ipv6_devconf { __s32 rtr_solicit_interval; __s32 rtr_solicit_delay; __s32 force_mld_version; + __s32 mldv1_unsolicited_report_interval; + __s32 mldv2_unsolicited_report_interval; #ifdef CONFIG_IPV6_PRIVACY __s32 use_tempaddr; __s32 temp_valid_lft; diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h index 4bda4cf..d07ac69 100644 --- a/include/uapi/linux/ipv6.h +++ b/include/uapi/linux/ipv6.h @@ -160,6 +160,8 @@ enum { DEVCONF_ACCEPT_DAD, DEVCONF_FORCE_TLLAO, DEVCONF_NDISC_NOTIFY, + DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL, + DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL, DEVCONF_MAX }; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 7fd8572..ad12f7c 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -177,6 +177,8 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = { .accept_redirects = 1, .autoconf = 1, .force_mld_version = 0, + .mldv1_unsolicited_report_interval = 10 * HZ, + .mldv2_unsolicited_report_interval = HZ, .dad_transmits = 1, .rtr_solicits = MAX_RTR_SOLICITATIONS, .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL, @@ -211,6 +213,9 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { .accept_ra = 1, .accept_redirects = 1, .autoconf = 1, + .force_mld_version = 0, + .mldv1_unsolicited_report_interval = 10 * HZ, + .mldv2_unsolicited_report_interval = HZ, .dad_transmits = 1, .rtr_solicits = MAX_RTR_SOLICITATIONS, .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL, @@ -4179,6 +4184,10 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf, array[DEVCONF_RTR_SOLICIT_DELAY] = jiffies_to_msecs(cnf->rtr_solicit_delay); array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version; + array[DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL] = + 
jiffies_to_msecs(cnf->mldv1_unsolicited_report_interval); + array[DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL] = + jiffies_to_msecs(cnf->mldv2_unsolicited_report_interval); #ifdef CONFIG_IPV6_PRIVACY array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr; array[DEVCONF_TEMP_VALID_LFT] = cnf->temp_valid_lft; @@ -4862,6 +4871,22 @@ static struct addrconf_sysctl_table .mode = 0644, .proc_handler = proc_dointvec, }, + { + .procname = "mldv1_unsolicited_report_interval", + .data = + &ipv6_devconf.mldv1_unsolicited_report_interval, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_ms_jiffies, + }, + { + .procname = "mldv2_unsolicited_report_interval", + .data = + &ipv6_devconf.mldv2_unsolicited_report_interval, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_ms_jiffies, + }, #ifdef CONFIG_IPV6_PRIVACY { .procname = "use_tempaddr", diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index db25b8e..6c76df9 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -108,7 +108,6 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml, struct inet6_dev *idev); -#define IGMP6_UNSOLICITED_IVAL (10*HZ) #define MLD_QRV_DEFAULT 2 #define MLD_V1_SEEN(idev) (dev_net((idev)->dev)->ipv6.devconf_all->force_mld_version == 1 || \ @@ -129,6 +128,18 @@ int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF; pmc != NULL; \ pmc = rcu_dereference(pmc->next)) +static int unsolicited_report_interval(struct inet6_dev *idev) +{ + int iv; + + if (MLD_V1_SEEN(idev)) + iv = idev->cnf.mldv1_unsolicited_report_interval; + else + iv = idev->cnf.mldv2_unsolicited_report_interval; + + return iv > 0 ? iv : 1; +} + int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) { struct net_device *dev = NULL; @@ -2158,7 +2169,7 @@ static void igmp6_join_group(struct ifmcaddr6 *ma) igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT); - delay = net_random() % IGMP6_UNSOLICITED_IVAL; + delay = net_random() % unsolicited_report_interval(ma->idev); spin_lock_bh(&ma->mca_lock); if (del_timer(&ma->mca_timer)) { @@ -2325,7 +2336,7 @@ void ipv6_mc_init_dev(struct inet6_dev *idev) setup_timer(&idev->mc_dad_timer, mld_dad_timer_expire, (unsigned long)idev); idev->mc_qrv = MLD_QRV_DEFAULT; - idev->mc_maxdelay = IGMP6_UNSOLICITED_IVAL; + idev->mc_maxdelay = unsolicited_report_interval(idev); idev->mc_v1_seen = 0; write_unlock_bh(&idev->lock); } -- cgit v0.10.2 From f0c03956ac40fdc4fbce7bed262ae74ac372e765 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Tue, 13 Aug 2013 15:05:38 +0200 Subject: netfilter: export xt_rpfilter.h to userland This file contains the API for the match "rpfilter", hence it should be exported to userland. 
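As a rough illustration of what exporting the header enables (not part of the patch itself): once <linux/netfilter/xt_rpfilter.h> is visible to userspace, an iptables extension or a test program can fill the match payload directly from the UAPI definitions. The snippet below is only a sketch; the libxtables registration and option parsing a real extension needs are assumed and omitted, and the flag combination shown is just an example.

    #include <stdio.h>
    #include <linux/types.h>
    #include <linux/netfilter/xt_rpfilter.h>

    int main(void)
    {
            /* Roughly what a rule such as "-m rpfilter --loose --invert"
             * would encode in its match data, using only the flag bits
             * defined in the now-exported header.
             */
            struct xt_rpfilter_info info = { 0 };

            info.flags = XT_RPFILTER_LOOSE | XT_RPFILTER_INVERT;

            printf("rpfilter flags: 0x%02x\n", info.flags);
            return 0;
    }
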
Signed-off-by: Nicolas Dichtel Signed-off-by: Pablo Neira Ayuso diff --git a/include/linux/netfilter/xt_rpfilter.h b/include/linux/netfilter/xt_rpfilter.h deleted file mode 100644 index 8358d4f..0000000 --- a/include/linux/netfilter/xt_rpfilter.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef _XT_RPATH_H -#define _XT_RPATH_H - -#include - -enum { - XT_RPFILTER_LOOSE = 1 << 0, - XT_RPFILTER_VALID_MARK = 1 << 1, - XT_RPFILTER_ACCEPT_LOCAL = 1 << 2, - XT_RPFILTER_INVERT = 1 << 3, -#ifdef __KERNEL__ - XT_RPFILTER_OPTION_MASK = XT_RPFILTER_LOOSE | - XT_RPFILTER_VALID_MARK | - XT_RPFILTER_ACCEPT_LOCAL | - XT_RPFILTER_INVERT, -#endif -}; - -struct xt_rpfilter_info { - __u8 flags; -}; - -#endif diff --git a/include/uapi/linux/netfilter/Kbuild b/include/uapi/linux/netfilter/Kbuild index 4111577..dc00927 100644 --- a/include/uapi/linux/netfilter/Kbuild +++ b/include/uapi/linux/netfilter/Kbuild @@ -68,6 +68,7 @@ header-y += xt_quota.h header-y += xt_rateest.h header-y += xt_realm.h header-y += xt_recent.h +header-y += xt_rpfilter.h header-y += xt_sctp.h header-y += xt_set.h header-y += xt_socket.h diff --git a/include/uapi/linux/netfilter/xt_rpfilter.h b/include/uapi/linux/netfilter/xt_rpfilter.h new file mode 100644 index 0000000..8358d4f --- /dev/null +++ b/include/uapi/linux/netfilter/xt_rpfilter.h @@ -0,0 +1,23 @@ +#ifndef _XT_RPATH_H +#define _XT_RPATH_H + +#include + +enum { + XT_RPFILTER_LOOSE = 1 << 0, + XT_RPFILTER_VALID_MARK = 1 << 1, + XT_RPFILTER_ACCEPT_LOCAL = 1 << 2, + XT_RPFILTER_INVERT = 1 << 3, +#ifdef __KERNEL__ + XT_RPFILTER_OPTION_MASK = XT_RPFILTER_LOOSE | + XT_RPFILTER_VALID_MARK | + XT_RPFILTER_ACCEPT_LOCAL | + XT_RPFILTER_INVERT, +#endif +}; + +struct xt_rpfilter_info { + __u8 flags; +}; + +#endif -- cgit v0.10.2 From 38c67328ac79cb9eaf61b5d4750fe3b9cff0dd15 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Tue, 13 Aug 2013 15:05:39 +0200 Subject: netfilter: export xt_HMARK.h to userland This file contains the API for the target "HMARK", hence it should be exported to userland. 
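Similarly, purely as an illustrative sketch (not part of the patch): with <linux/netfilter/xt_HMARK.h> exported, userspace can populate the target info straight from the UAPI definitions. The scenario below (hash on a /24-masked source address and spread the resulting marks over 8 values starting at offset 0x100) is invented for the example, and the libxtables glue a real HMARK target library needs is omitted.

    #include <stdio.h>
    #include <arpa/inet.h>
    #include <linux/netfilter.h>            /* union nf_inet_addr */
    #include <linux/netfilter/xt_HMARK.h>

    int main(void)
    {
            struct xt_hmark_info info = { 0 };

            /* Mask the source address to /24 and hash on it;
             * the flag bits record which fields were set.
             */
            info.src_mask.ip = htonl(0xffffff00);
            info.flags |= XT_HMARK_FLAG(XT_HMARK_SADDR_MASK) |
                          XT_HMARK_FLAG(XT_HMARK_MODULUS) |
                          XT_HMARK_FLAG(XT_HMARK_OFFSET);
            info.hmodulus = 8;      /* marks cycle over 8 buckets */
            info.hoffset = 0x100;   /* first mark value to use */

            printf("hmark flags: 0x%08x\n", info.flags);
            return 0;
    }
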
Signed-off-by: Nicolas Dichtel Signed-off-by: Pablo Neira Ayuso diff --git a/include/linux/netfilter/xt_HMARK.h b/include/linux/netfilter/xt_HMARK.h deleted file mode 100644 index 826fc58..0000000 --- a/include/linux/netfilter/xt_HMARK.h +++ /dev/null @@ -1,50 +0,0 @@ -#ifndef XT_HMARK_H_ -#define XT_HMARK_H_ - -#include - -enum { - XT_HMARK_SADDR_MASK, - XT_HMARK_DADDR_MASK, - XT_HMARK_SPI, - XT_HMARK_SPI_MASK, - XT_HMARK_SPORT, - XT_HMARK_DPORT, - XT_HMARK_SPORT_MASK, - XT_HMARK_DPORT_MASK, - XT_HMARK_PROTO_MASK, - XT_HMARK_RND, - XT_HMARK_MODULUS, - XT_HMARK_OFFSET, - XT_HMARK_CT, - XT_HMARK_METHOD_L3, - XT_HMARK_METHOD_L3_4, -}; -#define XT_HMARK_FLAG(flag) (1 << flag) - -union hmark_ports { - struct { - __u16 src; - __u16 dst; - } p16; - struct { - __be16 src; - __be16 dst; - } b16; - __u32 v32; - __be32 b32; -}; - -struct xt_hmark_info { - union nf_inet_addr src_mask; - union nf_inet_addr dst_mask; - union hmark_ports port_mask; - union hmark_ports port_set; - __u32 flags; - __u16 proto_mask; - __u32 hashrnd; - __u32 hmodulus; - __u32 hoffset; /* Mark offset to start from */ -}; - -#endif /* XT_HMARK_H_ */ diff --git a/include/uapi/linux/netfilter/Kbuild b/include/uapi/linux/netfilter/Kbuild index dc00927..1749154 100644 --- a/include/uapi/linux/netfilter/Kbuild +++ b/include/uapi/linux/netfilter/Kbuild @@ -22,6 +22,7 @@ header-y += xt_CONNMARK.h header-y += xt_CONNSECMARK.h header-y += xt_CT.h header-y += xt_DSCP.h +header-y += xt_HMARK.h header-y += xt_IDLETIMER.h header-y += xt_LED.h header-y += xt_LOG.h diff --git a/include/uapi/linux/netfilter/xt_HMARK.h b/include/uapi/linux/netfilter/xt_HMARK.h new file mode 100644 index 0000000..826fc58 --- /dev/null +++ b/include/uapi/linux/netfilter/xt_HMARK.h @@ -0,0 +1,50 @@ +#ifndef XT_HMARK_H_ +#define XT_HMARK_H_ + +#include + +enum { + XT_HMARK_SADDR_MASK, + XT_HMARK_DADDR_MASK, + XT_HMARK_SPI, + XT_HMARK_SPI_MASK, + XT_HMARK_SPORT, + XT_HMARK_DPORT, + XT_HMARK_SPORT_MASK, + XT_HMARK_DPORT_MASK, + XT_HMARK_PROTO_MASK, + XT_HMARK_RND, + XT_HMARK_MODULUS, + XT_HMARK_OFFSET, + XT_HMARK_CT, + XT_HMARK_METHOD_L3, + XT_HMARK_METHOD_L3_4, +}; +#define XT_HMARK_FLAG(flag) (1 << flag) + +union hmark_ports { + struct { + __u16 src; + __u16 dst; + } p16; + struct { + __be16 src; + __be16 dst; + } b16; + __u32 v32; + __be32 b32; +}; + +struct xt_hmark_info { + union nf_inet_addr src_mask; + union nf_inet_addr dst_mask; + union hmark_ports port_mask; + union hmark_ports port_set; + __u32 flags; + __u16 proto_mask; + __u32 hashrnd; + __u32 hmodulus; + __u32 hoffset; /* Mark offset to start from */ +}; + +#endif /* XT_HMARK_H_ */ -- cgit v0.10.2 From 64261f230a9157f5f520ce30ec6827d679375e2f Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Tue, 13 Aug 2013 17:51:09 +0200 Subject: dev: move skb_scrub_packet() after eth_type_trans() skb_scrub_packet() was called before eth_type_trans() to let eth_type_trans() set pkt_type. In fact, we should force pkt_type to PACKET_HOST, so move the call after eth_type_trans(). Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller diff --git a/net/core/dev.c b/net/core/dev.c index 58eb802..1ed2b66 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1691,13 +1691,13 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) kfree_skb(skb); return NET_RX_DROP; } - skb_scrub_packet(skb); skb->protocol = eth_type_trans(skb, dev); /* eth_type_trans() can set pkt_type. 
- * clear pkt_type _after_ calling eth_type_trans() + * call skb_scrub_packet() after it to clear pkt_type _after_ calling + * eth_type_trans(). */ - skb->pkt_type = PACKET_HOST; + skb_scrub_packet(skb); return netif_rx(skb); } diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 9fdf8a6..fbc1094 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -454,15 +454,16 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, tstats->rx_bytes += skb->len; u64_stats_update_end(&tstats->syncp); - if (tunnel->net != dev_net(tunnel->dev)) - skb_scrub_packet(skb); - if (tunnel->dev->type == ARPHRD_ETHER) { skb->protocol = eth_type_trans(skb, tunnel->dev); skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); } else { skb->dev = tunnel->dev; } + + if (tunnel->net != dev_net(tunnel->dev)) + skb_scrub_packet(skb); + gro_cells_receive(&tunnel->gro_cells, skb); return 0; -- cgit v0.10.2 From fc8f999daa10a5c5c19562b88be96a9b2bacd9b7 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Tue, 13 Aug 2013 17:51:10 +0200 Subject: ipv4 tunnels: use net_eq() helper to check netns It's better to use available helpers for these tests. Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index fbc1094..a351a00 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -461,7 +461,7 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, skb->dev = tunnel->dev; } - if (tunnel->net != dev_net(tunnel->dev)) + if (!net_eq(tunnel->net, dev_net(tunnel->dev))) skb_scrub_packet(skb); gro_cells_receive(&tunnel->gro_cells, skb); @@ -614,7 +614,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, goto tx_error; } - if (tunnel->net != dev_net(dev)) + if (!net_eq(tunnel->net, dev_net(dev))) skb_scrub_packet(skb); if (tunnel->err_count > 0) { diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index a3437a4..f18f842 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -621,7 +621,7 @@ static int ipip6_rcv(struct sk_buff *skb) tstats->rx_packets++; tstats->rx_bytes += skb->len; - if (tunnel->net != dev_net(tunnel->dev)) + if (!net_eq(tunnel->net, dev_net(tunnel->dev))) skb_scrub_packet(skb); netif_rx(skb); @@ -860,7 +860,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, tunnel->err_count = 0; } - if (tunnel->net != dev_net(dev)) + if (!net_eq(tunnel->net, dev_net(dev))) skb_scrub_packet(skb); /* @@ -1589,7 +1589,7 @@ static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_hea /* If dev is in the same netns, it has already * been added to the list by the previous loop. */ - if (dev_net(t->dev) != net) + if (!net_eq(dev_net(t->dev), net)) unregister_netdevice_queue(t->dev, head); t = rtnl_dereference(t->next); -- cgit v0.10.2 From 6c742e714d8c282fd8f8b22d3e20b5141738c1ee Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Tue, 13 Aug 2013 17:51:11 +0200 Subject: ipip: add x-netns support This patch allows to switch the netns when packet is encapsulated or decapsulated. In other word, the encapsulated packet is received in a netns, where the lookup is done to find the tunnel. Once the tunnel is found, the packet is decapsulated and injecting into the corresponding interface which stands to another netns. When one of the two netns is removed, the tunnel is destroyed. Signed-off-by: Nicolas Dichtel Signed-off-by: David S. 
Miller diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index c6acd9f..5a76f2b 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -102,7 +102,7 @@ void ip_tunnel_dellink(struct net_device *dev, struct list_head *head); int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, struct rtnl_link_ops *ops, char *devname); -void ip_tunnel_delete_net(struct ip_tunnel_net *itn); +void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops); void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, const struct iphdr *tnl_params, const u8 protocol); diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 1f6eab6..bc3a765 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -534,7 +534,7 @@ static int __net_init ipgre_init_net(struct net *net) static void __net_exit ipgre_exit_net(struct net *net) { struct ip_tunnel_net *itn = net_generic(net, ipgre_net_id); - ip_tunnel_delete_net(itn); + ip_tunnel_delete_net(itn, &ipgre_link_ops); } static struct pernet_operations ipgre_net_ops = { @@ -767,7 +767,7 @@ static int __net_init ipgre_tap_init_net(struct net *net) static void __net_exit ipgre_tap_exit_net(struct net *net) { struct ip_tunnel_net *itn = net_generic(net, gre_tap_net_id); - ip_tunnel_delete_net(itn); + ip_tunnel_delete_net(itn, &ipgre_tap_ops); } static struct pernet_operations ipgre_tap_net_ops = { diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index a351a00..a4d9126 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -350,7 +350,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev) struct flowi4 fl4; struct rtable *rt; - rt = ip_route_output_tunnel(dev_net(dev), &fl4, + rt = ip_route_output_tunnel(tunnel->net, &fl4, tunnel->parms.iph.protocol, iph->daddr, iph->saddr, tunnel->parms.o_key, @@ -365,7 +365,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev) } if (!tdev && tunnel->parms.link) - tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link); + tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link); if (tdev) { hlen = tdev->hard_header_len + tdev->needed_headroom; @@ -654,7 +654,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, } } - err = iptunnel_xmit(dev_net(dev), rt, skb, + err = iptunnel_xmit(tunnel->net, rt, skb, fl4.saddr, fl4.daddr, protocol, ip_tunnel_ecn_encap(tos, inner_iph, skb), ttl, df); iptunnel_xmit_stats(err, &dev->stats, dev->tstats); @@ -821,11 +821,10 @@ static void ip_tunnel_dev_free(struct net_device *dev) void ip_tunnel_dellink(struct net_device *dev, struct list_head *head) { - struct net *net = dev_net(dev); struct ip_tunnel *tunnel = netdev_priv(dev); struct ip_tunnel_net *itn; - itn = net_generic(net, tunnel->ip_tnl_net_id); + itn = net_generic(tunnel->net, tunnel->ip_tnl_net_id); if (itn->fb_tunnel_dev != dev) { ip_tunnel_del(netdev_priv(dev)); @@ -855,6 +854,10 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, rtnl_lock(); itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms); + /* FB netdevice is special: we have one, and only one per netns. + * Allowing to move it to another netns is clearly unsafe. 
+ */ + itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL; rtnl_unlock(); if (IS_ERR(itn->fb_tunnel_dev)) @@ -864,28 +867,39 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, } EXPORT_SYMBOL_GPL(ip_tunnel_init_net); -static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head) +static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head, + struct rtnl_link_ops *ops) { + struct net *net = dev_net(itn->fb_tunnel_dev); + struct net_device *dev, *aux; int h; + for_each_netdev_safe(net, dev, aux) + if (dev->rtnl_link_ops == ops) + unregister_netdevice_queue(dev, head); + for (h = 0; h < IP_TNL_HASH_SIZE; h++) { struct ip_tunnel *t; struct hlist_node *n; struct hlist_head *thead = &itn->tunnels[h]; hlist_for_each_entry_safe(t, n, thead, hash_node) - unregister_netdevice_queue(t->dev, head); + /* If dev is in the same netns, it has already + * been added to the list by the previous loop. + */ + if (!net_eq(dev_net(t->dev), net)) + unregister_netdevice_queue(t->dev, head); } if (itn->fb_tunnel_dev) unregister_netdevice_queue(itn->fb_tunnel_dev, head); } -void ip_tunnel_delete_net(struct ip_tunnel_net *itn) +void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops) { LIST_HEAD(list); rtnl_lock(); - ip_tunnel_destroy(itn, &list); + ip_tunnel_destroy(itn, &list, ops); unregister_netdevice_many(&list); rtnl_unlock(); } @@ -929,23 +943,21 @@ EXPORT_SYMBOL_GPL(ip_tunnel_newlink); int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[], struct ip_tunnel_parm *p) { - struct ip_tunnel *t, *nt; - struct net *net = dev_net(dev); + struct ip_tunnel *t; struct ip_tunnel *tunnel = netdev_priv(dev); + struct net *net = tunnel->net; struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id); if (dev == itn->fb_tunnel_dev) return -EINVAL; - nt = netdev_priv(dev); - t = ip_tunnel_find(itn, p, dev->type); if (t) { if (t->dev != dev) return -EEXIST; } else { - t = nt; + t = tunnel; if (dev->type != ARPHRD_ETHER) { unsigned int nflags = 0; @@ -984,6 +996,7 @@ int ip_tunnel_init(struct net_device *dev) } tunnel->dev = dev; + tunnel->net = dev_net(dev); strcpy(tunnel->parms.name, dev->name); iph->version = 4; iph->ihl = 5; @@ -994,8 +1007,8 @@ EXPORT_SYMBOL_GPL(ip_tunnel_init); void ip_tunnel_uninit(struct net_device *dev) { - struct net *net = dev_net(dev); struct ip_tunnel *tunnel = netdev_priv(dev); + struct net *net = tunnel->net; struct ip_tunnel_net *itn; itn = net_generic(net, tunnel->ip_tnl_net_id); diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 79b263d..e805e7b 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -318,7 +318,7 @@ static int __net_init vti_init_net(struct net *net) static void __net_exit vti_exit_net(struct net *net) { struct ip_tunnel_net *itn = net_generic(net, vti_net_id); - ip_tunnel_delete_net(itn); + ip_tunnel_delete_net(itn, &vti_link_ops); } static struct pernet_operations vti_net_ops = { diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 51fc2a1..87bd295 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -286,7 +286,6 @@ static void ipip_tunnel_setup(struct net_device *dev) dev->flags = IFF_NOARP; dev->iflink = 0; dev->addr_len = 4; - dev->features |= NETIF_F_NETNS_LOCAL; dev->features |= NETIF_F_LLTX; dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; @@ -437,7 +436,7 @@ static int __net_init ipip_init_net(struct net *net) static void __net_exit ipip_exit_net(struct net *net) { struct ip_tunnel_net *itn = net_generic(net, ipip_net_id); - ip_tunnel_delete_net(itn); + 
ip_tunnel_delete_net(itn, &ipip_link_ops); } static struct pernet_operations ipip_net_ops = { -- cgit v0.10.2 From 0bd8762824e73a3cce7b7560a97463301764b616 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Tue, 13 Aug 2013 17:51:12 +0200 Subject: ip6tnl: add x-netns support This patch allows to switch the netns when packet is encapsulated or decapsulated. In other word, the encapsulated packet is received in a netns, where the lookup is done to find the tunnel. Once the tunnel is found, the packet is decapsulated and injecting into the corresponding interface which stands to another netns. When one of the two netns is removed, the tunnel is destroyed. Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index 4da5de1..2265b0b 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h @@ -36,6 +36,7 @@ struct __ip6_tnl_parm { struct ip6_tnl { struct ip6_tnl __rcu *next; /* next tunnel in list */ struct net_device *dev; /* virtual device associated with tunnel */ + struct net *net; /* netns for packet i/o */ struct __ip6_tnl_parm parms; /* tunnel configuration parameters */ struct flowi fl; /* flowi template for xmit */ struct dst_entry *dst_cache; /* cached dst */ diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index ecd6073..f2d0a42 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -335,6 +335,7 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net, dev->rtnl_link_ops = &ip6gre_link_ops; nt->dev = dev; + nt->net = dev_net(dev); ip6gre_tnl_link_config(nt, 1); if (register_netdevice(dev) < 0) @@ -1255,6 +1256,7 @@ static int ip6gre_tunnel_init(struct net_device *dev) tunnel = netdev_priv(dev); tunnel->dev = dev; + tunnel->net = dev_net(dev); strcpy(tunnel->parms.name, dev->name); memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr)); @@ -1275,6 +1277,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev) struct ip6_tnl *tunnel = netdev_priv(dev); tunnel->dev = dev; + tunnel->net = dev_net(dev); strcpy(tunnel->parms.name, dev->name); tunnel->hlen = sizeof(struct ipv6hdr) + 4; @@ -1450,6 +1453,7 @@ static int ip6gre_tap_init(struct net_device *dev) tunnel = netdev_priv(dev); tunnel->dev = dev; + tunnel->net = dev_net(dev); strcpy(tunnel->parms.name, dev->name); ip6gre_tnl_link_config(tunnel, 1); @@ -1501,6 +1505,7 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev, eth_hw_addr_random(dev); nt->dev = dev; + nt->net = dev_net(dev); ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]); /* Can use a lockless transmit, unless we generate output sequences */ diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 1e55866..cc3bb20 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -315,6 +315,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p) t = netdev_priv(dev); t->parms = *p; + t->net = dev_net(dev); err = ip6_tnl_create2(dev); if (err < 0) goto failed_free; @@ -374,7 +375,7 @@ static void ip6_tnl_dev_uninit(struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); - struct net *net = dev_net(dev); + struct net *net = t->net; struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); if (dev == ip6n->fb_tnl_dev) @@ -741,7 +742,7 @@ int ip6_tnl_rcv_ctl(struct ip6_tnl *t, { struct __ip6_tnl_parm *p = &t->parms; int ret = 0; - struct net *net = dev_net(t->dev); + struct net *net = t->net; if ((p->flags & IP6_TNL_F_CAP_RCV) || ((p->flags & IP6_TNL_F_CAP_PER_PACKET) && @@ -827,6 +828,9 @@ 
static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, tstats->rx_packets++; tstats->rx_bytes += skb->len; + if (!net_eq(t->net, dev_net(t->dev))) + skb_scrub_packet(skb); + netif_rx(skb); rcu_read_unlock(); @@ -895,7 +899,7 @@ int ip6_tnl_xmit_ctl(struct ip6_tnl *t) { struct __ip6_tnl_parm *p = &t->parms; int ret = 0; - struct net *net = dev_net(t->dev); + struct net *net = t->net; if (p->flags & IP6_TNL_F_CAP_XMIT) { struct net_device *ldev = NULL; @@ -945,8 +949,8 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, int encap_limit, __u32 *pmtu) { - struct net *net = dev_net(dev); struct ip6_tnl *t = netdev_priv(dev); + struct net *net = t->net; struct net_device_stats *stats = &t->dev->stats; struct ipv6hdr *ipv6h = ipv6_hdr(skb); struct ipv6_tel_txoption opt; @@ -996,6 +1000,9 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, goto tx_err_dst_release; } + if (!net_eq(t->net, dev_net(dev))) + skb_scrub_packet(skb); + /* * Okay, now see if we can stuff it in the buffer as-is. */ @@ -1202,7 +1209,7 @@ static void ip6_tnl_link_config(struct ip6_tnl *t) int strict = (ipv6_addr_type(&p->raddr) & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL)); - struct rt6_info *rt = rt6_lookup(dev_net(dev), + struct rt6_info *rt = rt6_lookup(t->net, &p->raddr, &p->laddr, p->link, strict); @@ -1251,7 +1258,7 @@ ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p) static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p) { - struct net *net = dev_net(t->dev); + struct net *net = t->net; struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); int err; @@ -1463,7 +1470,6 @@ static void ip6_tnl_dev_setup(struct net_device *dev) dev->mtu-=8; dev->flags |= IFF_NOARP; dev->addr_len = sizeof(struct in6_addr); - dev->features |= NETIF_F_NETNS_LOCAL; dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; } @@ -1479,6 +1485,7 @@ ip6_tnl_dev_init_gen(struct net_device *dev) struct ip6_tnl *t = netdev_priv(dev); t->dev = dev; + t->net = dev_net(dev); dev->tstats = alloc_percpu(struct pcpu_tstats); if (!dev->tstats) return -ENOMEM; @@ -1596,9 +1603,9 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev, static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { - struct ip6_tnl *t; + struct ip6_tnl *t = netdev_priv(dev); struct __ip6_tnl_parm p; - struct net *net = dev_net(dev); + struct net *net = t->net; struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); if (dev == ip6n->fb_tnl_dev) @@ -1699,14 +1706,24 @@ static struct xfrm6_tunnel ip6ip6_handler __read_mostly = { static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n) { + struct net *net = dev_net(ip6n->fb_tnl_dev); + struct net_device *dev, *aux; int h; struct ip6_tnl *t; LIST_HEAD(list); + for_each_netdev_safe(net, dev, aux) + if (dev->rtnl_link_ops == &ip6_link_ops) + unregister_netdevice_queue(dev, &list); + for (h = 0; h < HASH_SIZE; h++) { t = rtnl_dereference(ip6n->tnls_r_l[h]); while (t != NULL) { - unregister_netdevice_queue(t->dev, &list); + /* If dev is in the same netns, it has already + * been added to the list by the previous loop. + */ + if (!net_eq(dev_net(t->dev), net)) + unregister_netdevice_queue(t->dev, &list); t = rtnl_dereference(t->next); } } @@ -1732,6 +1749,10 @@ static int __net_init ip6_tnl_init_net(struct net *net) if (!ip6n->fb_tnl_dev) goto err_alloc_dev; dev_net_set(ip6n->fb_tnl_dev, net); + /* FB netdevice is special: we have one, and only one per netns. + * Allowing to move it to another netns is clearly unsafe. 
+ */ + ip6n->fb_tnl_dev->features |= NETIF_F_NETNS_LOCAL; err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev); if (err < 0) -- cgit v0.10.2 From fce9b9be89cece975675142a3953bfb5299d195d Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 14 Aug 2013 12:35:42 +0300 Subject: rtnetlink: remove an unneeded test We know that "dev" is a valid pointer at this point, so we can remove the test and clean up a little. Signed-off-by: Dan Carpenter Reviewed-by: "Eric W. Biederman" Signed-off-by: David S. Miller diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 0b2972c..242084e 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1867,10 +1867,10 @@ replay: else err = register_netdevice(dev); - if (err < 0 && !IS_ERR(dev)) + if (err < 0) { free_netdev(dev); - if (err < 0) goto out; + } err = rtnl_configure_link(dev, ifm); if (err < 0) -- cgit v0.10.2 From ebc2ec484db9c7c96b41f7d0d6d9d2e24f601116 Mon Sep 17 00:00:00 2001 From: hayeswang Date: Wed, 14 Aug 2013 20:54:38 +0800 Subject: net/usb/r8152: support aggregation Enable the tx/rx aggregation which could contain one or more packets for each bulk in/out. This could reduce the loading of the host controller by sending less bulk transfer. The rx packets in the bulk in buffer should be 8-byte aligned, and the tx packets in the bulk out buffer should be 4-byte aligned. Signed-off-by: Hayes Wang Signed-off-by: David S. Miller diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 11c51f2..abb0b9f 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -19,9 +19,10 @@ #include #include #include +#include /* Version Information */ -#define DRIVER_VERSION "v1.0.0 (2013/05/03)" +#define DRIVER_VERSION "v1.01.0 (2013/08/12)" #define DRIVER_AUTHOR "Realtek linux nic maintainers " #define DRIVER_DESC "Realtek RTL8152 Based USB 2.0 Ethernet Adapters" #define MODULENAME "r8152" @@ -267,6 +268,9 @@ enum rtl_register_content { FULL_DUP = 0x01, }; +#define RTL8152_MAX_TX 10 +#define RTL8152_MAX_RX 10 + #define RTL8152_REQT_READ 0xc0 #define RTL8152_REQT_WRITE 0x40 #define RTL8152_REQ_GET_REGS 0x05 @@ -285,7 +289,6 @@ enum rtl_register_content { /* rtl8152 flags */ enum rtl8152_flags { RTL8152_UNPLUG = 0, - RX_URB_FAIL, RTL8152_SET_RX_MODE, WORK_ENABLE }; @@ -315,13 +318,34 @@ struct tx_desc { u32 opts2; }; +struct rx_agg { + struct list_head list; + struct urb *urb; + void *context; + void *buffer; + void *head; +}; + +struct tx_agg { + struct list_head list; + struct urb *urb; + void *context; + void *buffer; + void *head; + u32 skb_num; + u32 skb_len; +}; + struct r8152 { unsigned long flags; struct usb_device *udev; struct tasklet_struct tl; struct net_device *netdev; - struct urb *rx_urb, *tx_urb; - struct sk_buff *tx_skb, *rx_skb; + struct tx_agg tx_info[RTL8152_MAX_TX]; + struct rx_agg rx_info[RTL8152_MAX_RX]; + struct list_head rx_done, tx_free; + struct sk_buff_head tx_queue; + spinlock_t rx_lock, tx_lock; struct delayed_work schedule; struct mii_if_info mii; u32 msg_enable; @@ -340,6 +364,7 @@ enum rtl_version { * The RTL chips use a 64 element hash table based on the Ethernet CRC. 
*/ static const int multicast_filter_limit = 32; +static unsigned int rx_buf_sz = 16384; static int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) @@ -686,6 +711,9 @@ static void ocp_reg_write(struct r8152 *tp, u16 addr, u16 data) ocp_write_word(tp, MCU_TYPE_PLA, ocp_index, data); } +static +int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags); + static inline void set_ethernet_addr(struct r8152 *tp) { struct net_device *dev = tp->netdev; @@ -716,26 +744,6 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p) return 0; } -static int alloc_all_urbs(struct r8152 *tp) -{ - tp->rx_urb = usb_alloc_urb(0, GFP_KERNEL); - if (!tp->rx_urb) - return 0; - tp->tx_urb = usb_alloc_urb(0, GFP_KERNEL); - if (!tp->tx_urb) { - usb_free_urb(tp->rx_urb); - return 0; - } - - return 1; -} - -static void free_all_urbs(struct r8152 *tp) -{ - usb_free_urb(tp->rx_urb); - usb_free_urb(tp->tx_urb); -} - static struct net_device_stats *rtl8152_get_stats(struct net_device *dev) { return &dev->stats; @@ -743,129 +751,425 @@ static struct net_device_stats *rtl8152_get_stats(struct net_device *dev) static void read_bulk_callback(struct urb *urb) { - struct r8152 *tp; - unsigned pkt_len; - struct sk_buff *skb; struct net_device *netdev; - struct net_device_stats *stats; + unsigned long lockflags; int status = urb->status; + struct rx_agg *agg; + struct r8152 *tp; int result; - struct rx_desc *rx_desc; - tp = urb->context; + agg = urb->context; + if (!agg) + return; + + tp = agg->context; if (!tp) return; + if (test_bit(RTL8152_UNPLUG, &tp->flags)) return; + + if (!test_bit(WORK_ENABLE, &tp->flags)) + return; + netdev = tp->netdev; - if (!netif_device_present(netdev)) + if (!netif_carrier_ok(netdev)) return; - stats = rtl8152_get_stats(netdev); switch (status) { case 0: - break; + if (urb->actual_length < ETH_ZLEN) + break; + + spin_lock_irqsave(&tp->rx_lock, lockflags); + list_add_tail(&agg->list, &tp->rx_done); + spin_unlock_irqrestore(&tp->rx_lock, lockflags); + tasklet_schedule(&tp->tl); + return; case -ESHUTDOWN: set_bit(RTL8152_UNPLUG, &tp->flags); netif_device_detach(tp->netdev); + return; case -ENOENT: return; /* the urb is in unlink state */ case -ETIME: pr_warn_ratelimited("may be reset is needed?..\n"); - goto goon; + break; default: pr_warn_ratelimited("Rx status %d\n", status); - goto goon; + break; } - /* protect against short packets (tell me why we got some?!?) 
*/ - if (urb->actual_length < sizeof(*rx_desc)) - goto goon; - - - rx_desc = (struct rx_desc *)urb->transfer_buffer; - pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK; - if (urb->actual_length < sizeof(struct rx_desc) + pkt_len) - goto goon; - - skb = netdev_alloc_skb_ip_align(netdev, pkt_len); - if (!skb) - goto goon; - - memcpy(skb->data, tp->rx_skb->data + sizeof(struct rx_desc), pkt_len); - skb_put(skb, pkt_len); - skb->protocol = eth_type_trans(skb, netdev); - netif_rx(skb); - stats->rx_packets++; - stats->rx_bytes += pkt_len; -goon: - usb_fill_bulk_urb(tp->rx_urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1), - tp->rx_skb->data, RTL8152_RMS + sizeof(struct rx_desc), - (usb_complete_t)read_bulk_callback, tp); - result = usb_submit_urb(tp->rx_urb, GFP_ATOMIC); + result = r8152_submit_rx(tp, agg, GFP_ATOMIC); if (result == -ENODEV) { netif_device_detach(tp->netdev); } else if (result) { - set_bit(RX_URB_FAIL, &tp->flags); - goto resched; - } else { - clear_bit(RX_URB_FAIL, &tp->flags); + spin_lock_irqsave(&tp->rx_lock, lockflags); + list_add_tail(&agg->list, &tp->rx_done); + spin_unlock_irqrestore(&tp->rx_lock, lockflags); + tasklet_schedule(&tp->tl); } - - return; -resched: - tasklet_schedule(&tp->tl); } -static void rx_fixup(unsigned long data) +static void write_bulk_callback(struct urb *urb) { + struct net_device_stats *stats; + unsigned long lockflags; + struct tx_agg *agg; struct r8152 *tp; - int status; + int status = urb->status; - tp = (struct r8152 *)data; - if (!test_bit(WORK_ENABLE, &tp->flags)) + agg = urb->context; + if (!agg) return; - status = usb_submit_urb(tp->rx_urb, GFP_ATOMIC); - if (status == -ENODEV) { - netif_device_detach(tp->netdev); - } else if (status) { - set_bit(RX_URB_FAIL, &tp->flags); - goto tlsched; + tp = agg->context; + if (!tp) + return; + + stats = rtl8152_get_stats(tp->netdev); + if (status) { + pr_warn_ratelimited("Tx status %d\n", status); + stats->tx_errors += agg->skb_num; } else { - clear_bit(RX_URB_FAIL, &tp->flags); + stats->tx_packets += agg->skb_num; + stats->tx_bytes += agg->skb_len; } - return; -tlsched: - tasklet_schedule(&tp->tl); + spin_lock_irqsave(&tp->tx_lock, lockflags); + list_add_tail(&agg->list, &tp->tx_free); + spin_unlock_irqrestore(&tp->tx_lock, lockflags); + + if (!netif_carrier_ok(tp->netdev)) + return; + + if (!test_bit(WORK_ENABLE, &tp->flags)) + return; + + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return; + + if (!skb_queue_empty(&tp->tx_queue)) + tasklet_schedule(&tp->tl); } -static void write_bulk_callback(struct urb *urb) +static inline void *rx_agg_align(void *data) +{ + return (void *)ALIGN((uintptr_t)data, 8); +} + +static inline void *tx_agg_align(void *data) +{ + return (void *)ALIGN((uintptr_t)data, 4); +} + +static void free_all_mem(struct r8152 *tp) +{ + int i; + + for (i = 0; i < RTL8152_MAX_RX; i++) { + if (tp->rx_info[i].urb) { + usb_free_urb(tp->rx_info[i].urb); + tp->rx_info[i].urb = NULL; + } + + if (tp->rx_info[i].buffer) { + kfree(tp->rx_info[i].buffer); + tp->rx_info[i].buffer = NULL; + tp->rx_info[i].head = NULL; + } + } + + for (i = 0; i < RTL8152_MAX_TX; i++) { + if (tp->tx_info[i].urb) { + usb_free_urb(tp->tx_info[i].urb); + tp->tx_info[i].urb = NULL; + } + + if (tp->tx_info[i].buffer) { + kfree(tp->tx_info[i].buffer); + tp->tx_info[i].buffer = NULL; + tp->tx_info[i].head = NULL; + } + } +} + +static int alloc_all_mem(struct r8152 *tp) +{ + struct net_device *netdev = tp->netdev; + struct urb *urb; + int node, i; + u8 *buf; + + node = netdev->dev.parent ? 
dev_to_node(netdev->dev.parent) : -1; + + spin_lock_init(&tp->rx_lock); + spin_lock_init(&tp->tx_lock); + INIT_LIST_HEAD(&tp->rx_done); + INIT_LIST_HEAD(&tp->tx_free); + skb_queue_head_init(&tp->tx_queue); + + for (i = 0; i < RTL8152_MAX_RX; i++) { + buf = kmalloc_node(rx_buf_sz, GFP_KERNEL, node); + if (!buf) + goto err1; + + if (buf != rx_agg_align(buf)) { + kfree(buf); + buf = kmalloc_node(rx_buf_sz + 8, GFP_KERNEL, node); + if (!buf) + goto err1; + } + + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) { + kfree(buf); + goto err1; + } + + INIT_LIST_HEAD(&tp->rx_info[i].list); + tp->rx_info[i].context = tp; + tp->rx_info[i].urb = urb; + tp->rx_info[i].buffer = buf; + tp->rx_info[i].head = rx_agg_align(buf); + } + + for (i = 0; i < RTL8152_MAX_TX; i++) { + buf = kmalloc_node(rx_buf_sz, GFP_KERNEL, node); + if (!buf) + goto err1; + + if (buf != tx_agg_align(buf)) { + kfree(buf); + buf = kmalloc_node(rx_buf_sz + 4, GFP_KERNEL, node); + if (!buf) + goto err1; + } + + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) { + kfree(buf); + goto err1; + } + + INIT_LIST_HEAD(&tp->tx_info[i].list); + tp->tx_info[i].context = tp; + tp->tx_info[i].urb = urb; + tp->tx_info[i].buffer = buf; + tp->tx_info[i].head = tx_agg_align(buf); + + list_add_tail(&tp->tx_info[i].list, &tp->tx_free); + } + + return 0; + +err1: + free_all_mem(tp); + return -ENOMEM; +} + +static void rx_bottom(struct r8152 *tp) +{ + struct net_device_stats *stats; + struct net_device *netdev; + struct rx_agg *agg; + struct rx_desc *rx_desc; + unsigned long lockflags; + struct list_head *cursor, *next; + struct sk_buff *skb; + struct urb *urb; + unsigned pkt_len; + int len_used; + u8 *rx_data; + int ret; + + netdev = tp->netdev; + + stats = rtl8152_get_stats(netdev); + + spin_lock_irqsave(&tp->rx_lock, lockflags); + list_for_each_safe(cursor, next, &tp->rx_done) { + list_del_init(cursor); + spin_unlock_irqrestore(&tp->rx_lock, lockflags); + + agg = list_entry(cursor, struct rx_agg, list); + urb = agg->urb; + if (urb->actual_length < ETH_ZLEN) { + ret = r8152_submit_rx(tp, agg, GFP_ATOMIC); + spin_lock_irqsave(&tp->rx_lock, lockflags); + if (ret && ret != -ENODEV) { + list_add_tail(&agg->list, next); + tasklet_schedule(&tp->tl); + } + continue; + } + + len_used = 0; + rx_desc = agg->head; + rx_data = agg->head; + pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK; + len_used += sizeof(struct rx_desc) + pkt_len; + + while (urb->actual_length >= len_used) { + if (pkt_len < ETH_ZLEN) + break; + + pkt_len -= 4; /* CRC */ + rx_data += sizeof(struct rx_desc); + + skb = netdev_alloc_skb_ip_align(netdev, pkt_len); + if (!skb) { + stats->rx_dropped++; + break; + } + memcpy(skb->data, rx_data, pkt_len); + skb_put(skb, pkt_len); + skb->protocol = eth_type_trans(skb, netdev); + netif_rx(skb); + stats->rx_packets++; + stats->rx_bytes += pkt_len; + + rx_data = rx_agg_align(rx_data + pkt_len + 4); + rx_desc = (struct rx_desc *)rx_data; + pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK; + len_used = (int)(rx_data - (u8 *)agg->head); + len_used += sizeof(struct rx_desc) + pkt_len; + } + + ret = r8152_submit_rx(tp, agg, GFP_ATOMIC); + spin_lock_irqsave(&tp->rx_lock, lockflags); + if (ret && ret != -ENODEV) { + list_add_tail(&agg->list, next); + tasklet_schedule(&tp->tl); + } + } + spin_unlock_irqrestore(&tp->rx_lock, lockflags); +} + +static void tx_bottom(struct r8152 *tp) +{ + struct net_device_stats *stats; + struct net_device *netdev; + struct tx_agg *agg; + unsigned long lockflags; + u32 remain, total; + u8 *tx_data; + int res; + + netdev = 
tp->netdev; + +next_agg: + agg = NULL; + spin_lock_irqsave(&tp->tx_lock, lockflags); + if (!skb_queue_empty(&tp->tx_queue) && !list_empty(&tp->tx_free)) { + struct list_head *cursor; + + cursor = tp->tx_free.next; + list_del_init(cursor); + agg = list_entry(cursor, struct tx_agg, list); + } + spin_unlock_irqrestore(&tp->tx_lock, lockflags); + + if (!agg) + return; + + tx_data = agg->head; + agg->skb_num = agg->skb_len = 0; + remain = rx_buf_sz - sizeof(struct tx_desc); + total = 0; + + while (remain >= ETH_ZLEN) { + struct tx_desc *tx_desc; + struct sk_buff *skb; + unsigned int len; + + skb = skb_dequeue(&tp->tx_queue); + if (!skb) + break; + + len = skb->len; + if (remain < len) { + skb_queue_head(&tp->tx_queue, skb); + break; + } + + tx_data = tx_agg_align(tx_data); + tx_desc = (struct tx_desc *)tx_data; + tx_data += sizeof(*tx_desc); + + tx_desc->opts1 = cpu_to_le32((skb->len & TX_LEN_MASK) | TX_FS | + TX_LS); + memcpy(tx_data, skb->data, len); + agg->skb_num++; + agg->skb_len += len; + dev_kfree_skb_any(skb); + + tx_data += len; + remain = rx_buf_sz - sizeof(*tx_desc) - + (u32)(tx_agg_align(tx_data) - agg->head); + } + + usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2), + agg->head, (int)(tx_data - (u8 *)agg->head), + (usb_complete_t)write_bulk_callback, agg); + res = usb_submit_urb(agg->urb, GFP_ATOMIC); + + stats = rtl8152_get_stats(netdev); + + if (res) { + /* Can we get/handle EPIPE here? */ + if (res == -ENODEV) { + netif_device_detach(netdev); + } else { + netif_warn(tp, tx_err, netdev, + "failed tx_urb %d\n", res); + stats->tx_dropped += agg->skb_num; + spin_lock_irqsave(&tp->tx_lock, lockflags); + list_add_tail(&agg->list, &tp->tx_free); + spin_unlock_irqrestore(&tp->tx_lock, lockflags); + } + return; + } + goto next_agg; +} + +static void bottom_half(unsigned long data) { struct r8152 *tp; - int status = urb->status; - tp = urb->context; - if (!tp) + tp = (struct r8152 *)data; + + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return; + + if (!test_bit(WORK_ENABLE, &tp->flags)) return; - dev_kfree_skb_irq(tp->tx_skb); - if (!netif_device_present(tp->netdev)) + + if (!netif_carrier_ok(tp->netdev)) return; - if (status) - dev_info(&urb->dev->dev, "%s: Tx status %d\n", - tp->netdev->name, status); - tp->netdev->trans_start = jiffies; - netif_wake_queue(tp->netdev); + + rx_bottom(tp); + tx_bottom(tp); +} + +static +int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags) +{ + usb_fill_bulk_urb(agg->urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1), + agg->head, rx_buf_sz, + (usb_complete_t)read_bulk_callback, agg); + + return usb_submit_urb(agg->urb, mem_flags); } static void rtl8152_tx_timeout(struct net_device *netdev) { struct r8152 *tp = netdev_priv(netdev); - struct net_device_stats *stats = rtl8152_get_stats(netdev); + int i; + netif_warn(tp, tx_err, netdev, "Tx timeout.\n"); - usb_unlink_urb(tp->tx_urb); - stats->tx_errors++; + for (i = 0; i < RTL8152_MAX_TX; i++) + usb_unlink_urb(tp->tx_info[i].urb); } static void rtl8152_set_rx_mode(struct net_device *netdev) @@ -923,33 +1227,44 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, { struct r8152 *tp = netdev_priv(netdev); struct net_device_stats *stats = rtl8152_get_stats(netdev); + unsigned long lockflags; + struct tx_agg *agg = NULL; struct tx_desc *tx_desc; unsigned int len; + u8 *tx_data; int res; - netif_stop_queue(netdev); - len = skb->len; - if (skb_header_cloned(skb) || skb_headroom(skb) < sizeof(*tx_desc)) { - struct sk_buff *tx_skb; + skb_tx_timestamp(skb); - tx_skb = 
skb_copy_expand(skb, sizeof(*tx_desc), 0, GFP_ATOMIC); - dev_kfree_skb_any(skb); - if (!tx_skb) { - stats->tx_dropped++; - netif_wake_queue(netdev); - return NETDEV_TX_OK; - } - skb = tx_skb; + spin_lock_irqsave(&tp->tx_lock, lockflags); + if (!list_empty(&tp->tx_free) && skb_queue_empty(&tp->tx_queue)) { + struct list_head *cursor; + + cursor = tp->tx_free.next; + list_del_init(cursor); + agg = list_entry(cursor, struct tx_agg, list); } - tx_desc = (struct tx_desc *)skb_push(skb, sizeof(*tx_desc)); - memset(tx_desc, 0, sizeof(*tx_desc)); - tx_desc->opts1 = cpu_to_le32((len & TX_LEN_MASK) | TX_FS | TX_LS); - tp->tx_skb = skb; - skb_tx_timestamp(skb); - usb_fill_bulk_urb(tp->tx_urb, tp->udev, usb_sndbulkpipe(tp->udev, 2), - skb->data, skb->len, - (usb_complete_t)write_bulk_callback, tp); - res = usb_submit_urb(tp->tx_urb, GFP_ATOMIC); + spin_unlock_irqrestore(&tp->tx_lock, lockflags); + + if (!agg) { + skb_queue_tail(&tp->tx_queue, skb); + return NETDEV_TX_OK; + } + + tx_desc = (struct tx_desc *)agg->head; + tx_data = agg->head + sizeof(*tx_desc); + agg->skb_num = agg->skb_len = 0; + + len = skb->len; + tx_desc->opts1 = cpu_to_le32((skb->len & TX_LEN_MASK) | TX_FS | TX_LS); + memcpy(tx_data, skb->data, len); + dev_kfree_skb_any(skb); + agg->skb_num++; + agg->skb_len += len; + usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2), + agg->head, len + sizeof(*tx_desc), + (usb_complete_t)write_bulk_callback, agg); + res = usb_submit_urb(agg->urb, GFP_ATOMIC); if (res) { /* Can we get/handle EPIPE here? */ if (res == -ENODEV) { @@ -957,12 +1272,11 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, } else { netif_warn(tp, tx_err, netdev, "failed tx_urb %d\n", res); - stats->tx_errors++; - netif_start_queue(netdev); + stats->tx_dropped++; + spin_lock_irqsave(&tp->tx_lock, lockflags); + list_add_tail(&agg->list, &tp->tx_free); + spin_unlock_irqrestore(&tp->tx_lock, lockflags); } - } else { - stats->tx_packets++; - stats->tx_bytes += skb->len; } return NETDEV_TX_OK; @@ -999,17 +1313,18 @@ static inline u8 rtl8152_get_speed(struct r8152 *tp) static int rtl8152_enable(struct r8152 *tp) { - u32 ocp_data; + u32 ocp_data; + int i, ret; u8 speed; speed = rtl8152_get_speed(tp); - if (speed & _100bps) { + if (speed & _10bps) { ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR); - ocp_data &= ~EEEP_CR_EEEP_TX; + ocp_data |= EEEP_CR_EEEP_TX; ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data); } else { ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR); - ocp_data |= EEEP_CR_EEEP_TX; + ocp_data &= ~EEEP_CR_EEEP_TX; ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data); } @@ -1023,23 +1338,34 @@ static int rtl8152_enable(struct r8152 *tp) ocp_data &= ~RXDY_GATED_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data); - usb_fill_bulk_urb(tp->rx_urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1), - tp->rx_skb->data, RTL8152_RMS + sizeof(struct rx_desc), - (usb_complete_t)read_bulk_callback, tp); + INIT_LIST_HEAD(&tp->rx_done); + ret = 0; + for (i = 0; i < RTL8152_MAX_RX; i++) { + INIT_LIST_HEAD(&tp->rx_info[i].list); + ret |= r8152_submit_rx(tp, &tp->rx_info[i], GFP_KERNEL); + } - return usb_submit_urb(tp->rx_urb, GFP_KERNEL); + return ret; } static void rtl8152_disable(struct r8152 *tp) { - u32 ocp_data; - int i; + struct net_device_stats *stats = rtl8152_get_stats(tp->netdev); + struct sk_buff *skb; + u32 ocp_data; + int i; ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); ocp_data &= ~RCR_ACPT_ALL; ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); - 
usb_kill_urb(tp->tx_urb); + while ((skb = skb_dequeue(&tp->tx_queue))) { + dev_kfree_skb(skb); + stats->tx_dropped++; + } + + for (i = 0; i < RTL8152_MAX_TX; i++) + usb_kill_urb(tp->tx_info[i].urb); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1); ocp_data |= RXDY_GATED_EN; @@ -1058,7 +1384,8 @@ static void rtl8152_disable(struct r8152 *tp) mdelay(1); } - usb_kill_urb(tp->rx_urb); + for (i = 0; i < RTL8152_MAX_RX; i++) + usb_kill_urb(tp->rx_info[i].urb); rtl8152_nic_reset(tp); } @@ -1303,7 +1630,9 @@ static void set_carrier(struct r8152 *tp) } else { if (tp->speed & LINK_STATUS) { netif_carrier_off(netdev); + tasklet_disable(&tp->tl); rtl8152_disable(tp); + tasklet_enable(&tp->tl); } } tp->speed = speed; @@ -1369,7 +1698,9 @@ static int rtl8152_close(struct net_device *netdev) clear_bit(WORK_ENABLE, &tp->flags); cancel_delayed_work_sync(&tp->schedule); netif_stop_queue(netdev); + tasklet_disable(&tp->tl); rtl8152_disable(tp); + tasklet_enable(&tp->tl); return res; } @@ -1429,8 +1760,8 @@ static void r8152b_hw_phy_cfg(struct r8152 *tp) static void r8152b_init(struct r8152 *tp) { - u32 ocp_data; - int i; + u32 ocp_data; + int i; rtl_clear_bp(tp); @@ -1475,9 +1806,9 @@ static void r8152b_init(struct r8152 *tp) break; } - /* disable rx aggregation */ + /* enable rx aggregation */ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); - ocp_data |= RX_AGG_DISABLE; + ocp_data &= ~RX_AGG_DISABLE; ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); } @@ -1490,6 +1821,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) if (netif_running(tp->netdev)) { clear_bit(WORK_ENABLE, &tp->flags); cancel_delayed_work_sync(&tp->schedule); + tasklet_disable(&tp->tl); } rtl8152_down(tp); @@ -1508,6 +1840,7 @@ static int rtl8152_resume(struct usb_interface *intf) set_bit(WORK_ENABLE, &tp->flags); set_bit(RTL8152_SET_RX_MODE, &tp->flags); schedule_delayed_work(&tp->schedule, 0); + tasklet_enable(&tp->tl); } return 0; @@ -1619,6 +1952,7 @@ static int rtl8152_probe(struct usb_interface *intf, struct usb_device *udev = interface_to_usbdev(intf); struct r8152 *tp; struct net_device *netdev; + int ret; if (udev->actconfig->desc.bConfigurationValue != 1) { usb_driver_set_configuration(udev, 1); @@ -1631,10 +1965,12 @@ static int rtl8152_probe(struct usb_interface *intf, return -ENOMEM; } + SET_NETDEV_DEV(netdev, &intf->dev); tp = netdev_priv(netdev); + memset(tp, 0, sizeof(*tp)); tp->msg_enable = 0x7FFF; - tasklet_init(&tp->tl, rx_fixup, (unsigned long)tp); + tasklet_init(&tp->tl, bottom_half, (unsigned long)tp); INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t); tp->udev = udev; @@ -1657,37 +1993,27 @@ static int rtl8152_probe(struct usb_interface *intf, r8152b_init(tp); set_ethernet_addr(tp); - if (!alloc_all_urbs(tp)) { - netif_err(tp, probe, netdev, "out of memory"); + ret = alloc_all_mem(tp); + if (ret) goto out; - } - - tp->rx_skb = netdev_alloc_skb(netdev, - RTL8152_RMS + sizeof(struct rx_desc)); - if (!tp->rx_skb) - goto out1; usb_set_intfdata(intf, tp); - SET_NETDEV_DEV(netdev, &intf->dev); - - if (register_netdev(netdev) != 0) { + ret = register_netdev(netdev); + if (ret != 0) { netif_err(tp, probe, netdev, "couldn't register the device"); - goto out2; + goto out1; } netif_info(tp, probe, netdev, "%s", DRIVER_VERSION); return 0; -out2: - usb_set_intfdata(intf, NULL); - dev_kfree_skb(tp->rx_skb); out1: - free_all_urbs(tp); + usb_set_intfdata(intf, NULL); out: free_netdev(netdev); - return -EIO; + return ret; } static void rtl8152_unload(struct r8152 *tp) @@ -1715,9 
+2041,7 @@ static void rtl8152_disconnect(struct usb_interface *intf) tasklet_kill(&tp->tl); unregister_netdev(tp->netdev); rtl8152_unload(tp); - free_all_urbs(tp); - if (tp->rx_skb) - dev_kfree_skb(tp->rx_skb); + free_all_mem(tp); free_netdev(tp->netdev); } } @@ -1732,11 +2056,12 @@ MODULE_DEVICE_TABLE(usb, rtl8152_table); static struct usb_driver rtl8152_driver = { .name = MODULENAME, + .id_table = rtl8152_table, .probe = rtl8152_probe, .disconnect = rtl8152_disconnect, - .id_table = rtl8152_table, .suspend = rtl8152_suspend, - .resume = rtl8152_resume + .resume = rtl8152_resume, + .reset_resume = rtl8152_resume, }; module_usb_driver(rtl8152_driver); -- cgit v0.10.2 From 5bd23881744b72dc22dfd37738d0e82d35306b76 Mon Sep 17 00:00:00 2001 From: hayeswang Date: Wed, 14 Aug 2013 20:54:39 +0800 Subject: net/usb/r8152: enable tx checksum Enable tx checksum. Signed-off-by: Hayes Wang Signed-off-by: David S. Miller diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index abb0b9f..4d938a7 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -20,6 +20,8 @@ #include #include #include +#include +#include /* Version Information */ #define DRIVER_VERSION "v1.01.0 (2013/08/12)" @@ -314,8 +316,13 @@ struct tx_desc { u32 opts1; #define TX_FS (1 << 31) /* First segment of a packet */ #define TX_LS (1 << 30) /* Final segment of a packet */ -#define TX_LEN_MASK 0xffff +#define TX_LEN_MASK 0x3ffff + u32 opts2; +#define UDP_CS (1 << 31) /* Calculate UDP/IP checksum */ +#define TCP_CS (1 << 30) /* Calculate TCP/IP checksum */ +#define IPV4_CS (1 << 29) /* Calculate IPv4 checksum */ +#define IPV6_CS (1 << 28) /* Calculate IPv6 checksum */ }; struct rx_agg { @@ -968,6 +975,52 @@ err1: return -ENOMEM; } +static void +r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb) +{ + memset(desc, 0, sizeof(*desc)); + + desc->opts1 = cpu_to_le32((skb->len & TX_LEN_MASK) | TX_FS | TX_LS); + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + __be16 protocol; + u8 ip_protocol; + u32 opts2 = 0; + + if (skb->protocol == htons(ETH_P_8021Q)) + protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; + else + protocol = skb->protocol; + + switch (protocol) { + case htons(ETH_P_IP): + opts2 |= IPV4_CS; + ip_protocol = ip_hdr(skb)->protocol; + break; + + case htons(ETH_P_IPV6): + opts2 |= IPV6_CS; + ip_protocol = ipv6_hdr(skb)->nexthdr; + break; + + default: + ip_protocol = IPPROTO_RAW; + break; + } + + if (ip_protocol == IPPROTO_TCP) { + opts2 |= TCP_CS; + opts2 |= (skb_transport_offset(skb) & 0x7fff) << 17; + } else if (ip_protocol == IPPROTO_UDP) { + opts2 |= UDP_CS; + } else { + WARN_ON_ONCE(1); + } + + desc->opts2 = cpu_to_le32(opts2); + } +} + static void rx_bottom(struct r8152 *tp) { struct net_device_stats *stats; @@ -1097,8 +1150,7 @@ next_agg: tx_desc = (struct tx_desc *)tx_data; tx_data += sizeof(*tx_desc); - tx_desc->opts1 = cpu_to_le32((skb->len & TX_LEN_MASK) | TX_FS | - TX_LS); + r8152_tx_csum(tp, tx_desc, skb); memcpy(tx_data, skb->data, len); agg->skb_num++; agg->skb_len += len; @@ -1256,7 +1308,7 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, agg->skb_num = agg->skb_len = 0; len = skb->len; - tx_desc->opts1 = cpu_to_le32((skb->len & TX_LEN_MASK) | TX_FS | TX_LS); + r8152_tx_csum(tp, tx_desc, skb); memcpy(tx_data, skb->data, len); dev_kfree_skb_any(skb); agg->skb_num++; @@ -1977,7 +2029,9 @@ static int rtl8152_probe(struct usb_interface *intf, tp->netdev = netdev; netdev->netdev_ops = &rtl8152_netdev_ops; netdev->watchdog_timeo = RTL8152_TX_TIMEOUT; - 
netdev->features &= ~NETIF_F_IP_CSUM; + + netdev->features |= NETIF_F_IP_CSUM; + netdev->hw_features = NETIF_F_IP_CSUM; SET_ETHTOOL_OPS(netdev, &ops); tp->speed = 0; -- cgit v0.10.2 From 40a82917b1d3a8aecedee6b64949795b75359731 Mon Sep 17 00:00:00 2001 From: hayeswang Date: Wed, 14 Aug 2013 20:54:40 +0800 Subject: net/usb/r8152: enable interrupt transfer Use the interrupt transfer to replace polling link status. Signed-off-by: Hayes Wang Signed-off-by: David S. Miller diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 4d938a7..ef2498c 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -272,6 +272,9 @@ enum rtl_register_content { #define RTL8152_MAX_TX 10 #define RTL8152_MAX_RX 10 +#define INTBUFSIZE 2 + +#define INTR_LINK 0x0004 #define RTL8152_REQT_READ 0xc0 #define RTL8152_REQT_WRITE 0x40 @@ -292,7 +295,8 @@ enum rtl_register_content { enum rtl8152_flags { RTL8152_UNPLUG = 0, RTL8152_SET_RX_MODE, - WORK_ENABLE + WORK_ENABLE, + RTL8152_LINK_CHG, }; /* Define these values to match your device */ @@ -347,7 +351,9 @@ struct r8152 { unsigned long flags; struct usb_device *udev; struct tasklet_struct tl; + struct usb_interface *intf; struct net_device *netdev; + struct urb *intr_urb; struct tx_agg tx_info[RTL8152_MAX_TX]; struct rx_agg rx_info[RTL8152_MAX_RX]; struct list_head rx_done, tx_free; @@ -355,8 +361,10 @@ struct r8152 { spinlock_t rx_lock, tx_lock; struct delayed_work schedule; struct mii_if_info mii; + int intr_interval; u32 msg_enable; u16 ocp_base; + u8 *intr_buff; u8 version; u8 speed; }; @@ -860,6 +868,62 @@ static void write_bulk_callback(struct urb *urb) tasklet_schedule(&tp->tl); } +static void intr_callback(struct urb *urb) +{ + struct r8152 *tp; + __u16 *d; + int status = urb->status; + int res; + + tp = urb->context; + if (!tp) + return; + + if (!test_bit(WORK_ENABLE, &tp->flags)) + return; + + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return; + + switch (status) { + case 0: /* success */ + break; + case -ECONNRESET: /* unlink */ + case -ESHUTDOWN: + netif_device_detach(tp->netdev); + case -ENOENT: + return; + case -EOVERFLOW: + netif_info(tp, intr, tp->netdev, "intr status -EOVERFLOW\n"); + goto resubmit; + /* -EPIPE: should clear the halt */ + default: + netif_info(tp, intr, tp->netdev, "intr status %d\n", status); + goto resubmit; + } + + d = urb->transfer_buffer; + if (INTR_LINK & __le16_to_cpu(d[0])) { + if (!(tp->speed & LINK_STATUS)) { + set_bit(RTL8152_LINK_CHG, &tp->flags); + schedule_delayed_work(&tp->schedule, 0); + } + } else { + if (tp->speed & LINK_STATUS) { + set_bit(RTL8152_LINK_CHG, &tp->flags); + schedule_delayed_work(&tp->schedule, 0); + } + } + +resubmit: + res = usb_submit_urb(urb, GFP_ATOMIC); + if (res == -ENODEV) + netif_device_detach(tp->netdev); + else if (res) + netif_err(tp, intr, tp->netdev, + "can't resubmit intr, status %d\n", res); +} + static inline void *rx_agg_align(void *data) { return (void *)ALIGN((uintptr_t)data, 8); @@ -899,11 +963,24 @@ static void free_all_mem(struct r8152 *tp) tp->tx_info[i].head = NULL; } } + + if (tp->intr_urb) { + usb_free_urb(tp->intr_urb); + tp->intr_urb = NULL; + } + + if (tp->intr_buff) { + kfree(tp->intr_buff); + tp->intr_buff = NULL; + } } static int alloc_all_mem(struct r8152 *tp) { struct net_device *netdev = tp->netdev; + struct usb_interface *intf = tp->intf; + struct usb_host_interface *alt = intf->cur_altsetting; + struct usb_host_endpoint *ep_intr = alt->endpoint + 2; struct urb *urb; int node, i; u8 *buf; @@ -968,6 +1045,19 @@ static int alloc_all_mem(struct 
r8152 *tp) list_add_tail(&tp->tx_info[i].list, &tp->tx_free); } + tp->intr_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!tp->intr_urb) + goto err1; + + tp->intr_buff = kmalloc(INTBUFSIZE, GFP_KERNEL); + if (!tp->intr_buff) + goto err1; + + tp->intr_interval = (int)ep_intr->desc.bInterval; + usb_fill_int_urb(tp->intr_urb, tp->udev, usb_rcvintpipe(tp->udev, 3), + tp->intr_buff, INTBUFSIZE, intr_callback, + tp, tp->intr_interval); + return 0; err1: @@ -1228,8 +1318,10 @@ static void rtl8152_set_rx_mode(struct net_device *netdev) { struct r8152 *tp = netdev_priv(netdev); - if (tp->speed & LINK_STATUS) + if (tp->speed & LINK_STATUS) { set_bit(RTL8152_SET_RX_MODE, &tp->flags); + schedule_delayed_work(&tp->schedule, 0); + } } static void _rtl8152_set_rx_mode(struct net_device *netdev) @@ -1648,7 +1740,6 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex) r8152_mdio_write(tp, MII_BMCR, bmcr); out: - schedule_delayed_work(&tp->schedule, 5 * HZ); return ret; } @@ -1671,6 +1762,7 @@ static void set_carrier(struct r8152 *tp) struct net_device *netdev = tp->netdev; u8 speed; + clear_bit(RTL8152_LINK_CHG, &tp->flags); speed = rtl8152_get_speed(tp); if (speed & LINK_STATUS) { @@ -1700,13 +1792,12 @@ static void rtl_work_func_t(struct work_struct *work) if (test_bit(RTL8152_UNPLUG, &tp->flags)) goto out1; - set_carrier(tp); + if (test_bit(RTL8152_LINK_CHG, &tp->flags)) + set_carrier(tp); if (test_bit(RTL8152_SET_RX_MODE, &tp->flags)) _rtl8152_set_rx_mode(tp->netdev); - schedule_delayed_work(&tp->schedule, HZ); - out1: return; } @@ -1716,28 +1807,20 @@ static int rtl8152_open(struct net_device *netdev) struct r8152 *tp = netdev_priv(netdev); int res = 0; - tp->speed = rtl8152_get_speed(tp); - if (tp->speed & LINK_STATUS) { - res = rtl8152_enable(tp); - if (res) { - if (res == -ENODEV) - netif_device_detach(tp->netdev); - - netif_err(tp, ifup, netdev, - "rtl8152_open failed: %d\n", res); - return res; - } - - netif_carrier_on(netdev); - } else { - netif_stop_queue(netdev); - netif_carrier_off(netdev); + res = usb_submit_urb(tp->intr_urb, GFP_KERNEL); + if (res) { + if (res == -ENODEV) + netif_device_detach(tp->netdev); + netif_warn(tp, ifup, netdev, + "intr_urb submit failed: %d\n", res); + return res; } rtl8152_set_speed(tp, AUTONEG_ENABLE, SPEED_100, DUPLEX_FULL); + tp->speed = 0; + netif_carrier_off(netdev); netif_start_queue(netdev); set_bit(WORK_ENABLE, &tp->flags); - schedule_delayed_work(&tp->schedule, 0); return res; } @@ -1747,6 +1830,7 @@ static int rtl8152_close(struct net_device *netdev) struct r8152 *tp = netdev_priv(netdev); int res = 0; + usb_kill_urb(tp->intr_urb); clear_bit(WORK_ENABLE, &tp->flags); cancel_delayed_work_sync(&tp->schedule); netif_stop_queue(netdev); @@ -1872,6 +1956,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) if (netif_running(tp->netdev)) { clear_bit(WORK_ENABLE, &tp->flags); + usb_kill_urb(tp->intr_urb); cancel_delayed_work_sync(&tp->schedule); tasklet_disable(&tp->tl); } @@ -1888,10 +1973,11 @@ static int rtl8152_resume(struct usb_interface *intf) r8152b_init(tp); netif_device_attach(tp->netdev); if (netif_running(tp->netdev)) { - rtl8152_enable(tp); + rtl8152_set_speed(tp, AUTONEG_ENABLE, SPEED_100, DUPLEX_FULL); + tp->speed = 0; + netif_carrier_off(tp->netdev); set_bit(WORK_ENABLE, &tp->flags); - set_bit(RTL8152_SET_RX_MODE, &tp->flags); - schedule_delayed_work(&tp->schedule, 0); + usb_submit_urb(tp->intr_urb, GFP_KERNEL); tasklet_enable(&tp->tl); } @@ -2027,13 +2113,13 @@ static int rtl8152_probe(struct 
usb_interface *intf, tp->udev = udev; tp->netdev = netdev; + tp->intf = intf; netdev->netdev_ops = &rtl8152_netdev_ops; netdev->watchdog_timeo = RTL8152_TX_TIMEOUT; netdev->features |= NETIF_F_IP_CSUM; netdev->hw_features = NETIF_F_IP_CSUM; SET_ETHTOOL_OPS(netdev, &ops); - tp->speed = 0; tp->mii.dev = netdev; tp->mii.mdio_read = read_mii_word; -- cgit v0.10.2 From dee08ab83d0378d922b67e7cf10bbec3e4ea343b Mon Sep 17 00:00:00 2001 From: Luis Henriques Date: Wed, 14 Aug 2013 23:10:06 +0100 Subject: net: rfkill: Do not ignore errors from regulator_enable() Function regulator_enable() may return an error that has to be checked. This patch changes function rfkill_regulator_set_block() so that it checks for the return code. Also, rfkill_data->reg_enabled is set to 'true' only if there is no error. This fixes the following compilation warning: net/rfkill/rfkill-regulator.c:43:20: warning: ignoring return value of 'regulator_enable', declared with attribute warn_unused_result [-Wunused-result] Signed-off-by: Luis Henriques Signed-off-by: Johannes Berg diff --git a/net/rfkill/rfkill-regulator.c b/net/rfkill/rfkill-regulator.c index d11ac79..cf5b145 100644 --- a/net/rfkill/rfkill-regulator.c +++ b/net/rfkill/rfkill-regulator.c @@ -30,6 +30,7 @@ struct rfkill_regulator_data { static int rfkill_regulator_set_block(void *data, bool blocked) { struct rfkill_regulator_data *rfkill_data = data; + int ret = 0; pr_debug("%s: blocked: %d\n", __func__, blocked); @@ -40,15 +41,16 @@ static int rfkill_regulator_set_block(void *data, bool blocked) } } else { if (!rfkill_data->reg_enabled) { - regulator_enable(rfkill_data->vcc); - rfkill_data->reg_enabled = true; + ret = regulator_enable(rfkill_data->vcc); + if (!ret) + rfkill_data->reg_enabled = true; } } pr_debug("%s: regulator_is_enabled after set_block: %d\n", __func__, regulator_is_enabled(rfkill_data->vcc)); - return 0; + return ret; } static struct rfkill_ops rfkill_regulator_ops = { -- cgit v0.10.2 From 4061f895088a2825b15312947aedb8574e2d8de5 Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Sat, 10 Aug 2013 12:27:19 +0200 Subject: brcmfmac: use irq safe spinlock in brcmf_sdbrcm_txdata() Firmware-signalling needs transmit to firmware to be atomic and uses a spinlock with irq disabled. Therefor, brcmf_sdbrcm_txdata() should not use spin_unlock_bh() as it would enable the interrupts. Reviewed-by: Hante Meuleman Reviewed-by: Franky (Zhenhui) Lin Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Arend van Spriel Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h index 080395f..9249b6d 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h @@ -36,7 +36,11 @@ struct brcmf_bus_dcmd { * * @init: prepare for communication with dongle. * @stop: clear pending frames, disable data flow. - * @txdata: send a data frame to the dongle (callee disposes skb). + * @txdata: send a data frame to the dongle. When the data + * has been transferred, the common driver must be + * notified using brcmf_txcomplete(). The common + * driver calls this function with interrupts + * disabled. * @txctl: transmit a control request message to dongle. * @rxctl: receive a control response message from dongle. * @gettxq: obtain a reference of bus transmit queue (optional). 
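
The hunk below replaces spin_lock_bh()/spin_unlock_bh() with the irqsave variants, since .txdata() is now documented as being called with interrupts disabled and spin_unlock_bh() would re-enable them underneath the caller. As a minimal standalone sketch of that locking pattern (hypothetical example_dev/txq names, not taken from this patch):

#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct example_dev {
	spinlock_t txq_lock;
	struct sk_buff_head txq;
};

static void example_enqueue(struct example_dev *edev, struct sk_buff *skb)
{
	unsigned long flags;

	/* The caller may already run with interrupts disabled, so save and
	 * restore the interrupt state rather than assuming BH context.
	 */
	spin_lock_irqsave(&edev->txq_lock, flags);
	__skb_queue_tail(&edev->txq, skb);
	spin_unlock_irqrestore(&edev->txq_lock, flags);
}

Using irqsave/irqrestore preserves whatever interrupt state the caller had, which is the point here: the firmware-signalling code invokes the bus txdata callback while holding its own irq-disabled spinlock.
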
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c index 2641119..5cbce1d 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c @@ -2276,6 +2276,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt) struct brcmf_bus *bus_if = dev_get_drvdata(dev); struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; struct brcmf_sdio *bus = sdiodev->bus; + ulong flags; brcmf_dbg(TRACE, "Enter\n"); @@ -2293,7 +2294,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt) bus->sdcnt.fcqueued++; /* Priority based enq */ - spin_lock_bh(&bus->txqlock); + spin_lock_irqsave(&bus->txqlock, flags); if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) { skb_pull(pkt, SDPCM_HDRLEN); brcmf_txcomplete(bus->sdiodev->dev, pkt, false); @@ -2307,7 +2308,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt) bus->txoff = true; brcmf_txflowblock(bus->sdiodev->dev, true); } - spin_unlock_bh(&bus->txqlock); + spin_unlock_irqrestore(&bus->txqlock, flags); #ifdef DEBUG if (pktq_plen(&bus->txq, prec) > qcount[prec]) -- cgit v0.10.2 From 04779fddeff723cf5afe529e4ed67fc86f25ba37 Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Sat, 10 Aug 2013 12:27:20 +0200 Subject: brcmfmac: .txdata() bus callback should not call brcmf_txcomplete() With firmware-signalling the packet handed to the bus specific driver layer should not be discarded with brcmf_txcomplete() in the failure path. Instead only an error is returned and the caller decides what to do with the packet. Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Arend van Spriel Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c index 5cbce1d..db31312 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c @@ -2297,7 +2297,6 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt) spin_lock_irqsave(&bus->txqlock, flags); if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) { skb_pull(pkt, SDPCM_HDRLEN); - brcmf_txcomplete(bus->sdiodev->dev, pkt, false); brcmf_err("out of bus->txq !!!\n"); ret = -ENOSR; } else { diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c index 29b1f24..601b0d0 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c @@ -1745,6 +1745,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb) int fifo = BRCMF_FWS_FIFO_BCMC; bool multicast = is_multicast_ether_addr(eh->h_dest); bool pae = eh->h_proto == htons(ETH_P_PAE); + int ret; /* determine the priority */ if (!skb->priority) @@ -1759,7 +1760,10 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb) brcmf_proto_hdrpush(drvr, ifp->ifidx, 0, skb); /* Use bus module to send data frame */ - return brcmf_bus_txdata(drvr->bus_if, skb); + ret = brcmf_bus_txdata(drvr->bus_if, skb); + if (ret < 0) + brcmf_txfinalize(drvr, skb, false); + return ret; } /* set control buffer information */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c index 322cadc..39e01a7 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c @@ -614,7 +614,6 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb) return 0; fail: - brcmf_txcomplete(dev, skb, false); return ret; } -- cgit v0.10.2 From 87edd8916ee261cd8c49b736f156d602054bd9fd Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Sat, 10 Aug 2013 12:27:21 +0200 Subject: brcmfmac: add AMPDU reordering functionality This feature moves the responsibility of collecting all MPDUs in an AMPDU session in the correct order from the firmware to the host driver. This reduces buffer requirement on the firmware side. Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Arend van Spriel Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h index 86cbfe2..3943fb8 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h @@ -209,6 +209,8 @@ #define BRCMF_DCMD_MEDLEN 1536 #define BRCMF_DCMD_MAXLEN 8192 +#define BRCMF_AMPDU_RX_REORDER_MAXFLOWS 256 + /* Pattern matching filter. Specifies an offset within received packets to * start matching, the pattern to match, the size of the pattern, and a bitmask * that indicates which bits within the pattern should be matched. @@ -505,6 +507,25 @@ struct brcmf_dcmd { uint needed; /* bytes needed (optional) */ }; +/** + * struct brcmf_ampdu_rx_reorder - AMPDU receive reorder info + * + * @pktslots: dynamic allocated array for ordering AMPDU packets. + * @flow_id: AMPDU flow identifier. + * @cur_idx: last AMPDU index from firmware. + * @exp_idx: expected next AMPDU index. + * @max_idx: maximum amount of packets per AMPDU. + * @pend_pkts: number of packets currently in @pktslots. 
+ */ +struct brcmf_ampdu_rx_reorder { + struct sk_buff **pktslots; + u8 flow_id; + u8 cur_idx; + u8 exp_idx; + u8 max_idx; + u8 pend_pkts; +}; + /* Forward decls for struct brcmf_pub (see below) */ struct brcmf_proto; /* device communication protocol info */ struct brcmf_cfg80211_dev; /* cfg80211 device info */ @@ -539,6 +560,9 @@ struct brcmf_pub { bool fw_signals; struct brcmf_fws_info *fws; spinlock_t fws_spinlock; + + struct brcmf_ampdu_rx_reorder + *reorder_flows[BRCMF_AMPDU_RX_REORDER_MAXFLOWS]; #ifdef DEBUG struct dentry *dbgfs_dir; #endif @@ -604,6 +628,9 @@ struct brcmf_if { wait_queue_head_t pend_8021x_wait; }; +struct brcmf_skb_reorder_data { + u8 *reorder; +}; extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c index 8009901..9bc2785 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c @@ -38,6 +38,19 @@ MODULE_LICENSE("Dual BSD/GPL"); #define MAX_WAIT_FOR_8021X_TX 50 /* msecs */ +/* AMPDU rx reordering definitions */ +#define BRCMF_RXREORDER_FLOWID_OFFSET 0 +#define BRCMF_RXREORDER_MAXIDX_OFFSET 2 +#define BRCMF_RXREORDER_FLAGS_OFFSET 4 +#define BRCMF_RXREORDER_CURIDX_OFFSET 6 +#define BRCMF_RXREORDER_EXPIDX_OFFSET 8 + +#define BRCMF_RXREORDER_DEL_FLOW 0x01 +#define BRCMF_RXREORDER_FLUSH_ALL 0x02 +#define BRCMF_RXREORDER_CURIDX_VALID 0x04 +#define BRCMF_RXREORDER_EXPIDX_VALID 0x08 +#define BRCMF_RXREORDER_NEW_HOLE 0x10 + /* Error bits */ int brcmf_msg_level; module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR); @@ -279,16 +292,243 @@ void brcmf_txflowblock(struct device *dev, bool state) } } +static void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb) +{ + skb->dev = ifp->ndev; + skb->protocol = eth_type_trans(skb, skb->dev); + + if (skb->pkt_type == PACKET_MULTICAST) + ifp->stats.multicast++; + + /* Process special event packets */ + brcmf_fweh_process_skb(ifp->drvr, skb); + + if (!(ifp->ndev->flags & IFF_UP)) { + brcmu_pkt_buf_free_skb(skb); + return; + } + + ifp->stats.rx_bytes += skb->len; + ifp->stats.rx_packets++; + + brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol)); + if (in_interrupt()) + netif_rx(skb); + else + /* If the receive is not processed inside an ISR, + * the softirqd must be woken explicitly to service + * the NET_RX_SOFTIRQ. This is handled by netif_rx_ni(). 
+ */ + netif_rx_ni(skb); +} + +static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi, + u8 start, u8 end, + struct sk_buff_head *skb_list) +{ + /* initialize return list */ + __skb_queue_head_init(skb_list); + + if (rfi->pend_pkts == 0) { + brcmf_dbg(INFO, "no packets in reorder queue\n"); + return; + } + + do { + if (rfi->pktslots[start]) { + __skb_queue_tail(skb_list, rfi->pktslots[start]); + rfi->pktslots[start] = NULL; + } + start++; + if (start > rfi->max_idx) + start = 0; + } while (start != end); + rfi->pend_pkts -= skb_queue_len(skb_list); +} + +static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data, + struct sk_buff *pkt) +{ + u8 flow_id, max_idx, cur_idx, exp_idx, end_idx; + struct brcmf_ampdu_rx_reorder *rfi; + struct sk_buff_head reorder_list; + struct sk_buff *pnext; + u8 flags; + u32 buf_size; + + flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET]; + flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET]; + + /* validate flags and flow id */ + if (flags == 0xFF) { + brcmf_err("invalid flags...so ignore this packet\n"); + brcmf_netif_rx(ifp, pkt); + return; + } + + rfi = ifp->drvr->reorder_flows[flow_id]; + if (flags & BRCMF_RXREORDER_DEL_FLOW) { + brcmf_dbg(INFO, "flow-%d: delete\n", + flow_id); + + if (rfi == NULL) { + brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n", + flow_id); + brcmf_netif_rx(ifp, pkt); + return; + } + + brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx, + &reorder_list); + /* add the last packet */ + __skb_queue_tail(&reorder_list, pkt); + kfree(rfi); + ifp->drvr->reorder_flows[flow_id] = NULL; + goto netif_rx; + } + /* from here on we need a flow reorder instance */ + if (rfi == NULL) { + buf_size = sizeof(*rfi); + max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET]; + + buf_size += (max_idx + 1) * sizeof(pkt); + + /* allocate space for flow reorder info */ + brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n", + flow_id, max_idx); + rfi = kzalloc(buf_size, GFP_ATOMIC); + if (rfi == NULL) { + brcmf_err("failed to alloc buffer\n"); + brcmf_netif_rx(ifp, pkt); + return; + } + + ifp->drvr->reorder_flows[flow_id] = rfi; + rfi->pktslots = (struct sk_buff **)(rfi+1); + rfi->max_idx = max_idx; + } + if (flags & BRCMF_RXREORDER_NEW_HOLE) { + if (rfi->pend_pkts) { + brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, + rfi->exp_idx, + &reorder_list); + WARN_ON(rfi->pend_pkts); + } else { + __skb_queue_head_init(&reorder_list); + } + rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET]; + rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET]; + rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET]; + rfi->pktslots[rfi->cur_idx] = pkt; + rfi->pend_pkts++; + brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n", + flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts); + } else if (flags & BRCMF_RXREORDER_CURIDX_VALID) { + cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET]; + exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET]; + + if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) { + /* still in the current hole */ + /* enqueue the current on the buffer chain */ + if (rfi->pktslots[cur_idx] != NULL) { + brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n"); + brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]); + rfi->pktslots[cur_idx] = NULL; + } + rfi->pktslots[cur_idx] = pkt; + rfi->pend_pkts++; + rfi->cur_idx = cur_idx; + brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n", + flow_id, cur_idx, exp_idx, rfi->pend_pkts); + + /* can return now as there is no 
reorder + * list to process. + */ + return; + } + if (rfi->exp_idx == cur_idx) { + if (rfi->pktslots[cur_idx] != NULL) { + brcmf_dbg(INFO, "error buffer pending..free it\n"); + brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]); + rfi->pktslots[cur_idx] = NULL; + } + rfi->pktslots[cur_idx] = pkt; + rfi->pend_pkts++; + + /* got the expected one. flush from current to expected + * and update expected + */ + brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n", + flow_id, cur_idx, exp_idx, rfi->pend_pkts); + + rfi->cur_idx = cur_idx; + rfi->exp_idx = exp_idx; + + brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx, + &reorder_list); + brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n", + flow_id, skb_queue_len(&reorder_list), + rfi->pend_pkts); + } else { + u8 end_idx; + + brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n", + flow_id, flags, rfi->cur_idx, rfi->exp_idx, + cur_idx, exp_idx); + if (flags & BRCMF_RXREORDER_FLUSH_ALL) + end_idx = rfi->exp_idx; + else + end_idx = exp_idx; + + /* flush pkts first */ + brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx, + &reorder_list); + + if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) { + __skb_queue_tail(&reorder_list, pkt); + } else { + rfi->pktslots[cur_idx] = pkt; + rfi->pend_pkts++; + } + rfi->exp_idx = exp_idx; + rfi->cur_idx = cur_idx; + } + } else { + /* explicity window move updating the expected index */ + exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET]; + + brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n", + flow_id, flags, rfi->exp_idx, exp_idx); + if (flags & BRCMF_RXREORDER_FLUSH_ALL) + end_idx = rfi->exp_idx; + else + end_idx = exp_idx; + + brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx, + &reorder_list); + __skb_queue_tail(&reorder_list, pkt); + /* set the new expected idx */ + rfi->exp_idx = exp_idx; + } +netif_rx: + skb_queue_walk_safe(&reorder_list, pkt, pnext) { + __skb_unlink(pkt, &reorder_list); + brcmf_netif_rx(ifp, pkt); + } +} + void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list) { struct sk_buff *skb, *pnext; struct brcmf_if *ifp; struct brcmf_bus *bus_if = dev_get_drvdata(dev); struct brcmf_pub *drvr = bus_if->drvr; + struct brcmf_skb_reorder_data *rd; u8 ifidx; int ret; - brcmf_dbg(DATA, "Enter\n"); + brcmf_dbg(DATA, "Enter: %s: count=%u\n", dev_name(dev), + skb_queue_len(skb_list)); skb_queue_walk_safe(skb_list, skb, pnext) { skb_unlink(skb, skb_list); @@ -304,31 +544,11 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list) continue; } - skb->dev = ifp->ndev; - skb->protocol = eth_type_trans(skb, skb->dev); - - if (skb->pkt_type == PACKET_MULTICAST) - ifp->stats.multicast++; - - /* Process special event packets */ - brcmf_fweh_process_skb(drvr, skb); - - if (!(ifp->ndev->flags & IFF_UP)) { - brcmu_pkt_buf_free_skb(skb); - continue; - } - - ifp->stats.rx_bytes += skb->len; - ifp->stats.rx_packets++; - - if (in_interrupt()) - netif_rx(skb); + rd = (struct brcmf_skb_reorder_data *)skb->cb; + if (rd->reorder) + brcmf_rxreorder_process_info(ifp, rd->reorder, skb); else - /* If the receive is not processed inside an ISR, - * the softirqd must be woken explicitly to service the - * NET_RX_SOFTIRQ. This is handled by netif_rx_ni(). 
- */ - netif_rx_ni(skb); + brcmf_netif_rx(ifp, skb); } } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c index 601b0d0..438c7b9 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c @@ -1483,6 +1483,7 @@ static int brcmf_fws_notify_bcmc_credit_support(struct brcmf_if *ifp, int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len, struct sk_buff *skb) { + struct brcmf_skb_reorder_data *rd; struct brcmf_fws_info *fws = drvr->fws; u8 *signal_data; s16 data_len; @@ -1536,9 +1537,12 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len, err = BRCMF_FWS_RET_OK_NOSCHEDULE; switch (type) { - case BRCMF_FWS_TYPE_HOST_REORDER_RXPKTS: case BRCMF_FWS_TYPE_COMP_TXSTATUS: break; + case BRCMF_FWS_TYPE_HOST_REORDER_RXPKTS: + rd = (struct brcmf_skb_reorder_data *)skb->cb; + rd->reorder = data; + break; case BRCMF_FWS_TYPE_MACDESC_ADD: case BRCMF_FWS_TYPE_MACDESC_DEL: brcmf_fws_macdesc_indicate(fws, type, data); @@ -1747,6 +1751,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb) bool pae = eh->h_proto == htons(ETH_P_PAE); int ret; + brcmf_dbg(DATA, "tx proto=0x%X\n", ntohs(eh->h_proto)); /* determine the priority */ if (!skb->priority) skb->priority = cfg80211_classify8021d(skb); @@ -1915,7 +1920,8 @@ int brcmf_fws_init(struct brcmf_pub *drvr) if (drvr->fws->fcmode != BRCMF_FWS_FCMODE_NONE) tlv |= BRCMF_FWS_FLAGS_XONXOFF_SIGNALS | BRCMF_FWS_FLAGS_CREDIT_STATUS_SIGNALS | - BRCMF_FWS_FLAGS_HOST_PROPTXSTATUS_ACTIVE; + BRCMF_FWS_FLAGS_HOST_PROPTXSTATUS_ACTIVE | + BRCMF_FWS_FLAGS_HOST_RXREORDER_ACTIVE; rc = brcmf_fweh_register(drvr, BRCMF_E_FIFO_CREDIT_MAP, brcmf_fws_notify_credit_map); @@ -1940,6 +1946,9 @@ int brcmf_fws_init(struct brcmf_pub *drvr) goto fail_event; } + if (brcmf_fil_iovar_int_set(drvr->iflist[0], "ampdu_hostreorder", 1)) + brcmf_dbg(INFO, "enabling AMPDU host-reorder failed\n"); + brcmf_fws_hanger_init(&drvr->fws->hanger); brcmf_fws_macdesc_init(&drvr->fws->desc.other, NULL, 0); brcmf_fws_macdesc_set_name(drvr->fws, &drvr->fws->desc.other); -- cgit v0.10.2 From 0a4254be94fd3afc17fb84a88adabd4460dd4c56 Mon Sep 17 00:00:00 2001 From: Hante Meuleman Date: Sat, 10 Aug 2013 12:27:22 +0200 Subject: brcmfmac: always use worker thread for tx data. When fw signalling is disabled tx is sent immediately. Using queues and worker thread allows usb to do synchronous autopm. This patch makes fws use queues and worker thread even if signalling is not supported by FW or not enabled. Reviewed-by: Arend Van Spriel Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Hante Meuleman Signed-off-by: Arend van Spriel Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h index 3943fb8..df94d0e 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h @@ -557,7 +557,6 @@ struct brcmf_pub { struct brcmf_fweh_info fweh; - bool fw_signals; struct brcmf_fws_info *fws; spinlock_t fws_spinlock; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c index 9bc2785..e067aec 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c @@ -278,18 +278,10 @@ void brcmf_txflowblock(struct device *dev, bool state) { struct brcmf_bus *bus_if = dev_get_drvdata(dev); struct brcmf_pub *drvr = bus_if->drvr; - int i; brcmf_dbg(TRACE, "Enter\n"); - if (brcmf_fws_fc_active(drvr->fws)) { - brcmf_fws_bus_blocked(drvr, state); - } else { - for (i = 0; i < BRCMF_MAX_IFS; i++) - brcmf_txflowblock_if(drvr->iflist[i], - BRCMF_NETIF_STOP_REASON_BLOCK_BUS, - state); - } + brcmf_fws_bus_blocked(drvr, state); } static void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb) @@ -534,7 +526,7 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list) skb_unlink(skb, skb_list); /* process and remove protocol-specific header */ - ret = brcmf_proto_hdrpull(drvr, drvr->fw_signals, &ifidx, skb); + ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb); ifp = drvr->iflist[ifidx]; if (ret || !ifp || !ifp->ndev) { @@ -1109,7 +1101,6 @@ int brcmf_bus_start(struct device *dev) if (ret < 0) goto fail; - drvr->fw_signals = true; ret = brcmf_fws_init(drvr); if (ret < 0) goto fail; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c index 438c7b9..15fc807 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c @@ -425,6 +425,7 @@ struct brcmf_fws_info { struct brcmf_fws_stats stats; struct brcmf_fws_hanger hanger; enum brcmf_fws_fcmode fcmode; + bool fw_signals; bool bcmc_credit_check; struct brcmf_fws_macdesc_table desc; struct workqueue_struct *fws_wq; @@ -1160,7 +1161,8 @@ static void brcmf_fws_return_credits(struct brcmf_fws_info *fws, static void brcmf_fws_schedule_deq(struct brcmf_fws_info *fws) { /* only schedule dequeue when there are credits for delayed traffic */ - if (fws->fifo_credit_map & fws->fifo_delay_map) + if ((fws->fifo_credit_map & fws->fifo_delay_map) || + (!brcmf_fws_fc_active(fws) && fws->fifo_delay_map)) queue_work(fws->fws_wq, &fws->fws_dequeue_work); } @@ -1498,8 +1500,10 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len, WARN_ON(signal_len > skb->len); + if (!signal_len) + return 0; /* if flow control disabled, skip to packet data and leave */ - if (!signal_len || !drvr->fw_signals) { + if (!fws->fw_signals) { skb_pull(skb, signal_len); return 0; } @@ -1749,7 +1753,6 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb) int fifo = BRCMF_FWS_FIFO_BCMC; bool multicast = is_multicast_ether_addr(eh->h_dest); bool pae = eh->h_proto == htons(ETH_P_PAE); - int ret; brcmf_dbg(DATA, "tx proto=0x%X\n", ntohs(eh->h_proto)); /* determine the priority */ @@ -1760,17 +1763,6 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb) if (pae) atomic_inc(&ifp->pend_8021x_cnt); - if (!brcmf_fws_fc_active(fws)) { - /* If the protocol uses a data header, apply it */ - brcmf_proto_hdrpush(drvr, ifp->ifidx, 0, 
skb); - - /* Use bus module to send data frame */ - ret = brcmf_bus_txdata(drvr->bus_if, skb); - if (ret < 0) - brcmf_txfinalize(drvr, skb, false); - return ret; - } - /* set control buffer information */ skcb->if_flags = 0; skcb->state = BRCMF_FWS_SKBSTATE_NEW; @@ -1818,7 +1810,7 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp) struct brcmf_fws_info *fws = ifp->drvr->fws; struct brcmf_fws_mac_descriptor *entry; - if (!ifp->ndev || !ifp->drvr->fw_signals) + if (!ifp->ndev) return; entry = &fws->desc.iface[ifp->ifidx]; @@ -1849,15 +1841,38 @@ void brcmf_fws_del_interface(struct brcmf_if *ifp) static void brcmf_fws_dequeue_worker(struct work_struct *worker) { struct brcmf_fws_info *fws; + struct brcmf_pub *drvr; struct sk_buff *skb; ulong flags; int fifo; + u32 hslot; + u32 ifidx; + int ret; fws = container_of(worker, struct brcmf_fws_info, fws_dequeue_work); + drvr = fws->drvr; - brcmf_fws_lock(fws->drvr, flags); + brcmf_fws_lock(drvr, flags); for (fifo = BRCMF_FWS_FIFO_BCMC; fifo >= 0 && !fws->bus_flow_blocked; fifo--) { + if (!brcmf_fws_fc_active(fws)) { + while ((skb = brcmf_fws_deq(fws, fifo)) != NULL) { + hslot = brcmf_skb_htod_tag_get_field(skb, + HSLOT); + brcmf_fws_hanger_poppkt(&fws->hanger, hslot, + &skb, true); + ifidx = brcmf_skb_if_flags_get_field(skb, + INDEX); + brcmf_proto_hdrpush(drvr, ifidx, 0, skb); + /* Use bus module to send data frame */ + ret = brcmf_bus_txdata(drvr->bus_if, skb); + if (ret < 0) + brcmf_txfinalize(drvr, skb, false); + if (fws->bus_flow_blocked) + break; + } + continue; + } while ((fws->fifo_credit[fifo]) || ((!fws->bcmc_credit_check) && (fifo == BRCMF_FWS_FIFO_BCMC))) { skb = brcmf_fws_deq(fws, fifo); @@ -1885,17 +1900,15 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker) } } } - brcmf_fws_unlock(fws->drvr, flags); + brcmf_fws_unlock(drvr, flags); } int brcmf_fws_init(struct brcmf_pub *drvr) { + struct brcmf_fws_info *fws; u32 tlv = BRCMF_FWS_FLAGS_RSSI_SIGNALS; int rc; - if (!drvr->fw_signals) - return 0; - spin_lock_init(&drvr->fws_spinlock); drvr->fws = kzalloc(sizeof(*(drvr->fws)), GFP_KERNEL); @@ -1904,20 +1917,21 @@ int brcmf_fws_init(struct brcmf_pub *drvr) goto fail; } + fws = drvr->fws; /* set linkage back */ - drvr->fws->drvr = drvr; - drvr->fws->fcmode = fcmode; + fws->drvr = drvr; + fws->fcmode = fcmode; - drvr->fws->fws_wq = create_singlethread_workqueue("brcmf_fws_wq"); - if (drvr->fws->fws_wq == NULL) { + fws->fws_wq = create_singlethread_workqueue("brcmf_fws_wq"); + if (fws->fws_wq == NULL) { brcmf_err("workqueue creation failed\n"); rc = -EBADF; goto fail; } - INIT_WORK(&drvr->fws->fws_dequeue_work, brcmf_fws_dequeue_worker); + INIT_WORK(&fws->fws_dequeue_work, brcmf_fws_dequeue_worker); /* enable firmware signalling if fcmode active */ - if (drvr->fws->fcmode != BRCMF_FWS_FCMODE_NONE) + if (fws->fcmode != BRCMF_FWS_FCMODE_NONE) tlv |= BRCMF_FWS_FLAGS_XONXOFF_SIGNALS | BRCMF_FWS_FLAGS_CREDIT_STATUS_SIGNALS | BRCMF_FWS_FLAGS_HOST_PROPTXSTATUS_ACTIVE | @@ -1937,34 +1951,33 @@ int brcmf_fws_init(struct brcmf_pub *drvr) goto fail; } - /* setting the iovar may fail if feature is unsupported + /* Setting the iovar may fail if feature is unsupported * so leave the rc as is so driver initialization can - * continue. + * continue. Set mode back to none indicating not enabled. 
*/ + fws->fw_signals = true; if (brcmf_fil_iovar_int_set(drvr->iflist[0], "tlv", tlv)) { brcmf_err("failed to set bdcv2 tlv signaling\n"); - goto fail_event; + fws->fcmode = BRCMF_FWS_FCMODE_NONE; + fws->fw_signals = false; } if (brcmf_fil_iovar_int_set(drvr->iflist[0], "ampdu_hostreorder", 1)) brcmf_dbg(INFO, "enabling AMPDU host-reorder failed\n"); - brcmf_fws_hanger_init(&drvr->fws->hanger); - brcmf_fws_macdesc_init(&drvr->fws->desc.other, NULL, 0); - brcmf_fws_macdesc_set_name(drvr->fws, &drvr->fws->desc.other); - brcmu_pktq_init(&drvr->fws->desc.other.psq, BRCMF_FWS_PSQ_PREC_COUNT, + brcmf_fws_hanger_init(&fws->hanger); + brcmf_fws_macdesc_init(&fws->desc.other, NULL, 0); + brcmf_fws_macdesc_set_name(fws, &fws->desc.other); + brcmu_pktq_init(&fws->desc.other.psq, BRCMF_FWS_PSQ_PREC_COUNT, BRCMF_FWS_PSQ_LEN); /* create debugfs file for statistics */ - brcmf_debugfs_create_fws_stats(drvr, &drvr->fws->stats); + brcmf_debugfs_create_fws_stats(drvr, &fws->stats); brcmf_dbg(INFO, "%s bdcv2 tlv signaling [%x]\n", - drvr->fw_signals ? "enabled" : "disabled", tlv); + fws->fw_signals ? "enabled" : "disabled", tlv); return 0; -fail_event: - brcmf_fweh_unregister(drvr, BRCMF_E_BCMC_CREDIT_SUPPORT); - brcmf_fweh_unregister(drvr, BRCMF_E_FIFO_CREDIT_MAP); fail: brcmf_fws_deinit(drvr); return rc; @@ -1978,11 +1991,6 @@ void brcmf_fws_deinit(struct brcmf_pub *drvr) if (!fws) return; - /* disable firmware signalling entirely - * to avoid using the workqueue. - */ - drvr->fw_signals = false; - if (drvr->fws->fws_wq) destroy_workqueue(drvr->fws->fws_wq); @@ -1998,7 +2006,7 @@ void brcmf_fws_deinit(struct brcmf_pub *drvr) bool brcmf_fws_fc_active(struct brcmf_fws_info *fws) { - if (!fws) + if (!fws->creditmap_received) return false; return fws->fcmode != BRCMF_FWS_FCMODE_NONE; -- cgit v0.10.2 From 3f4f910fdc3b9eff06a007ab28762cd3d6720d51 Mon Sep 17 00:00:00 2001 From: Hante Meuleman Date: Sat, 10 Aug 2013 12:27:23 +0200 Subject: brcmfmac: no fws locking outside fws module. FWS uses locking to protect its data while being called from various entries. On bus_txdata the lock was kept resulting in unnecessary long locking, but also creating possibility for deadlock. This update changes the locking to release lock when bus_txdata is called. Reviewed-by: Arend Van Spriel Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Hante Meuleman Signed-off-by: Arend van Spriel Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h index df94d0e..1273dfd 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h @@ -558,7 +558,6 @@ struct brcmf_pub { struct brcmf_fweh_info fweh; struct brcmf_fws_info *fws; - spinlock_t fws_spinlock; struct brcmf_ampdu_rx_reorder *reorder_flows[BRCMF_AMPDU_RX_REORDER_MAXFLOWS]; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c index 15fc807..82f9140 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c @@ -422,6 +422,8 @@ struct brcmf_fws_macdesc_table { struct brcmf_fws_info { struct brcmf_pub *drvr; + spinlock_t spinlock; + ulong flags; struct brcmf_fws_stats stats; struct brcmf_fws_hanger hanger; enum brcmf_fws_fcmode fcmode; @@ -484,6 +486,18 @@ static int brcmf_fws_get_tlv_len(struct brcmf_fws_info *fws, } #undef BRCMF_FWS_TLV_DEF +static void brcmf_fws_lock(struct brcmf_fws_info *fws) + __acquires(&fws->spinlock) +{ + spin_lock_irqsave(&fws->spinlock, fws->flags); +} + +static void brcmf_fws_unlock(struct brcmf_fws_info *fws) + __releases(&fws->spinlock) +{ + spin_unlock_irqrestore(&fws->spinlock, fws->flags); +} + static bool brcmf_fws_ifidx_match(struct sk_buff *skb, void *arg) { u32 ifidx = brcmf_skb_if_flags_get_field(skb, INDEX); @@ -870,8 +884,11 @@ static bool brcmf_fws_tim_update(struct brcmf_fws_info *fws, skcb->state = BRCMF_FWS_SKBSTATE_TIM; bus = fws->drvr->bus_if; err = brcmf_fws_hdrpush(fws, skb); - if (err == 0) + if (err == 0) { + brcmf_fws_unlock(fws); err = brcmf_bus_txdata(bus, skb); + brcmf_fws_lock(fws); + } if (err) brcmu_pkt_buf_free_skb(skb); return true; @@ -906,26 +923,10 @@ static int brcmf_fws_rssi_indicate(struct brcmf_fws_info *fws, s8 rssi) return 0; } -/* using macro so sparse checking does not complain - * about locking imbalance. - */ -#define brcmf_fws_lock(drvr, flags) \ -do { \ - flags = 0; \ - spin_lock_irqsave(&((drvr)->fws_spinlock), (flags)); \ -} while (0) - -/* using macro so sparse checking does not complain - * about locking imbalance. 
- */ -#define brcmf_fws_unlock(drvr, flags) \ - spin_unlock_irqrestore(&((drvr)->fws_spinlock), (flags)) - static int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data) { struct brcmf_fws_mac_descriptor *entry, *existing; - ulong flags; u8 mac_handle; u8 ifidx; u8 *addr; @@ -939,10 +940,10 @@ int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data) if (entry->occupied) { brcmf_dbg(TRACE, "deleting %s mac %pM\n", entry->name, addr); - brcmf_fws_lock(fws->drvr, flags); + brcmf_fws_lock(fws); brcmf_fws_macdesc_cleanup(fws, entry, -1); brcmf_fws_macdesc_deinit(entry); - brcmf_fws_unlock(fws->drvr, flags); + brcmf_fws_unlock(fws); } else fws->stats.mac_update_failed++; return 0; @@ -951,13 +952,13 @@ int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data) existing = brcmf_fws_macdesc_lookup(fws, addr); if (IS_ERR(existing)) { if (!entry->occupied) { - brcmf_fws_lock(fws->drvr, flags); + brcmf_fws_lock(fws); entry->mac_handle = mac_handle; brcmf_fws_macdesc_init(entry, addr, ifidx); brcmf_fws_macdesc_set_name(fws, entry); brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT, BRCMF_FWS_PSQ_LEN); - brcmf_fws_unlock(fws->drvr, flags); + brcmf_fws_unlock(fws); brcmf_dbg(TRACE, "add %s mac %pM\n", entry->name, addr); } else { fws->stats.mac_update_failed++; @@ -965,13 +966,13 @@ int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data) } else { if (entry != existing) { brcmf_dbg(TRACE, "copy mac %s\n", existing->name); - brcmf_fws_lock(fws->drvr, flags); + brcmf_fws_lock(fws); memcpy(entry, existing, offsetof(struct brcmf_fws_mac_descriptor, psq)); entry->mac_handle = mac_handle; brcmf_fws_macdesc_deinit(existing); brcmf_fws_macdesc_set_name(fws, entry); - brcmf_fws_unlock(fws->drvr, flags); + brcmf_fws_unlock(fws); brcmf_dbg(TRACE, "relocate %s mac %pM\n", entry->name, addr); } else { @@ -987,7 +988,6 @@ static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data) { struct brcmf_fws_mac_descriptor *entry; - ulong flags; u8 mac_handle; int ret; @@ -997,7 +997,7 @@ static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws, fws->stats.mac_ps_update_failed++; return -ESRCH; } - brcmf_fws_lock(fws->drvr, flags); + brcmf_fws_lock(fws); /* a state update should wipe old credits */ entry->requested_credit = 0; entry->requested_packet = 0; @@ -1012,7 +1012,7 @@ static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws, brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_VO, true); ret = BRCMF_FWS_RET_OK_NOSCHEDULE; } - brcmf_fws_unlock(fws->drvr, flags); + brcmf_fws_unlock(fws); return ret; } @@ -1020,7 +1020,6 @@ static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data) { struct brcmf_fws_mac_descriptor *entry; - ulong flags; u8 ifidx; int ret; @@ -1039,7 +1038,7 @@ static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws, brcmf_dbg(TRACE, "%s (%d): %s\n", brcmf_fws_get_tlv_name(type), type, entry->name); - brcmf_fws_lock(fws->drvr, flags); + brcmf_fws_lock(fws); switch (type) { case BRCMF_FWS_TYPE_INTERFACE_OPEN: entry->state = BRCMF_FWS_STATE_OPEN; @@ -1051,10 +1050,10 @@ static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws, break; default: ret = -EINVAL; - brcmf_fws_unlock(fws->drvr, flags); + brcmf_fws_unlock(fws); goto fail; } - brcmf_fws_unlock(fws->drvr, flags); + brcmf_fws_unlock(fws); return ret; fail: @@ -1066,7 +1065,6 @@ static int brcmf_fws_request_indicate(struct brcmf_fws_info *fws, 
u8 type, u8 *data) { struct brcmf_fws_mac_descriptor *entry; - ulong flags; entry = &fws->desc.nodes[data[1] & 0x1F]; if (!entry->occupied) { @@ -1080,14 +1078,14 @@ static int brcmf_fws_request_indicate(struct brcmf_fws_info *fws, u8 type, brcmf_dbg(TRACE, "%s (%d): %s cnt %d bmp %d\n", brcmf_fws_get_tlv_name(type), type, entry->name, data[0], data[2]); - brcmf_fws_lock(fws->drvr, flags); + brcmf_fws_lock(fws); if (type == BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT) entry->requested_credit = data[0]; else entry->requested_packet = data[0]; entry->ac_bitmap = data[2]; - brcmf_fws_unlock(fws->drvr, flags); + brcmf_fws_unlock(fws); return BRCMF_FWS_RET_OK_SCHEDULE; } @@ -1385,7 +1383,6 @@ brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot, static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws, u8 *data) { - ulong flags; int i; if (fws->fcmode != BRCMF_FWS_FCMODE_EXPLICIT_CREDIT) { @@ -1394,19 +1391,18 @@ static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws, } brcmf_dbg(DATA, "enter: data %pM\n", data); - brcmf_fws_lock(fws->drvr, flags); + brcmf_fws_lock(fws); for (i = 0; i < BRCMF_FWS_FIFO_COUNT; i++) brcmf_fws_return_credits(fws, i, data[i]); brcmf_dbg(DATA, "map: credit %x delay %x\n", fws->fifo_credit_map, fws->fifo_delay_map); - brcmf_fws_unlock(fws->drvr, flags); + brcmf_fws_unlock(fws); return BRCMF_FWS_RET_OK_SCHEDULE; } static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data) { - ulong lflags; __le32 status_le; u32 status; u32 hslot; @@ -1420,9 +1416,9 @@ static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data) hslot = brcmf_txstatus_get_field(status, HSLOT); genbit = brcmf_txstatus_get_field(status, GENERATION); - brcmf_fws_lock(fws->drvr, lflags); + brcmf_fws_lock(fws); brcmf_fws_txs_process(fws, flags, hslot, genbit); - brcmf_fws_unlock(fws->drvr, lflags); + brcmf_fws_unlock(fws); return BRCMF_FWS_RET_OK_NOSCHEDULE; } @@ -1442,7 +1438,6 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp, { struct brcmf_fws_info *fws = ifp->drvr->fws; int i; - ulong flags; u8 *credits = data; if (e->datalen < BRCMF_FWS_FIFO_COUNT) { @@ -1455,7 +1450,7 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp, fws->creditmap_received = true; brcmf_dbg(TRACE, "enter: credits %pM\n", credits); - brcmf_fws_lock(ifp->drvr, flags); + brcmf_fws_lock(fws); for (i = 0; i < ARRAY_SIZE(fws->fifo_credit); i++) { if (*credits) fws->fifo_credit_map |= 1 << i; @@ -1464,7 +1459,7 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp, fws->fifo_credit[i] = *credits++; } brcmf_fws_schedule_deq(fws); - brcmf_fws_unlock(ifp->drvr, flags); + brcmf_fws_unlock(fws); return 0; } @@ -1473,12 +1468,11 @@ static int brcmf_fws_notify_bcmc_credit_support(struct brcmf_if *ifp, void *data) { struct brcmf_fws_info *fws = ifp->drvr->fws; - ulong flags; - brcmf_fws_lock(ifp->drvr, flags); + brcmf_fws_lock(fws); if (fws) fws->bcmc_credit_check = true; - brcmf_fws_unlock(ifp->drvr, flags); + brcmf_fws_unlock(fws); return 0; } @@ -1702,17 +1696,22 @@ static int brcmf_fws_commit_skb(struct brcmf_fws_info *fws, int fifo, return PTR_ERR(entry); brcmf_fws_precommit_skb(fws, fifo, skb); + entry->transit_count++; + if (entry->suppressed) + entry->suppr_transit_count++; + brcmf_fws_unlock(fws); rc = brcmf_bus_txdata(bus, skb); + brcmf_fws_lock(fws); brcmf_dbg(DATA, "%s flags %X htod %X bus_tx %d\n", entry->name, skcb->if_flags, skcb->htod, rc); if (rc < 0) { + entry->transit_count--; + if (entry->suppressed) + 
entry->suppr_transit_count--; brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb); goto rollback; } - entry->transit_count++; - if (entry->suppressed) - entry->suppr_transit_count++; fws->stats.pkt2bus++; fws->stats.send_pkts[fifo]++; if (brcmf_skb_if_flags_get_field(skb, REQUESTED)) @@ -1749,7 +1748,6 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb) struct brcmf_fws_info *fws = drvr->fws; struct brcmf_skbuff_cb *skcb = brcmf_skbcb(skb); struct ethhdr *eh = (struct ethhdr *)(skb->data); - ulong flags; int fifo = BRCMF_FWS_FIFO_BCMC; bool multicast = is_multicast_ether_addr(eh->h_dest); bool pae = eh->h_proto == htons(ETH_P_PAE); @@ -1770,7 +1768,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb) if (!multicast) fifo = brcmf_fws_prio2fifo[skb->priority]; - brcmf_fws_lock(drvr, flags); + brcmf_fws_lock(fws); if (fifo != BRCMF_FWS_FIFO_AC_BE && fifo < BRCMF_FWS_FIFO_BCMC) fws->borrow_defer_timestamp = jiffies + BRCMF_FWS_BORROW_DEFER_PERIOD; @@ -1790,7 +1788,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb) } brcmu_pkt_buf_free_skb(skb); } - brcmf_fws_unlock(drvr, flags); + brcmf_fws_unlock(fws); return 0; } @@ -1825,17 +1823,16 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp) void brcmf_fws_del_interface(struct brcmf_if *ifp) { struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc; - ulong flags; if (!entry) return; - brcmf_fws_lock(ifp->drvr, flags); + brcmf_fws_lock(ifp->drvr->fws); ifp->fws_desc = NULL; brcmf_dbg(TRACE, "deleting %s\n", entry->name); brcmf_fws_macdesc_deinit(entry); brcmf_fws_cleanup(ifp->drvr->fws, ifp->ifidx); - brcmf_fws_unlock(ifp->drvr, flags); + brcmf_fws_unlock(ifp->drvr->fws); } static void brcmf_fws_dequeue_worker(struct work_struct *worker) @@ -1843,7 +1840,6 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker) struct brcmf_fws_info *fws; struct brcmf_pub *drvr; struct sk_buff *skb; - ulong flags; int fifo; u32 hslot; u32 ifidx; @@ -1852,7 +1848,7 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker) fws = container_of(worker, struct brcmf_fws_info, fws_dequeue_work); drvr = fws->drvr; - brcmf_fws_lock(drvr, flags); + brcmf_fws_lock(fws); for (fifo = BRCMF_FWS_FIFO_BCMC; fifo >= 0 && !fws->bus_flow_blocked; fifo--) { if (!brcmf_fws_fc_active(fws)) { @@ -1865,7 +1861,9 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker) INDEX); brcmf_proto_hdrpush(drvr, ifidx, 0, skb); /* Use bus module to send data frame */ + brcmf_fws_unlock(fws); ret = brcmf_bus_txdata(drvr->bus_if, skb); + brcmf_fws_lock(fws); if (ret < 0) brcmf_txfinalize(drvr, skb, false); if (fws->bus_flow_blocked) @@ -1900,7 +1898,7 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker) } } } - brcmf_fws_unlock(drvr, flags); + brcmf_fws_unlock(fws); } int brcmf_fws_init(struct brcmf_pub *drvr) @@ -1909,8 +1907,6 @@ int brcmf_fws_init(struct brcmf_pub *drvr) u32 tlv = BRCMF_FWS_FLAGS_RSSI_SIGNALS; int rc; - spin_lock_init(&drvr->fws_spinlock); - drvr->fws = kzalloc(sizeof(*(drvr->fws)), GFP_KERNEL); if (!drvr->fws) { rc = -ENOMEM; @@ -1918,6 +1914,9 @@ int brcmf_fws_init(struct brcmf_pub *drvr) } fws = drvr->fws; + + spin_lock_init(&fws->spinlock); + /* set linkage back */ fws->drvr = drvr; fws->fcmode = fcmode; @@ -1986,7 +1985,6 @@ fail: void brcmf_fws_deinit(struct brcmf_pub *drvr) { struct brcmf_fws_info *fws = drvr->fws; - ulong flags; if (!fws) return; @@ -1995,10 +1993,10 @@ void brcmf_fws_deinit(struct brcmf_pub *drvr) destroy_workqueue(drvr->fws->fws_wq); /* cleanup 
*/ - brcmf_fws_lock(drvr, flags); + brcmf_fws_lock(fws); brcmf_fws_cleanup(fws, -1); drvr->fws = NULL; - brcmf_fws_unlock(drvr, flags); + brcmf_fws_unlock(fws); /* free top structure */ kfree(fws); @@ -2014,17 +2012,16 @@ bool brcmf_fws_fc_active(struct brcmf_fws_info *fws) void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb) { - ulong flags; u32 hslot; if (brcmf_skbcb(skb)->state == BRCMF_FWS_SKBSTATE_TIM) { brcmu_pkt_buf_free_skb(skb); return; } - brcmf_fws_lock(fws->drvr, flags); + brcmf_fws_lock(fws); hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT); brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, hslot, 0); - brcmf_fws_unlock(fws->drvr, flags); + brcmf_fws_unlock(fws); } void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked) -- cgit v0.10.2 From 2ee8382fc6c763c76396a6aaff77a27089eed3aa Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Sat, 10 Aug 2013 12:27:24 +0200 Subject: brcmfmac: ignore IF event if firmware indicates it Not every IF event from the firmware needs to result in a related interface, netdev or wdev, on the host. This is indicated in the event message. Handle that flag and effectively ignore the firmware event. Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Arend van Spriel Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h index 1273dfd..2eb9e64 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h @@ -194,6 +194,8 @@ #define BRCMF_E_IF_DEL 2 #define BRCMF_E_IF_CHANGE 3 +#define BRCMF_E_IF_FLAG_NOIF 1 + #define BRCMF_E_IF_ROLE_STA 0 #define BRCMF_E_IF_ROLE_AP 1 #define BRCMF_E_IF_ROLE_WDS 2 diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c index 83ee53a..fad77dd 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c @@ -185,6 +185,10 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr, ifevent->action, ifevent->ifidx, ifevent->bssidx, ifevent->flags, ifevent->role); + if (ifevent->flags & BRCMF_E_IF_FLAG_NOIF) { + brcmf_dbg(EVENT, "event can be ignored\n"); + return; + } if (ifevent->ifidx >= BRCMF_MAX_IFS) { brcmf_err("invalid interface index: %u\n", ifevent->ifidx); -- cgit v0.10.2 From 89c2f382fff4ec8adf04264925e07e951d0552ce Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Sat, 10 Aug 2013 12:27:25 +0200 Subject: brcmfmac: add support for manual TDLS operations Implement the .tdls_oper() callback and indicate TDLS support in the wiphy flags. Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Arend van Spriel Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h index 665ef69..ecabb04 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h @@ -69,4 +69,25 @@ struct brcmf_fil_bss_enable_le { __le32 enable; }; +/** + * struct tdls_iovar - common structure for tdls iovars. + * + * @ea: ether address of peer station. + * @mode: mode value depending on specific tdls iovar. + * @chanspec: channel specification. + * @pad: unused (for future use). 
+ */ +struct brcmf_tdls_iovar_le { + u8 ea[ETH_ALEN]; /* Station address */ + u8 mode; /* mode: depends on iovar */ + __le16 chanspec; + __le32 pad; /* future */ +}; + +enum brcmf_tdls_manual_ep_ops { + BRCMF_TDLS_MANUAL_EP_CREATE = 1, + BRCMF_TDLS_MANUAL_EP_DELETE = 3, + BRCMF_TDLS_MANUAL_EP_DISCOVERY = 6 +}; + #endif /* FWIL_TYPES_H_ */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index c3dfea3..0370e44 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c @@ -4126,6 +4126,53 @@ static void brcmf_cfg80211_crit_proto_stop(struct wiphy *wiphy, clear_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status); } +static int brcmf_convert_nl80211_tdls_oper(enum nl80211_tdls_operation oper) +{ + int ret; + + switch (oper) { + case NL80211_TDLS_DISCOVERY_REQ: + ret = BRCMF_TDLS_MANUAL_EP_DISCOVERY; + break; + case NL80211_TDLS_SETUP: + ret = BRCMF_TDLS_MANUAL_EP_CREATE; + break; + case NL80211_TDLS_TEARDOWN: + ret = BRCMF_TDLS_MANUAL_EP_DELETE; + break; + default: + brcmf_err("unsupported operation: %d\n", oper); + ret = -EOPNOTSUPP; + } + return ret; +} + +static int brcmf_cfg80211_tdls_oper(struct wiphy *wiphy, + struct net_device *ndev, u8 *peer, + enum nl80211_tdls_operation oper) +{ + struct brcmf_if *ifp; + struct brcmf_tdls_iovar_le info; + int ret = 0; + + ret = brcmf_convert_nl80211_tdls_oper(oper); + if (ret < 0) + return ret; + + ifp = netdev_priv(ndev); + memset(&info, 0, sizeof(info)); + info.mode = (u8)ret; + if (peer) + memcpy(info.ea, peer, ETH_ALEN); + + ret = brcmf_fil_iovar_data_set(ifp, "tdls_endpoint", + &info, sizeof(info)); + if (ret < 0) + brcmf_err("tdls_endpoint iovar failed: ret=%d\n", ret); + + return ret; +} + static struct cfg80211_ops wl_cfg80211_ops = { .add_virtual_intf = brcmf_cfg80211_add_iface, .del_virtual_intf = brcmf_cfg80211_del_iface, @@ -4164,6 +4211,7 @@ static struct cfg80211_ops wl_cfg80211_ops = { .stop_p2p_device = brcmf_p2p_stop_device, .crit_proto_start = brcmf_cfg80211_crit_proto_start, .crit_proto_stop = brcmf_cfg80211_crit_proto_stop, + .tdls_oper = brcmf_cfg80211_tdls_oper, CFG80211_TESTMODE_CMD(brcmf_cfg80211_testmode) }; @@ -4285,7 +4333,8 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev) wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites); wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT | WIPHY_FLAG_OFFCHAN_TX | - WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; + WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | + WIPHY_FLAG_SUPPORTS_TDLS; wiphy->mgmt_stypes = brcmf_txrx_stypes; wiphy->max_remain_on_channel_duration = 5000; brcmf_wiphy_pno_params(wiphy); @@ -4906,6 +4955,12 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, goto cfg80211_p2p_attach_out; } + err = brcmf_fil_iovar_int_set(ifp, "tdls_enable", 1); + if (err) { + brcmf_dbg(INFO, "TDLS not enabled (%d)\n", err); + wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_TDLS; + } + err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_VERSION, &io_type); if (err) { -- cgit v0.10.2 From b05e92545d9582be15699e4a33d0f93ac00b37dd Mon Sep 17 00:00:00 2001 From: Franky Lin Date: Sat, 10 Aug 2013 12:27:26 +0200 Subject: brcmfmac: abstract tx packet processing functions Abstract brcmf_sdio_txpkt_prep and brcmf_sdio_txpkt_postp as a preparation of chained tx packets for host side tx glomming. Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Arend van Spriel Signed-off-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c index e3f3c48..e13b1a6 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c @@ -592,6 +592,7 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, uint flags, u8 *buf, uint nbytes) { struct sk_buff *mypkt; + struct sk_buff_head pktq; int err; mypkt = brcmu_pkt_buf_get_skb(nbytes); @@ -602,7 +603,10 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, } memcpy(mypkt->data, buf, nbytes); - err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, mypkt); + __skb_queue_head_init(&pktq); + __skb_queue_tail(&pktq, mypkt); + err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, &pktq); + __skb_dequeue_tail(&pktq); brcmu_pkt_buf_free_skb(mypkt); return err; @@ -611,22 +615,18 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, int brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, - uint flags, struct sk_buff *pkt) + uint flags, struct sk_buff_head *pktq) { uint width; int err = 0; - struct sk_buff_head pkt_list; brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n", - fn, addr, pkt->len); + fn, addr, pktq->qlen); width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; brcmf_sdio_addrprep(sdiodev, width, &addr); - skb_queue_head_init(&pkt_list); - skb_queue_tail(&pkt_list, pkt); - err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, &pkt_list); - skb_dequeue_tail(&pkt_list); + err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, pktq); return err; } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c index db31312..aa4caca 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c @@ -510,7 +510,6 @@ struct brcmf_sdio { #ifdef DEBUG static int qcount[NUMPRIO]; -static int tx_packets[NUMPRIO]; #endif /* DEBUG */ #define DEFAULT_SDIO_DRIVE_STRENGTH 6 /* in milliamps */ @@ -1759,85 +1758,185 @@ brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus) return; } +/* flag marking a dummy skb added for DMA alignment requirement */ +#define DUMMY_SKB_FLAG 0x10000 +/* bit mask of data length chopped from the previous packet */ +#define DUMMY_SKB_CHOP_LEN_MASK 0xffff +/** + * brcmf_sdio_txpkt_prep - packet preparation for transmit + * @bus: brcmf_sdio structure pointer + * @pktq: packet list pointer + * @chan: virtual channel to transmit the packet + * + * Processes to be applied to the packet + * - Align data buffer pointer + * - Align data buffer length + * - Prepare header + * Return: negative value if there is error + */ +static int +brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq, + uint chan) +{ + u16 head_pad, tail_pad, tail_chop, pkt_len; + u16 head_align, sg_align; + u32 sw_header; + int ntail; + struct sk_buff *pkt_next, *pkt_new; + u8 *dat_buf; + unsigned blksize = bus->sdiodev->func[SDIO_FUNC_2]->cur_blksize; + + /* SDIO ADMA requires at least 32 bit alignment */ + head_align = 4; + sg_align = 4; + if (bus->sdiodev->pdata) { + head_align = bus->sdiodev->pdata->sd_head_align > 4 ? + bus->sdiodev->pdata->sd_head_align : 4; + sg_align = bus->sdiodev->pdata->sd_sgentry_align > 4 ? 
+ bus->sdiodev->pdata->sd_sgentry_align : 4; + } + /* sg entry alignment should be a divisor of block size */ + WARN_ON(blksize % sg_align); + + pkt_next = pktq->next; + dat_buf = (u8 *)(pkt_next->data); + + /* Check head padding */ + head_pad = ((unsigned long)dat_buf % head_align); + if (head_pad) { + if (skb_headroom(pkt_next) < head_pad) { + bus->sdiodev->bus_if->tx_realloc++; + head_pad = 0; + if (skb_cow(pkt_next, head_pad)) + return -ENOMEM; + } + skb_push(pkt_next, head_pad); + dat_buf = (u8 *)(pkt_next->data); + memset(dat_buf, 0, head_pad + SDPCM_HDRLEN); + } + + /* Check tail padding */ + pkt_new = NULL; + tail_chop = pkt_next->len % sg_align; + tail_pad = sg_align - tail_chop; + tail_pad += blksize - (pkt_next->len + tail_pad) % blksize; + if (skb_tailroom(pkt_next) < tail_pad && pkt_next->len > blksize) { + pkt_new = brcmu_pkt_buf_get_skb(tail_pad + tail_chop); + if (pkt_new == NULL) + return -ENOMEM; + memcpy(pkt_new->data, + pkt_next->data + pkt_next->len - tail_chop, + tail_chop); + *(u32 *)(pkt_new->cb) = DUMMY_SKB_FLAG + tail_chop; + skb_trim(pkt_next, pkt_next->len - tail_chop); + __skb_queue_after(pktq, pkt_next, pkt_new); + } else { + ntail = pkt_next->data_len + tail_pad - + (pkt_next->end - pkt_next->tail); + if (skb_cloned(pkt_next) || ntail > 0) + if (pskb_expand_head(pkt_next, 0, ntail, GFP_ATOMIC)) + return -ENOMEM; + if (skb_linearize(pkt_next)) + return -ENOMEM; + dat_buf = (u8 *)(pkt_next->data); + __skb_put(pkt_next, tail_pad); + } + + /* Now prep the header */ + /* 4 bytes hardware header (frame tag) + * Byte 0~1: Frame length + * Byte 2~3: Checksum, bit-wise inverse of frame length + */ + if (pkt_new) + pkt_len = pkt_next->len + tail_chop; + else + pkt_len = pkt_next->len - tail_pad; + *(__le16 *)dat_buf = cpu_to_le16(pkt_len); + *(((__le16 *)dat_buf) + 1) = cpu_to_le16(~pkt_len); + /* 8 bytes software header + * Byte 0: Tx sequence number + * Byte 1: 4 MSB Channel number + * Byte 2: Reserved + * Byte 3: Data offset + * Byte 4~7: Reserved + */ + sw_header = bus->tx_seq; + sw_header |= ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK); + sw_header |= ((head_pad + SDPCM_HDRLEN) << SDPCM_DOFFSET_SHIFT) & + SDPCM_DOFFSET_MASK; + *(((__le32 *)dat_buf) + 1) = cpu_to_le32(sw_header); + *(((__le32 *)dat_buf) + 2) = 0; + + if (BRCMF_BYTES_ON() && + ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) || + (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL))) + brcmf_dbg_hex_dump(true, pkt_next, pkt_len, "Tx Frame:\n"); + else if (BRCMF_HDRS_ON()) + brcmf_dbg_hex_dump(true, pkt_next, head_pad + SDPCM_HDRLEN, + "Tx Header:\n"); + + return 0; +} + +/** + * brcmf_sdio_txpkt_postp - packet post processing for transmit + * @bus: brcmf_sdio structure pointer + * @pktq: packet list pointer + * + * Processes to be applied to the packet + * - Remove head padding + * - Remove tail padding + */ +static void +brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq) +{ + u8 *hdr; + u32 dat_offset; + u32 dummy_flags, chop_len; + struct sk_buff *pkt_next, *tmp, *pkt_prev; + + skb_queue_walk_safe(pktq, pkt_next, tmp) { + dummy_flags = *(u32 *)(pkt_next->cb); + if (dummy_flags & DUMMY_SKB_FLAG) { + chop_len = dummy_flags & DUMMY_SKB_CHOP_LEN_MASK; + if (chop_len) { + pkt_prev = pkt_next->prev; + memcpy(pkt_prev->data + pkt_prev->len, + pkt_next->data, chop_len); + skb_put(pkt_prev, chop_len); + } + __skb_unlink(pkt_next, pktq); + brcmu_pkt_buf_free_skb(pkt_next); + } else { + hdr = pkt_next->data + SDPCM_FRAMETAG_LEN; + dat_offset = le32_to_cpu(*(__le32 *)hdr); + 
dat_offset = (dat_offset & SDPCM_DOFFSET_MASK) >> + SDPCM_DOFFSET_SHIFT; + skb_pull(pkt_next, dat_offset); + } + } +} + /* Writes a HW/SW header into the packet and sends it. */ /* Assumes: (a) header space already there, (b) caller holds lock */ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt, uint chan) { int ret; - u8 *frame; - u16 len, pad = 0; - u32 swheader; int i; + struct sk_buff_head localq; brcmf_dbg(TRACE, "Enter\n"); - frame = (u8 *) (pkt->data); - - /* Add alignment padding, allocate new packet if needed */ - pad = ((unsigned long)frame % BRCMF_SDALIGN); - if (pad) { - if (skb_headroom(pkt) < pad) { - brcmf_dbg(INFO, "insufficient headroom %d for %d pad\n", - skb_headroom(pkt), pad); - bus->sdiodev->bus_if->tx_realloc++; - ret = skb_cow(pkt, BRCMF_SDALIGN); - if (ret) - goto done; - pad = ((unsigned long)frame % BRCMF_SDALIGN); - } - skb_push(pkt, pad); - frame = (u8 *) (pkt->data); - memset(frame, 0, pad + SDPCM_HDRLEN); - } - /* precondition: pad < BRCMF_SDALIGN */ - - /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */ - len = (u16) (pkt->len); - *(__le16 *) frame = cpu_to_le16(len); - *(((__le16 *) frame) + 1) = cpu_to_le16(~len); - - /* Software tag: channel, sequence number, data offset */ - swheader = - ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) | bus->tx_seq | - (((pad + - SDPCM_HDRLEN) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK); - - *(((__le32 *) frame) + 1) = cpu_to_le32(swheader); - *(((__le32 *) frame) + 2) = 0; - -#ifdef DEBUG - tx_packets[pkt->priority]++; -#endif - - brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && - ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) || - (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)), - frame, len, "Tx Frame:\n"); - brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() && - ((BRCMF_CTL_ON() && - chan == SDPCM_CONTROL_CHANNEL) || - (BRCMF_DATA_ON() && - chan != SDPCM_CONTROL_CHANNEL))) && - BRCMF_HDRS_ON(), - frame, min_t(u16, len, 16), "TxHdr:\n"); - - /* Raise len to next SDIO block to eliminate tail command */ - if (bus->roundup && bus->blocksize && (len > bus->blocksize)) { - u16 pad = bus->blocksize - (len % bus->blocksize); - if ((pad <= bus->roundup) && (pad < bus->blocksize)) - len += pad; - } else if (len % BRCMF_SDALIGN) { - len += BRCMF_SDALIGN - (len % BRCMF_SDALIGN); - } - - /* Some controllers have trouble with odd bytes -- round to even */ - if (len & (ALIGNMENT - 1)) - len = roundup(len, ALIGNMENT); + __skb_queue_head_init(&localq); + __skb_queue_tail(&localq, pkt); + ret = brcmf_sdio_txpkt_prep(bus, &localq, chan); + if (ret) + goto done; sdio_claim_host(bus->sdiodev->func[1]); ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad, - SDIO_FUNC_2, F2SYNC, pkt); + SDIO_FUNC_2, F2SYNC, &localq); bus->sdcnt.f2txdata++; if (ret < 0) { @@ -1868,8 +1967,8 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt, bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP; done: - /* restore pkt buffer pointer before calling tx complete routine */ - skb_pull(pkt, SDPCM_HDRLEN + pad); + brcmf_sdio_txpkt_postp(bus, &localq); + __skb_dequeue_tail(&localq); brcmf_txcomplete(bus->sdiodev->dev, pkt, ret == 0); return ret; } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h index 09786a5..2b5407f 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h @@ -208,7 +208,7 @@ extern int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, 
u32 addr, */ extern int brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, - uint flags, struct sk_buff *pkt); + uint flags, struct sk_buff_head *pktq); extern int brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, uint flags, u8 *buf, uint nbytes); diff --git a/include/linux/platform_data/brcmfmac-sdio.h b/include/linux/platform_data/brcmfmac-sdio.h index b717499..e75dcbf 100644 --- a/include/linux/platform_data/brcmfmac-sdio.h +++ b/include/linux/platform_data/brcmfmac-sdio.h @@ -94,6 +94,10 @@ void __init brcmfmac_init_pdata(void) * Set this to true if the SDIO host controller has higher align requirement * than 32 bytes for each scatterlist item. * + * sd_head_align: alignment requirement for start of data buffer + * + * sd_sgentry_align: length alignment requirement for each sg entry + * * power_on: This function is called by the brcmfmac when the module gets * loaded. This can be particularly useful for low power devices. The platform * spcific routine may for example decide to power up the complete device. @@ -121,6 +125,8 @@ struct brcmfmac_sdio_platform_data { unsigned int oob_irq_nr; unsigned long oob_irq_flags; bool broken_sg_support; + unsigned short sd_head_align; + unsigned short sd_sgentry_align; void (*power_on)(void); void (*power_off)(void); void (*reset)(void); -- cgit v0.10.2 From cb7f79682a9289634094e2c3b5180611635332f5 Mon Sep 17 00:00:00 2001 From: Franky Lin Date: Sat, 10 Aug 2013 12:27:27 +0200 Subject: brcmfmac: remove align from brcmf_bus structure remove align from brcmf_bus since it is only used by sdio bus layer internally Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Arend van Spriel Signed-off-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c index 289e386..64f4a2b 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c @@ -350,7 +350,6 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func, sdiodev->bus_if = bus_if; bus_if->bus_priv.sdio = sdiodev; - bus_if->align = BRCMF_SDALIGN; dev_set_drvdata(&func->dev, bus_if); dev_set_drvdata(&sdiodev->func[1]->dev, bus_if); sdiodev->dev = &sdiodev->func[1]->dev; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h index 9249b6d..f7c1985 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h @@ -69,7 +69,6 @@ struct brcmf_bus_ops { * @maxctl: maximum size for rxctl request message. * @tx_realloc: number of tx packets realloced for headroom. * @dstats: dongle-based statistical data. - * @align: alignment requirement for the bus. * @dcmd_list: bus/device specific dongle initialization commands. * @chip: device identifier of the dongle chip. * @chiprev: revision of the dongle chip. 
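[Editorial reference, not part of any patch: the sd_head_align / sd_sgentry_align parameters documented above feed the padding arithmetic added in brcmf_sdio_txpkt_prep() earlier in this series. A minimal standalone sketch of that arithmetic follows, assuming the 4-byte minimum for SDIO ADMA and the function-2 block size; the helper names are illustrative only.]

#include <stdio.h>

/* bytes to push in front of the buffer so it starts aligned */
static unsigned int head_pad(unsigned long buf_addr, unsigned int head_align)
{
	return (unsigned int)(buf_addr % head_align);
}

/* pad to the sg entry alignment, then round the frame up to a block multiple */
static unsigned int tail_pad(unsigned int pkt_len, unsigned int sg_align,
			     unsigned int blksize)
{
	unsigned int pad = sg_align - (pkt_len % sg_align);

	pad += blksize - (pkt_len + pad) % blksize;
	return pad;
}

int main(void)
{
	unsigned int hp = head_pad(0x1003, 4);    /* -> 3 */
	unsigned int tp = tail_pad(1502, 4, 512); /* -> 2 + 32 = 34, 1536 total */

	printf("head_pad=%u tail_pad=%u\n", hp, tp);
	return 0;
}

[The worked values mirror the driver's behaviour, including the quirk that an already sg-aligned length still receives a full sg_align of tail padding before the block-size round-up.]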
@@ -84,7 +83,6 @@ struct brcmf_bus { enum brcmf_bus_state state; uint maxctl; unsigned long tx_realloc; - u8 align; u32 chip; u32 chiprev; struct list_head dcmd_list; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c index aa4caca..2563875 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c @@ -1166,7 +1166,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) { u16 dlen, totlen; u8 *dptr, num = 0; - + u32 align = 0; u16 sublen; struct sk_buff *pfirst, *pnext; @@ -1181,6 +1181,11 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) brcmf_dbg(SDIO, "start: glomd %p glom %p\n", bus->glomd, skb_peek(&bus->glom)); + if (bus->sdiodev->pdata) + align = bus->sdiodev->pdata->sd_sgentry_align; + if (align < 4) + align = 4; + /* If there's a descriptor, generate the packet chain */ if (bus->glomd) { pfirst = pnext = NULL; @@ -1204,9 +1209,9 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) pnext = NULL; break; } - if (sublen % BRCMF_SDALIGN) { + if (sublen % align) { brcmf_err("sublen %d not multiple of %d\n", - sublen, BRCMF_SDALIGN); + sublen, align); } totlen += sublen; @@ -1219,7 +1224,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) } /* Allocate/chain packet for next subframe */ - pnext = brcmu_pkt_buf_get_skb(sublen + BRCMF_SDALIGN); + pnext = brcmu_pkt_buf_get_skb(sublen + align); if (pnext == NULL) { brcmf_err("bcm_pkt_buf_get_skb failed, num %d len %d\n", num, sublen); @@ -1228,7 +1233,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) skb_queue_tail(&bus->glom, pnext); /* Adhere to start alignment requirements */ - pkt_align(pnext, sublen, BRCMF_SDALIGN); + pkt_align(pnext, sublen, align); } /* If all allocations succeeded, save packet chain @@ -3832,7 +3837,7 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev) struct brcmf_sdio *bus; struct brcmf_bus_dcmd *dlst; u32 dngl_txglom; - u32 dngl_txglomalign; + u32 txglomalign = 0; u8 idx; brcmf_dbg(TRACE, "Enter\n"); @@ -3926,9 +3931,13 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev) dlst->param_len = sizeof(u32); } else { /* otherwise, set txglomalign */ - dngl_txglomalign = bus->sdiodev->bus_if->align; + if (sdiodev->pdata) + txglomalign = sdiodev->pdata->sd_sgentry_align; + /* SDIO ADMA requires at least 32 bit alignment */ + if (txglomalign < 4) + txglomalign = 4; dlst->name = "bus:txglomalign"; - dlst->param = (char *)&dngl_txglomalign; + dlst->param = (char *)&txglomalign; dlst->param_len = sizeof(u32); } list_add(&dlst->list, &bus->sdiodev->bus_if->dcmd_list); -- cgit v0.10.2 From 6bc52319c2c688fdcbb203a4917576ca4cd4d51e Mon Sep 17 00:00:00 2001 From: Franky Lin Date: Sat, 10 Aug 2013 12:27:28 +0200 Subject: brcmfmac: streamline sdio bus header code Streamlining sdio bus specific header related code as preparation for host tx glomming Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c index 2563875..02ab5cd 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c @@ -201,13 +201,6 @@ struct rte_console { #define SFC_CRC4WOOS (1 << 2) /* CRC error for write out of sync */ #define SFC_ABORTALL (1 << 3) /* Abort all in-progress frames */ -/* HW frame tag */ -#define SDPCM_FRAMETAG_LEN 4 /* 2 bytes len, 2 bytes check val */ - -/* Total length of frame header for dongle protocol */ -#define SDPCM_HDRLEN (SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN) -#define SDPCM_RESERVE (SDPCM_HDRLEN + BRCMF_SDALIGN) - /* * Software allocation of To SB Mailbox resources */ @@ -250,38 +243,6 @@ struct rte_console { /* Current protocol version */ #define SDPCM_PROT_VERSION 4 -/* SW frame header */ -#define SDPCM_PACKET_SEQUENCE(p) (((u8 *)p)[0] & 0xff) - -#define SDPCM_CHANNEL_MASK 0x00000f00 -#define SDPCM_CHANNEL_SHIFT 8 -#define SDPCM_PACKET_CHANNEL(p) (((u8 *)p)[1] & 0x0f) - -#define SDPCM_NEXTLEN_OFFSET 2 - -/* Data Offset from SOF (HW Tag, SW Tag, Pad) */ -#define SDPCM_DOFFSET_OFFSET 3 /* Data Offset */ -#define SDPCM_DOFFSET_VALUE(p) (((u8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff) -#define SDPCM_DOFFSET_MASK 0xff000000 -#define SDPCM_DOFFSET_SHIFT 24 -#define SDPCM_FCMASK_OFFSET 4 /* Flow control */ -#define SDPCM_FCMASK_VALUE(p) (((u8 *)p)[SDPCM_FCMASK_OFFSET] & 0xff) -#define SDPCM_WINDOW_OFFSET 5 /* Credit based fc */ -#define SDPCM_WINDOW_VALUE(p) (((u8 *)p)[SDPCM_WINDOW_OFFSET] & 0xff) - -#define SDPCM_SWHEADER_LEN 8 /* SW header is 64 bits */ - -/* logical channel numbers */ -#define SDPCM_CONTROL_CHANNEL 0 /* Control channel Id */ -#define SDPCM_EVENT_CHANNEL 1 /* Asyc Event Indication Channel Id */ -#define SDPCM_DATA_CHANNEL 2 /* Data Xmit/Recv Channel Id */ -#define SDPCM_GLOM_CHANNEL 3 /* For coalesced packets */ -#define SDPCM_TEST_CHANNEL 15 /* Reserved for test/debug packets */ - -#define SDPCM_SEQUENCE_WRAP 256 /* wrap-around val for 8bit frame seq */ - -#define SDPCM_GLOMDESC(p) (((u8 *)p)[1] & 0x80) - /* * Shared structure between dongle and the host. * The structure contains pointers to trap or assert information. 
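[Editorial reference, not part of the patch: the macros being consolidated here describe a 12-byte SDPCM bus header -- a 4-byte hardware frame tag (little-endian length plus its bit-wise inverse as a checksum) followed by an 8-byte software header (sequence, channel, next-frame length, data offset, flow control, credit window). A standalone sketch of parsing that layout from a raw buffer; the struct and function names are hypothetical.]

#include <stdint.h>
#include <stdio.h>

struct sdpcm_hdr {
	uint16_t len;      /* frame length                        */
	uint8_t  seq;      /* tx/rx sequence number               */
	uint8_t  channel;  /* control/event/data/glom/test        */
	uint8_t  next_len; /* length of next frame, 16-byte units */
	uint8_t  doffset;  /* payload offset from start of frame  */
	uint8_t  fcmask;   /* flow-control bits (rx only)         */
	uint8_t  window;   /* max tx sequence allowed (rx only)   */
};

static int sdpcm_parse(const uint8_t *buf, struct sdpcm_hdr *h)
{
	uint16_t len   = buf[0] | (buf[1] << 8);
	uint16_t check = buf[2] | (buf[3] << 8);

	if ((uint16_t)(len ^ check) != 0xffff)
		return -1;              /* checksum is the bit-wise inverse of len */

	h->len      = len;
	h->seq      = buf[4];
	h->channel  = buf[5] & 0x0f;    /* low nibble; bit 0x80 marks a glom descriptor */
	h->next_len = buf[6];
	h->doffset  = buf[7];
	h->fcmask   = buf[8];
	h->window   = buf[9];
	return 0;
}

int main(void)
{
	/* 64-byte control frame, payload starts at offset 12 */
	uint8_t raw[12] = { 0x40, 0x00, 0xbf, 0xff, 0x01, 0x00,
			    0x00, 0x0c, 0x00, 0x00, 0x00, 0x00 };
	struct sdpcm_hdr h;

	if (!sdpcm_parse(raw, &h))
		printf("len=%u seq=%u chan=%u doff=%u\n",
		       h.len, h.seq, h.channel, h.doffset);
	return 0;
}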
@@ -396,8 +357,8 @@ struct sdpcm_shared_le { __le32 brpt_addr; }; -/* SDIO read frame info */ -struct brcmf_sdio_read { +/* dongle SDIO bus specific header info */ +struct brcmf_sdio_hdrinfo { u8 seq_num; u8 channel; u16 len; @@ -431,7 +392,7 @@ struct brcmf_sdio { u8 hdrbuf[MAX_HDR_READ + BRCMF_SDALIGN]; u8 *rxhdr; /* Header of current rx frame (in hdrbuf) */ u8 rx_seq; /* Receive sequence number (expected) */ - struct brcmf_sdio_read cur_read; + struct brcmf_sdio_hdrinfo cur_read; /* info of current read frame */ bool rxskip; /* Skip receive (awaiting NAK ACK) */ bool rxpending; /* Data frame pending in dongle */ @@ -1042,18 +1003,64 @@ static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus) } } -static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header, - struct brcmf_sdio_read *rd, - enum brcmf_sdio_frmtype type) +/** + * brcmfmac sdio bus specific header + * This is the lowest layer header wrapped on the packets transmitted between + * host and WiFi dongle which contains information needed for SDIO core and + * firmware + * + * It consists of 2 parts: hw header and software header + * hardware header (frame tag) - 4 bytes + * Byte 0~1: Frame length + * Byte 2~3: Checksum, bit-wise inverse of frame length + * software header - 8 bytes + * Byte 0: Rx/Tx sequence number + * Byte 1: 4 MSB Channel number, 4 LSB arbitrary flag + * Byte 2: Length of next data frame, reserved for Tx + * Byte 3: Data offset + * Byte 4: Flow control bits, reserved for Tx + * Byte 5: Maximum Sequence number allowed by firmware for Tx, N/A for Tx packet + * Byte 6~7: Reserved + */ +#define SDPCM_HWHDR_LEN 4 +#define SDPCM_SWHDR_LEN 8 +#define SDPCM_HDRLEN (SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN) +#define SDPCM_RESERVE (SDPCM_HDRLEN + BRCMF_SDALIGN) +/* software header */ +#define SDPCM_SEQ_MASK 0x000000ff +#define SDPCM_SEQ_WRAP 256 +#define SDPCM_CHANNEL_MASK 0x00000f00 +#define SDPCM_CHANNEL_SHIFT 8 +#define SDPCM_CONTROL_CHANNEL 0 /* Control */ +#define SDPCM_EVENT_CHANNEL 1 /* Asyc Event Indication */ +#define SDPCM_DATA_CHANNEL 2 /* Data Xmit/Recv */ +#define SDPCM_GLOM_CHANNEL 3 /* Coalesced packets */ +#define SDPCM_TEST_CHANNEL 15 /* Test/debug packets */ +#define SDPCM_GLOMDESC(p) (((u8 *)p)[1] & 0x80) +#define SDPCM_NEXTLEN_MASK 0x00ff0000 +#define SDPCM_NEXTLEN_SHIFT 16 +#define SDPCM_DOFFSET_MASK 0xff000000 +#define SDPCM_DOFFSET_SHIFT 24 +#define SDPCM_FCMASK_MASK 0x000000ff +#define SDPCM_WINDOW_MASK 0x0000ff00 +#define SDPCM_WINDOW_SHIFT 8 + +static inline u8 brcmf_sdio_getdatoffset(u8 *swheader) +{ + u32 hdrvalue; + hdrvalue = *(u32 *)swheader; + return (u8)((hdrvalue & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT); +} + +static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header, + struct brcmf_sdio_hdrinfo *rd, + enum brcmf_sdio_frmtype type) { u16 len, checksum; u8 rx_seq, fc, tx_seq_max; + u32 swheader; - /* - * 4 bytes hardware header (frame tag) - * Byte 0~1: Frame length - * Byte 2~3: Checksum, bit-wise inverse of frame length - */ + /* hw header */ len = get_unaligned_le16(header); checksum = get_unaligned_le16(header + sizeof(u16)); /* All zero means no more to read */ @@ -1082,24 +1089,16 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header, } rd->len = len; - /* - * 8 bytes hardware header - * Byte 0: Rx sequence number - * Byte 1: 4 MSB Channel number, 4 LSB arbitrary flag - * Byte 2: Length of next data frame - * Byte 3: Data offset - * Byte 4: Flow control bits - * Byte 5: Maximum Sequence number allow for Tx - * Byte 6~7: Reserved - */ - if (type == 
BRCMF_SDIO_FT_SUPER && - SDPCM_GLOMDESC(&header[SDPCM_FRAMETAG_LEN])) { + /* software header */ + header += SDPCM_HWHDR_LEN; + swheader = le32_to_cpu(*(__le32 *)header); + if (type == BRCMF_SDIO_FT_SUPER && SDPCM_GLOMDESC(header)) { brcmf_err("Glom descriptor found in superframe head\n"); rd->len = 0; return -EINVAL; } - rx_seq = SDPCM_PACKET_SEQUENCE(&header[SDPCM_FRAMETAG_LEN]); - rd->channel = SDPCM_PACKET_CHANNEL(&header[SDPCM_FRAMETAG_LEN]); + rx_seq = (u8)(swheader & SDPCM_SEQ_MASK); + rd->channel = (swheader & SDPCM_CHANNEL_MASK) >> SDPCM_CHANNEL_SHIFT; if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL && type != BRCMF_SDIO_FT_SUPER) { brcmf_err("HW header length too long\n"); @@ -1119,7 +1118,7 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header, rd->len = 0; return -EINVAL; } - rd->dat_offset = SDPCM_DOFFSET_VALUE(&header[SDPCM_FRAMETAG_LEN]); + rd->dat_offset = brcmf_sdio_getdatoffset(header); if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) { brcmf_err("seq %d: bad data offset\n", rx_seq); bus->sdcnt.rx_badhdr++; @@ -1136,14 +1135,15 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header, /* no need to check the reset for subframe */ if (type == BRCMF_SDIO_FT_SUB) return 0; - rd->len_nxtfrm = header[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET]; + rd->len_nxtfrm = (swheader & SDPCM_NEXTLEN_MASK) >> SDPCM_NEXTLEN_SHIFT; if (rd->len_nxtfrm << 4 > MAX_RX_DATASZ) { /* only warm for NON glom packet */ if (rd->channel != SDPCM_GLOM_CHANNEL) brcmf_err("seq %d: next length error\n", rx_seq); rd->len_nxtfrm = 0; } - fc = SDPCM_FCMASK_VALUE(&header[SDPCM_FRAMETAG_LEN]); + swheader = le32_to_cpu(*(__le32 *)(header + 4)); + fc = swheader & SDPCM_FCMASK_MASK; if (bus->flowcontrol != fc) { if (~bus->flowcontrol & fc) bus->sdcnt.fc_xoff++; @@ -1152,7 +1152,7 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header, bus->sdcnt.fc_rcvd++; bus->flowcontrol = fc; } - tx_seq_max = SDPCM_WINDOW_VALUE(&header[SDPCM_FRAMETAG_LEN]); + tx_seq_max = (swheader & SDPCM_WINDOW_MASK) >> SDPCM_WINDOW_SHIFT; if ((u8)(tx_seq_max - bus->tx_seq) > 0x40) { brcmf_err("seq %d: max tx seq number error\n", rx_seq); tx_seq_max = bus->tx_seq + 2; @@ -1162,6 +1162,28 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header, return 0; } +static inline void brcmf_sdio_update_hwhdr(u8 *header, u16 frm_length) +{ + *(__le16 *)header = cpu_to_le16(frm_length); + *(((__le16 *)header) + 1) = cpu_to_le16(~frm_length); +} + +static void brcmf_sdio_hdpack(struct brcmf_sdio *bus, u8 *header, + struct brcmf_sdio_hdrinfo *hd_info) +{ + u32 sw_header; + + brcmf_sdio_update_hwhdr(header, hd_info->len); + + sw_header = bus->tx_seq; + sw_header |= (hd_info->channel << SDPCM_CHANNEL_SHIFT) & + SDPCM_CHANNEL_MASK; + sw_header |= (hd_info->dat_offset << SDPCM_DOFFSET_SHIFT) & + SDPCM_DOFFSET_MASK; + *(((__le32 *)header) + 1) = cpu_to_le32(sw_header); + *(((__le32 *)header) + 2) = 0; +} + static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) { u16 dlen, totlen; @@ -1173,7 +1195,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) int errcode; u8 doff, sfdoff; - struct brcmf_sdio_read rd_new; + struct brcmf_sdio_hdrinfo rd_new; /* If packets, issue read(s) and send up packet chain */ /* Return sequence numbers consumed? 
*/ @@ -1309,8 +1331,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) rd_new.seq_num = rxseq; rd_new.len = dlen; sdio_claim_host(bus->sdiodev->func[1]); - errcode = brcmf_sdio_hdparser(bus, pfirst->data, &rd_new, - BRCMF_SDIO_FT_SUPER); + errcode = brcmf_sdio_hdparse(bus, pfirst->data, &rd_new, + BRCMF_SDIO_FT_SUPER); sdio_release_host(bus->sdiodev->func[1]); bus->cur_read.len = rd_new.len_nxtfrm << 4; @@ -1328,8 +1350,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) rd_new.len = pnext->len; rd_new.seq_num = rxseq++; sdio_claim_host(bus->sdiodev->func[1]); - errcode = brcmf_sdio_hdparser(bus, pnext->data, &rd_new, - BRCMF_SDIO_FT_SUB); + errcode = brcmf_sdio_hdparse(bus, pnext->data, &rd_new, + BRCMF_SDIO_FT_SUB); sdio_release_host(bus->sdiodev->func[1]); brcmf_dbg_hex_dump(BRCMF_GLOM_ON(), pnext->data, 32, "subframe:\n"); @@ -1361,7 +1383,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) skb_queue_walk_safe(&bus->glom, pfirst, pnext) { dptr = (u8 *) (pfirst->data); sublen = get_unaligned_le16(dptr); - doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]); + doff = brcmf_sdio_getdatoffset(&dptr[SDPCM_HWHDR_LEN]); brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(), dptr, pfirst->len, @@ -1539,7 +1561,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes) uint rxleft = 0; /* Remaining number of frames allowed */ int ret; /* Return code from calls */ uint rxcount = 0; /* Total frames read */ - struct brcmf_sdio_read *rd = &bus->cur_read, rd_new; + struct brcmf_sdio_hdrinfo *rd = &bus->cur_read, rd_new; u8 head_read = 0; brcmf_dbg(TRACE, "Enter\n"); @@ -1587,8 +1609,8 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes) bus->rxhdr, SDPCM_HDRLEN, "RxHdr:\n"); - if (brcmf_sdio_hdparser(bus, bus->rxhdr, rd, - BRCMF_SDIO_FT_NORMAL)) { + if (brcmf_sdio_hdparse(bus, bus->rxhdr, rd, + BRCMF_SDIO_FT_NORMAL)) { sdio_release_host(bus->sdiodev->func[1]); if (!bus->rxpending) break; @@ -1652,8 +1674,8 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes) memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN); rd_new.seq_num = rd->seq_num; sdio_claim_host(bus->sdiodev->func[1]); - if (brcmf_sdio_hdparser(bus, bus->rxhdr, &rd_new, - BRCMF_SDIO_FT_NORMAL)) { + if (brcmf_sdio_hdparse(bus, bus->rxhdr, &rd_new, + BRCMF_SDIO_FT_NORMAL)) { rd->len = 0; brcmu_pkt_buf_free_skb(pkt); } @@ -1697,7 +1719,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes) /* Save superframe descriptor and allocate packet frame */ if (rd->channel == SDPCM_GLOM_CHANNEL) { - if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) { + if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_HWHDR_LEN])) { brcmf_dbg(GLOM, "glom descriptor, %d bytes:\n", rd->len); brcmf_dbg_hex_dump(BRCMF_GLOM_ON(), @@ -1783,13 +1805,12 @@ static int brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq, uint chan) { - u16 head_pad, tail_pad, tail_chop, pkt_len; - u16 head_align, sg_align; - u32 sw_header; + u16 head_pad, tail_pad, tail_chop, head_align, sg_align; int ntail; struct sk_buff *pkt_next, *pkt_new; u8 *dat_buf; unsigned blksize = bus->sdiodev->func[SDIO_FUNC_2]->cur_blksize; + struct brcmf_sdio_hdrinfo hd_info = {0}; /* SDIO ADMA requires at least 32 bit alignment */ head_align = 4; @@ -1848,34 +1869,18 @@ brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq, } /* Now prep the header */ - /* 4 bytes hardware header (frame tag) - * Byte 0~1: Frame length - * Byte 2~3: Checksum, bit-wise 
inverse of frame length - */ if (pkt_new) - pkt_len = pkt_next->len + tail_chop; + hd_info.len = pkt_next->len + tail_chop; else - pkt_len = pkt_next->len - tail_pad; - *(__le16 *)dat_buf = cpu_to_le16(pkt_len); - *(((__le16 *)dat_buf) + 1) = cpu_to_le16(~pkt_len); - /* 8 bytes software header - * Byte 0: Tx sequence number - * Byte 1: 4 MSB Channel number - * Byte 2: Reserved - * Byte 3: Data offset - * Byte 4~7: Reserved - */ - sw_header = bus->tx_seq; - sw_header |= ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK); - sw_header |= ((head_pad + SDPCM_HDRLEN) << SDPCM_DOFFSET_SHIFT) & - SDPCM_DOFFSET_MASK; - *(((__le32 *)dat_buf) + 1) = cpu_to_le32(sw_header); - *(((__le32 *)dat_buf) + 2) = 0; + hd_info.len = pkt_next->len - tail_pad; + hd_info.channel = chan; + hd_info.dat_offset = head_pad + SDPCM_HDRLEN; + brcmf_sdio_hdpack(bus, dat_buf, &hd_info); if (BRCMF_BYTES_ON() && ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) || (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL))) - brcmf_dbg_hex_dump(true, pkt_next, pkt_len, "Tx Frame:\n"); + brcmf_dbg_hex_dump(true, pkt_next, hd_info.len, "Tx Frame:\n"); else if (BRCMF_HDRS_ON()) brcmf_dbg_hex_dump(true, pkt_next, head_pad + SDPCM_HDRLEN, "Tx Header:\n"); @@ -1913,7 +1918,7 @@ brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq) __skb_unlink(pkt_next, pktq); brcmu_pkt_buf_free_skb(pkt_next); } else { - hdr = pkt_next->data + SDPCM_FRAMETAG_LEN; + hdr = pkt_next->data + SDPCM_HWHDR_LEN; dat_offset = le32_to_cpu(*(__le32 *)hdr); dat_offset = (dat_offset & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT; @@ -1969,7 +1974,7 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt, } sdio_release_host(bus->sdiodev->func[1]); if (ret == 0) - bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP; + bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP; done: brcmf_sdio_txpkt_postp(bus, &localq); @@ -2325,7 +2330,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus) } } else { - bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP; + bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP; } sdio_release_host(bus->sdiodev->func[1]); bus->ctrl_frame_stat = false; @@ -2540,7 +2545,7 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len) return ret; } - bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP; + bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP; return ret; } @@ -2550,13 +2555,13 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen) { u8 *frame; u16 len; - u32 swheader; uint retries = 0; u8 doff = 0; int ret = -1; struct brcmf_bus *bus_if = dev_get_drvdata(dev); struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; struct brcmf_sdio *bus = sdiodev->bus; + struct brcmf_sdio_hdrinfo hd_info = {0}; brcmf_dbg(TRACE, "Enter\n"); @@ -2595,18 +2600,10 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen) brcmf_sdbrcm_bus_sleep(bus, false, false); sdio_release_host(bus->sdiodev->func[1]); - /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */ - *(__le16 *) frame = cpu_to_le16((u16) msglen); - *(((__le16 *) frame) + 1) = cpu_to_le16(~msglen); - - /* Software tag: channel, sequence number, data offset */ - swheader = - ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) & - SDPCM_CHANNEL_MASK) - | bus->tx_seq | ((doff << SDPCM_DOFFSET_SHIFT) & - SDPCM_DOFFSET_MASK); - put_unaligned_le32(swheader, frame + SDPCM_FRAMETAG_LEN); - put_unaligned_le32(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader)); + hd_info.len = (u16)msglen; + 
hd_info.channel = SDPCM_CONTROL_CHANNEL; + hd_info.dat_offset = doff; + brcmf_sdio_hdpack(bus, frame, &hd_info); if (!data_ok(bus)) { brcmf_dbg(INFO, "No bus credit bus->tx_max %d, bus->tx_seq %d\n", @@ -3856,7 +3853,7 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev) bus->txbound = BRCMF_TXBOUND; bus->rxbound = BRCMF_RXBOUND; bus->txminmax = BRCMF_TXMINMAX; - bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1; + bus->tx_seq = SDPCM_SEQ_WRAP - 1; INIT_WORK(&bus->datawork, brcmf_sdio_dataworker); bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq"); -- cgit v0.10.2 From 706478cba544584a1f8d515646f8d677d096395e Mon Sep 17 00:00:00 2001 From: Franky Lin Date: Sat, 10 Aug 2013 12:27:29 +0200 Subject: brcmfmac: use configurable sdio bus header length for tx packet Host tx glomming require an extended hardware sdio bus header to store information for dongle. Introduce a variable in struct brcmf_sdio to replace macro SDPCM_HDRLEN Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Arend van Spriel Signed-off-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c index 02ab5cd..1aa75d5 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c @@ -461,6 +461,8 @@ struct brcmf_sdio { struct brcmf_sdio_count sdcnt; bool sr_enabled; /* SaveRestore enabled */ bool sleeping; /* SDIO bus sleeping */ + + u8 tx_hdrlen; /* sdio bus header length for tx packet */ }; /* clkstate */ @@ -1025,7 +1027,6 @@ static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus) #define SDPCM_HWHDR_LEN 4 #define SDPCM_SWHDR_LEN 8 #define SDPCM_HDRLEN (SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN) -#define SDPCM_RESERVE (SDPCM_HDRLEN + BRCMF_SDALIGN) /* software header */ #define SDPCM_SEQ_MASK 0x000000ff #define SDPCM_SEQ_WRAP 256 @@ -1838,7 +1839,7 @@ brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq, } skb_push(pkt_next, head_pad); dat_buf = (u8 *)(pkt_next->data); - memset(dat_buf, 0, head_pad + SDPCM_HDRLEN); + memset(dat_buf, 0, head_pad + bus->tx_hdrlen); } /* Check tail padding */ @@ -1874,7 +1875,7 @@ brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq, else hd_info.len = pkt_next->len - tail_pad; hd_info.channel = chan; - hd_info.dat_offset = head_pad + SDPCM_HDRLEN; + hd_info.dat_offset = head_pad + bus->tx_hdrlen; brcmf_sdio_hdpack(bus, dat_buf, &hd_info); if (BRCMF_BYTES_ON() && @@ -1882,7 +1883,7 @@ brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq, (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL))) brcmf_dbg_hex_dump(true, pkt_next, hd_info.len, "Tx Frame:\n"); else if (BRCMF_HDRS_ON()) - brcmf_dbg_hex_dump(true, pkt_next, head_pad + SDPCM_HDRLEN, + brcmf_dbg_hex_dump(true, pkt_next, head_pad + bus->tx_hdrlen, "Tx Header:\n"); return 0; @@ -1989,7 +1990,6 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes) u32 intstatus = 0; int ret = 0, prec_out; uint cnt = 0; - uint datalen; u8 tx_prec_map; brcmf_dbg(TRACE, "Enter\n"); @@ -2005,7 +2005,6 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes) break; } spin_unlock_bh(&bus->txqlock); - datalen = pkt->len - SDPCM_HDRLEN; ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL); @@ -2392,7 +2391,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt) datalen = pkt->len; /* Add space for the header */ - 
skb_push(pkt, SDPCM_HDRLEN); + skb_push(pkt, bus->tx_hdrlen); /* precondition: IS_ALIGNED((unsigned long)(pkt->data), 2) */ prec = prio2prec((pkt->priority & PRIOMASK)); @@ -2405,7 +2404,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt) /* Priority based enq */ spin_lock_irqsave(&bus->txqlock, flags); if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) { - skb_pull(pkt, SDPCM_HDRLEN); + skb_pull(pkt, bus->tx_hdrlen); brcmf_err("out of bus->txq !!!\n"); ret = -ENOSR; } else { @@ -2566,8 +2565,8 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen) brcmf_dbg(TRACE, "Enter\n"); /* Back the pointer to make a room for bus header */ - frame = msg - SDPCM_HDRLEN; - len = (msglen += SDPCM_HDRLEN); + frame = msg - bus->tx_hdrlen; + len = (msglen += bus->tx_hdrlen); /* Add alignment padding (optional for ctl frames) */ doff = ((unsigned long)frame % BRCMF_SDALIGN); @@ -2575,10 +2574,10 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen) frame -= doff; len += doff; msglen += doff; - memset(frame, 0, doff + SDPCM_HDRLEN); + memset(frame, 0, doff + bus->tx_hdrlen); } /* precondition: doff < BRCMF_SDALIGN */ - doff += SDPCM_HDRLEN; + doff += bus->tx_hdrlen; /* Round send length to next SDIO block */ if (bus->roundup && bus->blocksize && (len > bus->blocksize)) { @@ -3895,8 +3894,11 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev) bus->sdiodev->bus_if->chip = bus->ci->chip; bus->sdiodev->bus_if->chiprev = bus->ci->chiprev; - /* Attach to the brcmf/OS/network interface */ - ret = brcmf_attach(SDPCM_RESERVE, bus->sdiodev->dev); + /* default sdio bus header length for tx packet */ + bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN; + + /* Attach to the common layer, reserve hdr space */ + ret = brcmf_attach(bus->tx_hdrlen, bus->sdiodev->dev); if (ret != 0) { brcmf_err("brcmf_attach failed\n"); goto fail; -- cgit v0.10.2 From e96542e55a2aacf4bdeccfe2f17b77c4895b4df2 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Sat, 10 Aug 2013 15:59:15 +0200 Subject: ath9k: fix rx descriptor related race condition Similar to a race condition that exists in the tx path, the hardware might re-read the 'next' pointer of a descriptor of the last completed frame. This only affects non-EDMA (pre-AR93xx) devices. To deal with this race, defer clearing and re-linking a completed rx descriptor until the next one has been processed. Cc: stable@vger.kernel.org Signed-off-by: Felix Fietkau Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 505c615..0d69b13 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -79,10 +79,6 @@ struct ath_config { sizeof(struct ath_buf_state)); \ } while (0) -#define ATH_RXBUF_RESET(_bf) do { \ - (_bf)->bf_stale = false; \ - } while (0) - /** * enum buffer_type - Buffer type flags * @@ -315,6 +311,7 @@ struct ath_rx { struct ath_descdma rxdma; struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX]; + struct ath_buf *buf_hold; struct sk_buff *frag; u32 ampdu_ref; diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 62dff97..2dd851a 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -42,8 +42,6 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf) struct ath_desc *ds; struct sk_buff *skb; - ATH_RXBUF_RESET(bf); - ds = bf->bf_desc; ds->ds_link = 0; /* link to null */ ds->ds_data = bf->bf_buf_addr; @@ -70,6 +68,14 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf) sc->rx.rxlink = &ds->ds_link; } +static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_buf *bf) +{ + if (sc->rx.buf_hold) + ath_rx_buf_link(sc, sc->rx.buf_hold); + + sc->rx.buf_hold = bf; +} + static void ath_setdefantenna(struct ath_softc *sc, u32 antenna) { /* XXX block beacon interrupts */ @@ -117,7 +123,6 @@ static bool ath_rx_edma_buf_link(struct ath_softc *sc, skb = bf->bf_mpdu; - ATH_RXBUF_RESET(bf); memset(skb->data, 0, ah->caps.rx_status_len); dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, ah->caps.rx_status_len, DMA_TO_DEVICE); @@ -432,6 +437,7 @@ int ath_startrecv(struct ath_softc *sc) if (list_empty(&sc->rx.rxbuf)) goto start_recv; + sc->rx.buf_hold = NULL; sc->rx.rxlink = NULL; list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) { ath_rx_buf_link(sc, bf); @@ -677,6 +683,9 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc, } bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list); + if (bf == sc->rx.buf_hold) + return NULL; + ds = bf->bf_desc; /* @@ -1387,7 +1396,7 @@ requeue: if (edma) { ath_rx_edma_buf_link(sc, qtype); } else { - ath_rx_buf_link(sc, bf); + ath_rx_buf_relink(sc, bf); ath9k_hw_rxena(ah); } } while (1); -- cgit v0.10.2 From 50676b811148314f43f0d874502fe9ac5f7d686d Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Sat, 10 Aug 2013 15:59:16 +0200 Subject: ath9k: shrink a few data structures by reordering fields Also reduce the size of a few fields where possible Signed-off-by: Felix Fietkau Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 0d69b13..7b1d036 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -72,7 +72,6 @@ struct ath_config { /*************************/ #define ATH_TXBUF_RESET(_bf) do { \ - (_bf)->bf_stale = false; \ (_bf)->bf_lastbf = NULL; \ (_bf)->bf_next = NULL; \ memset(&((_bf)->bf_state), 0, \ @@ -192,10 +191,10 @@ struct ath_txq { struct ath_atx_ac { struct ath_txq *txq; - int sched; struct list_head list; struct list_head tid_q; bool clear_ps_filter; + bool sched; }; struct ath_frame_info { @@ -212,6 +211,7 @@ struct ath_buf_state { u8 bf_type; u8 bfs_paprd; u8 ndelim; + bool stale; u16 seqno; unsigned long bfs_paprd_timestamp; }; @@ -225,7 +225,6 @@ struct ath_buf { void *bf_desc; /* virtual addr of desc */ dma_addr_t bf_daddr; /* physical addr of desc */ dma_addr_t bf_buf_addr; /* physical addr of data buffer, for DMA */ - bool bf_stale; struct ieee80211_tx_rate rates[4]; struct ath_buf_state bf_state; }; @@ -237,13 +236,14 @@ struct ath_atx_tid { struct ath_node *an; struct ath_atx_ac *ac; unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)]; - int bar_index; u16 seq_start; u16 seq_next; u16 baw_size; - int tidno; + u8 tidno; int baw_head; /* first un-acked tx buffer */ int baw_tail; /* next unused tx buffer slot */ + + s8 bar_index; bool sched; bool paused; bool active; @@ -255,10 +255,10 @@ struct ath_node { struct ieee80211_vif *vif; /* interface with which we're associated */ struct ath_atx_tid tid[IEEE80211_NUM_TIDS]; struct ath_atx_ac ac[IEEE80211_NUM_ACS]; - int ps_key; u16 maxampdu; u8 mpdudensity; + s8 ps_key; bool sleeping; bool no_ps_filter; diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index d8dfb3e..4fc80e3 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -493,7 +493,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, while (bf) { bf_next = bf->bf_next; - if (!bf->bf_stale || bf_next != NULL) + if (!bf->bf_state.stale || bf_next != NULL) list_move_tail(&bf->list, &bf_head); ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0); @@ -586,7 +586,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, * not a holding desc. */ INIT_LIST_HEAD(&bf_head); - if (bf_next != NULL || !bf_last->bf_stale) + if (bf_next != NULL || !bf_last->bf_state.stale) list_move_tail(&bf->list, &bf_head); if (!txpending) { @@ -610,7 +610,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, ieee80211_sta_eosp(sta); } /* retry the un-acked ones */ - if (bf->bf_next == NULL && bf_last->bf_stale) { + if (bf->bf_next == NULL && bf_last->bf_state.stale) { struct ath_buf *tbf; tbf = ath_clone_txbuf(sc, bf_last); @@ -1734,7 +1734,7 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq, while (!list_empty(list)) { bf = list_first_entry(list, struct ath_buf, list); - if (bf->bf_stale) { + if (bf->bf_state.stale) { list_del(&bf->list); ath_tx_return_buffer(sc, bf); @@ -2490,7 +2490,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) * it with the STALE flag. 
*/ bf_held = NULL; - if (bf->bf_stale) { + if (bf->bf_state.stale) { bf_held = bf; if (list_is_last(&bf_held->list, &txq->axq_q)) break; @@ -2514,7 +2514,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) * however leave the last descriptor back as the holding * descriptor for hw. */ - lastbf->bf_stale = true; + lastbf->bf_state.stale = true; INIT_LIST_HEAD(&bf_head); if (!list_is_singular(&lastbf->list)) list_cut_position(&bf_head, @@ -2585,7 +2585,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc) } bf = list_first_entry(fifo_list, struct ath_buf, list); - if (bf->bf_stale) { + if (bf->bf_state.stale) { list_del(&bf->list); ath_tx_return_buffer(sc, bf); bf = list_first_entry(fifo_list, struct ath_buf, list); @@ -2607,7 +2607,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc) ath_tx_txqaddbuf(sc, txq, &bf_q, true); } } else { - lastbf->bf_stale = true; + lastbf->bf_state.stale = true; if (bf != lastbf) list_cut_position(&bf_head, fifo_list, lastbf->list.prev); -- cgit v0.10.2 From f5bde5b8524fb2b8584af3750dbffda6557234e6 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 13 Aug 2013 12:33:26 +0200 Subject: ath9k: remove ath9k_sta_remove_debugfs mac80211 uses debugfs_remove_recursive, so there's no need for the driver to do an explicit cleanup of its sta debugfs entry. Signed-off-by: Felix Fietkau Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 7b1d036..df1c495 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -262,10 +262,6 @@ struct ath_node { bool sleeping; bool no_ps_filter; - -#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS) - struct dentry *node_stat; -#endif }; struct ath_tx_control { diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index c10cec5..e5c8333 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -1725,17 +1725,7 @@ void ath9k_sta_add_debugfs(struct ieee80211_hw *hw, struct dentry *dir) { struct ath_node *an = (struct ath_node *)sta->drv_priv; - an->node_stat = debugfs_create_file("node_stat", S_IRUGO, - dir, an, &fops_node_stat); -} - -void ath9k_sta_remove_debugfs(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct dentry *dir) -{ - struct ath_node *an = (struct ath_node *)sta->drv_priv; - debugfs_remove(an->node_stat); + debugfs_create_file("node_stat", S_IRUGO, dir, an, &fops_node_stat); } /* Ethtool support for get-stats */ diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h index 01c5c6a..6e1556f 100644 --- a/drivers/net/wireless/ath/ath9k/debug.h +++ b/drivers/net/wireless/ath/ath9k/debug.h @@ -292,10 +292,6 @@ void ath9k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct dentry *dir); -void ath9k_sta_remove_debugfs(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct dentry *dir); void ath_debug_send_fft_sample(struct ath_softc *sc, struct fft_sample_tlv *fft_sample); void ath9k_debug_stat_ant(struct ath_softc *sc, diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 911744f..0bee105 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -2364,7 +2364,6 @@ struct ieee80211_ops ath9k_ops = { #if defined(CONFIG_MAC80211_DEBUGFS) && 
defined(CONFIG_ATH9K_DEBUGFS) .sta_add_debugfs = ath9k_sta_add_debugfs, - .sta_remove_debugfs = ath9k_sta_remove_debugfs, #endif .sw_scan_start = ath9k_sw_scan_start, .sw_scan_complete = ath9k_sw_scan_complete, -- cgit v0.10.2 From ca6d4a74c4dea24277d4ce2df4940da6003a2a16 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 13 Aug 2013 12:33:27 +0200 Subject: ath9k: simplify debugfs chainmask handling Writing to that file is unnecessary and quirky, the antenna API should be used instead. Use debugfs_create_u8 to allow reading the values. Signed-off-by: Felix Fietkau Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index e5c8333..c088744 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -88,90 +88,6 @@ static const struct file_operations fops_debug = { #define DMA_BUF_LEN 1024 -static ssize_t read_file_tx_chainmask(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ath_softc *sc = file->private_data; - struct ath_hw *ah = sc->sc_ah; - char buf[32]; - unsigned int len; - - len = sprintf(buf, "0x%08x\n", ah->txchainmask); - return simple_read_from_buffer(user_buf, count, ppos, buf, len); -} - -static ssize_t write_file_tx_chainmask(struct file *file, const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ath_softc *sc = file->private_data; - struct ath_hw *ah = sc->sc_ah; - unsigned long mask; - char buf[32]; - ssize_t len; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; - - buf[len] = '\0'; - if (kstrtoul(buf, 0, &mask)) - return -EINVAL; - - ah->txchainmask = mask; - ah->caps.tx_chainmask = mask; - return count; -} - -static const struct file_operations fops_tx_chainmask = { - .read = read_file_tx_chainmask, - .write = write_file_tx_chainmask, - .open = simple_open, - .owner = THIS_MODULE, - .llseek = default_llseek, -}; - - -static ssize_t read_file_rx_chainmask(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ath_softc *sc = file->private_data; - struct ath_hw *ah = sc->sc_ah; - char buf[32]; - unsigned int len; - - len = sprintf(buf, "0x%08x\n", ah->rxchainmask); - return simple_read_from_buffer(user_buf, count, ppos, buf, len); -} - -static ssize_t write_file_rx_chainmask(struct file *file, const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ath_softc *sc = file->private_data; - struct ath_hw *ah = sc->sc_ah; - unsigned long mask; - char buf[32]; - ssize_t len; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; - - buf[len] = '\0'; - if (kstrtoul(buf, 0, &mask)) - return -EINVAL; - - ah->rxchainmask = mask; - ah->caps.rx_chainmask = mask; - return count; -} - -static const struct file_operations fops_rx_chainmask = { - .read = read_file_rx_chainmask, - .write = write_file_rx_chainmask, - .open = simple_open, - .owner = THIS_MODULE, - .llseek = default_llseek, -}; static ssize_t read_file_ani(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -1896,10 +1812,10 @@ int ath9k_init_debug(struct ath_hw *ah) &fops_reset); debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy, sc, &fops_recv); - debugfs_create_file("rx_chainmask", S_IRUSR | S_IWUSR, - sc->debug.debugfs_phy, sc, &fops_rx_chainmask); - debugfs_create_file("tx_chainmask", S_IRUSR | S_IWUSR, - sc->debug.debugfs_phy, sc, &fops_tx_chainmask); + debugfs_create_u8("rx_chainmask", 
S_IRUSR, sc->debug.debugfs_phy, + &ah->rxchainmask); + debugfs_create_u8("tx_chainmask", S_IRUSR, sc->debug.debugfs_phy, + &ah->txchainmask); debugfs_create_file("ani", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc, &fops_ani); debugfs_create_bool("paprd", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, -- cgit v0.10.2 From a1c781bb20ac1e03280e420abd47a99eb8bbdd3b Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 13 Aug 2013 12:33:28 +0200 Subject: ath9k: avoid accessing MRC registers on single-chain devices They are not implemented, and accessing them might trigger errors Cc: stable@vger.kernel.org Signed-off-by: Felix Fietkau Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 39c3730..18a5aa4 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -1172,6 +1172,10 @@ skip_ws_det: * is_on == 0 means MRC CCK is OFF (more noise imm) */ bool is_on = param ? 1 : 0; + + if (ah->caps.rx_chainmask == 1) + break; + REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL, AR_PHY_MRC_CCK_ENABLE, is_on); REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL, -- cgit v0.10.2 From 8f536b87950aa5a5c2d29eb684ed6813e1f9e800 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 13 Aug 2013 12:33:29 +0200 Subject: ath9k: simplify ath_tid_drain ath_tid_drain is only called when a station entry is being removed, so there is no point in still tracking BAW state. Remove some unnecessary code and a bogus TODO comment related to this. Signed-off-by: Felix Fietkau Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 4fc80e3..cb06c1c 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -312,12 +312,6 @@ static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid, } } -/* - * TODO: For frame(s) that are in the retry state, we will reuse the - * sequence number(s) without setting the retry bit. The - * alternative is to give up on these and BAR the receiver's window - * forward. - */ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq, struct ath_atx_tid *tid) @@ -341,14 +335,8 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq, } list_add_tail(&bf->list, &bf_head); - - ath_tx_update_baw(sc, tid, bf->bf_state.seqno); ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); } - - tid->seq_next = tid->seq_start; - tid->baw_tail = tid->baw_head; - tid->bar_index = -1; } static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq, -- cgit v0.10.2 From 563299d82fb2492e8d2390b94b00b668feebb229 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 13 Aug 2013 12:33:30 +0200 Subject: ath9k: reset buffer stale flag in ath_tx_get_tid_subframe If that flag stays set for a buffer that already ran through the tx path once, it might cause issues in tx completion processing. Better clear it early to ensure that this does not happen Signed-off-by: Felix Fietkau Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index cb06c1c..7223e30 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -888,6 +888,8 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq, bf = fi->bf; if (!fi->bf) bf = ath_tx_setup_buffer(sc, txq, tid, skb); + else + bf->bf_state.stale = false; if (!bf) { __skb_unlink(skb, *q); -- cgit v0.10.2 From 4a68ab100f24eca34c5d79b937df4dbd0cbc0b4a Mon Sep 17 00:00:00 2001 From: Vladimir Kondratiev Date: Tue, 13 Aug 2013 15:25:32 +0300 Subject: wil6210: let IP stack re-check HW TCP/UDP csum errors Fix for TCP iperf from Windows to Linux stall after about 1sec Hardware reports false errors in some situations: Microsoft IP stack, in violation of RFC 1624, set TCP checksum that should be 0x0 as 0xffff. hardware report Rx csum error. If HW csum absolutely trusted, this frame can be never received, as re-transmitted one will have same csum problem. In addition, it mess up block ack reorder buffer, as if packet dropped, it is not score boarded there. Signed-off-by: Vladimir Kondratiev Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index ea1abeb..d505b26 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -416,13 +416,13 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, */ if (d->dma.status & RX_DMA_STATUS_L4_IDENT) { /* L4 protocol identified, csum calculated */ - if ((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0) { + if ((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0) skb->ip_summed = CHECKSUM_UNNECESSARY; - } else { - wil_err(wil, "Incorrect checksum reported\n"); - kfree_skb(skb); - return NULL; - } + /* If HW reports bad checksum, let IP stack re-check it + * For example, HW don't understand Microsoft IP stack that + * mis-calculates TCP checksum - if it should be 0x0, + * it writes 0xffff in violation of RFC 1624 + */ } ds_bits = wil_rxdesc_ds_bits(d); -- cgit v0.10.2 From 2b721118b7821107757eb1d37af4b60e877b27e7 Mon Sep 17 00:00:00 2001 From: Oleksij Rempel Date: Tue, 13 Aug 2013 21:10:19 +0200 Subject: ath9k_htc: do not use bulk on EP3 and EP4 If usb auto suspend is enabled or system run in to suspend/resume cycle, ath9k-htc adapter will stop to response. It is reproducible on xhci HCs. Host part of problem: XHCI do timing calculation based on Transfer Type and bInterval, immediately after device was detected. Ath9k-htc try to overwrite this parameters on module probe and some changes in FW, since we do not initiate usb reset from the driver this changes are not took to account. So, before any kind of suspend or reset, host controller will operate with old parameters. Only after suspend/resume and if interface id stay unchanged, new parameters will by applied. Host will send bulk data with no intervals (?), which will cause overflow on FIFO of EP4. Firmware part of problem: By default, ath9k-htc adapters configured with EP3 and EP4 as interrupt endpoints. Current firmware will try to overwrite ConfigDescriptor to make EP3 and EP4 bulk. FIFO for this endpoints stay not reconfigured, so under the hood it is still Int EP. This patch is revert of 4a0e8ecca4ee commit which trying to reduce CPU usage on some systems. Since it will produce more bug as fixes, we will need to find other way to fix it. 
here is comment from kernel source which has some more explanation: * Some buggy high speed devices have bulk endpoints using * maxpacket sizes other than 512. High speed HCDs may not * be able to handle that particular bug, so let's warn... in our case EP3 and EP4 have maxpacket sizes = 64!!! Signed-off-by: Oleksij Rempel Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 5205a36..6d5d716 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -115,10 +115,10 @@ static int hif_usb_send_regout(struct hif_device_usb *hif_dev, cmd->skb = skb; cmd->hif_dev = hif_dev; - usb_fill_bulk_urb(urb, hif_dev->udev, - usb_sndbulkpipe(hif_dev->udev, USB_REG_OUT_PIPE), + usb_fill_int_urb(urb, hif_dev->udev, + usb_sndintpipe(hif_dev->udev, USB_REG_OUT_PIPE), skb->data, skb->len, - hif_usb_regout_cb, cmd); + hif_usb_regout_cb, cmd, 1); usb_anchor_urb(urb, &hif_dev->regout_submitted); ret = usb_submit_urb(urb, GFP_KERNEL); @@ -723,11 +723,11 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb) return; } - usb_fill_bulk_urb(urb, hif_dev->udev, - usb_rcvbulkpipe(hif_dev->udev, + usb_fill_int_urb(urb, hif_dev->udev, + usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE), nskb->data, MAX_REG_IN_BUF_SIZE, - ath9k_hif_usb_reg_in_cb, nskb); + ath9k_hif_usb_reg_in_cb, nskb, 1); } resubmit: @@ -909,11 +909,11 @@ static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev) goto err_skb; } - usb_fill_bulk_urb(urb, hif_dev->udev, - usb_rcvbulkpipe(hif_dev->udev, + usb_fill_int_urb(urb, hif_dev->udev, + usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE), skb->data, MAX_REG_IN_BUF_SIZE, - ath9k_hif_usb_reg_in_cb, skb); + ath9k_hif_usb_reg_in_cb, skb, 1); /* Anchor URB */ usb_anchor_urb(urb, &hif_dev->reg_in_submitted); @@ -1031,9 +1031,7 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev) static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev) { - struct usb_host_interface *alt = &hif_dev->interface->altsetting[0]; - struct usb_endpoint_descriptor *endp; - int ret, idx; + int ret; ret = ath9k_hif_usb_download_fw(hif_dev); if (ret) { @@ -1043,20 +1041,6 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev) return ret; } - /* On downloading the firmware to the target, the USB descriptor of EP4 - * is 'patched' to change the type of the endpoint to Bulk. This will - * bring down CPU usage during the scan period. - */ - for (idx = 0; idx < alt->desc.bNumEndpoints; idx++) { - endp = &alt->endpoint[idx].desc; - if ((endp->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) - == USB_ENDPOINT_XFER_INT) { - endp->bmAttributes &= ~USB_ENDPOINT_XFERTYPE_MASK; - endp->bmAttributes |= USB_ENDPOINT_XFER_BULK; - endp->bInterval = 0; - } - } - /* Alloc URBs */ ret = ath9k_hif_usb_alloc_urbs(hif_dev); if (ret) { @@ -1268,7 +1252,7 @@ static void ath9k_hif_usb_reboot(struct usb_device *udev) if (!buf) return; - ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, USB_REG_OUT_PIPE), + ret = usb_interrupt_msg(udev, usb_sndintpipe(udev, USB_REG_OUT_PIPE), buf, 4, NULL, HZ); if (ret) dev_err(&udev->dev, "ath9k_htc: USB reboot failed\n"); -- cgit v0.10.2 From f6307dda7f0478c721450e6de956fb5c4fdcb6a4 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Wed, 14 Aug 2013 09:11:09 +0530 Subject: ath9k: Use a subroutine to check for "mybeacon" Signed-off-by: Sujith Manoharan Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 2dd851a..0c23053 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -1160,6 +1160,24 @@ static void ath9k_apply_ampdu_details(struct ath_softc *sc, } } +static bool ath9k_is_mybeacon(struct ath_softc *sc, struct sk_buff *skb) +{ + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + struct ieee80211_hdr *hdr; + + hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len); + + if (ieee80211_is_beacon(hdr->frame_control)) { + RX_STAT_INC(rx_beacons); + if (!is_zero_ether_addr(common->curbssid) && + ether_addr_equal(hdr->addr3, common->curbssid)) + return true; + } + + return false; +} + int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) { struct ath_buf *bf; @@ -1175,7 +1193,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) enum ath9k_rx_qtype qtype; bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA); int dma_type; - u8 rx_status_len = ah->caps.rx_status_len; u64 tsf = 0; u32 tsf_lower = 0; unsigned long flags; @@ -1216,18 +1233,10 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) else hdr_skb = skb; - hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len); - rxs = IEEE80211_SKB_RXCB(hdr_skb); - if (ieee80211_is_beacon(hdr->frame_control)) { - RX_STAT_INC(rx_beacons); - if (!is_zero_ether_addr(common->curbssid) && - ether_addr_equal(hdr->addr3, common->curbssid)) - rs.is_mybeacon = true; - else - rs.is_mybeacon = false; - } - else - rs.is_mybeacon = false; + rs.is_mybeacon = ath9k_is_mybeacon(sc, hdr_skb); + + hdr = (struct ieee80211_hdr *) (hdr_skb->data + + ah->caps.rx_status_len); if (ieee80211_is_data_present(hdr->frame_control) && !ieee80211_is_qos_nullfunc(hdr->frame_control)) @@ -1235,6 +1244,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) ath_debug_stat_rx(sc, &rs); + rxs = IEEE80211_SKB_RXCB(hdr_skb); memset(rxs, 0, sizeof(struct ieee80211_rx_status)); rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp; -- cgit v0.10.2 From 0cab329d6037775bda6eee6ed742797c868f09cc Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Wed, 14 Aug 2013 09:11:10 +0530 Subject: ath9k: Fix phy error handling for DFS Since the DFS code appears to process the phy errors ATH9K_PHYERR_RADAR and ATH9K_PHYERR_FALSE_RADAR_EXT, check for the correct phyerr status in the main RX tasklet routine. Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 0c23053..83b3fc5 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -1256,10 +1256,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) unlikely(tsf_lower - rs.rs_tstamp > 0x10000000)) rxs->mactime += 0x100000000ULL; - if (rs.rs_phyerr == ATH9K_PHYERR_RADAR) + if (rs.rs_status & ATH9K_RXERR_PHY) { ath9k_dfs_process_phyerr(sc, hdr, &rs, rxs->mactime); - if (rs.rs_status & ATH9K_RXERR_PHY) { if (ath_process_fft(sc, hdr, &rs, rxs->mactime)) { RX_STAT_INC(rx_spectral); goto requeue_drop_frag; -- cgit v0.10.2 From 5871d2d787bc2fc45e43c9f8ecabdd2db37ad666 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Wed, 14 Aug 2013 09:11:11 +0530 Subject: ath9k: Discard invalid frames early Frames with invalid or zero length can be discarded early, there is no need to check the crypto bits. Signed-off-by: Sujith Manoharan Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 83b3fc5..f8cc2b3 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -764,7 +764,6 @@ static bool ath9k_rx_accept(struct ath_common *common, bool is_mc, is_valid_tkip, strip_mic, mic_error; struct ath_hw *ah = common->ah; __le16 fc; - u8 rx_status_len = ah->caps.rx_status_len; fc = hdr->frame_control; @@ -786,21 +785,6 @@ static bool ath9k_rx_accept(struct ath_common *common, !test_bit(rx_stats->rs_keyix, common->ccmp_keymap)) rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS; - if (!rx_stats->rs_datalen) { - RX_STAT_INC(rx_len_err); - return false; - } - - /* - * rs_status follows rs_datalen so if rs_datalen is too large - * we can take a hint that hardware corrupted it, so ignore - * those frames. - */ - if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) { - RX_STAT_INC(rx_len_err); - return false; - } - /* Only use error bits from the last fragment */ if (rx_stats->rs_more) return true; @@ -949,11 +933,33 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, struct ath_common *common = ath9k_hw_common(ah); bool discard_current = sc->rx.discard_next; + /* + * Discard corrupt descriptors which are marked in + * ath_get_next_rx_buf(). + */ sc->rx.discard_next = rx_stats->rs_more; if (discard_current) return -EINVAL; /* + * Discard zero-length packets. + */ + if (!rx_stats->rs_datalen) { + RX_STAT_INC(rx_len_err); + return -EINVAL; + } + + /* + * rs_status follows rs_datalen so if rs_datalen is too large + * we can take a hint that hardware corrupted it, so ignore + * those frames. + */ + if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) { + RX_STAT_INC(rx_len_err); + return -EINVAL; + } + + /* * everything but the rate is checked here, the rate check is done * separately to avoid doing two lookups for a rate for each frame. */ -- cgit v0.10.2 From 4a470647415276d43daf9238a5bd70acc2119555 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Wed, 14 Aug 2013 09:11:12 +0530 Subject: ath9k: Fix RX crypto processing The keymiss events are valid only in the last descriptor of a packet. Fix this by making sure that we return early in case of chained descriptors. Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index f8cc2b3..b04a971 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -785,10 +785,6 @@ static bool ath9k_rx_accept(struct ath_common *common, !test_bit(rx_stats->rs_keyix, common->ccmp_keymap)) rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS; - /* Only use error bits from the last fragment */ - if (rx_stats->rs_more) - return true; - mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) && !ieee80211_has_morefrags(fc) && !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) && @@ -959,6 +955,10 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, return -EINVAL; } + /* Only use status info from the last fragment */ + if (rx_stats->rs_more) + return 0; + /* * everything but the rate is checked here, the rate check is done * separately to avoid doing two lookups for a rate for each frame. 
@@ -966,10 +966,6 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) return -EINVAL; - /* Only use status info from the last fragment */ - if (rx_stats->rs_more) - return 0; - if (ath9k_process_rate(common, hw, rx_stats, rx_status)) return -EINVAL; -- cgit v0.10.2 From e0dd1a960bba04898bb590e96cf33ecbe3bf53e6 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Wed, 14 Aug 2013 09:11:13 +0530 Subject: ath9k: Fix TSF processing There is no need to calculate the mactime for chained descriptor packets, so make sure that this is done only for the last fragment of valid packets. Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index b04a971..9fabd5f 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -913,6 +913,22 @@ static void ath9k_process_rssi(struct ath_common *common, ah->stats.avgbrssi = rssi; } +static void ath9k_process_tsf(struct ath_rx_status *rs, + struct ieee80211_rx_status *rxs, + u64 tsf) +{ + u32 tsf_lower = tsf & 0xffffffff; + + rxs->mactime = (tsf & ~0xffffffffULL) | rs->rs_tstamp; + if (rs->rs_tstamp > tsf_lower && + unlikely(rs->rs_tstamp - tsf_lower > 0x10000000)) + rxs->mactime -= 0x100000000ULL; + + if (rs->rs_tstamp < tsf_lower && + unlikely(tsf_lower - rs->rs_tstamp > 0x10000000)) + rxs->mactime += 0x100000000ULL; +} + /* * For Decrypt or Demic errors, we only mark packet status here and always push * up the frame up to let mac80211 handle the actual error case, be it no @@ -922,7 +938,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, struct ieee80211_hdr *hdr, struct ath_rx_status *rx_stats, struct ieee80211_rx_status *rx_status, - bool *decrypt_error) + bool *decrypt_error, u64 tsf) { struct ieee80211_hw *hw = sc->hw; struct ath_hw *ah = sc->sc_ah; @@ -959,6 +975,8 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, if (rx_stats->rs_more) return 0; + ath9k_process_tsf(rx_stats, rx_status, tsf); + /* * everything but the rate is checked here, the rate check is done * separately to avoid doing two lookups for a rate for each frame. @@ -1196,7 +1214,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA); int dma_type; u64 tsf = 0; - u32 tsf_lower = 0; unsigned long flags; dma_addr_t new_buf_addr; @@ -1208,7 +1225,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) qtype = hp ? 
ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP; tsf = ath9k_hw_gettsf64(ah); - tsf_lower = tsf & 0xffffffff; do { bool decrypt_error = false; @@ -1249,15 +1265,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) rxs = IEEE80211_SKB_RXCB(hdr_skb); memset(rxs, 0, sizeof(struct ieee80211_rx_status)); - rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp; - if (rs.rs_tstamp > tsf_lower && - unlikely(rs.rs_tstamp - tsf_lower > 0x10000000)) - rxs->mactime -= 0x100000000ULL; - - if (rs.rs_tstamp < tsf_lower && - unlikely(tsf_lower - rs.rs_tstamp > 0x10000000)) - rxs->mactime += 0x100000000ULL; - if (rs.rs_status & ATH9K_RXERR_PHY) { ath9k_dfs_process_phyerr(sc, hdr, &rs, rxs->mactime); @@ -1268,7 +1275,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) } retval = ath9k_rx_skb_preprocess(sc, hdr, &rs, rxs, - &decrypt_error); + &decrypt_error, tsf); if (retval) goto requeue_drop_frag; -- cgit v0.10.2 From 3105b67276f2d3b09669e0a2b1337c9d99f573bd Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Wed, 14 Aug 2013 09:11:14 +0530 Subject: ath9k: Reorder some functions Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 9fabd5f..51e7d16 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -929,6 +929,116 @@ static void ath9k_process_tsf(struct ath_rx_status *rs, rxs->mactime += 0x100000000ULL; } +#ifdef CONFIG_ATH9K_DEBUGFS +static s8 fix_rssi_inv_only(u8 rssi_val) +{ + if (rssi_val == 128) + rssi_val = 0; + return (s8) rssi_val; +} +#endif + +/* returns 1 if this was a spectral frame, even if not handled. */ +static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr, + struct ath_rx_status *rs, u64 tsf) +{ +#ifdef CONFIG_ATH9K_DEBUGFS + struct ath_hw *ah = sc->sc_ah; + u8 bins[SPECTRAL_HT20_NUM_BINS]; + u8 *vdata = (u8 *)hdr; + struct fft_sample_ht20 fft_sample; + struct ath_radar_info *radar_info; + struct ath_ht20_mag_info *mag_info; + int len = rs->rs_datalen; + int dc_pos; + u16 length, max_magnitude; + + /* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer + * via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT + * yet, but this is supposed to be possible as well. + */ + if (rs->rs_phyerr != ATH9K_PHYERR_RADAR && + rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT && + rs->rs_phyerr != ATH9K_PHYERR_SPECTRAL) + return 0; + + /* check if spectral scan bit is set. This does not have to be checked + * if received through a SPECTRAL phy error, but shouldn't hurt. + */ + radar_info = ((struct ath_radar_info *)&vdata[len]) - 1; + if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK)) + return 0; + + /* Variation in the data length is possible and will be fixed later. + * Note that we only support HT20 for now. + * + * TODO: add HT20_40 support as well. + */ + if ((len > SPECTRAL_HT20_TOTAL_DATA_LEN + 2) || + (len < SPECTRAL_HT20_TOTAL_DATA_LEN - 1)) + return 1; + + fft_sample.tlv.type = ATH_FFT_SAMPLE_HT20; + length = sizeof(fft_sample) - sizeof(fft_sample.tlv); + fft_sample.tlv.length = __cpu_to_be16(length); + + fft_sample.freq = __cpu_to_be16(ah->curchan->chan->center_freq); + fft_sample.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0); + fft_sample.noise = ah->noise; + + switch (len - SPECTRAL_HT20_TOTAL_DATA_LEN) { + case 0: + /* length correct, nothing to do. */ + memcpy(bins, vdata, SPECTRAL_HT20_NUM_BINS); + break; + case -1: + /* first byte missing, duplicate it. 
*/ + memcpy(&bins[1], vdata, SPECTRAL_HT20_NUM_BINS - 1); + bins[0] = vdata[0]; + break; + case 2: + /* MAC added 2 extra bytes at bin 30 and 32, remove them. */ + memcpy(bins, vdata, 30); + bins[30] = vdata[31]; + memcpy(&bins[31], &vdata[33], SPECTRAL_HT20_NUM_BINS - 31); + break; + case 1: + /* MAC added 2 extra bytes AND first byte is missing. */ + bins[0] = vdata[0]; + memcpy(&bins[0], vdata, 30); + bins[31] = vdata[31]; + memcpy(&bins[32], &vdata[33], SPECTRAL_HT20_NUM_BINS - 32); + break; + default: + return 1; + } + + /* DC value (value in the middle) is the blind spot of the spectral + * sample and invalid, interpolate it. + */ + dc_pos = SPECTRAL_HT20_NUM_BINS / 2; + bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2; + + /* mag data is at the end of the frame, in front of radar_info */ + mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1; + + /* copy raw bins without scaling them */ + memcpy(fft_sample.data, bins, SPECTRAL_HT20_NUM_BINS); + fft_sample.max_exp = mag_info->max_exp & 0xf; + + max_magnitude = spectral_max_magnitude(mag_info->all_bins); + fft_sample.max_magnitude = __cpu_to_be16(max_magnitude); + fft_sample.max_index = spectral_max_index(mag_info->all_bins); + fft_sample.bitmap_weight = spectral_bitmap_weight(mag_info->all_bins); + fft_sample.tsf = __cpu_to_be64(tsf); + + ath_debug_send_fft_sample(sc, &fft_sample.tlv); + return 1; +#else + return 0; +#endif +} + /* * For Decrypt or Demic errors, we only mark packet status here and always push * up the frame up to let mac80211 handle the actual error case, be it no @@ -1052,116 +1162,6 @@ static void ath9k_rx_skb_postprocess(struct ath_common *common, rxs->flag &= ~RX_FLAG_DECRYPTED; } -#ifdef CONFIG_ATH9K_DEBUGFS -static s8 fix_rssi_inv_only(u8 rssi_val) -{ - if (rssi_val == 128) - rssi_val = 0; - return (s8) rssi_val; -} -#endif - -/* returns 1 if this was a spectral frame, even if not handled. */ -static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr, - struct ath_rx_status *rs, u64 tsf) -{ -#ifdef CONFIG_ATH9K_DEBUGFS - struct ath_hw *ah = sc->sc_ah; - u8 bins[SPECTRAL_HT20_NUM_BINS]; - u8 *vdata = (u8 *)hdr; - struct fft_sample_ht20 fft_sample; - struct ath_radar_info *radar_info; - struct ath_ht20_mag_info *mag_info; - int len = rs->rs_datalen; - int dc_pos; - u16 length, max_magnitude; - - /* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer - * via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT - * yet, but this is supposed to be possible as well. - */ - if (rs->rs_phyerr != ATH9K_PHYERR_RADAR && - rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT && - rs->rs_phyerr != ATH9K_PHYERR_SPECTRAL) - return 0; - - /* check if spectral scan bit is set. This does not have to be checked - * if received through a SPECTRAL phy error, but shouldn't hurt. - */ - radar_info = ((struct ath_radar_info *)&vdata[len]) - 1; - if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK)) - return 0; - - /* Variation in the data length is possible and will be fixed later. - * Note that we only support HT20 for now. - * - * TODO: add HT20_40 support as well. 
- */ - if ((len > SPECTRAL_HT20_TOTAL_DATA_LEN + 2) || - (len < SPECTRAL_HT20_TOTAL_DATA_LEN - 1)) - return 1; - - fft_sample.tlv.type = ATH_FFT_SAMPLE_HT20; - length = sizeof(fft_sample) - sizeof(fft_sample.tlv); - fft_sample.tlv.length = __cpu_to_be16(length); - - fft_sample.freq = __cpu_to_be16(ah->curchan->chan->center_freq); - fft_sample.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0); - fft_sample.noise = ah->noise; - - switch (len - SPECTRAL_HT20_TOTAL_DATA_LEN) { - case 0: - /* length correct, nothing to do. */ - memcpy(bins, vdata, SPECTRAL_HT20_NUM_BINS); - break; - case -1: - /* first byte missing, duplicate it. */ - memcpy(&bins[1], vdata, SPECTRAL_HT20_NUM_BINS - 1); - bins[0] = vdata[0]; - break; - case 2: - /* MAC added 2 extra bytes at bin 30 and 32, remove them. */ - memcpy(bins, vdata, 30); - bins[30] = vdata[31]; - memcpy(&bins[31], &vdata[33], SPECTRAL_HT20_NUM_BINS - 31); - break; - case 1: - /* MAC added 2 extra bytes AND first byte is missing. */ - bins[0] = vdata[0]; - memcpy(&bins[0], vdata, 30); - bins[31] = vdata[31]; - memcpy(&bins[32], &vdata[33], SPECTRAL_HT20_NUM_BINS - 32); - break; - default: - return 1; - } - - /* DC value (value in the middle) is the blind spot of the spectral - * sample and invalid, interpolate it. - */ - dc_pos = SPECTRAL_HT20_NUM_BINS / 2; - bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2; - - /* mag data is at the end of the frame, in front of radar_info */ - mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1; - - /* copy raw bins without scaling them */ - memcpy(fft_sample.data, bins, SPECTRAL_HT20_NUM_BINS); - fft_sample.max_exp = mag_info->max_exp & 0xf; - - max_magnitude = spectral_max_magnitude(mag_info->all_bins); - fft_sample.max_magnitude = __cpu_to_be16(max_magnitude); - fft_sample.max_index = spectral_max_index(mag_info->all_bins); - fft_sample.bitmap_weight = spectral_bitmap_weight(mag_info->all_bins); - fft_sample.tsf = __cpu_to_be64(tsf); - - ath_debug_send_fft_sample(sc, &fft_sample.tlv); - return 1; -#else - return 0; -#endif -} - static void ath9k_apply_ampdu_details(struct ath_softc *sc, struct ath_rx_status *rs, struct ieee80211_rx_status *rxs) { -- cgit v0.10.2 From 6b87d71c1ad41a3d0402286534909d0dc6285a51 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Wed, 14 Aug 2013 09:11:15 +0530 Subject: ath9k: Fix PHY error processing Parse the PHY error details only for the last fragment in case descriptors are chained. Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 51e7d16..e18adde 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -803,8 +803,6 @@ static bool ath9k_rx_accept(struct ath_common *common, rxs->flag |= RX_FLAG_FAILED_FCS_CRC; mic_error = false; } - if (rx_stats->rs_status & ATH9K_RXERR_PHY) - return false; if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) || (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) { @@ -1088,6 +1086,18 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, ath9k_process_tsf(rx_stats, rx_status, tsf); /* + * Process PHY errors and return so that the packet + * can be dropped. 
+ */ + if (rx_stats->rs_status & ATH9K_RXERR_PHY) { + ath9k_dfs_process_phyerr(sc, hdr, rx_stats, rx_status->mactime); + if (ath_process_fft(sc, hdr, rx_stats, rx_status->mactime)) + RX_STAT_INC(rx_spectral); + + return -EINVAL; + } + + /* * everything but the rate is checked here, the rate check is done * separately to avoid doing two lookups for a rate for each frame. */ @@ -1265,15 +1275,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) rxs = IEEE80211_SKB_RXCB(hdr_skb); memset(rxs, 0, sizeof(struct ieee80211_rx_status)); - if (rs.rs_status & ATH9K_RXERR_PHY) { - ath9k_dfs_process_phyerr(sc, hdr, &rs, rxs->mactime); - - if (ath_process_fft(sc, hdr, &rs, rxs->mactime)) { - RX_STAT_INC(rx_spectral); - goto requeue_drop_frag; - } - } - retval = ath9k_rx_skb_preprocess(sc, hdr, &rs, rxs, &decrypt_error, tsf); if (retval) -- cgit v0.10.2 From 5e85a32aca03aba3ce7e7123943b4529d2969a95 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Wed, 14 Aug 2013 09:11:16 +0530 Subject: ath9k: Fix RX debug statistics The various error bits that ath_debug_stat_rx() checks are valid only for the last descriptor for a chained packet, handle this correctly. Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index e18adde..2d0017c 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -1084,6 +1084,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, return 0; ath9k_process_tsf(rx_stats, rx_status, tsf); + ath_debug_stat_rx(sc, rx_stats); /* * Process PHY errors and return so that the packet @@ -1270,8 +1271,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) !ieee80211_is_qos_nullfunc(hdr->frame_control)) sc->rx.num_pkts++; - ath_debug_stat_rx(sc, &rs); - rxs = IEEE80211_SKB_RXCB(hdr_skb); memset(rxs, 0, sizeof(struct ieee80211_rx_status)); -- cgit v0.10.2 From a5525d9c8246cad653858044ccfd8a16143e84f6 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Wed, 14 Aug 2013 09:11:17 +0530 Subject: ath9k: Fix RX packet counter Handle chained descriptors and increment the RX counter only for valid packets. Since this is used only by MCI, use CONFIG_ATH9K_BTCOEX_SUPPORT. Signed-off-by: Sujith Manoharan Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 2d0017c..823b411 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -1119,6 +1119,13 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; sc->rx.discard_next = false; + +#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT + if (ieee80211_is_data_present(hdr->frame_control) && + !ieee80211_is_qos_nullfunc(hdr->frame_control)) + sc->rx.num_pkts++; +#endif + return 0; } @@ -1267,10 +1274,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) hdr = (struct ieee80211_hdr *) (hdr_skb->data + ah->caps.rx_status_len); - if (ieee80211_is_data_present(hdr->frame_control) && - !ieee80211_is_qos_nullfunc(hdr->frame_control)) - sc->rx.num_pkts++; - rxs = IEEE80211_SKB_RXCB(hdr_skb); memset(rxs, 0, sizeof(struct ieee80211_rx_status)); -- cgit v0.10.2 From 6f38482eb0630e0be304d5c4fa0e2e5c0a701e91 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Wed, 14 Aug 2013 09:11:18 +0530 Subject: ath9k: Fix RX beacon processing Make sure that chained descriptors are handled correctly before the packet is parsed to determine if it is a beacon. Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 823b411..090c27e 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -1037,13 +1037,28 @@ static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr, #endif } +static bool ath9k_is_mybeacon(struct ath_softc *sc, struct ieee80211_hdr *hdr) +{ + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + + if (ieee80211_is_beacon(hdr->frame_control)) { + RX_STAT_INC(rx_beacons); + if (!is_zero_ether_addr(common->curbssid) && + ether_addr_equal(hdr->addr3, common->curbssid)) + return true; + } + + return false; +} + /* * For Decrypt or Demic errors, we only mark packet status here and always push * up the frame up to let mac80211 handle the actual error case, be it no * decryption key or real decryption error. This let us keep statistics there. 
*/ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, - struct ieee80211_hdr *hdr, + struct sk_buff *skb, struct ath_rx_status *rx_stats, struct ieee80211_rx_status *rx_status, bool *decrypt_error, u64 tsf) @@ -1051,6 +1066,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, struct ieee80211_hw *hw = sc->hw; struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); + struct ieee80211_hdr *hdr; bool discard_current = sc->rx.discard_next; /* @@ -1083,6 +1099,8 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, if (rx_stats->rs_more) return 0; + hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len); + ath9k_process_tsf(rx_stats, rx_status, tsf); ath_debug_stat_rx(sc, rx_stats); @@ -1105,6 +1123,8 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) return -EINVAL; + rx_stats->is_mybeacon = ath9k_is_mybeacon(sc, hdr); + if (ath9k_process_rate(common, hw, rx_stats, rx_status)) return -EINVAL; @@ -1198,24 +1218,6 @@ static void ath9k_apply_ampdu_details(struct ath_softc *sc, } } -static bool ath9k_is_mybeacon(struct ath_softc *sc, struct sk_buff *skb) -{ - struct ath_hw *ah = sc->sc_ah; - struct ath_common *common = ath9k_hw_common(ah); - struct ieee80211_hdr *hdr; - - hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len); - - if (ieee80211_is_beacon(hdr->frame_control)) { - RX_STAT_INC(rx_beacons); - if (!is_zero_ether_addr(common->curbssid) && - ether_addr_equal(hdr->addr3, common->curbssid)) - return true; - } - - return false; -} - int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) { struct ath_buf *bf; @@ -1225,7 +1227,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) struct ath9k_hw_capabilities *pCap = &ah->caps; struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_hw *hw = sc->hw; - struct ieee80211_hdr *hdr; int retval; struct ath_rx_status rs; enum ath9k_rx_qtype qtype; @@ -1269,15 +1270,10 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) else hdr_skb = skb; - rs.is_mybeacon = ath9k_is_mybeacon(sc, hdr_skb); - - hdr = (struct ieee80211_hdr *) (hdr_skb->data + - ah->caps.rx_status_len); - rxs = IEEE80211_SKB_RXCB(hdr_skb); memset(rxs, 0, sizeof(struct ieee80211_rx_status)); - retval = ath9k_rx_skb_preprocess(sc, hdr, &rs, rxs, + retval = ath9k_rx_skb_preprocess(sc, hdr_skb, &rs, rxs, &decrypt_error, tsf); if (retval) goto requeue_drop_frag; -- cgit v0.10.2 From eb5f952c31abdd5849fb9005beb3dc4ac734c355 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Wed, 14 Aug 2013 09:11:19 +0530 Subject: ath9k: Move the RX poll check to preprocess() Signed-off-by: Sujith Manoharan Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 090c27e..5b84ce4 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -1124,6 +1124,10 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, return -EINVAL; rx_stats->is_mybeacon = ath9k_is_mybeacon(sc, hdr); + if (rx_stats->is_mybeacon) { + sc->hw_busy_count = 0; + ath_start_rx_poll(sc, 3); + } if (ath9k_process_rate(common, hw, rx_stats, rx_status)) return -EINVAL; @@ -1278,10 +1282,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) if (retval) goto requeue_drop_frag; - if (rs.is_mybeacon) { - sc->hw_busy_count = 0; - ath_start_rx_poll(sc, 3); - } /* Ensure we always have an skb to requeue once we are done * processing the current buffer's skb */ requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC); -- cgit v0.10.2 From b09255957b69d7b5ea1ad9d2b455a4f8769e06e7 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Wed, 14 Aug 2013 09:11:20 +0530 Subject: ath9k: Handle corrupt descriptors properly The MIC/PHYERR/CRC error bits are valid only for the last desc. for chained packets. Check this early in the preprocess() routine and bail out. Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 5b84ce4..30cb726 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -1099,6 +1099,16 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, if (rx_stats->rs_more) return 0; + /* + * Return immediately if the RX descriptor has been marked + * as corrupt based on the various error bits. + * + * This is different from the other corrupt descriptor + * condition handled above. + */ + if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC) + return -EINVAL; + hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len); ath9k_process_tsf(rx_stats, rx_status, tsf); @@ -1335,8 +1345,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) sc->rx.frag = skb; goto requeue; } - if (rs.rs_status & ATH9K_RXERR_CORRUPT_DESC) - goto requeue_drop_frag; if (sc->rx.frag) { int space = skb->len - skb_tailroom(hdr_skb); -- cgit v0.10.2 From 7c5c73cde03c876b23d840e82f4df687bce83b44 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Wed, 14 Aug 2013 09:11:21 +0530 Subject: ath9k: Fix error condition for corrupt descriptors In case a descriptor has the "done" bit clear and the next descriptor has it set, we drop both of them. If the packet that is received after these two packets is dropped for some reason, "discard_next" will not cleared. Fix this. Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 30cb726..ec89280 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -1068,6 +1068,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_hdr *hdr; bool discard_current = sc->rx.discard_next; + int ret = 0; /* * Discard corrupt descriptors which are marked in @@ -1106,8 +1107,10 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, * This is different from the other corrupt descriptor * condition handled above. 
*/ - if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC) - return -EINVAL; + if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC) { + ret = -EINVAL; + goto exit; + } hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len); @@ -1123,15 +1126,18 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, if (ath_process_fft(sc, hdr, rx_stats, rx_status->mactime)) RX_STAT_INC(rx_spectral); - return -EINVAL; + ret = -EINVAL; + goto exit; } /* * everything but the rate is checked here, the rate check is done * separately to avoid doing two lookups for a rate for each frame. */ - if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) - return -EINVAL; + if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) { + ret = -EINVAL; + goto exit; + } rx_stats->is_mybeacon = ath9k_is_mybeacon(sc, hdr); if (rx_stats->is_mybeacon) { @@ -1139,8 +1145,10 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, ath_start_rx_poll(sc, 3); } - if (ath9k_process_rate(common, hw, rx_stats, rx_status)) - return -EINVAL; + if (ath9k_process_rate(common, hw, rx_stats, rx_status)) { + ret =-EINVAL; + goto exit; + } ath9k_process_rssi(common, hw, hdr, rx_stats); @@ -1152,15 +1160,15 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, if (rx_stats->rs_moreaggr) rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; - sc->rx.discard_next = false; - #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT if (ieee80211_is_data_present(hdr->frame_control) && !ieee80211_is_qos_nullfunc(hdr->frame_control)) sc->rx.num_pkts++; #endif - return 0; +exit: + sc->rx.discard_next = false; + return ret; } static void ath9k_rx_skb_postprocess(struct ath_common *common, -- cgit v0.10.2 From ea3ef101d750f78dc1e532bcf759ab9afea295df Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Wed, 14 Aug 2013 09:11:22 +0530 Subject: ath9k: Remove unused function argument Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index ec89280..3b47198 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -885,7 +885,6 @@ static int ath9k_process_rate(struct ath_common *common, static void ath9k_process_rssi(struct ath_common *common, struct ieee80211_hw *hw, - struct ieee80211_hdr *hdr, struct ath_rx_status *rx_stats) { struct ath_softc *sc = hw->priv; @@ -1150,7 +1149,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, goto exit; } - ath9k_process_rssi(common, hw, hdr, rx_stats); + ath9k_process_rssi(common, hw, rx_stats); rx_status->band = hw->conf.chandef.chan->band; rx_status->freq = hw->conf.chandef.chan->center_freq; -- cgit v0.10.2 From e3acd13d2141fa25566877e8f575065f204317f5 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Wed, 14 Aug 2013 21:15:54 +0530 Subject: ath9k: Handle invalid RSSI The combined RSSI can be invalid which is indicated by the value -128. Use RX_FLAG_NO_SIGNAL_VAL for such cases. Signed-off-by: Sujith Manoharan Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 3b47198..6ad7d61 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -885,29 +885,49 @@ static int ath9k_process_rate(struct ath_common *common, static void ath9k_process_rssi(struct ath_common *common, struct ieee80211_hw *hw, - struct ath_rx_status *rx_stats) + struct ath_rx_status *rx_stats, + struct ieee80211_rx_status *rxs) { struct ath_softc *sc = hw->priv; struct ath_hw *ah = common->ah; int last_rssi; int rssi = rx_stats->rs_rssi; - if (!rx_stats->is_mybeacon || - ((ah->opmode != NL80211_IFTYPE_STATION) && - (ah->opmode != NL80211_IFTYPE_ADHOC))) + /* + * RSSI is not available for subframes in an A-MPDU. + */ + if (rx_stats->rs_moreaggr) { + rxs->flag |= RX_FLAG_NO_SIGNAL_VAL; + return; + } + + /* + * Check if the RSSI for the last subframe in an A-MPDU + * or an unaggregated frame is valid. + */ + if (rx_stats->rs_rssi == ATH9K_RSSI_BAD) { + rxs->flag |= RX_FLAG_NO_SIGNAL_VAL; return; + } - if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr) + /* + * Update Beacon RSSI, this is used by ANI. + */ + if (rx_stats->is_mybeacon && + ((ah->opmode == NL80211_IFTYPE_STATION) || + (ah->opmode == NL80211_IFTYPE_ADHOC))) { ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi); + last_rssi = sc->last_rssi; + + if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER)) + rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER); + if (rssi < 0) + rssi = 0; - last_rssi = sc->last_rssi; - if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER)) - rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER); - if (rssi < 0) - rssi = 0; + ah->stats.avgbrssi = rssi; + } - /* Update Beacon RSSI, this is used by ANI. */ - ah->stats.avgbrssi = rssi; + rxs->signal = ah->noise + rx_stats->rs_rssi; } static void ath9k_process_tsf(struct ath_rx_status *rs, @@ -1149,15 +1169,12 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, goto exit; } - ath9k_process_rssi(common, hw, rx_stats); + ath9k_process_rssi(common, hw, rx_stats, rx_status); rx_status->band = hw->conf.chandef.chan->band; rx_status->freq = hw->conf.chandef.chan->center_freq; - rx_status->signal = ah->noise + rx_stats->rs_rssi; rx_status->antenna = rx_stats->rs_antenna; rx_status->flag |= RX_FLAG_MACTIME_END; - if (rx_stats->rs_moreaggr) - rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT if (ieee80211_is_data_present(hdr->frame_control) && -- cgit v0.10.2 From 009af8fb69c9d6274b3a49377be5ba2379e188d6 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Wed, 14 Aug 2013 21:15:55 +0530 Subject: ath9k: Identify first subframe in an A-MPDU Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c index 5163abd..f6c5c1b 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c @@ -491,6 +491,7 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs, rxs->rs_rate = MS(rxsp->status1, AR_RxRate); rxs->rs_more = (rxsp->status2 & AR_RxMore) ? 1 : 0; + rxs->rs_firstaggr = (rxsp->status11 & AR_RxFirstAggr) ? 1 : 0; rxs->rs_isaggr = (rxsp->status11 & AR_RxAggr) ? 1 : 0; rxs->rs_moreaggr = (rxsp->status11 & AR_RxMoreAggr) ? 
1 : 0; rxs->rs_antenna = (MS(rxsp->status4, AR_RxAntenna) & 0x7); diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c index 2ef05eb..a3eff09 100644 --- a/drivers/net/wireless/ath/ath9k/mac.c +++ b/drivers/net/wireless/ath/ath9k/mac.c @@ -583,9 +583,9 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds, rs->rs_rate = MS(ads.ds_rxstatus0, AR_RxRate); rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0; + rs->rs_firstaggr = (ads.ds_rxstatus8 & AR_RxFirstAggr) ? 1 : 0; rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0; - rs->rs_moreaggr = - (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0; + rs->rs_moreaggr = (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0; rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna); /* directly mapped flags for ieee80211_rx_status */ diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h index b02dfce..bfccace 100644 --- a/drivers/net/wireless/ath/ath9k/mac.h +++ b/drivers/net/wireless/ath/ath9k/mac.h @@ -140,6 +140,7 @@ struct ath_rx_status { int8_t rs_rssi_ext1; int8_t rs_rssi_ext2; u8 rs_isaggr; + u8 rs_firstaggr; u8 rs_moreaggr; u8 rs_num_delims; u8 rs_flags; @@ -569,6 +570,7 @@ struct ar5416_desc { #define AR_RxAggr 0x00020000 #define AR_PostDelimCRCErr 0x00040000 #define AR_RxStatusRsvd71 0x3ff80000 +#define AR_RxFirstAggr 0x20000000 #define AR_DecryptBusyErr 0x40000000 #define AR_KeyMiss 0x80000000 -- cgit v0.10.2 From c3124df7962f7a58177073d54d451e1661ffb71f Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Wed, 14 Aug 2013 21:15:56 +0530 Subject: ath9k: Optimize LNA check The documentation for antenna diversity says: "The decision of diversity is done at 802.11 preamble. So, for 11G/11B, for every MAC packet hardware will do a decision. But in 11N with aggregation, the decision is made only at the preamble and all other MPDUs will use the same LNA as the first MPDU." Make use of rs_firstaggr to avoid needlessly scanning for LNA changes. Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 6ad7d61..6161d14 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -1238,6 +1238,52 @@ static void ath9k_rx_skb_postprocess(struct ath_common *common, rxs->flag &= ~RX_FLAG_DECRYPTED; } +/* + * Run the LNA combining algorithm only in these cases: + * + * Standalone WLAN cards with both LNA/Antenna diversity + * enabled in the EEPROM. + * + * WLAN+BT cards which are in the supported card list + * in ath_pci_id_table and the user has loaded the + * driver with "bt_ant_diversity" set to true. + */ +static void ath9k_antenna_check(struct ath_softc *sc, + struct ath_rx_status *rs) +{ + struct ath_hw *ah = sc->sc_ah; + struct ath9k_hw_capabilities *pCap = &ah->caps; + struct ath_common *common = ath9k_hw_common(ah); + + if (!(ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)) + return; + + /* + * All MPDUs in an aggregate will use the same LNA + * as the first MPDU. + */ + if (rs->rs_isaggr && !rs->rs_firstaggr) + return; + + /* + * Change the default rx antenna if rx diversity + * chooses the other antenna 3 times in a row. 
+ */ + if (sc->rx.defant != rs->rs_antenna) { + if (++sc->rx.rxotherant >= 3) + ath_setdefantenna(sc, rs->rs_antenna); + } else { + sc->rx.rxotherant = 0; + } + + if (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV) { + if (common->bt_ant_diversity) + ath_ant_comb_scan(sc, rs); + } else { + ath_ant_comb_scan(sc, rs); + } +} + static void ath9k_apply_ampdu_details(struct ath_softc *sc, struct ath_rx_status *rs, struct ieee80211_rx_status *rxs) { @@ -1262,7 +1308,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb; struct ieee80211_rx_status *rxs; struct ath_hw *ah = sc->sc_ah; - struct ath9k_hw_capabilities *pCap = &ah->caps; struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_hw *hw = sc->hw; int retval; @@ -1398,35 +1443,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) ath_rx_ps(sc, skb, rs.is_mybeacon); spin_unlock_irqrestore(&sc->sc_pm_lock, flags); - /* - * Run the LNA combining algorithm only in these cases: - * - * Standalone WLAN cards with both LNA/Antenna diversity - * enabled in the EEPROM. - * - * WLAN+BT cards which are in the supported card list - * in ath_pci_id_table and the user has loaded the - * driver with "bt_ant_diversity" set to true. - */ - if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) { - /* - * Change the default rx antenna if rx diversity - * chooses the other antenna 3 times in a row. - */ - if (sc->rx.defant != rs.rs_antenna) { - if (++sc->rx.rxotherant >= 3) - ath_setdefantenna(sc, rs.rs_antenna); - } else { - sc->rx.rxotherant = 0; - } - - if (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV) { - if (common->bt_ant_diversity) - ath_ant_comb_scan(sc, &rs); - } else { - ath_ant_comb_scan(sc, &rs); - } - } + ath9k_antenna_check(sc, &rs); ath9k_apply_ampdu_details(sc, &rs, rxs); -- cgit v0.10.2 From 5d07cca2128c214ea6029a6b65082e642ee7355e Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Wed, 14 Aug 2013 21:15:57 +0530 Subject: ath9k: Use lockless variant to initialize RX fifo Since the rx_fifo queue is accessed only using the various lockless SKB queue routines, there is no need to initialize the lock and __skb_queue_head_init() can be used. Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 6161d14..653f7fc 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -190,7 +190,7 @@ static void ath_rx_edma_cleanup(struct ath_softc *sc) static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size) { - skb_queue_head_init(&rx_edma->rx_fifo); + __skb_queue_head_init(&rx_edma->rx_fifo); rx_edma->rx_fifo_hwsize = size; } -- cgit v0.10.2 From e5b417e73efa52b9179ce2a2b7676f08425c62e3 Mon Sep 17 00:00:00 2001 From: Mark Schulte Date: Wed, 31 Jul 2013 00:08:27 -0700 Subject: rtlwifi: sparse warnings: cast to restricted type Adding type casts to suppress sparse warnings: * warning: cast to restricted __le32/__le16 Signed-off-by: Mark Schulte Acked-by: Larry Finger Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c index 298b615..f646b75 100644 --- a/drivers/net/wireless/rtlwifi/ps.c +++ b/drivers/net/wireless/rtlwifi/ps.c @@ -688,7 +688,7 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data, find_p2p_ie = true; /*to find noa ie*/ while (ie + 1 < end) { - noa_len = READEF2BYTE(&ie[1]); + noa_len = READEF2BYTE((__le16 *)&ie[1]); if (ie + 3 + ie[1] > end) return; @@ -717,13 +717,13 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data, READEF1BYTE(ie+index); index += 1; p2pinfo->noa_duration[i] = - READEF4BYTE(ie+index); + READEF4BYTE((__le32 *)ie+index); index += 4; p2pinfo->noa_interval[i] = - READEF4BYTE(ie+index); + READEF4BYTE((__le32 *)ie+index); index += 4; p2pinfo->noa_start_time[i] = - READEF4BYTE(ie+index); + READEF4BYTE((__le32 *)ie+index); index += 4; } @@ -780,7 +780,7 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data, RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "action frame find P2P IE.\n"); /*to find noa ie*/ while (ie + 1 < end) { - noa_len = READEF2BYTE(&ie[1]); + noa_len = READEF2BYTE((__le16 *)&ie[1]); if (ie + 3 + ie[1] > end) return; @@ -809,13 +809,13 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data, READEF1BYTE(ie+index); index += 1; p2pinfo->noa_duration[i] = - READEF4BYTE(ie+index); + READEF4BYTE((__le32 *)ie+index); index += 4; p2pinfo->noa_interval[i] = - READEF4BYTE(ie+index); + READEF4BYTE((__le32 *)ie+index); index += 4; p2pinfo->noa_start_time[i] = - READEF4BYTE(ie+index); + READEF4BYTE((__le32 *)ie+index); index += 4; } -- cgit v0.10.2 From 67d0cf50bd32b66eab709871714e55725ee30ce4 Mon Sep 17 00:00:00 2001 From: "John W. Linville" Date: Fri, 9 Aug 2013 13:36:21 -0400 Subject: brcmsmac: Fix WARNING caused by lack of calls to dma_mapping_error() The driver fails to check the results of DMA mapping in twp places, which results in the following warning: [ 28.078515] ------------[ cut here ]------------ [ 28.078529] WARNING: at lib/dma-debug.c:937 check_unmap+0x47e/0x930() [ 28.078533] bcma-pci-bridge 0000:0e:00.0: DMA-API: device driver failed to check map error[device address=0x00000000b5d60d6c] [size=1876 bytes] [mapped as single] [ 28.078536] Modules linked in: bnep bluetooth vboxpci(O) vboxnetadp(O) vboxnetflt(O) vboxdrv(O) ipv6 b43 brcmsmac rtl8192cu rtl8192c_common rtlwifi mac802 11 brcmutil cfg80211 snd_hda_codec_conexant rng_core snd_hda_intel kvm_amd snd_hda_codec ssb kvm mmc_core snd_pcm snd_seq snd_timer snd_seq_device snd k8temp cordic joydev serio_raw hwmon sr_mod sg pcmcia pcmcia_core soundcore cdrom i2c_nforce2 i2c_core forcedeth bcma snd_page_alloc autofs4 ext4 jbd2 mbcache crc1 6 scsi_dh_alua scsi_dh_hp_sw scsi_dh_rdac scsi_dh_emc scsi_dh ata_generic pata_amd [ 28.078602] CPU: 1 PID: 2570 Comm: NetworkManager Tainted: G O 3.10.0-rc7-wl+ #42 [ 28.078605] Hardware name: Hewlett-Packard HP Pavilion dv2700 Notebook PC/30D6, BIOS F.27 11/27/2008 [ 28.078607] 0000000000000009 ffff8800bbb03ad8 ffffffff8144f898 ffff8800bbb03b18 [ 28.078612] ffffffff8103e1eb 0000000000000002 ffff8800b719f480 ffff8800b7b9c010 [ 28.078617] ffffffff824204c0 ffffffff81754d57 0000000000000754 ffff8800bbb03b78 [ 28.078622] Call Trace: [ 28.078624] [] dump_stack+0x19/0x1b [ 28.078634] [] warn_slowpath_common+0x6b/0xa0 [ 28.078638] [] warn_slowpath_fmt+0x41/0x50 [ 28.078650] [] check_unmap+0x47e/0x930 [ 28.078655] [] debug_dma_unmap_page+0x5c/0x70 [ 28.078679] [] dma64_getnextrxp+0x10c/0x190 [brcmsmac] [ 28.078691] [] dma_rx+0x62/0x240 
[brcmsmac] [ 28.078707] [] brcms_c_dpc+0x211/0x9d0 [brcmsmac] [ 28.078717] [] ? brcms_dpc+0x27/0xf0 [brcmsmac] [ 28.078731] [] brcms_dpc+0x47/0xf0 [brcmsmac] [ 28.078736] [] tasklet_action+0x6c/0xf0 --snip-- [ 28.078974] [] SyS_sendmsg+0xd/0x20 [ 28.078979] [] tracesys+0xdd/0xe2 [ 28.078982] ---[ end trace 6164d1a08148e9c8 ]--- [ 28.078984] Mapped at: [ 28.078985] [] debug_dma_map_page+0x9d/0x150 [ 28.078989] [] dma_rxfill+0x102/0x3d0 [brcmsmac] [ 28.079001] [] brcms_c_init+0x87d/0x1100 [brcmsmac] [ 28.079010] [] brcms_init+0x21/0x30 [brcmsmac] [ 28.079018] [] brcms_c_up+0x150/0x430 [brcmsmac] As the patch adds a new failure mechanism to dma_rxfill(). When I changed the comment at the start of the routine to add that information, I also polished the wording. Signed-off-by: Larry Finger Cc: Stable Cc: Brett Rudley Cc: Franky (Zhenhui) Lin Cc: Hante Meuleman Cc: brcm80211-dev-list@broadcom.com Acked-by: Arend van Spriel Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c index 1860c57..4fb9635 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c @@ -1015,9 +1015,10 @@ static bool dma64_txidle(struct dma_info *di) /* * post receive buffers - * return false is refill failed completely and ring is empty this will stall - * the rx dma and user might want to call rxfill again asap. This unlikely - * happens on memory-rich NIC, but often on memory-constrained dongle + * Return false if refill failed completely or dma mapping failed. The ring + * is empty, which will stall the rx dma and user might want to call rxfill + * again asap. This is unlikely to happen on a memory-rich NIC, but often on + * memory-constrained dongle. */ bool dma_rxfill(struct dma_pub *pub) { @@ -1078,6 +1079,8 @@ bool dma_rxfill(struct dma_pub *pub) pa = dma_map_single(di->dmadev, p->data, di->rxbufsize, DMA_FROM_DEVICE); + if (dma_mapping_error(di->dmadev, pa)) + return false; /* save the free packet pointer */ di->rxp[rxout] = p; @@ -1284,7 +1287,11 @@ static void dma_txenq(struct dma_info *di, struct sk_buff *p) /* get physical address of buffer start */ pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE); - + /* if mapping failed, free skb */ + if (dma_mapping_error(di->dmadev, pa)) { + brcmu_pkt_buf_free_skb(p); + return; + } /* With a DMA segment list, Descriptor table is filled * using the segment list instead of looping over * buffers in multi-chain DMA. Therefore, EOF for SGLIST -- cgit v0.10.2 From d14c5ab6bef6a46170b84c3589b27768e979f93d Mon Sep 17 00:00:00 2001 From: Francesco Fusco Date: Thu, 15 Aug 2013 13:42:14 +0200 Subject: net: proc_fs: trivial: print UIDs as unsigned int UIDs are printed in the proc_fs as signed int, whereas they are unsigned int. Signed-off-by: Francesco Fusco Signed-off-by: David S. 
Miller diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c index c30f3a0..af46bc4 100644 --- a/net/appletalk/atalk_proc.c +++ b/net/appletalk/atalk_proc.c @@ -178,7 +178,7 @@ static int atalk_seq_socket_show(struct seq_file *seq, void *v) at = at_sk(s); seq_printf(seq, "%02X %04X:%02X:%02X %04X:%02X:%02X %08X:%08X " - "%02X %d\n", + "%02X %u\n", s->sk_type, ntohs(at->src_net), at->src_node, at->src_port, ntohs(at->dest_net), at->dest_node, at->dest_port, sk_wmem_alloc_get(s), diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 746427c..d7d9882 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -1082,7 +1082,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f, __u16 srcp = ntohs(inet->inet_sport); seq_printf(f, "%5d: %08X:%04X %08X:%04X" - " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d%n", + " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d%n", bucket, src, srcp, dest, destp, sp->sk_state, sk_wmem_alloc_get(sp), sk_rmem_alloc_get(sp), diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index dd44e0a..41d8450 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -987,7 +987,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) srcp = inet->inet_num; seq_printf(seq, "%4d: %08X:%04X %08X:%04X" - " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n", + " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d\n", i, src, srcp, dest, destp, sp->sk_state, sk_wmem_alloc_get(sp), sk_rmem_alloc_get(sp), diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index ec27028..05a3d45 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2608,7 +2608,7 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req, long delta = req->expires - jiffies; seq_printf(f, "%4d: %08X:%04X %08X:%04X" - " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n", + " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK%n", i, ireq->loc_addr, ntohs(inet_sk(sk)->inet_sport), @@ -2666,7 +2666,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len) rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0); seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX " - "%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n", + "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d%n", i, src, srcp, dest, destp, sk->sk_state, tp->write_seq - tp->snd_una, rx_queue, diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 9e88af0..0b24508 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2159,7 +2159,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f, __u16 srcp = ntohs(inet->inet_sport); seq_printf(f, "%5d: %08X:%04X %08X:%04X" - " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d%n", + " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d%n", bucket, src, srcp, dest, destp, sp->sk_state, sk_wmem_alloc_get(sp), sk_rmem_alloc_get(sp), diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 197e6f4..48b6bd2 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -890,7 +890,7 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, src = &np->rcv_saddr; seq_printf(seq, "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " - "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n", + "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d\n", bucket, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], srcp, diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 38c196c..5bcfadf 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1731,7 +1731,7 
@@ static void get_openreq6(struct seq_file *seq, seq_printf(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " - "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n", + "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n", i, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], @@ -1782,7 +1782,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) seq_printf(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " - "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n", + "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n", i, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], srcp, diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c index 65e8833..e15c16a 100644 --- a/net/ipx/ipx_proc.c +++ b/net/ipx/ipx_proc.c @@ -213,7 +213,7 @@ static int ipx_seq_socket_show(struct seq_file *seq, void *v) ntohs(ipxs->dest_addr.sock)); } - seq_printf(seq, "%08X %08X %02X %03d\n", + seq_printf(seq, "%08X %08X %02X %03u\n", sk_wmem_alloc_get(s), sk_rmem_alloc_get(s), s->sk_state, diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c index 7b4799c..1a3c7e0 100644 --- a/net/llc/llc_proc.c +++ b/net/llc/llc_proc.c @@ -147,7 +147,7 @@ static int llc_seq_socket_show(struct seq_file *seq, void *v) } seq_printf(seq, "@%02X ", llc->sap->laddr.lsap); llc_ui_format_mac(seq, llc->daddr.mac); - seq_printf(seq, "@%02X %8d %8d %2d %3d %4d\n", llc->daddr.lsap, + seq_printf(seq, "@%02X %8d %8d %2d %3u %4d\n", llc->daddr.lsap, sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk) - llc->copied_seq, sk->sk_state, diff --git a/net/phonet/socket.c b/net/phonet/socket.c index 1afd138..77e38f7 100644 --- a/net/phonet/socket.c +++ b/net/phonet/socket.c @@ -793,7 +793,7 @@ static int pn_res_seq_show(struct seq_file *seq, void *v) struct sock **psk = v; struct sock *sk = *psk; - seq_printf(seq, "%02X %5d %lu%n", + seq_printf(seq, "%02X %5u %lu%n", (int) (psk - pnres.sk), from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)), sock_i_ino(sk), &len); diff --git a/net/sctp/proc.c b/net/sctp/proc.c index 82432bf..0c06421 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c @@ -226,7 +226,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v) sk = epb->sk; if (!net_eq(sock_net(sk), seq_file_net(seq))) continue; - seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk, + seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5u %5lu ", ep, sk, sctp_sk(sk)->type, sk->sk_state, hash, epb->bind_addr.port, from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)), @@ -336,7 +336,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v) continue; seq_printf(seq, "%8pK %8pK %-3d %-3d %-2d %-4d " - "%4d %8d %8d %7d %5lu %-5d %5d ", + "%4d %8d %8d %7u %5lu %-5d %5d ", assoc, sk, sctp_sk(sk)->type, sk->sk_state, assoc->state, hash, assoc->assoc_id, -- cgit v0.10.2 From 91b86e3dd00862c6ed36691c849762949aaa833e Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Thu, 15 Aug 2013 08:27:25 -0400 Subject: qlcnic: Reinitialize mailbox data structures after firmware reset o After firmware reset VFs were failing to come up because of not reinitializing mailbox data structures. Reinitialize them so that VFs can come up after firmware reset. Signed-off-by: Manish Chopra Signed-off-by: Sucheta Chakraborty Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 046286a..dc24979 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -1556,7 +1556,7 @@ static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter) { int err; - set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status); + qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox); qlcnic_83xx_enable_mbx_interrupt(adapter); err = qlcnic_sriov_cfg_bc_intr(adapter, 1); -- cgit v0.10.2 From 35570cfefd15d89ede00ce550e06acbcac3b3098 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Thu, 15 Aug 2013 08:27:26 -0400 Subject: qlcnic: Flush mailbox command list when mailbox is not available o Driver was hitting a panic at the time of adapter reset due to invalid command access from the list which had been already freed by the queuing thread. Flush all the pending commands from the list before proceeding with adapter reset Signed-off-by: Manish Chopra Signed-off-by: Sucheta Chakraborty Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 55a5977..43b51a0 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -3515,6 +3515,8 @@ static inline void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter) while (!list_empty(head)) { cmd = list_entry(head->next, struct qlcnic_cmd_args, list); + dev_info(&adapter->pdev->dev, "%s: Mailbox command 0x%x\n", + __func__, cmd->cmd_op); list_del(&cmd->list); mbx->num_cmds--; qlcnic_83xx_notify_cmd_completion(adapter, cmd); @@ -3534,6 +3536,7 @@ static inline int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter) host_mbx_ctrl = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL); if (host_mbx_ctrl) { + clear_bit(QLC_83XX_MBX_READY, &mbx->status); ahw->idc.collect_dump = 1; return -EIO; } @@ -3704,8 +3707,10 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work) ahw = adapter->ahw; while (true) { - if (qlcnic_83xx_check_mbx_status(adapter)) + if (qlcnic_83xx_check_mbx_status(adapter)) { + qlcnic_83xx_flush_mbx_queue(adapter); return; + } atomic_set(rsp_status, QLC_83XX_MBX_RESPONSE_WAIT); -- cgit v0.10.2 From b5acb255e6588cc391d1b3c9afcd80407c2581b3 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Thu, 15 Aug 2013 08:27:27 -0400 Subject: qlcnic: Fix driver initialization for 83xx adapters o Load firmware from file before setting up interrupts. Signed-off-by: Manish Chopra Signed-off-by: Sucheta Chakraborty Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index cc1e32a..17c26a1 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -2169,6 +2169,13 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac) if (err) goto detach_mbx; + if (!qlcnic_83xx_read_flash_descriptor_table(adapter)) + qlcnic_83xx_read_flash_mfg_id(adapter); + + err = qlcnic_83xx_idc_init(adapter); + if (err) + goto detach_mbx; + err = qlcnic_setup_intr(adapter, 0); if (err) { dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n"); @@ -2186,13 +2193,6 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac) /* register for NIC IDC AEN Events */ qlcnic_83xx_register_nic_idc_func(adapter, 1); - if (!qlcnic_83xx_read_flash_descriptor_table(adapter)) - qlcnic_83xx_read_flash_mfg_id(adapter); - - err = qlcnic_83xx_idc_init(adapter); - if (err) - goto disable_mbx_intr; - /* Configure default, SR-IOV or Virtual NIC mode of operation */ err = qlcnic_83xx_configure_opmode(adapter); if (err) -- cgit v0.10.2 From b942f44aff0f9a4c7a6e276988d0527b4164ffc9 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Thu, 15 Aug 2013 08:27:28 -0400 Subject: qlcnic: Dump mailbox command data when a command times out Signed-off-by: Manish Chopra Signed-off-by: Sucheta Chakraborty Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 43b51a0..78ca755 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -3736,6 +3736,7 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work) __func__, cmd->cmd_op, cmd->type, ahw->pci_func, ahw->op_mode); clear_bit(QLC_83XX_MBX_READY, &mbx->status); + qlcnic_dump_mbx(adapter, cmd); qlcnic_83xx_idc_request_reset(adapter, QLCNIC_FORCE_FW_DUMP_KEY); cmd->rsp_opcode = QLCNIC_RCODE_TIMEOUT; -- cgit v0.10.2 From de98ac5eee037a7484ee9b87eae2e0fb67bdd3ec Mon Sep 17 00:00:00 2001 From: Sucheta Chakraborty Date: Thu, 15 Aug 2013 08:27:29 -0400 Subject: qlcnic: Update version to 5.2.46. Signed-off-by: Sucheta Chakraborty Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 3935155..7387354 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -37,8 +37,8 @@ #define _QLCNIC_LINUX_MAJOR 5 #define _QLCNIC_LINUX_MINOR 2 -#define _QLCNIC_LINUX_SUBVERSION 45 -#define QLCNIC_LINUX_VERSIONID "5.2.45" +#define _QLCNIC_LINUX_SUBVERSION 46 +#define QLCNIC_LINUX_VERSIONID "5.2.46" #define QLCNIC_DRV_IDC_VER 0x01 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) -- cgit v0.10.2 From 16b304f3404f8e0243d5ee2b70b68767b7b59b2b Mon Sep 17 00:00:00 2001 From: Pravin B Shelar Date: Thu, 15 Aug 2013 15:31:06 -0700 Subject: netlink: Eliminate kmalloc in netlink dump operation. Following patch stores struct netlink_callback in netlink_sock to avoid allocating and freeing it on every netlink dump msg. Only one dump operation is allowed for a given socket at a time therefore we can safely convert cb pointer to cb struct inside netlink_sock. Signed-off-by: Pravin B Shelar Signed-off-by: David S. 
Miller diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 6273772..a17dda1 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -595,7 +595,7 @@ static unsigned int netlink_poll(struct file *file, struct socket *sock, * for dumps is performed here. A dump is allowed to continue * if at least half the ring is unused. */ - while (nlk->cb != NULL && netlink_dump_space(nlk)) { + while (nlk->cb_running && netlink_dump_space(nlk)) { err = netlink_dump(sk); if (err < 0) { sk->sk_err = err; @@ -802,18 +802,6 @@ static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb) #define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, siocb) 0 #endif /* CONFIG_NETLINK_MMAP */ -static void netlink_destroy_callback(struct netlink_callback *cb) -{ - kfree_skb(cb->skb); - kfree(cb); -} - -static void netlink_consume_callback(struct netlink_callback *cb) -{ - consume_skb(cb->skb); - kfree(cb); -} - static void netlink_skb_destructor(struct sk_buff *skb) { #ifdef CONFIG_NETLINK_MMAP @@ -872,12 +860,12 @@ static void netlink_sock_destruct(struct sock *sk) { struct netlink_sock *nlk = nlk_sk(sk); - if (nlk->cb) { - if (nlk->cb->done) - nlk->cb->done(nlk->cb); + if (nlk->cb_running) { + if (nlk->cb.done) + nlk->cb.done(&nlk->cb); - module_put(nlk->cb->module); - netlink_destroy_callback(nlk->cb); + module_put(nlk->cb.module); + kfree_skb(nlk->cb.skb); } skb_queue_purge(&sk->sk_receive_queue); @@ -2350,7 +2338,8 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, skb_free_datagram(sk, skb); - if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) { + if (nlk->cb_running && + atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) { ret = netlink_dump(sk); if (ret) { sk->sk_err = ret; @@ -2566,13 +2555,12 @@ static int netlink_dump(struct sock *sk) int alloc_size; mutex_lock(nlk->cb_mutex); - - cb = nlk->cb; - if (cb == NULL) { + if (!nlk->cb_running) { err = -EINVAL; goto errout_skb; } + cb = &nlk->cb; alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE); if (!netlink_rx_is_mmaped(sk) && @@ -2610,11 +2598,11 @@ static int netlink_dump(struct sock *sk) if (cb->done) cb->done(cb); - nlk->cb = NULL; - mutex_unlock(nlk->cb_mutex); + nlk->cb_running = false; + mutex_unlock(nlk->cb_mutex); module_put(cb->module); - netlink_consume_callback(cb); + consume_skb(cb->skb); return 0; errout_skb: @@ -2632,59 +2620,51 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, struct netlink_sock *nlk; int ret; - cb = kzalloc(sizeof(*cb), GFP_KERNEL); - if (cb == NULL) - return -ENOBUFS; - /* Memory mapped dump requests need to be copied to avoid looping * on the pending state in netlink_mmap_sendmsg() while the CB hold * a reference to the skb. */ if (netlink_skb_is_mmaped(skb)) { skb = skb_copy(skb, GFP_KERNEL); - if (skb == NULL) { - kfree(cb); + if (skb == NULL) return -ENOBUFS; - } } else atomic_inc(&skb->users); - cb->dump = control->dump; - cb->done = control->done; - cb->nlh = nlh; - cb->data = control->data; - cb->module = control->module; - cb->min_dump_alloc = control->min_dump_alloc; - cb->skb = skb; - sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid); if (sk == NULL) { - netlink_destroy_callback(cb); - return -ECONNREFUSED; + ret = -ECONNREFUSED; + goto error_free; } - nlk = nlk_sk(sk); + nlk = nlk_sk(sk); mutex_lock(nlk->cb_mutex); /* A dump is in progress... 
*/ - if (nlk->cb) { - mutex_unlock(nlk->cb_mutex); - netlink_destroy_callback(cb); + if (nlk->cb_running) { ret = -EBUSY; - goto out; + goto error_unlock; } /* add reference of module which cb->dump belongs to */ - if (!try_module_get(cb->module)) { - mutex_unlock(nlk->cb_mutex); - netlink_destroy_callback(cb); + if (!try_module_get(control->module)) { ret = -EPROTONOSUPPORT; - goto out; + goto error_unlock; } - nlk->cb = cb; + cb = &nlk->cb; + memset(cb, 0, sizeof(*cb)); + cb->dump = control->dump; + cb->done = control->done; + cb->nlh = nlh; + cb->data = control->data; + cb->module = control->module; + cb->min_dump_alloc = control->min_dump_alloc; + cb->skb = skb; + + nlk->cb_running = true; + mutex_unlock(nlk->cb_mutex); ret = netlink_dump(sk); -out: sock_put(sk); if (ret) @@ -2694,6 +2674,13 @@ out: * signal not to send ACK even if it was requested. */ return -EINTR; + +error_unlock: + sock_put(sk); + mutex_unlock(nlk->cb_mutex); +error_free: + kfree_skb(skb); + return ret; } EXPORT_SYMBOL(__netlink_dump_start); @@ -2916,14 +2903,14 @@ static int netlink_seq_show(struct seq_file *seq, void *v) struct sock *s = v; struct netlink_sock *nlk = nlk_sk(s); - seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %pK %-8d %-8d %-8lu\n", + seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n", s, s->sk_protocol, nlk->portid, nlk->groups ? (u32)nlk->groups[0] : 0, sk_rmem_alloc_get(s), sk_wmem_alloc_get(s), - nlk->cb, + nlk->cb_running, atomic_read(&s->sk_refcnt), atomic_read(&s->sk_drops), sock_i_ino(s) diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h index eaa88d1..acbd774 100644 --- a/net/netlink/af_netlink.h +++ b/net/netlink/af_netlink.h @@ -32,7 +32,8 @@ struct netlink_sock { unsigned long *groups; unsigned long state; wait_queue_head_t wait; - struct netlink_callback *cb; + bool cb_running; + struct netlink_callback cb; struct mutex *cb_mutex; struct mutex cb_def_mutex; void (*netlink_rcv)(struct sk_buff *skb); -- cgit v0.10.2 From 99565a6c471cbb66caa68347c195133017559943 Mon Sep 17 00:00:00 2001 From: Fan Du Date: Thu, 15 Aug 2013 15:49:06 +0800 Subject: xfrm: Make xfrm_state timer monotonic xfrm_state timer should be independent of system clock change, so switch to CLOCK_BOOTTIME base which is not only monotonic but also counting suspend time. Thus issue reported in commit: 9e0d57fd6dad37d72a3ca6db00ca8c76f2215454 ("xfrm: SAD entries do not expire correctly after suspend-resume") could ALSO be avoided. v2: Use CLOCK_BOOTTIME to count suspend time, but still monotonic. Signed-off-by: Fan Du Acked-by: David S. 
Miller Signed-off-by: Steffen Klassert diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index b2cd806..4f8ace8 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -499,7 +499,8 @@ struct xfrm_state *xfrm_state_alloc(struct net *net) INIT_HLIST_NODE(&x->bydst); INIT_HLIST_NODE(&x->bysrc); INIT_HLIST_NODE(&x->byspi); - tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler, CLOCK_REALTIME, HRTIMER_MODE_ABS); + tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler, + CLOCK_BOOTTIME, HRTIMER_MODE_ABS); setup_timer(&x->rtimer, xfrm_replay_timer_handler, (unsigned long)x); x->curlft.add_time = get_seconds(); -- cgit v0.10.2 From 3571ac3370ef4e81ed5d60d810682c7b7bd95264 Mon Sep 17 00:00:00 2001 From: Eyal Shapira Date: Mon, 29 Jul 2013 17:08:48 +0300 Subject: iwlwifi: mvm: remove unused param of rs_dbgfs_set_mcs clean it up Signed-off-by: Eyal Shapira Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index 5cb3132..9ae1bad 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@ -161,10 +161,10 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search); #ifdef CONFIG_MAC80211_DEBUGFS static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta, - u32 *rate_n_flags, int index); + u32 *rate_n_flags); #else static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta, - u32 *rate_n_flags, int index) + u32 *rate_n_flags) {} #endif @@ -2370,7 +2370,7 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq_cmd = &lq_sta->lq; /* Override starting rate (index 0) if needed for debug purposes */ - rs_dbgfs_set_mcs(lq_sta, &new_rate, index); + rs_dbgfs_set_mcs(lq_sta, &new_rate); /* Interpret new_rate (rate_n_flags) */ rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, @@ -2417,7 +2417,7 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm, } /* Override next rate if needed for debug purposes */ - rs_dbgfs_set_mcs(lq_sta, &new_rate, index); + rs_dbgfs_set_mcs(lq_sta, &new_rate); /* Fill next table entry */ lq_cmd->rs_table[index] = @@ -2459,7 +2459,7 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm, use_ht_possible = 0; /* Override next rate if needed for debug purposes */ - rs_dbgfs_set_mcs(lq_sta, &new_rate, index); + rs_dbgfs_set_mcs(lq_sta, &new_rate); /* Fill next table entry */ lq_cmd->rs_table[index] = cpu_to_le32(new_rate); @@ -2504,7 +2504,7 @@ static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta, #ifdef CONFIG_MAC80211_DEBUGFS static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta, - u32 *rate_n_flags, int index) + u32 *rate_n_flags) { struct iwl_mvm *mvm; u8 valid_tx_ant; -- cgit v0.10.2 From 7abc4632bf1259a491188266a9aaa82ddeb55ff3 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 6 Aug 2013 18:28:26 +0200 Subject: iwlwifi: mvm: refactor resume from WoWLAN code To be able to add more logic to the resume code, refactor it a bit, moving some status checking/reporting logic into a new function. The locking becomes a bit odd (one of the new functions now unlocks the mutex) but this will be required to call new mac80211 APIs in there later. 
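The locking pattern described above (a helper that is entered with the mutex held and guarantees it is released on every exit path, so the final reporting step can run unlocked) can be sketched in isolation. The pthread-based analogue below is hypothetical and mirrors only the control flow, not the actual mvm code:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mvm_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Entered with mvm_mutex held; drops it on every exit path so the
 * final "report" step runs without the lock, as the refactored
 * resume code does before calling back into mac80211. */
static void query_wakeup_reasons(int status_ok)
{
    if (!status_ok) {
        pthread_mutex_unlock(&mvm_mutex);   /* error path: still unlock */
        return;
    }

    pthread_mutex_unlock(&mvm_mutex);       /* unlock first... */
    printf("wakeup reason reported without the lock held\n");
}

int main(void)
{
    pthread_mutex_lock(&mvm_mutex);
    query_wakeup_reasons(1);                /* returns with mvm_mutex released */
    return 0;
}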
Reviewed-by: Luciano Coelho Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c index ebf7f94..ec07f0a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/iwlwifi/mvm/d3.c @@ -1104,73 +1104,16 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) return __iwl_mvm_suspend(hw, wowlan, false); } -static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) +static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_wowlan_status *status) { - u32 base = mvm->error_event_table; - struct error_table_start { - /* cf. struct iwl_error_event_table */ - u32 valid; - u32 error_id; - } err_info; + struct sk_buff *pkt = NULL; struct cfg80211_wowlan_wakeup wakeup = { .pattern_idx = -1, }; struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup; - struct iwl_host_cmd cmd = { - .id = WOWLAN_GET_STATUSES, - .flags = CMD_SYNC | CMD_WANT_SKB, - }; - struct iwl_wowlan_status *status; - u32 reasons; - int ret, len; - struct sk_buff *pkt = NULL; - - iwl_trans_read_mem_bytes(mvm->trans, base, - &err_info, sizeof(err_info)); - - if (err_info.valid) { - IWL_INFO(mvm, "error table is valid (%d)\n", - err_info.valid); - if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) { - wakeup.rfkill_release = true; - ieee80211_report_wowlan_wakeup(vif, &wakeup, - GFP_KERNEL); - } - return; - } - - /* only for tracing for now */ - ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, CMD_SYNC, 0, NULL); - if (ret) - IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret); - - ret = iwl_mvm_send_cmd(mvm, &cmd); - if (ret) { - IWL_ERR(mvm, "failed to query status (%d)\n", ret); - return; - } - - /* RF-kill already asserted again... */ - if (!cmd.resp_pkt) - return; - - len = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; - if (len - sizeof(struct iwl_cmd_header) < sizeof(*status)) { - IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); - goto out; - } - - status = (void *)cmd.resp_pkt->data; - - if (len - sizeof(struct iwl_cmd_header) != - sizeof(*status) + - ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4)) { - IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); - goto out; - } - - reasons = le32_to_cpu(status->wakeup_reasons); + u32 reasons = le32_to_cpu(status->wakeup_reasons); if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) { wakeup_report = NULL; @@ -1233,6 +1176,12 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, pktsize -= hdrlen; if (ieee80211_has_protected(hdr->frame_control)) { + /* + * This is unlocked and using gtk_i(c)vlen, + * but since everything is under RTNL still + * that's not really a problem - changing + * it would be difficult. + */ if (is_multicast_ether_addr(hdr->addr1)) { ivlen = mvm->gtk_ivlen; icvlen += mvm->gtk_icvlen; @@ -1283,9 +1232,82 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, report: ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL); kfree_skb(pkt); +} - out: +/* releases the MVM mutex */ +static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + u32 base = mvm->error_event_table; + struct error_table_start { + /* cf. 
struct iwl_error_event_table */ + u32 valid; + u32 error_id; + } err_info; + struct iwl_host_cmd cmd = { + .id = WOWLAN_GET_STATUSES, + .flags = CMD_SYNC | CMD_WANT_SKB, + }; + struct iwl_wowlan_status *status; + int ret, len; + + iwl_trans_read_mem_bytes(mvm->trans, base, + &err_info, sizeof(err_info)); + + if (err_info.valid) { + IWL_INFO(mvm, "error table is valid (%d)\n", + err_info.valid); + if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) { + struct cfg80211_wowlan_wakeup wakeup = { + .rfkill_release = true, + }; + ieee80211_report_wowlan_wakeup(vif, &wakeup, + GFP_KERNEL); + } + goto out_unlock; + } + + /* only for tracing for now */ + ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, CMD_SYNC, 0, NULL); + if (ret) + IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret); + + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret) { + IWL_ERR(mvm, "failed to query status (%d)\n", ret); + goto out_unlock; + } + + /* RF-kill already asserted again... */ + if (!cmd.resp_pkt) + goto out_unlock; + + len = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; + if (len - sizeof(struct iwl_cmd_header) < sizeof(*status)) { + IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); + goto out_free_resp; + } + + status = (void *)cmd.resp_pkt->data; + + if (len - sizeof(struct iwl_cmd_header) != + sizeof(*status) + + ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4)) { + IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); + goto out_free_resp; + } + + /* now we have all the data we need, unlock to avoid mac80211 issues */ + mutex_unlock(&mvm->mutex); + + iwl_mvm_report_wakeup_reasons(mvm, vif, status); + iwl_free_resp(&cmd); + return; + + out_free_resp: iwl_free_resp(&cmd); + out_unlock: + mutex_unlock(&mvm->mutex); } static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm) @@ -1342,10 +1364,13 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) iwl_mvm_read_d3_sram(mvm); iwl_mvm_query_wakeup_reasons(mvm, vif); + /* has unlocked the mutex, so skip that */ + goto out; out_unlock: mutex_unlock(&mvm->mutex); + out: if (!test && vif) ieee80211_resume_disconnect(vif); -- cgit v0.10.2 From 66140adc22aacd95037ac9f89eeff61676d36cce Mon Sep 17 00:00:00 2001 From: Luciano Coelho Date: Mon, 12 Aug 2013 19:30:21 +0300 Subject: iwlwifi: use a macro for default probe length Instead of assigning the default max probe length to 200 in the main code, create a macro for consistency and clarity. 
Signed-off-by: Luciano Coelho Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c index d0162d4..e88397b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c @@ -843,7 +843,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) int i; bool load_module = false; - fw->ucode_capa.max_probe_length = 200; + fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH; fw->ucode_capa.standard_phy_calibration_size = IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE; diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h index 1705d24..a122368 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw.h @@ -99,6 +99,9 @@ enum iwl_ucode_tlv_flag { #define IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE 19 #define IWL_MAX_PHY_CALIBRATE_TBL_SIZE 253 +/* The default max probe length if not specified by the firmware file */ +#define IWL_DEFAULT_MAX_PROBE_LENGTH 200 + /** * enum iwl_ucode_type * -- cgit v0.10.2 From 3b1995ad83709ac2e1e86c99b37d5ba9ce410f56 Mon Sep 17 00:00:00 2001 From: Luciano Coelho Date: Mon, 12 Aug 2013 21:58:36 +0300 Subject: iwlwifi: Kconfig: fix help texts wrt 7260 and 3160 devices Fix the help texts to properly reflect the 7260 and 3160 devices support. Signed-off-by: Luciano Coelho Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig index e5c133e..3eb2102 100644 --- a/drivers/net/wireless/iwlwifi/Kconfig +++ b/drivers/net/wireless/iwlwifi/Kconfig @@ -22,6 +22,8 @@ config IWLWIFI Intel Wireless WiFi Link 6150BGN 2 Adapter Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN) Intel 2000 Series Wi-Fi Adapters + Intel 7260 Wi-Fi Adapter + Intel 3160 Wi-Fi Adapter This driver uses the kernel's mac80211 subsystem. @@ -46,17 +48,16 @@ config IWLDVM depends on IWLWIFI default IWLWIFI help - This is the driver supporting the DVM firmware which is - currently the only firmware available for existing devices. + This is the driver that supports the DVM firmware which is + used by most existing devices (with the exception of 7260 + and 3160). config IWLMVM tristate "Intel Wireless WiFi MVM Firmware support" depends on IWLWIFI help - This is the driver supporting the MVM firmware which is - currently only available for 7000 series devices. - - Say yes if you have such a device. + This is the driver that supports the MVM firmware which is + currently only available for 7260 and 3160 devices. # don't call it _MODULE -- will confuse Kconfig/fixdep/... config IWLWIFI_OPMODE_MODULAR -- cgit v0.10.2 From eafe25e0afaf45a4e38f9b3560ac774a2395c695 Mon Sep 17 00:00:00 2001 From: Luciano Coelho Date: Tue, 13 Aug 2013 10:34:55 +0300 Subject: iwlwifi: return -ENOMEM instead of NULL when OOM in iwl_drv_start() The callers of iwl_drv_start() are probe functions. If a probe function returns 0, it means it succeeded. So if NULL was returned by iwl_drv_start(), it would be considered as a success. Fix this by returning -ENOMEM if the driver struct allocation fails in iwl_drv_start(). 
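The fix relies on the kernel's ERR_PTR()/IS_ERR() convention. The stand-alone userspace sketch below (illustrative only, not part of the patch) re-implements just enough of that convention to show why a NULL return from an allocation failure looks like success to a caller that only checks IS_ERR():

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/* Minimal userspace analogues of ERR_PTR()/PTR_ERR()/IS_ERR(). */
static inline void *err_ptr(long err)      { return (void *)err; }
static inline long  ptr_err(const void *p) { return (long)p; }
static inline int   is_err(const void *p)  { return (unsigned long)p >= (unsigned long)-4095; }

/* Stand-in for iwl_drv_start(): encode the error instead of returning NULL. */
static void *start_driver(int fail_alloc)
{
    void *drv = fail_alloc ? NULL : malloc(16);

    if (!drv)
        return err_ptr(-ENOMEM);    /* the fix: NULL would read as success */
    return drv;
}

int main(void)
{
    void *drv = start_driver(1);

    if (is_err(drv))
        printf("probe fails cleanly with %ld\n", ptr_err(drv));
    return 0;
}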
Signed-off-by: Luciano Coelho Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c index e88397b..99e1da3 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c @@ -1032,8 +1032,10 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans, int ret; drv = kzalloc(sizeof(*drv), GFP_KERNEL); - if (!drv) - return NULL; + if (!drv) { + ret = -ENOMEM; + goto err; + } drv->trans = trans; drv->dev = trans->dev; @@ -1078,7 +1080,7 @@ err_free_dbgfs: err_free_drv: #endif kfree(drv); - +err: return ERR_PTR(ret); } diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index e179efe..567ef01 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -332,7 +332,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans); trans_pcie->drv = iwl_drv_start(iwl_trans, cfg); - if (IS_ERR_OR_NULL(trans_pcie->drv)) { + if (IS_ERR(trans_pcie->drv)) { ret = PTR_ERR(trans_pcie->drv); goto out_free_trans; } -- cgit v0.10.2 From a20fd398666dbf5cdee3fe97722350cc10619c84 Mon Sep 17 00:00:00 2001 From: Andrei Otcheretianski Date: Sun, 21 Jul 2013 17:23:59 +0300 Subject: iwlwifi: mvm: Implement CQM offloading Use beacon statistics notification to track RSSI. Notify mac80211 when the tresholds are crossed. The roaming treshold is configured to be equal to cqm_thold. If the beacon filtering command is not supported by fw fall back and use mac80211 mechanism. Signed-off-by: Andrei Otcheretianski Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c index 14811a5..c67b17a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c @@ -920,7 +920,7 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file, }; iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd); - if (mvmvif->bf_enabled) + if (mvmvif->bf_data.bf_enabled) cmd.bf_enable_beacon_filter = cpu_to_le32(1); else cmd.bf_enable_beacon_filter = 0; diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h index b104710..66264cc 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h @@ -1321,7 +1321,7 @@ struct mvm_statistics_general { struct mvm_statistics_general_common common; __le32 beacon_filtered; __le32 missed_beacons; - __s8 beacon_filter_everage_energy; + __s8 beacon_filter_average_energy; __s8 beacon_filter_reason; __s8 beacon_filter_current_energy; __s8 beacon_filter_reserved; diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 66803b9..692d2ea 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -575,7 +575,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, vif->type == NL80211_IFTYPE_STATION && !vif->p2p && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED){ mvm->bf_allowed_vif = mvmvif; - vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; + vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | + IEEE80211_VIF_SUPPORTS_CQM_RSSI; } /* @@ -615,7 +616,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, out_free_bf: if (mvm->bf_allowed_vif == mvmvif) { mvm->bf_allowed_vif = NULL; - vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER; + 
vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | + IEEE80211_VIF_SUPPORTS_CQM_RSSI); } out_remove_mac: mvmvif->phy_ctxt = NULL; @@ -681,7 +683,8 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, if (mvm->bf_allowed_vif == mvmvif) { mvm->bf_allowed_vif = NULL; - vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER; + vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | + IEEE80211_VIF_SUPPORTS_CQM_RSSI); } iwl_mvm_vif_dbgfs_clean(mvm, vif); @@ -799,6 +802,10 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, if (ret) IWL_ERR(mvm, "failed to update quotas\n"); } + + /* reset rssi values */ + mvmvif->bf_data.ave_beacon_signal = 0; + if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD)) { /* Workaround for FW bug, otherwise FW disables device * power save upon disassociation @@ -825,6 +832,15 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, bss_conf->txpower); iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower); } + + if (changes & BSS_CHANGED_CQM) { + IWL_DEBUG_MAC80211(mvm, "cqm info_changed"); + /* reset cqm events tracking */ + mvmvif->bf_data.last_cqm_event = 0; + ret = iwl_mvm_update_beacon_filter(mvm, vif); + if (ret) + IWL_ERR(mvm, "failed to update CQM thresholds\n"); + } } static int iwl_mvm_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif) diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index 014d779..ee46dea 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -233,6 +233,21 @@ enum iwl_mvm_smps_type_request { }; /** +* struct iwl_mvm_vif_bf_data - beacon filtering related data +* @bf_enabled: indicates if beacon filtering is enabled +* @ba_enabled: indicated if beacon abort is enabled +* @last_beacon_signal: last beacon rssi signal in dbm +* @ave_beacon_signal: average beacon signal +* @last_cqm_event: rssi of the last cqm event +*/ +struct iwl_mvm_vif_bf_data { + bool bf_enabled; + bool ba_enabled; + s8 ave_beacon_signal; + s8 last_cqm_event; +}; + +/** * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context * @id: between 0 and 3 * @color: to solve races upon MAC addition and removal @@ -257,8 +272,7 @@ struct iwl_mvm_vif { bool uploaded; bool ap_active; bool monitor_active; - /* indicate whether beacon filtering is enabled */ - bool bf_enabled; + struct iwl_mvm_vif_bf_data bf_data; u32 ap_beacon_time; @@ -758,6 +772,8 @@ int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm, struct iwl_beacon_filter_cmd *cmd); int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool enable); +int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); /* SMPS */ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c index 9c9b5ba..a5529b8 100644 --- a/drivers/net/wireless/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/iwlwifi/mvm/power.c @@ -110,6 +110,23 @@ int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm, return ret; } +static +void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_beacon_filter_cmd *cmd) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (vif->bss_conf.cqm_rssi_thold) { + cmd->bf_energy_delta = + cpu_to_le32(vif->bss_conf.cqm_rssi_hyst); + /* fw uses an absolute value for this */ + cmd->bf_roaming_state = + cpu_to_le32(-vif->bss_conf.cqm_rssi_thold); + } 
+ cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->bf_data.ba_enabled); +} + int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool enable) { @@ -120,12 +137,14 @@ int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm, .ba_enable_beacon_abort = cpu_to_le32(enable), }; - if (!mvmvif->bf_enabled) + if (!mvmvif->bf_data.bf_enabled) return 0; if (mvm->cur_ucode == IWL_UCODE_WOWLAN) cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3); + mvmvif->bf_data.ba_enabled = enable; + iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd); iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd); return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd); } @@ -510,11 +529,12 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, vif->type != NL80211_IFTYPE_STATION || vif->p2p) return 0; + iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd); iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd); ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd); if (!ret) - mvmvif->bf_enabled = true; + mvmvif->bf_data.bf_enabled = true; return ret; } @@ -533,11 +553,22 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd); if (!ret) - mvmvif->bf_enabled = false; + mvmvif->bf_data.bf_enabled = false; return ret; } +int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (!mvmvif->bf_data.bf_enabled) + return 0; + + return iwl_mvm_enable_beacon_filter(mvm, vif); +} + const struct iwl_mvm_power_ops pm_mac_ops = { .power_update_mode = iwl_mvm_power_mac_update_mode, .power_disable = iwl_mvm_power_mac_disable, diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c index ee6547d..2a8cb5a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/iwlwifi/mvm/rx.c @@ -396,11 +396,62 @@ static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm, memcpy(&mvm->rx_stats, &stats->rx, sizeof(struct mvm_statistics_rx)); } +struct iwl_mvm_stat_data { + struct iwl_notif_statistics *stats; + struct iwl_mvm *mvm; +}; + +static void iwl_mvm_stat_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_stat_data *data = _data; + struct iwl_notif_statistics *stats = data->stats; + struct iwl_mvm *mvm = data->mvm; + int sig = -stats->general.beacon_filter_average_energy; + int last_event; + int thold = vif->bss_conf.cqm_rssi_thold; + int hyst = vif->bss_conf.cqm_rssi_hyst; + u16 id = le32_to_cpu(stats->rx.general.mac_id); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (mvmvif->id != id) + return; + + if (vif->type != NL80211_IFTYPE_STATION) + return; + + mvmvif->bf_data.ave_beacon_signal = sig; + + if (!(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)) + return; + + /* CQM Notification */ + last_event = mvmvif->bf_data.last_cqm_event; + if (thold && sig < thold && (last_event == 0 || + sig < last_event - hyst)) { + mvmvif->bf_data.last_cqm_event = sig; + IWL_DEBUG_RX(mvm, "cqm_iterator cqm low %d\n", + sig); + ieee80211_cqm_rssi_notify( + vif, + NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW, + GFP_KERNEL); + } else if (sig > thold && + (last_event == 0 || sig > last_event + hyst)) { + mvmvif->bf_data.last_cqm_event = sig; + IWL_DEBUG_RX(mvm, "cqm_iterator cqm high %d\n", + sig); + ieee80211_cqm_rssi_notify( + vif, + NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH, + GFP_KERNEL); + } +} + /* * iwl_mvm_rx_statistics - STATISTICS_NOTIFICATION handler * * TODO: This handler is implemented 
partially. - * It only gets the NIC's temperature. */ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, @@ -409,6 +460,10 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_notif_statistics *stats = (void *)&pkt->data; struct mvm_statistics_general_common *common = &stats->general.common; + struct iwl_mvm_stat_data data = { + .stats = stats, + .mvm = mvm, + }; if (mvm->temperature != le32_to_cpu(common->temperature)) { mvm->temperature = le32_to_cpu(common->temperature); @@ -416,5 +471,9 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm, } iwl_mvm_update_rx_statistics(mvm, stats); + ieee80211_iterate_active_interfaces(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_stat_iterator, + &data); return 0; } -- cgit v0.10.2 From 89716344806bd49d213ad5960661e8c2d38c4982 Mon Sep 17 00:00:00 2001 From: Alexander Bondar Date: Sun, 4 Aug 2013 17:52:23 +0300 Subject: iwlwifi: mvm: Add PBW snoozing enablement The Performance Based Window snooze mechanism is based on uAPSD and is used in low-medium traffic scenarios, in order to provide better power performance while insuring low latency and jitter for the incoming traffic. This patch enables PBW snoozing in case uAPSD is enabled and all ACs are uAPSD trigger and delivery enabled. Signed-off-by: Alexander Bondar Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h index 33f98fc..2bf29f7 100644 --- a/drivers/net/wireless/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/iwlwifi/mvm/constants.h @@ -73,5 +73,8 @@ #define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS 20 #define IWL_MVM_PS_HEAVY_TX_THLD_PERCENT 50 #define IWL_MVM_PS_HEAVY_RX_THLD_PERCENT 50 +#define IWL_MVM_PS_SNOOZE_INTERVAL 25 +#define IWL_MVM_PS_SNOOZE_WINDOW 50 +#define IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW 25 #endif /* __MVM_CONSTANTS_H */ diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c index c67b17a..2e60be2 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c @@ -352,6 +352,10 @@ static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm, IWL_DEBUG_POWER(mvm, "lprx_rssi_threshold=%d\n", val); dbgfs_pm->lprx_rssi_threshold = val; break; + case MVM_DEBUGFS_PM_SNOOZE_ENABLE: + IWL_DEBUG_POWER(mvm, "snooze_enable=%d\n", val); + dbgfs_pm->snooze_ena = val; + break; } } @@ -405,6 +409,10 @@ static ssize_t iwl_dbgfs_pm_params_write(struct file *file, POWER_LPRX_RSSI_THRESHOLD_MIN) return -EINVAL; param = MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD; + } else if (!strncmp("snooze_enable=", buf, 14)) { + if (sscanf(buf + 14, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_SNOOZE_ENABLE; } else { return -EINVAL; } diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h index bb010b3..8e7ab41 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h @@ -155,8 +155,12 @@ struct iwl_powertable_cmd { * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled. * Default: 80dbm * @num_skip_dtim: Number of DTIMs to skip if Skip over DTIM flag is set - * @snooze_interval: TBD - * @snooze_window: TBD + * @snooze_interval: Maximum time between attempts to retrieve buffered data + * from the AP [msec] + * @snooze_window: A window of time in which PBW snoozing insures that all + * packets received. 
It is also the minimum time from last + * received unicast RX packet, before client stops snoozing + * for data. [msec] * @snooze_step: TBD * @qndp_tid: TID client shall use for uAPSD QNDP triggers * @uapsd_ac_flags: Set trigger-enabled and delivery-enabled indication for diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index ee46dea..0f33d7f 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -180,6 +180,7 @@ enum iwl_dbgfs_pm_mask { MVM_DEBUGFS_PM_DISABLE_POWER_OFF = BIT(5), MVM_DEBUGFS_PM_LPRX_ENA = BIT(6), MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7), + MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8), }; struct iwl_dbgfs_pm { @@ -191,6 +192,7 @@ struct iwl_dbgfs_pm { bool disable_power_off; bool lprx_ena; u32 lprx_rssi_threshold; + bool snooze_ena; int mask; }; diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c index a5529b8..21407a3 100644 --- a/drivers/net/wireless/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/iwlwifi/mvm/power.c @@ -301,6 +301,20 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT); cmd->tx_data_timeout_uapsd = cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT); + + if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) | + BIT(IEEE80211_AC_VI) | + BIT(IEEE80211_AC_BE) | + BIT(IEEE80211_AC_BK))) { + cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK); + cmd->snooze_interval = + cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL); + cmd->snooze_window = + (mvm->cur_ucode == IWL_UCODE_WOWLAN) ? + cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) : + cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW); + } + cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP; cmd->heavy_tx_thld_packets = IWL_MVM_PS_HEAVY_TX_THLD_PACKETS; @@ -340,6 +354,14 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, } if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD) cmd->lprx_rssi_threshold = mvmvif->dbgfs_pm.lprx_rssi_threshold; + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SNOOZE_ENABLE) { + if (mvmvif->dbgfs_pm.snooze_ena) + cmd->flags |= + cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK); + else + cmd->flags &= + cpu_to_le16(~POWER_FLAGS_SNOOZE_ENA_MSK); + } #endif /* CONFIG_IWLWIFI_DEBUGFS */ } @@ -475,6 +497,19 @@ static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, pos += scnprintf(buf+pos, bufsz-pos, "heavy_rx_thld_percentage = %d\n", cmd.heavy_rx_thld_percentage); + pos += + scnprintf(buf+pos, bufsz-pos, "snooze_enable = %d\n", + (cmd.flags & + cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) ? + 1 : 0); + } + if (cmd.flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) { + pos += scnprintf(buf+pos, bufsz-pos, + "snooze_interval = %d\n", + cmd.snooze_interval); + pos += scnprintf(buf+pos, bufsz-pos, + "snooze_window = %d\n", + cmd.snooze_window); } } return pos; -- cgit v0.10.2 From 86c228a7627f3f2776893da47592234140fbfba8 Mon Sep 17 00:00:00 2001 From: Johan Almbladh Date: Wed, 14 Aug 2013 15:29:46 +0200 Subject: mac80211: perform power save processing before decryption This patch decouples the power save processing from the frame decryption by running the decrypt rx handler after sta_process. In the case where the decryption failed for some reason, the stack used to not process the PM and MOREDATA bits for that frame. The stack now always performs power save processing regardless of the decryption result. That means that encrypted data frames and NULLFUNC frames are now handled in the same way regarding power save processing, making the stack more robust. 
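A toy model of the handler chain (hypothetical, not the mac80211 code) shows why the ordering matters: once the power-save handler runs before decryption, the PM/MOREDATA processing happens even when the decrypt step drops the frame.

#include <stdio.h>

enum rx_res { RX_CONTINUE, RX_DROP };

/* Stand-ins for the real handlers: sta_process notes the PM bit,
 * decrypt drops the frame when no usable key is present. */
static enum rx_res rx_h_sta_process(int pm_bit, int *pm_handled)
{
    if (pm_bit)
        *pm_handled = 1;
    return RX_CONTINUE;
}

static enum rx_res rx_h_decrypt(int key_ok)
{
    return key_ok ? RX_CONTINUE : RX_DROP;
}

int main(void)
{
    int pm_handled = 0;

    /* new order: power save processing first, then decryption */
    if (rx_h_sta_process(1, &pm_handled) == RX_CONTINUE)
        (void)rx_h_decrypt(0);      /* frame dropped, PM bit already honoured */

    printf("PM bit handled: %d\n", pm_handled);
    return 0;
}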
Signed-off-by: Johan Almbladh Signed-off-by: Johannes Berg diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 0ac7512..ffad155 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1049,207 +1049,6 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx) static ieee80211_rx_result debug_noinline -ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) -{ - struct sk_buff *skb = rx->skb; - struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - int keyidx; - int hdrlen; - ieee80211_rx_result result = RX_DROP_UNUSABLE; - struct ieee80211_key *sta_ptk = NULL; - int mmie_keyidx = -1; - __le16 fc; - - /* - * Key selection 101 - * - * There are four types of keys: - * - GTK (group keys) - * - IGTK (group keys for management frames) - * - PTK (pairwise keys) - * - STK (station-to-station pairwise keys) - * - * When selecting a key, we have to distinguish between multicast - * (including broadcast) and unicast frames, the latter can only - * use PTKs and STKs while the former always use GTKs and IGTKs. - * Unless, of course, actual WEP keys ("pre-RSNA") are used, then - * unicast frames can also use key indices like GTKs. Hence, if we - * don't have a PTK/STK we check the key index for a WEP key. - * - * Note that in a regular BSS, multicast frames are sent by the - * AP only, associated stations unicast the frame to the AP first - * which then multicasts it on their behalf. - * - * There is also a slight problem in IBSS mode: GTKs are negotiated - * with each station, that is something we don't currently handle. - * The spec seems to expect that one negotiates the same key with - * every station but there's no such requirement; VLANs could be - * possible. - */ - - /* - * No point in finding a key and decrypting if the frame is neither - * addressed to us nor a multicast frame. - */ - if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) - return RX_CONTINUE; - - /* start without a key */ - rx->key = NULL; - - if (rx->sta) - sta_ptk = rcu_dereference(rx->sta->ptk); - - fc = hdr->frame_control; - - if (!ieee80211_has_protected(fc)) - mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb); - - if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) { - rx->key = sta_ptk; - if ((status->flag & RX_FLAG_DECRYPTED) && - (status->flag & RX_FLAG_IV_STRIPPED)) - return RX_CONTINUE; - /* Skip decryption if the frame is not protected. */ - if (!ieee80211_has_protected(fc)) - return RX_CONTINUE; - } else if (mmie_keyidx >= 0) { - /* Broadcast/multicast robust management frame / BIP */ - if ((status->flag & RX_FLAG_DECRYPTED) && - (status->flag & RX_FLAG_IV_STRIPPED)) - return RX_CONTINUE; - - if (mmie_keyidx < NUM_DEFAULT_KEYS || - mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) - return RX_DROP_MONITOR; /* unexpected BIP keyidx */ - if (rx->sta) - rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]); - if (!rx->key) - rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]); - } else if (!ieee80211_has_protected(fc)) { - /* - * The frame was not protected, so skip decryption. However, we - * need to set rx->key if there is a key that could have been - * used so that the frame may be dropped if encryption would - * have been expected. 
- */ - struct ieee80211_key *key = NULL; - struct ieee80211_sub_if_data *sdata = rx->sdata; - int i; - - if (ieee80211_is_mgmt(fc) && - is_multicast_ether_addr(hdr->addr1) && - (key = rcu_dereference(rx->sdata->default_mgmt_key))) - rx->key = key; - else { - if (rx->sta) { - for (i = 0; i < NUM_DEFAULT_KEYS; i++) { - key = rcu_dereference(rx->sta->gtk[i]); - if (key) - break; - } - } - if (!key) { - for (i = 0; i < NUM_DEFAULT_KEYS; i++) { - key = rcu_dereference(sdata->keys[i]); - if (key) - break; - } - } - if (key) - rx->key = key; - } - return RX_CONTINUE; - } else { - u8 keyid; - /* - * The device doesn't give us the IV so we won't be - * able to look up the key. That's ok though, we - * don't need to decrypt the frame, we just won't - * be able to keep statistics accurate. - * Except for key threshold notifications, should - * we somehow allow the driver to tell us which key - * the hardware used if this flag is set? - */ - if ((status->flag & RX_FLAG_DECRYPTED) && - (status->flag & RX_FLAG_IV_STRIPPED)) - return RX_CONTINUE; - - hdrlen = ieee80211_hdrlen(fc); - - if (rx->skb->len < 8 + hdrlen) - return RX_DROP_UNUSABLE; /* TODO: count this? */ - - /* - * no need to call ieee80211_wep_get_keyidx, - * it verifies a bunch of things we've done already - */ - skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1); - keyidx = keyid >> 6; - - /* check per-station GTK first, if multicast packet */ - if (is_multicast_ether_addr(hdr->addr1) && rx->sta) - rx->key = rcu_dereference(rx->sta->gtk[keyidx]); - - /* if not found, try default key */ - if (!rx->key) { - rx->key = rcu_dereference(rx->sdata->keys[keyidx]); - - /* - * RSNA-protected unicast frames should always be - * sent with pairwise or station-to-station keys, - * but for WEP we allow using a key index as well. - */ - if (rx->key && - rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 && - rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 && - !is_multicast_ether_addr(hdr->addr1)) - rx->key = NULL; - } - } - - if (rx->key) { - if (unlikely(rx->key->flags & KEY_FLAG_TAINTED)) - return RX_DROP_MONITOR; - - rx->key->tx_rx_count++; - /* TODO: add threshold stuff again */ - } else { - return RX_DROP_MONITOR; - } - - switch (rx->key->conf.cipher) { - case WLAN_CIPHER_SUITE_WEP40: - case WLAN_CIPHER_SUITE_WEP104: - result = ieee80211_crypto_wep_decrypt(rx); - break; - case WLAN_CIPHER_SUITE_TKIP: - result = ieee80211_crypto_tkip_decrypt(rx); - break; - case WLAN_CIPHER_SUITE_CCMP: - result = ieee80211_crypto_ccmp_decrypt(rx); - break; - case WLAN_CIPHER_SUITE_AES_CMAC: - result = ieee80211_crypto_aes_cmac_decrypt(rx); - break; - default: - /* - * We can reach here only with HW-only algorithms - * but why didn't it decrypt the frame?! 
- */ - return RX_DROP_UNUSABLE; - } - - /* the hdr variable is invalid after the decrypt handlers */ - - /* either the frame has been decrypted or will be dropped */ - status->flag |= RX_FLAG_DECRYPTED; - - return result; -} - -static ieee80211_rx_result debug_noinline ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx) { struct ieee80211_local *local; @@ -1550,6 +1349,207 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) return RX_CONTINUE; } /* ieee80211_rx_h_sta_process */ +static ieee80211_rx_result debug_noinline +ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) +{ + struct sk_buff *skb = rx->skb; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + int keyidx; + int hdrlen; + ieee80211_rx_result result = RX_DROP_UNUSABLE; + struct ieee80211_key *sta_ptk = NULL; + int mmie_keyidx = -1; + __le16 fc; + + /* + * Key selection 101 + * + * There are four types of keys: + * - GTK (group keys) + * - IGTK (group keys for management frames) + * - PTK (pairwise keys) + * - STK (station-to-station pairwise keys) + * + * When selecting a key, we have to distinguish between multicast + * (including broadcast) and unicast frames, the latter can only + * use PTKs and STKs while the former always use GTKs and IGTKs. + * Unless, of course, actual WEP keys ("pre-RSNA") are used, then + * unicast frames can also use key indices like GTKs. Hence, if we + * don't have a PTK/STK we check the key index for a WEP key. + * + * Note that in a regular BSS, multicast frames are sent by the + * AP only, associated stations unicast the frame to the AP first + * which then multicasts it on their behalf. + * + * There is also a slight problem in IBSS mode: GTKs are negotiated + * with each station, that is something we don't currently handle. + * The spec seems to expect that one negotiates the same key with + * every station but there's no such requirement; VLANs could be + * possible. + */ + + /* + * No point in finding a key and decrypting if the frame is neither + * addressed to us nor a multicast frame. + */ + if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) + return RX_CONTINUE; + + /* start without a key */ + rx->key = NULL; + + if (rx->sta) + sta_ptk = rcu_dereference(rx->sta->ptk); + + fc = hdr->frame_control; + + if (!ieee80211_has_protected(fc)) + mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb); + + if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) { + rx->key = sta_ptk; + if ((status->flag & RX_FLAG_DECRYPTED) && + (status->flag & RX_FLAG_IV_STRIPPED)) + return RX_CONTINUE; + /* Skip decryption if the frame is not protected. */ + if (!ieee80211_has_protected(fc)) + return RX_CONTINUE; + } else if (mmie_keyidx >= 0) { + /* Broadcast/multicast robust management frame / BIP */ + if ((status->flag & RX_FLAG_DECRYPTED) && + (status->flag & RX_FLAG_IV_STRIPPED)) + return RX_CONTINUE; + + if (mmie_keyidx < NUM_DEFAULT_KEYS || + mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) + return RX_DROP_MONITOR; /* unexpected BIP keyidx */ + if (rx->sta) + rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]); + if (!rx->key) + rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]); + } else if (!ieee80211_has_protected(fc)) { + /* + * The frame was not protected, so skip decryption. However, we + * need to set rx->key if there is a key that could have been + * used so that the frame may be dropped if encryption would + * have been expected. 
+ */ + struct ieee80211_key *key = NULL; + struct ieee80211_sub_if_data *sdata = rx->sdata; + int i; + + if (ieee80211_is_mgmt(fc) && + is_multicast_ether_addr(hdr->addr1) && + (key = rcu_dereference(rx->sdata->default_mgmt_key))) + rx->key = key; + else { + if (rx->sta) { + for (i = 0; i < NUM_DEFAULT_KEYS; i++) { + key = rcu_dereference(rx->sta->gtk[i]); + if (key) + break; + } + } + if (!key) { + for (i = 0; i < NUM_DEFAULT_KEYS; i++) { + key = rcu_dereference(sdata->keys[i]); + if (key) + break; + } + } + if (key) + rx->key = key; + } + return RX_CONTINUE; + } else { + u8 keyid; + /* + * The device doesn't give us the IV so we won't be + * able to look up the key. That's ok though, we + * don't need to decrypt the frame, we just won't + * be able to keep statistics accurate. + * Except for key threshold notifications, should + * we somehow allow the driver to tell us which key + * the hardware used if this flag is set? + */ + if ((status->flag & RX_FLAG_DECRYPTED) && + (status->flag & RX_FLAG_IV_STRIPPED)) + return RX_CONTINUE; + + hdrlen = ieee80211_hdrlen(fc); + + if (rx->skb->len < 8 + hdrlen) + return RX_DROP_UNUSABLE; /* TODO: count this? */ + + /* + * no need to call ieee80211_wep_get_keyidx, + * it verifies a bunch of things we've done already + */ + skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1); + keyidx = keyid >> 6; + + /* check per-station GTK first, if multicast packet */ + if (is_multicast_ether_addr(hdr->addr1) && rx->sta) + rx->key = rcu_dereference(rx->sta->gtk[keyidx]); + + /* if not found, try default key */ + if (!rx->key) { + rx->key = rcu_dereference(rx->sdata->keys[keyidx]); + + /* + * RSNA-protected unicast frames should always be + * sent with pairwise or station-to-station keys, + * but for WEP we allow using a key index as well. + */ + if (rx->key && + rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 && + rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 && + !is_multicast_ether_addr(hdr->addr1)) + rx->key = NULL; + } + } + + if (rx->key) { + if (unlikely(rx->key->flags & KEY_FLAG_TAINTED)) + return RX_DROP_MONITOR; + + rx->key->tx_rx_count++; + /* TODO: add threshold stuff again */ + } else { + return RX_DROP_MONITOR; + } + + switch (rx->key->conf.cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + result = ieee80211_crypto_wep_decrypt(rx); + break; + case WLAN_CIPHER_SUITE_TKIP: + result = ieee80211_crypto_tkip_decrypt(rx); + break; + case WLAN_CIPHER_SUITE_CCMP: + result = ieee80211_crypto_ccmp_decrypt(rx); + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + result = ieee80211_crypto_aes_cmac_decrypt(rx); + break; + default: + /* + * We can reach here only with HW-only algorithms + * but why didn't it decrypt the frame?! 
+ */ + return RX_DROP_UNUSABLE; + } + + /* the hdr variable is invalid after the decrypt handlers */ + + /* either the frame has been decrypted or will be dropped */ + status->flag |= RX_FLAG_DECRYPTED; + + return result; +} + static inline struct ieee80211_fragment_entry * ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata, unsigned int frag, unsigned int seq, int rx_queue, @@ -2933,10 +2933,10 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx, */ rx->skb = skb; - CALL_RXH(ieee80211_rx_h_decrypt) CALL_RXH(ieee80211_rx_h_check_more_data) CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll) CALL_RXH(ieee80211_rx_h_sta_process) + CALL_RXH(ieee80211_rx_h_decrypt) CALL_RXH(ieee80211_rx_h_defragment) CALL_RXH(ieee80211_rx_h_michael_mic_verify) /* must be after MMIC verify so header is counted in MPDU mic */ -- cgit v0.10.2 From d51b70ff5122d31e27733ba03c3afd62bb86bd63 Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Fri, 9 Aug 2013 16:35:17 +0200 Subject: mac80211: move ibss presp generation in own function Channel Switch will later require to generate beacons without setting them immediately. Therefore split the presp generation in an own function. Splitting the original very long function might be a good idea anyway. Signed-off-by: Simon Wunderlich Signed-off-by: Mathias Kretschmer Signed-off-by: Johannes Berg diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 79e294e..74de0f1 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -34,13 +34,12 @@ #define IEEE80211_IBSS_MAX_STA_ENTRIES 128 - -static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, - const u8 *bssid, const int beacon_int, - struct ieee80211_channel *chan, - const u32 basic_rates, - const u16 capability, u64 tsf, - bool creator) +static struct beacon_data * +ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata, + const int beacon_int, const u32 basic_rates, + const u16 capability, u64 tsf, + struct cfg80211_chan_def *chandef, + bool *have_higher_than_11mbit) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct ieee80211_local *local = sdata->local; @@ -48,70 +47,11 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt; u8 *pos; struct ieee80211_supported_band *sband; - struct cfg80211_bss *bss; - u32 bss_change, rate_flags, rates = 0, rates_added = 0; - struct cfg80211_chan_def chandef; - enum nl80211_bss_scan_width scan_width; - bool have_higher_than_11mbit = false; + u32 rate_flags, rates = 0, rates_added = 0; struct beacon_data *presp; int frame_len; int shift; - sdata_assert_lock(sdata); - - /* Reset own TSF to allow time synchronization work. */ - drv_reset_tsf(local, sdata); - - if (!ether_addr_equal(ifibss->bssid, bssid)) - sta_info_flush(sdata); - - /* if merging, indicate to driver that we leave the old IBSS */ - if (sdata->vif.bss_conf.ibss_joined) { - sdata->vif.bss_conf.ibss_joined = false; - sdata->vif.bss_conf.ibss_creator = false; - sdata->vif.bss_conf.enable_beacon = false; - netif_carrier_off(sdata->dev); - ieee80211_bss_info_change_notify(sdata, - BSS_CHANGED_IBSS | - BSS_CHANGED_BEACON_ENABLED); - } - - presp = rcu_dereference_protected(ifibss->presp, - lockdep_is_held(&sdata->wdev.mtx)); - rcu_assign_pointer(ifibss->presp, NULL); - if (presp) - kfree_rcu(presp, rcu_head); - - sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 
1 : 0; - - chandef = ifibss->chandef; - if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) { - if (chandef.width == NL80211_CHAN_WIDTH_5 || - chandef.width == NL80211_CHAN_WIDTH_10 || - chandef.width == NL80211_CHAN_WIDTH_20_NOHT || - chandef.width == NL80211_CHAN_WIDTH_20) { - sdata_info(sdata, - "Failed to join IBSS, beacons forbidden\n"); - return; - } - chandef.width = NL80211_CHAN_WIDTH_20; - chandef.center_freq1 = chan->center_freq; - } - - ieee80211_vif_release_channel(sdata); - if (ieee80211_vif_use_channel(sdata, &chandef, - ifibss->fixed_channel ? - IEEE80211_CHANCTX_SHARED : - IEEE80211_CHANCTX_EXCLUSIVE)) { - sdata_info(sdata, "Failed to join IBSS, no channel context\n"); - return; - } - - memcpy(ifibss->bssid, bssid, ETH_ALEN); - - sband = local->hw.wiphy->bands[chan->band]; - shift = ieee80211_vif_get_shift(&sdata->vif); - /* Build IBSS probe response */ frame_len = sizeof(struct ieee80211_hdr_3addr) + 12 /* struct ieee80211_mgmt.u.beacon */ + @@ -125,7 +65,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, ifibss->ie_len; presp = kzalloc(sizeof(*presp) + frame_len, GFP_KERNEL); if (!presp) - return; + return NULL; presp->head = (void *)(presp + 1); @@ -146,12 +86,19 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, memcpy(pos, ifibss->ssid, ifibss->ssid_len); pos += ifibss->ssid_len; - rate_flags = ieee80211_chandef_rate_flags(&chandef); + sband = local->hw.wiphy->bands[chandef->chan->band]; + rate_flags = ieee80211_chandef_rate_flags(chandef); + shift = ieee80211_chandef_get_shift(chandef); + rates_n = 0; + if (have_higher_than_11mbit) + *have_higher_than_11mbit = false; + for (i = 0; i < sband->n_bitrates; i++) { if ((rate_flags & sband->bitrates[i].flags) != rate_flags) continue; - if (sband->bitrates[i].bitrate > 110) - have_higher_than_11mbit = true; + if (sband->bitrates[i].bitrate > 110 && + have_higher_than_11mbit) + *have_higher_than_11mbit = true; rates |= BIT(i); rates_n++; @@ -178,7 +125,8 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, if (sband->band == IEEE80211_BAND_2GHZ) { *pos++ = WLAN_EID_DS_PARAMS; *pos++ = 1; - *pos++ = ieee80211_frequency_to_channel(chan->center_freq); + *pos++ = ieee80211_frequency_to_channel( + chandef->chan->center_freq); } *pos++ = WLAN_EID_IBSS_PARAMS; @@ -210,9 +158,9 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, } /* add HT capability and information IEs */ - if (chandef.width != NL80211_CHAN_WIDTH_20_NOHT && - chandef.width != NL80211_CHAN_WIDTH_5 && - chandef.width != NL80211_CHAN_WIDTH_10 && + if (chandef->width != NL80211_CHAN_WIDTH_20_NOHT && + chandef->width != NL80211_CHAN_WIDTH_5 && + chandef->width != NL80211_CHAN_WIDTH_10 && sband->ht_cap.ht_supported) { struct ieee80211_sta_ht_cap ht_cap; @@ -226,7 +174,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, * keep them at 0 */ pos = ieee80211_ie_build_ht_oper(pos, &sband->ht_cap, - &chandef, 0); + chandef, 0); } if (local->hw.queues >= IEEE80211_NUM_ACS) { @@ -243,9 +191,94 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, presp->head_len = pos - presp->head; if (WARN_ON(presp->head_len > frame_len)) + goto error; + + return presp; +error: + kfree(presp); + return NULL; +} + +static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, + const u8 *bssid, const int beacon_int, + struct ieee80211_channel *chan, + const u32 basic_rates, + const u16 capability, u64 tsf, + bool 
creator) +{ + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + struct ieee80211_local *local = sdata->local; + struct ieee80211_supported_band *sband; + struct ieee80211_mgmt *mgmt; + struct cfg80211_bss *bss; + u32 bss_change; + struct cfg80211_chan_def chandef; + struct beacon_data *presp; + enum nl80211_bss_scan_width scan_width; + bool have_higher_than_11mbit; + + sdata_assert_lock(sdata); + + /* Reset own TSF to allow time synchronization work. */ + drv_reset_tsf(local, sdata); + + if (!ether_addr_equal(ifibss->bssid, bssid)) + sta_info_flush(sdata); + + /* if merging, indicate to driver that we leave the old IBSS */ + if (sdata->vif.bss_conf.ibss_joined) { + sdata->vif.bss_conf.ibss_joined = false; + sdata->vif.bss_conf.ibss_creator = false; + sdata->vif.bss_conf.enable_beacon = false; + netif_carrier_off(sdata->dev); + ieee80211_bss_info_change_notify(sdata, + BSS_CHANGED_IBSS | + BSS_CHANGED_BEACON_ENABLED); + } + + presp = rcu_dereference_protected(ifibss->presp, + lockdep_is_held(&sdata->wdev.mtx)); + rcu_assign_pointer(ifibss->presp, NULL); + if (presp) + kfree_rcu(presp, rcu_head); + + sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0; + + chandef = ifibss->chandef; + if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) { + if (chandef.width == NL80211_CHAN_WIDTH_5 || + chandef.width == NL80211_CHAN_WIDTH_10 || + chandef.width == NL80211_CHAN_WIDTH_20_NOHT || + chandef.width == NL80211_CHAN_WIDTH_20) { + sdata_info(sdata, + "Failed to join IBSS, beacons forbidden\n"); + return; + } + chandef.width = NL80211_CHAN_WIDTH_20; + chandef.center_freq1 = chan->center_freq; + } + + ieee80211_vif_release_channel(sdata); + if (ieee80211_vif_use_channel(sdata, &chandef, + ifibss->fixed_channel ? + IEEE80211_CHANCTX_SHARED : + IEEE80211_CHANCTX_EXCLUSIVE)) { + sdata_info(sdata, "Failed to join IBSS, no channel context\n"); + return; + } + + memcpy(ifibss->bssid, bssid, ETH_ALEN); + + sband = local->hw.wiphy->bands[chan->band]; + + presp = ieee80211_ibss_build_presp(sdata, beacon_int, basic_rates, + capability, tsf, &chandef, + &have_higher_than_11mbit); + if (!presp) return; rcu_assign_pointer(ifibss->presp, presp); + mgmt = (void *)presp->head; sdata->vif.bss_conf.enable_beacon = true; sdata->vif.bss_conf.beacon_int = beacon_int; -- cgit v0.10.2 From 27b3eb9c06a7193bdc9800cd00764a130343bc8a Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 7 Aug 2013 20:11:55 +0200 Subject: mac80211: add APIs to allow keeping connections after WoWLAN In order to be able to (securely) keep connections alive after the system was suspended for WoWLAN, we need some additional APIs. We already have API (ieee80211_gtk_rekey_notify) to tell wpa_supplicant about the new replay counter if GTK rekeying was done by the device while the host was asleep, but that's not sufficient. If GTK rekeying wasn't done, we need to tell the host about sequence counters for the GTK (and PTK regardless of rekeying) that was used while asleep, add ieee80211_set_key_rx_seq() for that. If GTK rekeying was done, then we need to be able to disable the old keys (with ieee80211_remove_key()) and allocate the new GTK key(s) in mac80211 (with ieee80211_gtk_rekey_add()). If protocol offload (e.g. ARP) is implemented, then also the TX sequence counter for the PTK must be updated, using the new ieee80211_set_key_tx_seq() function. 
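A rough usage sketch of how a driver resume path might combine these APIs; this is hypothetical driver code, not part of the patch, it assumes the patch is applied, and the zeroed sequence counters are placeholders for values that would really come from the device. ieee80211_remove_key() is called from within ieee80211_iter_keys(), as its documentation below requires:

#include <net/mac80211.h>

/* Hypothetical iterator: resync counters for keys we keep and remove
 * the GTK that was replaced while the host slept. */
static void example_resume_key_iter(struct ieee80211_hw *hw,
                                    struct ieee80211_vif *vif,
                                    struct ieee80211_sta *sta,
                                    struct ieee80211_key_conf *key,
                                    void *data)
{
    struct ieee80211_key_seq seq = {};      /* placeholder counters */

    if (sta) {
        /* pairwise key survives: update its RX and TX counters */
        ieee80211_set_key_rx_seq(key, 0, &seq);
        ieee80211_set_key_tx_seq(key, &seq);
    } else {
        /* old group key: drop it, the rekeyed GTK is added afterwards */
        ieee80211_remove_key(key);
    }
}

static void example_resume(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                           struct ieee80211_key_conf *new_gtk_conf)
{
    struct ieee80211_key_seq seq = {};      /* placeholder counters */
    struct ieee80211_key_conf *gtk;

    ieee80211_iter_keys(hw, vif, example_resume_key_iter, NULL);

    gtk = ieee80211_gtk_rekey_add(vif, new_gtk_conf);
    if (!IS_ERR(gtk))
        ieee80211_set_key_rx_seq(gtk, 0, &seq);
}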
Signed-off-by: Johannes Berg diff --git a/include/net/mac80211.h b/include/net/mac80211.h index df93c77..e3e3037 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -3688,6 +3688,89 @@ void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf, int tid, struct ieee80211_key_seq *seq); /** + * ieee80211_set_key_tx_seq - set key TX sequence counter + * + * @keyconf: the parameter passed with the set key + * @seq: new sequence data + * + * This function allows a driver to set the current TX IV/PNs for the + * given key. This is useful when resuming from WoWLAN sleep and the + * device may have transmitted frames using the PTK, e.g. replies to + * ARP requests. + * + * Note that this function may only be called when no TX processing + * can be done concurrently. + */ +void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf, + struct ieee80211_key_seq *seq); + +/** + * ieee80211_set_key_rx_seq - set key RX sequence counter + * + * @keyconf: the parameter passed with the set key + * @tid: The TID, or -1 for the management frame value (CCMP only); + * the value on TID 0 is also used for non-QoS frames. For + * CMAC, only TID 0 is valid. + * @seq: new sequence data + * + * This function allows a driver to set the current RX IV/PNs for the + * given key. This is useful when resuming from WoWLAN sleep and GTK + * rekey may have been done while suspended. It should not be called + * if IV checking is done by the device and not by mac80211. + * + * Note that this function may only be called when no RX processing + * can be done concurrently. + */ +void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf, + int tid, struct ieee80211_key_seq *seq); + +/** + * ieee80211_remove_key - remove the given key + * @keyconf: the parameter passed with the set key + * + * Remove the given key. If the key was uploaded to the hardware at the + * time this function is called, it is not deleted in the hardware but + * instead assumed to have been removed already. + * + * Note that due to locking considerations this function can (currently) + * only be called during key iteration (ieee80211_iter_keys().) + */ +void ieee80211_remove_key(struct ieee80211_key_conf *keyconf); + +/** + * ieee80211_gtk_rekey_add - add a GTK key from rekeying during WoWLAN + * @vif: the virtual interface to add the key on + * @keyconf: new key data + * + * When GTK rekeying was done while the system was suspended, (a) new + * key(s) will be available. These will be needed by mac80211 for proper + * RX processing, so this function allows setting them. + * + * The function returns the newly allocated key structure, which will + * have similar contents to the passed key configuration but point to + * mac80211-owned memory. In case of errors, the function returns an + * ERR_PTR(), use IS_ERR() etc. + * + * Note that this function assumes the key isn't added to hardware + * acceleration, so no TX will be done with the key. Since it's a GTK + * on managed (station) networks, this is true anyway. If the driver + * calls this function from the resume callback and subsequently uses + * the return code 1 to reconfigure the device, this key will be part + * of the reconfiguration. + * + * Note that the driver should also call ieee80211_set_key_rx_seq() + * for the new key for each TID to set up sequence counters properly. + * + * IMPORTANT: If this replaces a key that is present in the hardware, + * then it will attempt to remove it during this call. 
In many cases + * this isn't what you want, so call ieee80211_remove_key() first for + * the key that's being replaced. + */ +struct ieee80211_key_conf * +ieee80211_gtk_rekey_add(struct ieee80211_vif *vif, + struct ieee80211_key_conf *keyconf); + +/** * ieee80211_gtk_rekey_notify - notify userspace supplicant of rekeying * @vif: virtual interface the rekeying was done on * @bssid: The BSSID of the AP, for checking association diff --git a/net/mac80211/key.c b/net/mac80211/key.c index e39cc91..620677e 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -93,6 +93,9 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key) might_sleep(); + if (key->flags & KEY_FLAG_TAINTED) + return -EINVAL; + if (!key->local->ops->set_key) goto out_unsupported; @@ -455,6 +458,7 @@ int ieee80211_key_link(struct ieee80211_key *key, struct ieee80211_sub_if_data *sdata, struct sta_info *sta) { + struct ieee80211_local *local = sdata->local; struct ieee80211_key *old_key; int idx, ret; bool pairwise; @@ -484,10 +488,13 @@ int ieee80211_key_link(struct ieee80211_key *key, ieee80211_debugfs_key_add(key); - ret = ieee80211_key_enable_hw_accel(key); - - if (ret) - ieee80211_key_free(key, true); + if (!local->wowlan) { + ret = ieee80211_key_enable_hw_accel(key); + if (ret) + ieee80211_key_free(key, true); + } else { + ret = 0; + } mutex_unlock(&sdata->local->key_mtx); @@ -540,7 +547,7 @@ void ieee80211_iter_keys(struct ieee80211_hw *hw, void *iter_data) { struct ieee80211_local *local = hw_to_local(hw); - struct ieee80211_key *key; + struct ieee80211_key *key, *tmp; struct ieee80211_sub_if_data *sdata; ASSERT_RTNL(); @@ -548,13 +555,14 @@ void ieee80211_iter_keys(struct ieee80211_hw *hw, mutex_lock(&local->key_mtx); if (vif) { sdata = vif_to_sdata(vif); - list_for_each_entry(key, &sdata->key_list, list) + list_for_each_entry_safe(key, tmp, &sdata->key_list, list) iter(hw, &sdata->vif, key->sta ? &key->sta->sta : NULL, &key->conf, iter_data); } else { list_for_each_entry(sdata, &local->interfaces, list) - list_for_each_entry(key, &sdata->key_list, list) + list_for_each_entry_safe(key, tmp, + &sdata->key_list, list) iter(hw, &sdata->vif, key->sta ? 
&key->sta->sta : NULL, &key->conf, iter_data); @@ -751,3 +759,135 @@ void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf, } } EXPORT_SYMBOL(ieee80211_get_key_rx_seq); + +void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf, + struct ieee80211_key_seq *seq) +{ + struct ieee80211_key *key; + u64 pn64; + + key = container_of(keyconf, struct ieee80211_key, conf); + + switch (key->conf.cipher) { + case WLAN_CIPHER_SUITE_TKIP: + key->u.tkip.tx.iv32 = seq->tkip.iv32; + key->u.tkip.tx.iv16 = seq->tkip.iv16; + break; + case WLAN_CIPHER_SUITE_CCMP: + pn64 = (u64)seq->ccmp.pn[5] | + ((u64)seq->ccmp.pn[4] << 8) | + ((u64)seq->ccmp.pn[3] << 16) | + ((u64)seq->ccmp.pn[2] << 24) | + ((u64)seq->ccmp.pn[1] << 32) | + ((u64)seq->ccmp.pn[0] << 40); + atomic64_set(&key->u.ccmp.tx_pn, pn64); + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + pn64 = (u64)seq->aes_cmac.pn[5] | + ((u64)seq->aes_cmac.pn[4] << 8) | + ((u64)seq->aes_cmac.pn[3] << 16) | + ((u64)seq->aes_cmac.pn[2] << 24) | + ((u64)seq->aes_cmac.pn[1] << 32) | + ((u64)seq->aes_cmac.pn[0] << 40); + atomic64_set(&key->u.aes_cmac.tx_pn, pn64); + break; + default: + WARN_ON(1); + break; + } +} +EXPORT_SYMBOL_GPL(ieee80211_set_key_tx_seq); + +void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf, + int tid, struct ieee80211_key_seq *seq) +{ + struct ieee80211_key *key; + u8 *pn; + + key = container_of(keyconf, struct ieee80211_key, conf); + + switch (key->conf.cipher) { + case WLAN_CIPHER_SUITE_TKIP: + if (WARN_ON(tid < 0 || tid >= IEEE80211_NUM_TIDS)) + return; + key->u.tkip.rx[tid].iv32 = seq->tkip.iv32; + key->u.tkip.rx[tid].iv16 = seq->tkip.iv16; + break; + case WLAN_CIPHER_SUITE_CCMP: + if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS)) + return; + if (tid < 0) + pn = key->u.ccmp.rx_pn[IEEE80211_NUM_TIDS]; + else + pn = key->u.ccmp.rx_pn[tid]; + memcpy(pn, seq->ccmp.pn, IEEE80211_CCMP_PN_LEN); + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + if (WARN_ON(tid != 0)) + return; + pn = key->u.aes_cmac.rx_pn; + memcpy(pn, seq->aes_cmac.pn, IEEE80211_CMAC_PN_LEN); + break; + default: + WARN_ON(1); + break; + } +} +EXPORT_SYMBOL_GPL(ieee80211_set_key_rx_seq); + +void ieee80211_remove_key(struct ieee80211_key_conf *keyconf) +{ + struct ieee80211_key *key; + + key = container_of(keyconf, struct ieee80211_key, conf); + + assert_key_lock(key->local); + + /* + * if key was uploaded, we assume the driver will/has remove(d) + * it, so adjust bookkeeping accordingly + */ + if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { + key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; + + if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || + (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) || + (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))) + increment_tailroom_need_count(key->sdata); + } + + ieee80211_key_free(key, false); +} +EXPORT_SYMBOL_GPL(ieee80211_remove_key); + +struct ieee80211_key_conf * +ieee80211_gtk_rekey_add(struct ieee80211_vif *vif, + struct ieee80211_key_conf *keyconf) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_local *local = sdata->local; + struct ieee80211_key *key; + int err; + + if (WARN_ON(!local->wowlan)) + return ERR_PTR(-EINVAL); + + if (WARN_ON(vif->type != NL80211_IFTYPE_STATION)) + return ERR_PTR(-EINVAL); + + key = ieee80211_key_alloc(keyconf->cipher, keyconf->keyidx, + keyconf->keylen, keyconf->key, + 0, NULL); + if (IS_ERR(key)) + return ERR_PTR(PTR_ERR(key)); + + if (sdata->u.mgd.mfp != IEEE80211_MFP_DISABLED) + key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT; + + err 
= ieee80211_key_link(key, sdata, NULL); + if (err) + return ERR_PTR(err); + + return &key->conf; +} +EXPORT_SYMBOL_GPL(ieee80211_gtk_rekey_add); diff --git a/net/mac80211/util.c b/net/mac80211/util.c index d23c5a7..e1b34a1 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1453,8 +1453,8 @@ int ieee80211_reconfig(struct ieee80211_local *local) local->resuming = true; if (local->wowlan) { - local->wowlan = false; res = drv_resume(local); + local->wowlan = false; if (res < 0) { local->resuming = false; return res; -- cgit v0.10.2 From 92367fe7f24159d6ba83276bc7a0f45c6f663837 Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Wed, 14 Aug 2013 08:01:30 +0200 Subject: ath9k: always use SIFS times from OFDM for 5/10 MHz 5/10 MHz channels should always use SIFS times as defined in IEEE 802.11-2012 18.4.4 (OFDM PHY characteristics). This makes it compatible to ath5k, which does the same. Signed-off-by: Simon Wunderlich Signed-off-by: Mathias Kretschmer Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 151443b..b3a6891 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -1069,7 +1069,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah) if (IS_CHAN_A_FAST_CLOCK(ah, chan)) tx_lat += 11; - sifstime *= 2; + sifstime = 32; ack_offset = 16; slottime = 13; } else if (IS_CHAN_QUARTER_RATE(chan)) { @@ -1079,7 +1079,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah) if (IS_CHAN_A_FAST_CLOCK(ah, chan)) tx_lat += 22; - sifstime *= 4; + sifstime = 64; ack_offset = 32; slottime = 21; } else { @@ -1116,7 +1116,6 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah) ctstimeout += 48 - sifstime - ah->slottime; } - ath9k_hw_set_sifs_time(ah, sifstime); ath9k_hw_setslottime(ah, slottime); ath9k_hw_set_ack_timeout(ah, acktimeout); -- cgit v0.10.2 From 0671894f977b6f03b63fddc33743474f495db4eb Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Fri, 16 Aug 2013 10:46:04 +0200 Subject: ath9k: use chandef instead of channel_type To enable support for 5/10 MHz, some internal functions must be converted from using the (old) channel_type to chandef. This is a good chance to change all remaining occurences. Signed-off-by: Simon Wunderlich Signed-off-by: Mathias Kretschmer Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c index 344fdde..d3063c2 100644 --- a/drivers/net/wireless/ath/ath9k/common.c +++ b/drivers/net/wireless/ath/ath9k/common.c @@ -49,37 +49,40 @@ int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb) } EXPORT_SYMBOL(ath9k_cmn_get_hw_crypto_keytype); -static u32 ath9k_get_extchanmode(struct ieee80211_channel *chan, - enum nl80211_channel_type channel_type) +static u32 ath9k_get_extchanmode(struct cfg80211_chan_def *chandef) { u32 chanmode = 0; - switch (chan->band) { + switch (chandef->chan->band) { case IEEE80211_BAND_2GHZ: - switch (channel_type) { - case NL80211_CHAN_NO_HT: - case NL80211_CHAN_HT20: + switch (chandef->width) { + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_20: chanmode = CHANNEL_G_HT20; break; - case NL80211_CHAN_HT40PLUS: - chanmode = CHANNEL_G_HT40PLUS; + case NL80211_CHAN_WIDTH_40: + if (chandef->center_freq1 > chandef->chan->center_freq) + chanmode = CHANNEL_G_HT40PLUS; + else + chanmode = CHANNEL_G_HT40MINUS; break; - case NL80211_CHAN_HT40MINUS: - chanmode = CHANNEL_G_HT40MINUS; + default: break; } break; case IEEE80211_BAND_5GHZ: - switch (channel_type) { - case NL80211_CHAN_NO_HT: - case NL80211_CHAN_HT20: + switch (chandef->width) { + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_20: chanmode = CHANNEL_A_HT20; break; - case NL80211_CHAN_HT40PLUS: - chanmode = CHANNEL_A_HT40PLUS; + case NL80211_CHAN_WIDTH_40: + if (chandef->center_freq1 > chandef->chan->center_freq) + chanmode = CHANNEL_A_HT40PLUS; + else + chanmode = CHANNEL_A_HT40MINUS; break; - case NL80211_CHAN_HT40MINUS: - chanmode = CHANNEL_A_HT40MINUS; + default: break; } break; @@ -94,13 +97,12 @@ static u32 ath9k_get_extchanmode(struct ieee80211_channel *chan, * Update internal channel flags. 
*/ void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan, - struct ieee80211_channel *chan, - enum nl80211_channel_type channel_type) + struct cfg80211_chan_def *chandef) { - ichan->channel = chan->center_freq; - ichan->chan = chan; + ichan->channel = chandef->chan->center_freq; + ichan->chan = chandef->chan; - if (chan->band == IEEE80211_BAND_2GHZ) { + if (chandef->chan->band == IEEE80211_BAND_2GHZ) { ichan->chanmode = CHANNEL_G; ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM; } else { @@ -108,8 +110,22 @@ void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan, ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM; } - if (channel_type != NL80211_CHAN_NO_HT) - ichan->chanmode = ath9k_get_extchanmode(chan, channel_type); + switch (chandef->width) { + case NL80211_CHAN_WIDTH_5: + ichan->channelFlags |= CHANNEL_QUARTER; + break; + case NL80211_CHAN_WIDTH_10: + ichan->channelFlags |= CHANNEL_HALF; + break; + case NL80211_CHAN_WIDTH_20_NOHT: + break; + case NL80211_CHAN_WIDTH_20: + case NL80211_CHAN_WIDTH_40: + ichan->chanmode = ath9k_get_extchanmode(chandef); + break; + default: + WARN_ON(1); + } } EXPORT_SYMBOL(ath9k_cmn_update_ichannel); @@ -125,8 +141,7 @@ struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw, chan_idx = curchan->hw_value; channel = &ah->channels[chan_idx]; - ath9k_cmn_update_ichannel(channel, curchan, - cfg80211_get_chandef_type(&hw->conf.chandef)); + ath9k_cmn_update_ichannel(channel, &hw->conf.chandef); return channel; } diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h index 207d069..e039bcb 100644 --- a/drivers/net/wireless/ath/ath9k/common.h +++ b/drivers/net/wireless/ath/ath9k/common.h @@ -44,8 +44,7 @@ int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb); void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan, - struct ieee80211_channel *chan, - enum nl80211_channel_type channel_type); + struct cfg80211_chan_def *chandef); struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw, struct ath_hw *ah); int ath9k_cmn_count_streams(unsigned int chainmask, int max); diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index 5c1bec1..d442581 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -1203,16 +1203,13 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed) if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || chip_reset) { struct ieee80211_channel *curchan = hw->conf.chandef.chan; - enum nl80211_channel_type channel_type = - cfg80211_get_chandef_type(&hw->conf.chandef); int pos = curchan->hw_value; ath_dbg(common, CONFIG, "Set channel: %d MHz\n", curchan->center_freq); ath9k_cmn_update_ichannel(&priv->ah->channels[pos], - hw->conf.chandef.chan, - channel_type); + &hw->conf.chandef); if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) { ath_err(common, "Unable to set channel\n"); diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 3b56c2e..85015bf 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -726,13 +726,15 @@ static void ath9k_init_band_txpower(struct ath_softc *sc, int band) struct ieee80211_supported_band *sband; struct ieee80211_channel *chan; struct ath_hw *ah = sc->sc_ah; + struct cfg80211_chan_def chandef; int i; sband = &sc->sbands[band]; for (i = 0; i < sband->n_channels; i++) { chan = &sband->channels[i]; ah->curchan = 
&ah->channels[chan->hw_value]; - ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20); + cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20); + ath9k_cmn_update_ichannel(ah->curchan, &chandef); ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true); } } diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 0bee105..ba382a8 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -1201,8 +1201,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || reset_channel) { struct ieee80211_channel *curchan = hw->conf.chandef.chan; - enum nl80211_channel_type channel_type = - cfg80211_get_chandef_type(&conf->chandef); int pos = curchan->hw_value; int old_pos = -1; unsigned long flags; @@ -1210,8 +1208,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) if (ah->curchan) old_pos = ah->curchan - &ah->channels[0]; - ath_dbg(common, CONFIG, "Set channel: %d MHz type: %d\n", - curchan->center_freq, channel_type); + ath_dbg(common, CONFIG, "Set channel: %d MHz width: %d\n", + curchan->center_freq, hw->conf.chandef.width); /* update survey stats for the old channel before switching */ spin_lock_irqsave(&common->cc_lock, flags); @@ -1219,7 +1217,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) spin_unlock_irqrestore(&common->cc_lock, flags); ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos], - curchan, channel_type); + &conf->chandef); /* * If the operating channel changes, change the survey in-use flags diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c index a3c4ca0..7e86abb 100644 --- a/drivers/net/wireless/ath/ath9k/rc.c +++ b/drivers/net/wireless/ath/ath9k/rc.c @@ -1326,8 +1326,8 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband, ath_rc_init(sc, priv_sta); ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG, - "Operating HT Bandwidth changed to: %d\n", - cfg80211_get_chandef_type(&sc->hw->conf.chandef)); + "Operating Bandwidth changed to: %d\n", + sc->hw->conf.chandef.width); } } -- cgit v0.10.2 From f819c0e72951f9238c53d6b7675bbd7a82c78b83 Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Wed, 14 Aug 2013 08:01:32 +0200 Subject: ath9k: report 5/10 MHz channels Signed-off-by: Simon Wunderlich Signed-off-by: Mathias Kretschmer Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 653f7fc..4ee472a 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -852,6 +852,17 @@ static int ath9k_process_rate(struct ath_common *common, band = hw->conf.chandef.chan->band; sband = hw->wiphy->bands[band]; + switch (hw->conf.chandef.width) { + case NL80211_CHAN_WIDTH_5: + rxs->flag |= RX_FLAG_5MHZ; + break; + case NL80211_CHAN_WIDTH_10: + rxs->flag |= RX_FLAG_10MHZ; + break; + default: + break; + } + if (rx_stats->rs_rate & 0x80) { /* HT rate */ rxs->flag |= RX_FLAG_HT; -- cgit v0.10.2 From 67a5533015be005caff61ee9cae42920a54e3a9b Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Wed, 14 Aug 2013 08:01:33 +0200 Subject: ath9k: set 5/10 MHz supported channels and fix bitrate Signed-off-by: Simon Wunderlich Signed-off-by: Mathias Kretschmer Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 85015bf..6347378 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -146,14 +146,22 @@ static struct ieee80211_rate ath9k_legacy_rates[] = { RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE), RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE), RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE), - RATE(60, 0x0b, 0), - RATE(90, 0x0f, 0), - RATE(120, 0x0a, 0), - RATE(180, 0x0e, 0), - RATE(240, 0x09, 0), - RATE(360, 0x0d, 0), - RATE(480, 0x08, 0), - RATE(540, 0x0c, 0), + RATE(60, 0x0b, (IEEE80211_RATE_SUPPORTS_5MHZ | + IEEE80211_RATE_SUPPORTS_10MHZ)), + RATE(90, 0x0f, (IEEE80211_RATE_SUPPORTS_5MHZ | + IEEE80211_RATE_SUPPORTS_10MHZ)), + RATE(120, 0x0a, (IEEE80211_RATE_SUPPORTS_5MHZ | + IEEE80211_RATE_SUPPORTS_10MHZ)), + RATE(180, 0x0e, (IEEE80211_RATE_SUPPORTS_5MHZ | + IEEE80211_RATE_SUPPORTS_10MHZ)), + RATE(240, 0x09, (IEEE80211_RATE_SUPPORTS_5MHZ | + IEEE80211_RATE_SUPPORTS_10MHZ)), + RATE(360, 0x0d, (IEEE80211_RATE_SUPPORTS_5MHZ | + IEEE80211_RATE_SUPPORTS_10MHZ)), + RATE(480, 0x08, (IEEE80211_RATE_SUPPORTS_5MHZ | + IEEE80211_RATE_SUPPORTS_10MHZ)), + RATE(540, 0x0c, (IEEE80211_RATE_SUPPORTS_5MHZ | + IEEE80211_RATE_SUPPORTS_10MHZ)), }; #ifdef CONFIG_MAC80211_LEDS diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c index 7e86abb..d3d7c51 100644 --- a/drivers/net/wireless/ath/ath9k/rc.c +++ b/drivers/net/wireless/ath/ath9k/rc.c @@ -1282,9 +1282,14 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband, struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_rate_priv *ath_rc_priv = priv_sta; int i, j = 0; + u32 rate_flags = ieee80211_chandef_rate_flags(&sc->hw->conf.chandef); for (i = 0; i < sband->n_bitrates; i++) { if (sta->supp_rates[sband->band] & BIT(i)) { + if ((rate_flags & sband->bitrates[i].flags) + != rate_flags) + continue; + ath_rc_priv->neg_rates.rs_rates[j] = (sband->bitrates[i].bitrate * 2) / 10; j++; -- cgit v0.10.2 From 6fac8bbcd5cd77ea561409c3c0ae16b4e7b9fc3e Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Wed, 14 Aug 2013 08:01:34 +0200 Subject: ath9k: announce that ath9k supports 5/10 MHz Signed-off-by: Simon Wunderlich Signed-off-by: Mathias Kretschmer Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 6347378..60bb4d6 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -860,6 +860,7 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; + hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_5_10_MHZ; #ifdef CONFIG_PM_SLEEP if ((ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) && -- cgit v0.10.2 From 312a64435bda34bc7974140f8eb53a252b4ba4ed Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Wed, 14 Aug 2013 08:01:35 +0200 Subject: ath5k: report 5/10 MHz channels Signed-off-by: Simon Wunderlich Signed-off-by: Mathias Kretschmer Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index ce67ab7..260a6fc 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -1400,6 +1400,16 @@ ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb, rxs->rate_idx = ath5k_hw_to_driver_rix(ah, rs->rs_rate); rxs->flag |= ath5k_rx_decrypted(ah, skb, rs); + switch (ah->ah_bwmode) { + case AR5K_BWMODE_5MHZ: + rxs->flag |= RX_FLAG_5MHZ; + break; + case AR5K_BWMODE_10MHZ: + rxs->flag |= RX_FLAG_10MHZ; + break; + default: + break; + } if (rxs->rate_idx >= 0 && rs->rs_rate == ah->sbands[ah->curchan->band].bitrates[rxs->rate_idx].hw_value_short) -- cgit v0.10.2 From 6a09ae95ed248d6d946407bb1f955e5f2624663d Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Wed, 14 Aug 2013 08:01:36 +0200 Subject: ath5k: set 5/10 MHz supported channels and fix duration Signed-off-by: Simon Wunderlich Signed-off-by: Mathias Kretschmer Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 260a6fc..7c298af 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -165,28 +165,36 @@ static const struct ieee80211_rate ath5k_rates[] = { .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 60, .hw_value = ATH5K_RATE_CODE_6M, - .flags = 0 }, + .flags = IEEE80211_RATE_SUPPORTS_5MHZ | + IEEE80211_RATE_SUPPORTS_10MHZ }, { .bitrate = 90, .hw_value = ATH5K_RATE_CODE_9M, - .flags = 0 }, + .flags = IEEE80211_RATE_SUPPORTS_5MHZ | + IEEE80211_RATE_SUPPORTS_10MHZ }, { .bitrate = 120, .hw_value = ATH5K_RATE_CODE_12M, - .flags = 0 }, + .flags = IEEE80211_RATE_SUPPORTS_5MHZ | + IEEE80211_RATE_SUPPORTS_10MHZ }, { .bitrate = 180, .hw_value = ATH5K_RATE_CODE_18M, - .flags = 0 }, + .flags = IEEE80211_RATE_SUPPORTS_5MHZ | + IEEE80211_RATE_SUPPORTS_10MHZ }, { .bitrate = 240, .hw_value = ATH5K_RATE_CODE_24M, - .flags = 0 }, + .flags = IEEE80211_RATE_SUPPORTS_5MHZ | + IEEE80211_RATE_SUPPORTS_10MHZ }, { .bitrate = 360, .hw_value = ATH5K_RATE_CODE_36M, - .flags = 0 }, + .flags = IEEE80211_RATE_SUPPORTS_5MHZ | + IEEE80211_RATE_SUPPORTS_10MHZ }, { .bitrate = 480, .hw_value = ATH5K_RATE_CODE_48M, - .flags = 0 }, + .flags = IEEE80211_RATE_SUPPORTS_5MHZ | + IEEE80211_RATE_SUPPORTS_10MHZ }, { .bitrate = 540, .hw_value = ATH5K_RATE_CODE_54M, - .flags = 0 }, + .flags = IEEE80211_RATE_SUPPORTS_5MHZ | + IEEE80211_RATE_SUPPORTS_10MHZ }, }; static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp) diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c index 1f16b42..c60d36a 100644 --- a/drivers/net/wireless/ath/ath5k/pcu.c +++ b/drivers/net/wireless/ath/ath5k/pcu.c @@ -144,11 +144,13 @@ ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum ieee80211_band band, sifs = AR5K_INIT_SIFS_HALF_RATE; preamble *= 2; sym_time *= 2; + bitrate = DIV_ROUND_UP(bitrate, 2); break; case AR5K_BWMODE_5MHZ: sifs = AR5K_INIT_SIFS_QUARTER_RATE; preamble *= 4; sym_time *= 4; + bitrate = DIV_ROUND_UP(bitrate, 4); break; default: sifs = AR5K_INIT_SIFS_DEFAULT_BG; diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c index 65fe929..0583c69 100644 --- a/drivers/net/wireless/ath/ath5k/qcu.c +++ b/drivers/net/wireless/ath/ath5k/qcu.c @@ -566,9 +566,11 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time) { struct ieee80211_channel *channel = ah->ah_current_channel; enum ieee80211_band band; + struct 
ieee80211_supported_band *sband; struct ieee80211_rate *rate; u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock; u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time); + u32 rate_flags, i; if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX) return -EINVAL; @@ -605,7 +607,28 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time) else band = IEEE80211_BAND_2GHZ; - rate = &ah->sbands[band].bitrates[0]; + switch (ah->ah_bwmode) { + case AR5K_BWMODE_5MHZ: + rate_flags = IEEE80211_RATE_SUPPORTS_5MHZ; + break; + case AR5K_BWMODE_10MHZ: + rate_flags = IEEE80211_RATE_SUPPORTS_10MHZ; + break; + default: + rate_flags = 0; + break; + } + sband = &ah->sbands[band]; + rate = NULL; + for (i = 0; i < sband->n_bitrates; i++) { + if ((rate_flags & sband->bitrates[i].flags) != rate_flags) + continue; + rate = &sband->bitrates[i]; + break; + } + if (WARN_ON(!rate)) + return -EINVAL; + ack_tx_time = ath5k_hw_get_frame_duration(ah, band, 10, rate, false); /* ack_tx_time includes an SIFS already */ -- cgit v0.10.2 From 4d70f2fbe12118c5526a1d761f8ef562cecbbc2c Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Wed, 14 Aug 2013 08:01:37 +0200 Subject: ath5k: enable support for 5 MHz and 10 MHz channels Signed-off-by: Simon Wunderlich Signed-off-by: Mathias Kretschmer Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h index 2d691b8..74bd54d 100644 --- a/drivers/net/wireless/ath/ath5k/ath5k.h +++ b/drivers/net/wireless/ath/ath5k/ath5k.h @@ -29,6 +29,7 @@ #include #include #include +#include /* RX/TX descriptor hw structs * TODO: Driver part should only see sw structs */ diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 7c298af..48161ed 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -56,6 +56,7 @@ #include #include +#include #include #include @@ -443,11 +444,27 @@ ath5k_setup_bands(struct ieee80211_hw *hw) * Called with ah->lock. */ int -ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan) +ath5k_chan_set(struct ath5k_hw *ah, struct cfg80211_chan_def *chandef) { ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "channel set, resetting (%u -> %u MHz)\n", - ah->curchan->center_freq, chan->center_freq); + ah->curchan->center_freq, chandef->chan->center_freq); + + switch (chandef->width) { + case NL80211_CHAN_WIDTH_20: + case NL80211_CHAN_WIDTH_20_NOHT: + ah->ah_bwmode = AR5K_BWMODE_DEFAULT; + break; + case NL80211_CHAN_WIDTH_5: + ah->ah_bwmode = AR5K_BWMODE_5MHZ; + break; + case NL80211_CHAN_WIDTH_10: + ah->ah_bwmode = AR5K_BWMODE_10MHZ; + break; + default: + WARN_ON(1); + return -EINVAL; + } /* * To switch channels clear any pending DMA operations; @@ -455,7 +472,7 @@ ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan) * hardware at the new frequency, and then re-enable * the relevant bits of the h/w. 
*/ - return ath5k_reset(ah, chan, true); + return ath5k_reset(ah, chandef->chan, true); } void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) @@ -2525,6 +2542,8 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops) /* SW support for IBSS_RSN is provided by mac80211 */ hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; + hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_5_10_MHZ; + /* both antennas can be configured as RX or TX */ hw->wiphy->available_antennas_tx = 0x3; hw->wiphy->available_antennas_rx = 0x3; diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h index ca9a83c..97469d0 100644 --- a/drivers/net/wireless/ath/ath5k/base.h +++ b/drivers/net/wireless/ath/ath5k/base.h @@ -101,7 +101,7 @@ void ath5k_set_beacon_filter(struct ieee80211_hw *hw, bool enable); void ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah, struct ieee80211_vif *vif); -int ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan); +int ath5k_chan_set(struct ath5k_hw *ah, struct cfg80211_chan_def *chandef); void ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf); void ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf); void ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c index 40825d4..4ee01f6 100644 --- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c +++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c @@ -202,7 +202,7 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed) mutex_lock(&ah->lock); if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { - ret = ath5k_chan_set(ah, conf->chandef.chan); + ret = ath5k_chan_set(ah, &conf->chandef); if (ret < 0) goto unlock; } -- cgit v0.10.2 From d074e8d547853cc8b40cf93a460e8fbf9eaa3d00 Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Wed, 14 Aug 2013 08:01:38 +0200 Subject: ath9k: enable CSA functionality in ath9k CSA is only enabled for one interface, but the same limitation applies for mac80211 too. It checks whether the beacon has been sent (different approaches for non-EDMA-enabled and EDMA-enabled devices), and completes the channel switch after that. Signed-off-by: Simon Wunderlich Signed-off-by: Mathias Kretschmer Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index df1c495..8519e75 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -420,6 +420,7 @@ void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif); void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif); void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif); void ath9k_set_beacon(struct ath_softc *sc); +bool ath9k_csa_is_finished(struct ath_softc *sc); /*******************/ /* Link Monitoring */ @@ -756,6 +757,7 @@ struct ath_softc { #endif struct ath_descdma txsdma; + struct ieee80211_vif *csa_vif; struct ath_ant_comb ant_comb; u8 ant_tx, ant_rx; diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c index 1a17732..b5c16b3a 100644 --- a/drivers/net/wireless/ath/ath9k/beacon.c +++ b/drivers/net/wireless/ath/ath9k/beacon.c @@ -291,6 +291,23 @@ void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif) (unsigned long long)tsfadjust, avp->av_bslot); } +bool ath9k_csa_is_finished(struct ath_softc *sc) +{ + struct ieee80211_vif *vif; + + vif = sc->csa_vif; + if (!vif || !vif->csa_active) + return false; + + if (!ieee80211_csa_is_complete(vif)) + return false; + + ieee80211_csa_finish(vif); + + sc->csa_vif = NULL; + return true; +} + void ath9k_beacon_tasklet(unsigned long data) { struct ath_softc *sc = (struct ath_softc *)data; @@ -336,6 +353,10 @@ void ath9k_beacon_tasklet(unsigned long data) return; } + /* EDMA devices check that in the tx completion function. */ + if (!edma && ath9k_csa_is_finished(sc)) + return; + slot = ath9k_beacon_choose_slot(sc); vif = sc->beacon.bslot[slot]; diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 60bb4d6..abf1eb5 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -861,6 +861,7 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_5_10_MHZ; + hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; #ifdef CONFIG_PM_SLEEP if ((ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) && diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index ba382a8..ac9f18f 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -1032,6 +1032,9 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw, if (ath9k_uses_beacons(vif->type)) ath9k_beacon_remove_slot(sc, vif); + if (sc->csa_vif == vif) + sc->csa_vif = NULL; + ath9k_ps_wakeup(sc); ath9k_calculate_summary_state(hw, NULL); ath9k_ps_restore(sc); @@ -2318,6 +2321,19 @@ static void ath9k_sw_scan_complete(struct ieee80211_hw *hw) clear_bit(SC_OP_SCANNING, &sc->sc_flags); } +static void ath9k_channel_switch_beacon(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_chan_def *chandef) +{ + struct ath_softc *sc = hw->priv; + + /* mac80211 does not support CSA in multi-if cases (yet) */ + if (WARN_ON(sc->csa_vif)) + return; + + sc->csa_vif = vif; +} + struct ieee80211_ops ath9k_ops = { .tx = ath9k_tx, .start = ath9k_start, @@ -2365,4 +2381,5 @@ struct ieee80211_ops ath9k_ops = { #endif .sw_scan_start = ath9k_sw_scan_start, .sw_scan_complete = ath9k_sw_scan_complete, + .channel_switch_beacon = 
ath9k_channel_switch_beacon, }; diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 7223e30..35b515f 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -2559,6 +2559,8 @@ void ath_tx_edma_tasklet(struct ath_softc *sc) if (ts.qid == sc->beacon.beaconq) { sc->beacon.tx_processed = true; sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK); + + ath9k_csa_is_finished(sc); continue; } -- cgit v0.10.2 From ae1b1c5dcdef1ebd4b37a7d56ad767add757a660 Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Fri, 16 Aug 2013 10:23:29 +0200 Subject: rt2x00: rt2800lib: introduce rt2800_get_txwi_rxwi_size helper The rt2800pci driver uses the same [RT]XWI size for all chipsets, however some chips requires different values. The size of the [RT]XWI structures is a constant value for a given chipset and it does not depend on the underlying interface. Add a helper function which returns the correct values for the actual chipset and use the new helper both in the rt2800usb and in the rt2800pci drivers. This ensures that both drivers are using the correct values. Signed-off-by: Gabor Juhos Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index dedc3d4..313da6a 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -521,6 +521,29 @@ void rt2800_disable_wpdma(struct rt2x00_dev *rt2x00dev) } EXPORT_SYMBOL_GPL(rt2800_disable_wpdma); +void rt2800_get_txwi_rxwi_size(struct rt2x00_dev *rt2x00dev, + unsigned short *txwi_size, + unsigned short *rxwi_size) +{ + switch (rt2x00dev->chip.rt) { + case RT3593: + *txwi_size = TXWI_DESC_SIZE_4WORDS; + *rxwi_size = RXWI_DESC_SIZE_5WORDS; + break; + + case RT5592: + *txwi_size = TXWI_DESC_SIZE_5WORDS; + *rxwi_size = RXWI_DESC_SIZE_6WORDS; + break; + + default: + *txwi_size = TXWI_DESC_SIZE_4WORDS; + *rxwi_size = RXWI_DESC_SIZE_4WORDS; + break; + } +} +EXPORT_SYMBOL_GPL(rt2800_get_txwi_rxwi_size); + static bool rt2800_check_firmware_crc(const u8 *data, const size_t len) { u16 fw_crc; diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h index 6ec7394..a94ba44 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.h +++ b/drivers/net/wireless/rt2x00/rt2800lib.h @@ -226,4 +226,8 @@ int rt2800_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey); void rt2800_disable_wpdma(struct rt2x00_dev *rt2x00dev); +void rt2800_get_txwi_rxwi_size(struct rt2x00_dev *rt2x00dev, + unsigned short *txwi_size, + unsigned short *rxwi_size); + #endif /* RT2800LIB_H */ diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c index 0005562..dcad90c 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/rt2x00/rt2800pci.c @@ -1189,12 +1189,17 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = { static void rt2800pci_queue_init(struct data_queue *queue) { + struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; + unsigned short txwi_size, rxwi_size; + + rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size); + switch (queue->qid) { case QID_RX: queue->limit = 128; queue->data_size = AGGREGATION_SIZE; queue->desc_size = RXD_DESC_SIZE; - queue->winfo_size = RXWI_DESC_SIZE_4WORDS; + queue->winfo_size = rxwi_size; queue->priv_size = sizeof(struct queue_entry_priv_mmio); break; @@ -1205,7 +1210,7 @@ static void rt2800pci_queue_init(struct data_queue *queue) queue->limit = 64; 
queue->data_size = AGGREGATION_SIZE; queue->desc_size = TXD_DESC_SIZE; - queue->winfo_size = TXWI_DESC_SIZE_4WORDS; + queue->winfo_size = txwi_size; queue->priv_size = sizeof(struct queue_entry_priv_mmio); break; @@ -1213,7 +1218,7 @@ static void rt2800pci_queue_init(struct data_queue *queue) queue->limit = 8; queue->data_size = 0; /* No DMA required for beacons */ queue->desc_size = TXD_DESC_SIZE; - queue->winfo_size = TXWI_DESC_SIZE_4WORDS; + queue->winfo_size = txwi_size; queue->priv_size = sizeof(struct queue_entry_priv_mmio); break; diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index fc9efdf..338034e 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -854,16 +854,7 @@ static void rt2800usb_queue_init(struct data_queue *queue) struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; unsigned short txwi_size, rxwi_size; - if (rt2x00_rt(rt2x00dev, RT3593)) { - txwi_size = TXWI_DESC_SIZE_4WORDS; - rxwi_size = RXWI_DESC_SIZE_5WORDS; - } else if (rt2x00_rt(rt2x00dev, RT5592)) { - txwi_size = TXWI_DESC_SIZE_5WORDS; - rxwi_size = RXWI_DESC_SIZE_6WORDS; - } else { - txwi_size = TXWI_DESC_SIZE_4WORDS; - rxwi_size = RXWI_DESC_SIZE_4WORDS; - } + rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size); switch (queue->qid) { case QID_RX: -- cgit v0.10.2 From 41caa760d6acaf47cbd44c3d78307fb9be089111 Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Fri, 16 Aug 2013 10:23:30 +0200 Subject: rt2x00: rt2800pci: fix AUX_CTRL register setup for RT3090/3390/3593/5592 The 2011_1007_RT5390_RT5392_Linux_STA_V2.5.0.3_DPO driver enables PCIe wakeup for these chips as well. Do the same in rt2x00. References: rt28xx_init in common/rtmp_init_intf.c RTMPInitPCIeLinkCtrlValue in os/linux/rt_rbus_pci_drv.c Signed-off-by: Gabor Juhos Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c index dcad90c..f8f2abb 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/rt2x00/rt2800pci.c @@ -507,9 +507,13 @@ static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev) rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00); if (rt2x00_is_pcie(rt2x00dev) && - (rt2x00_rt(rt2x00dev, RT3572) || + (rt2x00_rt(rt2x00dev, RT3090) || + rt2x00_rt(rt2x00dev, RT3390) || + rt2x00_rt(rt2x00dev, RT3572) || + rt2x00_rt(rt2x00dev, RT3593) || rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392))) { + rt2x00_rt(rt2x00dev, RT5392) || + rt2x00_rt(rt2x00dev, RT5592))) { rt2x00mmio_register_read(rt2x00dev, AUX_CTRL, ®); rt2x00_set_field32(®, AUX_CTRL_FORCE_PCIE_CLK, 1); rt2x00_set_field32(®, AUX_CTRL_WAKE_PCIE_EN, 1); -- cgit v0.10.2 From e3fec5a1c5a1ab4a85ca3f4e41c626fb953ce162 Mon Sep 17 00:00:00 2001 From: Rami Rosen Date: Mon, 19 Aug 2013 15:47:11 +0300 Subject: xfrm: remove irrelevant comment in xfrm_input(). This patch removes a comment in xfrm_input() which became irrelevant due to commit 2774c13, "xfrm: Handle blackhole route creation via afinfo". That commit removed returning -EREMOTE in the xfrm_lookup() method when the packet should be discarded and also removed the correspoinding -EREMOTE handlers. This was replaced by calling the make_blackhole() method. Therefore the comment about -EREMOTE is not relevant anymore. 
Signed-off-by: Rami Rosen Signed-off-by: Steffen Klassert diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index f7078eb..ad8cc7b 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -2132,8 +2132,6 @@ restart: * have the xfrm_state's. We need to wait for KM to * negotiate new SA's or bail out with error.*/ if (net->xfrm.sysctl_larval_drop) { - /* EREMOTE tells the caller to generate - * a one-shot blackhole route. */ dst_release(dst); xfrm_pols_put(pols, drop_pols); XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); -- cgit v0.10.2 From 78f1ccc4f82cd7bbb65b0e938ed0dfd81e902fe0 Mon Sep 17 00:00:00 2001 From: Denis Kirjanov Date: Fri, 16 Aug 2013 11:20:07 +0400 Subject: sis900: don't restart auto-negotiation each time after link resume. Signed-off-by: Denis Kirjanov Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index f5d7ad7..b7a3930 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -1309,23 +1309,9 @@ static void sis900_timer(unsigned long data) struct sis900_private *sis_priv = netdev_priv(net_dev); struct mii_phy *mii_phy = sis_priv->mii; static const int next_tick = 5*HZ; + int speed = 0, duplex = 0; u16 status; - if (!sis_priv->autong_complete){ - int uninitialized_var(speed), duplex = 0; - - sis900_read_mode(net_dev, &speed, &duplex); - if (duplex){ - sis900_set_mode(sis_priv, speed, duplex); - sis630_set_eq(net_dev, sis_priv->chipset_rev); - netif_carrier_on(net_dev); - } - - sis_priv->timer.expires = jiffies + HZ; - add_timer(&sis_priv->timer); - return; - } - status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS); status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS); @@ -1336,8 +1322,16 @@ static void sis900_timer(unsigned long data) status = sis900_default_phy(net_dev); mii_phy = sis_priv->mii; - if (status & MII_STAT_LINK) - sis900_check_mode(net_dev, mii_phy); + if (status & MII_STAT_LINK) { + WARN_ON(!(status & MII_STAT_AUTO_DONE)); + + sis900_read_mode(net_dev, &speed, &duplex); + if (duplex) { + sis900_set_mode(sis_priv, speed, duplex); + sis630_set_eq(net_dev, sis_priv->chipset_rev); + netif_carrier_on(net_dev); + } + } } else { /* Link ON -> OFF */ if (!(status & MII_STAT_LINK)){ -- cgit v0.10.2 From 197d439eec005564648ddd48a76ee5ddf4c15e80 Mon Sep 17 00:00:00 2001 From: hayeswang Date: Fri, 16 Aug 2013 16:09:32 +0800 Subject: r8152: remove clearing the memory to zero for netdev priv Remove memset(tp, 0, sizeof(*tp)); Signed-off-by: Hayes Wang Signed-off-by: David S. Miller diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index ef2498c..c13662b 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -2105,7 +2105,6 @@ static int rtl8152_probe(struct usb_interface *intf, SET_NETDEV_DEV(netdev, &intf->dev); tp = netdev_priv(netdev); - memset(tp, 0, sizeof(*tp)); tp->msg_enable = 0x7FFF; tasklet_init(&tp->tl, bottom_half, (unsigned long)tp); -- cgit v0.10.2 From dff4e8ad88f164edc0a31e178812b99eede3bd22 Mon Sep 17 00:00:00 2001 From: hayeswang Date: Fri, 16 Aug 2013 16:09:33 +0800 Subject: r8152: replace void * with struct r8152 * Change the type of contex of tx_agg and rx_agg from void * to staruc r8152 *. Signed-off-by: Hayes Wang Signed-off-by: David S. 
Miller diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index c13662b..a18f02d 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -329,10 +329,12 @@ struct tx_desc { #define IPV6_CS (1 << 28) /* Calculate IPv6 checksum */ }; +struct r8152; + struct rx_agg { struct list_head list; struct urb *urb; - void *context; + struct r8152 *context; void *buffer; void *head; }; @@ -340,7 +342,7 @@ struct rx_agg { struct tx_agg { struct list_head list; struct urb *urb; - void *context; + struct r8152 *context; void *buffer; void *head; u32 skb_num; -- cgit v0.10.2 From a5a4f468e3b467457b27851152671058b95b71bb Mon Sep 17 00:00:00 2001 From: hayeswang Date: Fri, 16 Aug 2013 16:09:34 +0800 Subject: r8152: replace lockflags with flags Replace lockflags with flags. Signed-off-by: Hayes Wang Signed-off-by: David S. Miller diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index a18f02d..41b99ce 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -769,7 +769,7 @@ static struct net_device_stats *rtl8152_get_stats(struct net_device *dev) static void read_bulk_callback(struct urb *urb) { struct net_device *netdev; - unsigned long lockflags; + unsigned long flags; int status = urb->status; struct rx_agg *agg; struct r8152 *tp; @@ -798,9 +798,9 @@ static void read_bulk_callback(struct urb *urb) if (urb->actual_length < ETH_ZLEN) break; - spin_lock_irqsave(&tp->rx_lock, lockflags); + spin_lock_irqsave(&tp->rx_lock, flags); list_add_tail(&agg->list, &tp->rx_done); - spin_unlock_irqrestore(&tp->rx_lock, lockflags); + spin_unlock_irqrestore(&tp->rx_lock, flags); tasklet_schedule(&tp->tl); return; case -ESHUTDOWN: @@ -821,9 +821,9 @@ static void read_bulk_callback(struct urb *urb) if (result == -ENODEV) { netif_device_detach(tp->netdev); } else if (result) { - spin_lock_irqsave(&tp->rx_lock, lockflags); + spin_lock_irqsave(&tp->rx_lock, flags); list_add_tail(&agg->list, &tp->rx_done); - spin_unlock_irqrestore(&tp->rx_lock, lockflags); + spin_unlock_irqrestore(&tp->rx_lock, flags); tasklet_schedule(&tp->tl); } } @@ -831,7 +831,7 @@ static void read_bulk_callback(struct urb *urb) static void write_bulk_callback(struct urb *urb) { struct net_device_stats *stats; - unsigned long lockflags; + unsigned long flags; struct tx_agg *agg; struct r8152 *tp; int status = urb->status; @@ -853,9 +853,9 @@ static void write_bulk_callback(struct urb *urb) stats->tx_bytes += agg->skb_len; } - spin_lock_irqsave(&tp->tx_lock, lockflags); + spin_lock_irqsave(&tp->tx_lock, flags); list_add_tail(&agg->list, &tp->tx_free); - spin_unlock_irqrestore(&tp->tx_lock, lockflags); + spin_unlock_irqrestore(&tp->tx_lock, flags); if (!netif_carrier_ok(tp->netdev)) return; @@ -1119,7 +1119,7 @@ static void rx_bottom(struct r8152 *tp) struct net_device *netdev; struct rx_agg *agg; struct rx_desc *rx_desc; - unsigned long lockflags; + unsigned long flags; struct list_head *cursor, *next; struct sk_buff *skb; struct urb *urb; @@ -1132,16 +1132,16 @@ static void rx_bottom(struct r8152 *tp) stats = rtl8152_get_stats(netdev); - spin_lock_irqsave(&tp->rx_lock, lockflags); + spin_lock_irqsave(&tp->rx_lock, flags); list_for_each_safe(cursor, next, &tp->rx_done) { list_del_init(cursor); - spin_unlock_irqrestore(&tp->rx_lock, lockflags); + spin_unlock_irqrestore(&tp->rx_lock, flags); agg = list_entry(cursor, struct rx_agg, list); urb = agg->urb; if (urb->actual_length < ETH_ZLEN) { ret = r8152_submit_rx(tp, agg, GFP_ATOMIC); - spin_lock_irqsave(&tp->rx_lock, lockflags); + spin_lock_irqsave(&tp->rx_lock, 
flags); if (ret && ret != -ENODEV) { list_add_tail(&agg->list, next); tasklet_schedule(&tp->tl); @@ -1182,13 +1182,13 @@ static void rx_bottom(struct r8152 *tp) } ret = r8152_submit_rx(tp, agg, GFP_ATOMIC); - spin_lock_irqsave(&tp->rx_lock, lockflags); + spin_lock_irqsave(&tp->rx_lock, flags); if (ret && ret != -ENODEV) { list_add_tail(&agg->list, next); tasklet_schedule(&tp->tl); } } - spin_unlock_irqrestore(&tp->rx_lock, lockflags); + spin_unlock_irqrestore(&tp->rx_lock, flags); } static void tx_bottom(struct r8152 *tp) @@ -1196,7 +1196,7 @@ static void tx_bottom(struct r8152 *tp) struct net_device_stats *stats; struct net_device *netdev; struct tx_agg *agg; - unsigned long lockflags; + unsigned long flags; u32 remain, total; u8 *tx_data; int res; @@ -1205,7 +1205,7 @@ static void tx_bottom(struct r8152 *tp) next_agg: agg = NULL; - spin_lock_irqsave(&tp->tx_lock, lockflags); + spin_lock_irqsave(&tp->tx_lock, flags); if (!skb_queue_empty(&tp->tx_queue) && !list_empty(&tp->tx_free)) { struct list_head *cursor; @@ -1213,7 +1213,7 @@ next_agg: list_del_init(cursor); agg = list_entry(cursor, struct tx_agg, list); } - spin_unlock_irqrestore(&tp->tx_lock, lockflags); + spin_unlock_irqrestore(&tp->tx_lock, flags); if (!agg) return; @@ -1268,9 +1268,9 @@ next_agg: netif_warn(tp, tx_err, netdev, "failed tx_urb %d\n", res); stats->tx_dropped += agg->skb_num; - spin_lock_irqsave(&tp->tx_lock, lockflags); + spin_lock_irqsave(&tp->tx_lock, flags); list_add_tail(&agg->list, &tp->tx_free); - spin_unlock_irqrestore(&tp->tx_lock, lockflags); + spin_unlock_irqrestore(&tp->tx_lock, flags); } return; } @@ -1373,7 +1373,7 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, { struct r8152 *tp = netdev_priv(netdev); struct net_device_stats *stats = rtl8152_get_stats(netdev); - unsigned long lockflags; + unsigned long flags; struct tx_agg *agg = NULL; struct tx_desc *tx_desc; unsigned int len; @@ -1382,7 +1382,7 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, skb_tx_timestamp(skb); - spin_lock_irqsave(&tp->tx_lock, lockflags); + spin_lock_irqsave(&tp->tx_lock, flags); if (!list_empty(&tp->tx_free) && skb_queue_empty(&tp->tx_queue)) { struct list_head *cursor; @@ -1390,7 +1390,7 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, list_del_init(cursor); agg = list_entry(cursor, struct tx_agg, list); } - spin_unlock_irqrestore(&tp->tx_lock, lockflags); + spin_unlock_irqrestore(&tp->tx_lock, flags); if (!agg) { skb_queue_tail(&tp->tx_queue, skb); @@ -1419,9 +1419,9 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, netif_warn(tp, tx_err, netdev, "failed tx_urb %d\n", res); stats->tx_dropped++; - spin_lock_irqsave(&tp->tx_lock, lockflags); + spin_lock_irqsave(&tp->tx_lock, flags); list_add_tail(&agg->list, &tp->tx_free); - spin_unlock_irqrestore(&tp->tx_lock, lockflags); + spin_unlock_irqrestore(&tp->tx_lock, flags); } } -- cgit v0.10.2 From 0de98f6c6fb63395a653f58b53663ce6c565a1e6 Mon Sep 17 00:00:00 2001 From: hayeswang Date: Fri, 16 Aug 2013 16:09:35 +0800 Subject: r8152: adjust some duplicated code - Use r8152_get_tx_agg for getting tx agg list - Replace submit rx with goto submit Signed-off-by: Hayes Wang Signed-off-by: David S. 
Miller diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 41b99ce..4dee76b 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1067,6 +1067,24 @@ err1: return -ENOMEM; } +static struct tx_agg *r8152_get_tx_agg(struct r8152 *tp) +{ + struct tx_agg *agg = NULL; + unsigned long flags; + + spin_lock_irqsave(&tp->tx_lock, flags); + if (!list_empty(&tp->tx_free)) { + struct list_head *cursor; + + cursor = tp->tx_free.next; + list_del_init(cursor); + agg = list_entry(cursor, struct tx_agg, list); + } + spin_unlock_irqrestore(&tp->tx_lock, flags); + + return agg; +} + static void r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb) { @@ -1139,15 +1157,8 @@ static void rx_bottom(struct r8152 *tp) agg = list_entry(cursor, struct rx_agg, list); urb = agg->urb; - if (urb->actual_length < ETH_ZLEN) { - ret = r8152_submit_rx(tp, agg, GFP_ATOMIC); - spin_lock_irqsave(&tp->rx_lock, flags); - if (ret && ret != -ENODEV) { - list_add_tail(&agg->list, next); - tasklet_schedule(&tp->tl); - } - continue; - } + if (urb->actual_length < ETH_ZLEN) + goto submit; len_used = 0; rx_desc = agg->head; @@ -1181,6 +1192,7 @@ static void rx_bottom(struct r8152 *tp) len_used += sizeof(struct rx_desc) + pkt_len; } +submit: ret = r8152_submit_rx(tp, agg, GFP_ATOMIC); spin_lock_irqsave(&tp->rx_lock, flags); if (ret && ret != -ENODEV) { @@ -1205,16 +1217,10 @@ static void tx_bottom(struct r8152 *tp) next_agg: agg = NULL; - spin_lock_irqsave(&tp->tx_lock, flags); - if (!skb_queue_empty(&tp->tx_queue) && !list_empty(&tp->tx_free)) { - struct list_head *cursor; - - cursor = tp->tx_free.next; - list_del_init(cursor); - agg = list_entry(cursor, struct tx_agg, list); - } - spin_unlock_irqrestore(&tp->tx_lock, flags); + if (skb_queue_empty(&tp->tx_queue)) + return; + agg = r8152_get_tx_agg(tp); if (!agg) return; @@ -1382,15 +1388,10 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, skb_tx_timestamp(skb); - spin_lock_irqsave(&tp->tx_lock, flags); - if (!list_empty(&tp->tx_free) && skb_queue_empty(&tp->tx_queue)) { - struct list_head *cursor; - - cursor = tp->tx_free.next; - list_del_init(cursor); - agg = list_entry(cursor, struct tx_agg, list); - } - spin_unlock_irqrestore(&tp->tx_lock, flags); + /* If tx_queue is not empty, it means at least one previous packt */ + /* is waiting for sending. Don't send current one before it. */ + if (skb_queue_empty(&tp->tx_queue)) + agg = r8152_get_tx_agg(tp); if (!agg) { skb_queue_tail(&tp->tx_queue, skb); -- cgit v0.10.2 From 43a4478d65ffaa1c2def8ffceafbc3a3f1e99b82 Mon Sep 17 00:00:00 2001 From: hayeswang Date: Fri, 16 Aug 2013 16:09:36 +0800 Subject: r8152: move some declearation of variables Move some declearation of variables in rx_bottom(). Signed-off-by: Hayes Wang Signed-off-by: David S. 
Miller diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 4dee76b..0a88f64 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1133,25 +1133,19 @@ r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb) static void rx_bottom(struct r8152 *tp) { - struct net_device_stats *stats; - struct net_device *netdev; - struct rx_agg *agg; - struct rx_desc *rx_desc; unsigned long flags; struct list_head *cursor, *next; - struct sk_buff *skb; - struct urb *urb; - unsigned pkt_len; - int len_used; - u8 *rx_data; - int ret; - - netdev = tp->netdev; - - stats = rtl8152_get_stats(netdev); spin_lock_irqsave(&tp->rx_lock, flags); list_for_each_safe(cursor, next, &tp->rx_done) { + struct rx_desc *rx_desc; + struct rx_agg *agg; + unsigned pkt_len; + int len_used = 0; + struct urb *urb; + u8 *rx_data; + int ret; + list_del_init(cursor); spin_unlock_irqrestore(&tp->rx_lock, flags); @@ -1160,16 +1154,21 @@ static void rx_bottom(struct r8152 *tp) if (urb->actual_length < ETH_ZLEN) goto submit; - len_used = 0; rx_desc = agg->head; rx_data = agg->head; pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK; len_used += sizeof(struct rx_desc) + pkt_len; while (urb->actual_length >= len_used) { + struct net_device *netdev = tp->netdev; + struct net_device_stats *stats; + struct sk_buff *skb; + if (pkt_len < ETH_ZLEN) break; + stats = rtl8152_get_stats(netdev); + pkt_len -= 4; /* CRC */ rx_data += sizeof(struct rx_desc); -- cgit v0.10.2 From b1379d9a20167c41b682e96841b34aab32ec50a5 Mon Sep 17 00:00:00 2001 From: hayeswang Date: Fri, 16 Aug 2013 16:09:37 +0800 Subject: r8152: adjust tx_bottom function Split some parts of code into another function to simplify tx_bottom(). Use while loop to replace the goto loop. Signed-off-by: Hayes Wang Signed-off-by: David S. 
Miller diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 0a88f64..825edfe 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1131,6 +1131,51 @@ r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb) } } +static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg) +{ + u32 remain; + u8 *tx_data; + + tx_data = agg->head; + agg->skb_num = agg->skb_len = 0; + remain = rx_buf_sz - sizeof(struct tx_desc); + + while (remain >= ETH_ZLEN) { + struct tx_desc *tx_desc; + struct sk_buff *skb; + unsigned int len; + + skb = skb_dequeue(&tp->tx_queue); + if (!skb) + break; + + len = skb->len; + if (remain < len) { + skb_queue_head(&tp->tx_queue, skb); + break; + } + + tx_desc = (struct tx_desc *)tx_data; + tx_data += sizeof(*tx_desc); + + r8152_tx_csum(tp, tx_desc, skb); + memcpy(tx_data, skb->data, len); + agg->skb_num++; + agg->skb_len += len; + dev_kfree_skb_any(skb); + + tx_data = tx_agg_align(tx_data + len); + remain = rx_buf_sz - sizeof(*tx_desc) - + (u32)((void *)tx_data - agg->head); + } + + usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2), + agg->head, (int)(tx_data - (u8 *)agg->head), + (usb_complete_t)write_bulk_callback, agg); + + return usb_submit_urb(agg->urb, GFP_ATOMIC); +} + static void rx_bottom(struct r8152 *tp) { unsigned long flags; @@ -1204,82 +1249,39 @@ submit: static void tx_bottom(struct r8152 *tp) { - struct net_device_stats *stats; - struct net_device *netdev; - struct tx_agg *agg; - unsigned long flags; - u32 remain, total; - u8 *tx_data; int res; - netdev = tp->netdev; - -next_agg: - agg = NULL; - if (skb_queue_empty(&tp->tx_queue)) - return; + do { + struct tx_agg *agg; - agg = r8152_get_tx_agg(tp); - if (!agg) - return; - - tx_data = agg->head; - agg->skb_num = agg->skb_len = 0; - remain = rx_buf_sz - sizeof(struct tx_desc); - total = 0; - - while (remain >= ETH_ZLEN) { - struct tx_desc *tx_desc; - struct sk_buff *skb; - unsigned int len; - - skb = skb_dequeue(&tp->tx_queue); - if (!skb) + if (skb_queue_empty(&tp->tx_queue)) break; - len = skb->len; - if (remain < len) { - skb_queue_head(&tp->tx_queue, skb); + agg = r8152_get_tx_agg(tp); + if (!agg) break; - } - - tx_data = tx_agg_align(tx_data); - tx_desc = (struct tx_desc *)tx_data; - tx_data += sizeof(*tx_desc); - r8152_tx_csum(tp, tx_desc, skb); - memcpy(tx_data, skb->data, len); - agg->skb_num++; - agg->skb_len += len; - dev_kfree_skb_any(skb); - - tx_data += len; - remain = rx_buf_sz - sizeof(*tx_desc) - - (u32)(tx_agg_align(tx_data) - agg->head); - } - - usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2), - agg->head, (int)(tx_data - (u8 *)agg->head), - (usb_complete_t)write_bulk_callback, agg); - res = usb_submit_urb(agg->urb, GFP_ATOMIC); + res = r8152_tx_agg_fill(tp, agg); + if (res) { + struct net_device_stats *stats; + struct net_device *netdev; + unsigned long flags; - stats = rtl8152_get_stats(netdev); + netdev = tp->netdev; + stats = rtl8152_get_stats(netdev); - if (res) { - /* Can we get/handle EPIPE here? 
*/ - if (res == -ENODEV) { - netif_device_detach(netdev); - } else { - netif_warn(tp, tx_err, netdev, - "failed tx_urb %d\n", res); - stats->tx_dropped += agg->skb_num; - spin_lock_irqsave(&tp->tx_lock, flags); - list_add_tail(&agg->list, &tp->tx_free); - spin_unlock_irqrestore(&tp->tx_lock, flags); + if (res == -ENODEV) { + netif_device_detach(netdev); + } else { + netif_warn(tp, tx_err, netdev, + "failed tx_urb %d\n", res); + stats->tx_dropped += agg->skb_num; + spin_lock_irqsave(&tp->tx_lock, flags); + list_add_tail(&agg->list, &tp->tx_free); + spin_unlock_irqrestore(&tp->tx_lock, flags); + } } - return; - } - goto next_agg; + } while (res == 0); } static void bottom_half(unsigned long data) -- cgit v0.10.2 From 7559fb2fc547b3507ba462c6558e291fb21b9cee Mon Sep 17 00:00:00 2001 From: hayeswang Date: Fri, 16 Aug 2013 16:09:38 +0800 Subject: r8152: add comments Add comments. Signed-off-by: Hayes Wang Signed-off-by: David S. Miller diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 825edfe..f3fce41 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -790,6 +790,9 @@ static void read_bulk_callback(struct urb *urb) return; netdev = tp->netdev; + + /* When link down, the driver would cancel all bulks. */ + /* This avoid the re-submitting bulk */ if (!netif_carrier_ok(netdev)) return; @@ -1296,6 +1299,8 @@ static void bottom_half(unsigned long data) if (!test_bit(WORK_ENABLE, &tp->flags)) return; + /* When link down, the driver would cancel all bulks. */ + /* This avoid the re-submitting bulk */ if (!netif_carrier_ok(tp->netdev)) return; -- cgit v0.10.2 From 9c2e24e16fbccf6cc1102442acc4a629f79615a7 Mon Sep 17 00:00:00 2001 From: Pravin B Shelar Date: Mon, 19 Aug 2013 11:22:48 -0700 Subject: vxlan: Restructure vxlan socket apis. Restructure vxlan-socket management APIs so that it can be shared between vxlan and ovs modules. This patch does not change any functionality. Signed-off-by: Pravin B Shelar v6-v7: - get rid of zero refcnt vs from hashtable. Signed-off-by: David S. 
Miller diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 570ad7a..b784ee6 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -188,7 +188,7 @@ static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb) } /* Find VXLAN socket based on network namespace and UDP port */ -static struct vxlan_sock *vxlan_find_port(struct net *net, __be16 port) +static struct vxlan_sock *vxlan_find_sock(struct net *net, __be16 port) { struct vxlan_sock *vs; @@ -205,7 +205,7 @@ static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port) struct vxlan_sock *vs; struct vxlan_dev *vxlan; - vs = vxlan_find_port(net, port); + vs = vxlan_find_sock(net, port); if (!vs) return NULL; @@ -1365,25 +1365,31 @@ static void vxlan_cleanup(unsigned long arg) mod_timer(&vxlan->age_timer, next_timer); } +static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan) +{ + __u32 vni = vxlan->default_dst.remote_vni; + + vxlan->vn_sock = vs; + hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni)); +} + /* Setup stats when device is created */ static int vxlan_init(struct net_device *dev) { struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); struct vxlan_sock *vs; - __u32 vni = vxlan->default_dst.remote_vni; dev->tstats = alloc_percpu(struct pcpu_tstats); if (!dev->tstats) return -ENOMEM; spin_lock(&vn->sock_lock); - vs = vxlan_find_port(dev_net(dev), vxlan->dst_port); + vs = vxlan_find_sock(dev_net(dev), vxlan->dst_port); if (vs) { /* If we have a socket with same port already, reuse it */ atomic_inc(&vs->refcnt); - vxlan->vn_sock = vs; - hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni)); + vxlan_vs_add_dev(vs, vxlan); } else { /* otherwise make new socket outside of RTNL */ dev_hold(dev); @@ -1633,6 +1639,7 @@ static void vxlan_del_work(struct work_struct *work) static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port) { + struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_sock *vs; struct sock *sk; struct sockaddr_in vxlan_addr = { @@ -1644,8 +1651,10 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port) unsigned int h; vs = kmalloc(sizeof(*vs), GFP_KERNEL); - if (!vs) + if (!vs) { + pr_debug("memory alocation failure\n"); return ERR_PTR(-ENOMEM); + } for (h = 0; h < VNI_HASH_SIZE; ++h) INIT_HLIST_HEAD(&vs->vni_list[h]); @@ -1673,57 +1682,57 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port) kfree(vs); return ERR_PTR(rc); } + atomic_set(&vs->refcnt, 1); /* Disable multicast loopback */ inet_sk(sk)->mc_loop = 0; + spin_lock(&vn->sock_lock); + hlist_add_head_rcu(&vs->hlist, vs_head(net, port)); + spin_unlock(&vn->sock_lock); /* Mark socket as an encapsulation socket. 
*/ udp_sk(sk)->encap_type = 1; udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv; udp_encap_enable(); - atomic_set(&vs->refcnt, 1); + return vs; +} + +static struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port) +{ + struct vxlan_net *vn = net_generic(net, vxlan_net_id); + struct vxlan_sock *vs; + + vs = vxlan_socket_create(net, port); + if (!IS_ERR(vs)) + return vs; + spin_lock(&vn->sock_lock); + vs = vxlan_find_sock(net, port); + if (vs) + atomic_inc(&vs->refcnt); + else + vs = ERR_PTR(-EINVAL); + + spin_unlock(&vn->sock_lock); return vs; } /* Scheduled at device creation to bind to a socket */ static void vxlan_sock_work(struct work_struct *work) { - struct vxlan_dev *vxlan - = container_of(work, struct vxlan_dev, sock_work); - struct net_device *dev = vxlan->dev; - struct net *net = dev_net(dev); - __u32 vni = vxlan->default_dst.remote_vni; - __be16 port = vxlan->dst_port; + struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, sock_work); + struct net *net = dev_net(vxlan->dev); struct vxlan_net *vn = net_generic(net, vxlan_net_id); - struct vxlan_sock *nvs, *ovs; - - nvs = vxlan_socket_create(net, port); - if (IS_ERR(nvs)) { - netdev_err(vxlan->dev, "Can not create UDP socket, %ld\n", - PTR_ERR(nvs)); - goto out; - } + __be16 port = vxlan->dst_port; + struct vxlan_sock *nvs; + nvs = vxlan_sock_add(net, port); spin_lock(&vn->sock_lock); - /* Look again to see if can reuse socket */ - ovs = vxlan_find_port(net, port); - if (ovs) { - atomic_inc(&ovs->refcnt); - vxlan->vn_sock = ovs; - hlist_add_head_rcu(&vxlan->hlist, vni_head(ovs, vni)); - spin_unlock(&vn->sock_lock); - - sk_release_kernel(nvs->sock->sk); - kfree(nvs); - } else { - vxlan->vn_sock = nvs; - hlist_add_head_rcu(&nvs->hlist, vs_head(net, port)); - hlist_add_head_rcu(&vxlan->hlist, vni_head(nvs, vni)); - spin_unlock(&vn->sock_lock); - } -out: - dev_put(dev); + if (!IS_ERR(nvs)) + vxlan_vs_add_dev(nvs, vxlan); + spin_unlock(&vn->sock_lock); + + dev_put(vxlan->dev); } static int vxlan_newlink(struct net *net, struct net_device *dev, @@ -1838,7 +1847,8 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head) struct vxlan_dev *vxlan = netdev_priv(dev); spin_lock(&vn->sock_lock); - hlist_del_rcu(&vxlan->hlist); + if (!hlist_unhashed(&vxlan->hlist)) + hlist_del_rcu(&vxlan->hlist); spin_unlock(&vn->sock_lock); list_del(&vxlan->next); -- cgit v0.10.2 From 7ce04758279514ca1d8ebfe322508a4a430fe2c8 Mon Sep 17 00:00:00 2001 From: Pravin B Shelar Date: Mon, 19 Aug 2013 11:22:54 -0700 Subject: vxlan: Restructure vxlan receive. Use iptunnel_pull_header() for better code sharing. Signed-off-by: Pravin B Shelar Signed-off-by: David S. Miller diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index b784ee6..1afb979 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -57,6 +57,7 @@ #define VXLAN_VID_MASK (VXLAN_N_VID - 1) /* IP header + UDP + VXLAN + Ethernet header */ #define VXLAN_HEADROOM (20 + 8 + 8 + 14) +#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr)) #define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. 
*/ @@ -868,15 +869,12 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) __u32 vni; int err; - /* pop off outer UDP header */ - __skb_pull(skb, sizeof(struct udphdr)); - /* Need Vxlan and inner Ethernet header to be present */ - if (!pskb_may_pull(skb, sizeof(struct vxlanhdr))) + if (!pskb_may_pull(skb, VXLAN_HLEN)) goto error; - /* Drop packets with reserved bits set */ - vxh = (struct vxlanhdr *) skb->data; + /* Return packets with reserved bits set */ + vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1); if (vxh->vx_flags != htonl(VXLAN_FLAGS) || (vxh->vx_vni & htonl(0xff))) { netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n", @@ -884,8 +882,6 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) goto error; } - __skb_pull(skb, sizeof(struct vxlanhdr)); - /* Is this VNI defined? */ vni = ntohl(vxh->vx_vni) >> 8; port = inet_sk(sk)->inet_sport; @@ -896,7 +892,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) goto drop; } - if (!pskb_may_pull(skb, ETH_HLEN)) { + if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB))) { vxlan->dev->stats.rx_length_errors++; vxlan->dev->stats.rx_errors++; goto drop; @@ -904,8 +900,6 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) skb_reset_mac_header(skb); - /* Re-examine inner Ethernet packet */ - oip = ip_hdr(skb); skb->protocol = eth_type_trans(skb, vxlan->dev); /* Ignore packet loops (and multicast echo) */ @@ -913,11 +907,12 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) vxlan->dev->dev_addr) == 0) goto drop; + /* Re-examine inner Ethernet packet */ + oip = ip_hdr(skb); if ((vxlan->flags & VXLAN_F_LEARN) && vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source)) goto drop; - __skb_tunnel_rx(skb, vxlan->dev); skb_reset_network_header(skb); /* If the NIC driver gave us an encapsulated packet with @@ -953,9 +948,6 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) return 0; error: - /* Put UDP header back */ - __skb_push(skb, sizeof(struct udphdr)); - return 1; drop: /* Consume bad packet */ -- cgit v0.10.2 From 5cfccc5a47ebcaac5d5fa82a6dcef94a78b1eb6c Mon Sep 17 00:00:00 2001 From: Pravin B Shelar Date: Mon, 19 Aug 2013 11:23:02 -0700 Subject: vxlan: Add vxlan recv demux. Once we have ovs-vxlan functionality, one UDP port can be assigned to kernel-vxlan or ovs-vxlan port. Therefore following patch adds vxlan demux functionality, so that vxlan or ovs module can register for particular port. Signed-off-by: Pravin B Shelar Signed-off-by: David S. 
Miller diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 1afb979..236c445 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -83,8 +83,12 @@ static int vxlan_net_id; static const u8 all_zeros_mac[ETH_ALEN]; +struct vxlan_sock; +typedef void (vxlan_rcv_t)(struct vxlan_sock *vh, struct sk_buff *skb, __be32 key); + /* per UDP socket information */ struct vxlan_sock { + vxlan_rcv_t *rcv; struct hlist_node hlist; struct rcu_head rcu; struct work_struct del_work; @@ -200,16 +204,10 @@ static struct vxlan_sock *vxlan_find_sock(struct net *net, __be16 port) return NULL; } -/* Look up VNI in a per net namespace table */ -static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port) +static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id) { - struct vxlan_sock *vs; struct vxlan_dev *vxlan; - vs = vxlan_find_sock(net, port); - if (!vs) - return NULL; - hlist_for_each_entry_rcu(vxlan, vni_head(vs, id), hlist) { if (vxlan->default_dst.remote_vni == id) return vxlan; @@ -218,6 +216,18 @@ static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port) return NULL; } +/* Look up VNI in a per net namespace table */ +static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port) +{ + struct vxlan_sock *vs; + + vs = vxlan_find_sock(net, port); + if (!vs) + return NULL; + + return vxlan_vs_find_vni(vs, id); +} + /* Fill in neighbour message in skbuff. */ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan, const struct vxlan_fdb *fdb, @@ -861,13 +871,9 @@ static void vxlan_igmp_leave(struct work_struct *work) /* Callback from net/ipv4/udp.c to receive packets */ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) { - struct iphdr *oip; + struct vxlan_sock *vs; struct vxlanhdr *vxh; - struct vxlan_dev *vxlan; - struct pcpu_tstats *stats; __be16 port; - __u32 vni; - int err; /* Need Vxlan and inner Ethernet header to be present */ if (!pskb_may_pull(skb, VXLAN_HLEN)) @@ -882,24 +888,44 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) goto error; } - /* Is this VNI defined? */ - vni = ntohl(vxh->vx_vni) >> 8; + if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB))) + goto drop; + port = inet_sk(sk)->inet_sport; - vxlan = vxlan_find_vni(sock_net(sk), vni, port); - if (!vxlan) { - netdev_dbg(skb->dev, "unknown vni %d port %u\n", - vni, ntohs(port)); + + vs = vxlan_find_sock(sock_net(sk), port); + if (!vs) goto drop; - } - if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB))) { - vxlan->dev->stats.rx_length_errors++; - vxlan->dev->stats.rx_errors++; + vs->rcv(vs, skb, vxh->vx_vni); + return 0; + +drop: + /* Consume bad packet */ + kfree_skb(skb); + return 0; + +error: + /* Return non vxlan pkt */ + return 1; +} + +static void vxlan_rcv(struct vxlan_sock *vs, + struct sk_buff *skb, __be32 vx_vni) +{ + struct iphdr *oip; + struct vxlan_dev *vxlan; + struct pcpu_tstats *stats; + __u32 vni; + int err; + + vni = ntohl(vx_vni) >> 8; + /* Is this VNI defined? 
*/ + vxlan = vxlan_vs_find_vni(vs, vni); + if (!vxlan) goto drop; - } skb_reset_mac_header(skb); - skb->protocol = eth_type_trans(skb, vxlan->dev); /* Ignore packet loops (and multicast echo) */ @@ -946,13 +972,10 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) netif_rx(skb); - return 0; -error: - return 1; + return; drop: /* Consume bad packet */ kfree_skb(skb); - return 0; } static int arp_reduce(struct net_device *dev, struct sk_buff *skb) @@ -1629,7 +1652,8 @@ static void vxlan_del_work(struct work_struct *work) kfree_rcu(vs, rcu); } -static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port) +static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port, + vxlan_rcv_t *rcv) { struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_sock *vs; @@ -1675,6 +1699,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port) return ERR_PTR(rc); } atomic_set(&vs->refcnt, 1); + vs->rcv = rcv; /* Disable multicast loopback */ inet_sk(sk)->mc_loop = 0; @@ -1689,23 +1714,29 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port) return vs; } -static struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port) +static struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, + vxlan_rcv_t *rcv) { struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_sock *vs; - vs = vxlan_socket_create(net, port); + vs = vxlan_socket_create(net, port, rcv); if (!IS_ERR(vs)) return vs; spin_lock(&vn->sock_lock); vs = vxlan_find_sock(net, port); - if (vs) - atomic_inc(&vs->refcnt); - else + if (vs) { + if (vs->rcv == rcv) + atomic_inc(&vs->refcnt); + else + vs = ERR_PTR(-EBUSY); + } + spin_unlock(&vn->sock_lock); + + if (!vs) vs = ERR_PTR(-EINVAL); - spin_unlock(&vn->sock_lock); return vs; } @@ -1718,7 +1749,7 @@ static void vxlan_sock_work(struct work_struct *work) __be16 port = vxlan->dst_port; struct vxlan_sock *nvs; - nvs = vxlan_sock_add(net, port); + nvs = vxlan_sock_add(net, port, vxlan_rcv); spin_lock(&vn->sock_lock); if (!IS_ERR(nvs)) vxlan_vs_add_dev(nvs, vxlan); -- cgit v0.10.2 From 012a5729ff933ecd07e7470a65d84577aef9ae03 Mon Sep 17 00:00:00 2001 From: Pravin B Shelar Date: Mon, 19 Aug 2013 11:23:07 -0700 Subject: vxlan: Extend vxlan handlers for openvswitch. Following patch adds data field to vxlan socket and export vxlan handler api. vh->data is required to store private data per vxlan handler. Signed-off-by: Pravin B Shelar Signed-off-by: David S. 
Miller diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 236c445..aab927b 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -41,6 +41,7 @@ #include #include #include +#include #define VXLAN_VERSION "0.1" @@ -83,20 +84,6 @@ static int vxlan_net_id; static const u8 all_zeros_mac[ETH_ALEN]; -struct vxlan_sock; -typedef void (vxlan_rcv_t)(struct vxlan_sock *vh, struct sk_buff *skb, __be32 key); - -/* per UDP socket information */ -struct vxlan_sock { - vxlan_rcv_t *rcv; - struct hlist_node hlist; - struct rcu_head rcu; - struct work_struct del_work; - atomic_t refcnt; - struct socket *sock; - struct hlist_head vni_list[VNI_HASH_SIZE]; -}; - /* per-network namespace private data for this module */ struct vxlan_net { struct list_head vxlan_list; @@ -813,8 +800,10 @@ static void vxlan_sock_hold(struct vxlan_sock *vs) atomic_inc(&vs->refcnt); } -static void vxlan_sock_release(struct vxlan_net *vn, struct vxlan_sock *vs) +void vxlan_sock_release(struct vxlan_sock *vs) { + struct vxlan_net *vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id); + if (!atomic_dec_and_test(&vs->refcnt)) return; @@ -824,6 +813,7 @@ static void vxlan_sock_release(struct vxlan_net *vn, struct vxlan_sock *vs) queue_work(vxlan_wq, &vs->del_work); } +EXPORT_SYMBOL_GPL(vxlan_sock_release); /* Callback to update multicast group membership when first VNI on * multicast asddress is brought up @@ -832,7 +822,6 @@ static void vxlan_sock_release(struct vxlan_net *vn, struct vxlan_sock *vs) static void vxlan_igmp_join(struct work_struct *work) { struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_join); - struct vxlan_net *vn = net_generic(dev_net(vxlan->dev), vxlan_net_id); struct vxlan_sock *vs = vxlan->vn_sock; struct sock *sk = vs->sock->sk; struct ip_mreqn mreq = { @@ -844,7 +833,7 @@ static void vxlan_igmp_join(struct work_struct *work) ip_mc_join_group(sk, &mreq); release_sock(sk); - vxlan_sock_release(vn, vs); + vxlan_sock_release(vs); dev_put(vxlan->dev); } @@ -852,7 +841,6 @@ static void vxlan_igmp_join(struct work_struct *work) static void vxlan_igmp_leave(struct work_struct *work) { struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_leave); - struct vxlan_net *vn = net_generic(dev_net(vxlan->dev), vxlan_net_id); struct vxlan_sock *vs = vxlan->vn_sock; struct sock *sk = vs->sock->sk; struct ip_mreqn mreq = { @@ -864,7 +852,7 @@ static void vxlan_igmp_leave(struct work_struct *work) ip_mc_leave_group(sk, &mreq); release_sock(sk); - vxlan_sock_release(vn, vs); + vxlan_sock_release(vs); dev_put(vxlan->dev); } @@ -1429,13 +1417,12 @@ static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan) static void vxlan_uninit(struct net_device *dev) { struct vxlan_dev *vxlan = netdev_priv(dev); - struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); struct vxlan_sock *vs = vxlan->vn_sock; vxlan_fdb_delete_default(vxlan); if (vs) - vxlan_sock_release(vn, vs); + vxlan_sock_release(vs); free_percpu(dev->tstats); } @@ -1653,7 +1640,7 @@ static void vxlan_del_work(struct work_struct *work) } static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port, - vxlan_rcv_t *rcv) + vxlan_rcv_t *rcv, void *data) { struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_sock *vs; @@ -1700,6 +1687,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port, } atomic_set(&vs->refcnt, 1); vs->rcv = rcv; + vs->data = data; /* Disable multicast loopback */ inet_sk(sk)->mc_loop = 0; @@ -1714,16 +1702,20 @@ static struct vxlan_sock 
*vxlan_socket_create(struct net *net, __be16 port, return vs; } -static struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, - vxlan_rcv_t *rcv) +struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, + vxlan_rcv_t *rcv, void *data, + bool no_share) { struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_sock *vs; - vs = vxlan_socket_create(net, port, rcv); + vs = vxlan_socket_create(net, port, rcv, data); if (!IS_ERR(vs)) return vs; + if (no_share) /* Return error if sharing is not allowed. */ + return vs; + spin_lock(&vn->sock_lock); vs = vxlan_find_sock(net, port); if (vs) { @@ -1739,6 +1731,7 @@ static struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, return vs; } +EXPORT_SYMBOL_GPL(vxlan_sock_add); /* Scheduled at device creation to bind to a socket */ static void vxlan_sock_work(struct work_struct *work) @@ -1749,7 +1742,7 @@ static void vxlan_sock_work(struct work_struct *work) __be16 port = vxlan->dst_port; struct vxlan_sock *nvs; - nvs = vxlan_sock_add(net, port, vxlan_rcv); + nvs = vxlan_sock_add(net, port, vxlan_rcv, NULL, false); spin_lock(&vn->sock_lock); if (!IS_ERR(nvs)) vxlan_vs_add_dev(nvs, vxlan); diff --git a/include/net/vxlan.h b/include/net/vxlan.h new file mode 100644 index 0000000..43de275 --- /dev/null +++ b/include/net/vxlan.h @@ -0,0 +1,31 @@ +#ifndef __NET_VXLAN_H +#define __NET_VXLAN_H 1 + +#include +#include +#include + +#define VNI_HASH_BITS 10 +#define VNI_HASH_SIZE (1< Date: Mon, 19 Aug 2013 11:23:17 -0700 Subject: vxlan: Factor out vxlan send api. Following patch allows more code sharing between vxlan and ovs-vxlan. Signed-off-by: Pravin B Shelar Signed-off-by: David S. Miller diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index aab927b..f3496e9 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1082,11 +1082,8 @@ static void vxlan_sock_put(struct sk_buff *skb) } /* On transmit, associate with the tunnel socket */ -static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb) +static void vxlan_set_owner(struct sock *sk, struct sk_buff *skb) { - struct vxlan_dev *vxlan = netdev_priv(dev); - struct sock *sk = vxlan->vn_sock->sock->sk; - skb_orphan(skb); sock_hold(sk); skb->sk = sk; @@ -1098,9 +1095,9 @@ static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb) * better and maybe available from hardware * secondary choice is to use jhash on the Ethernet header */ -static __be16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb) +__be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb) { - unsigned int range = (vxlan->port_max - vxlan->port_min) + 1; + unsigned int range = (port_max - port_min) + 1; u32 hash; hash = skb_get_rxhash(skb); @@ -1108,8 +1105,9 @@ static __be16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb) hash = jhash(skb->data, 2 * ETH_ALEN, (__force u32) skb->protocol); - return htons((((u64) hash * range) >> 32) + vxlan->port_min); + return htons((((u64) hash * range) >> 32) + port_min); } +EXPORT_SYMBOL_GPL(vxlan_src_port); static int handle_offloads(struct sk_buff *skb) { @@ -1125,6 +1123,45 @@ static int handle_offloads(struct sk_buff *skb) return 0; } +int vxlan_xmit_skb(struct net *net, struct vxlan_sock *vs, + struct rtable *rt, struct sk_buff *skb, + __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, + __be16 src_port, __be16 dst_port, __be32 vni) +{ + struct vxlanhdr *vxh; + struct udphdr *uh; + int err; + + if (!skb->encapsulation) { + skb_reset_inner_headers(skb); + skb->encapsulation 
= 1; + } + + vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); + vxh->vx_flags = htonl(VXLAN_FLAGS); + vxh->vx_vni = vni; + + __skb_push(skb, sizeof(*uh)); + skb_reset_transport_header(skb); + uh = udp_hdr(skb); + + uh->dest = dst_port; + uh->source = src_port; + + uh->len = htons(skb->len); + uh->check = 0; + + vxlan_set_owner(vs->sock->sk, skb); + + err = handle_offloads(skb); + if (err) + return err; + + return iptunnel_xmit(net, rt, skb, src, dst, + IPPROTO_UDP, tos, ttl, df); +} +EXPORT_SYMBOL_GPL(vxlan_xmit_skb); + /* Bypass encapsulation if the destination is local */ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, struct vxlan_dev *dst_vxlan) @@ -1162,8 +1199,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, struct vxlan_dev *vxlan = netdev_priv(dev); struct rtable *rt; const struct iphdr *old_iph; - struct vxlanhdr *vxh; - struct udphdr *uh; struct flowi4 fl4; __be32 dst; __be16 src_port, dst_port; @@ -1185,11 +1220,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, goto drop; } - if (!skb->encapsulation) { - skb_reset_inner_headers(skb); - skb->encapsulation = 1; - } - /* Need space for new headers (invalidates iph ptr) */ if (skb_cow_head(skb, VXLAN_HEADROOM)) goto drop; @@ -1204,7 +1234,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, if (tos == 1) tos = ip_tunnel_get_dsfield(old_iph, skb); - src_port = vxlan_src_port(vxlan, skb); + src_port = vxlan_src_port(vxlan->port_min, vxlan->port_max, skb); memset(&fl4, 0, sizeof(fl4)); fl4.flowi4_oif = rdst->remote_ifindex; @@ -1221,9 +1251,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, if (rt->dst.dev == dev) { netdev_dbg(dev, "circular route to %pI4\n", &dst); - ip_rt_put(rt); dev->stats.collisions++; - goto tx_error; + goto rt_tx_error; } /* Bypass encapsulation if the destination is local */ @@ -1238,30 +1267,16 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, vxlan_encap_bypass(skb, vxlan, dst_vxlan); return; } - vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); - vxh->vx_flags = htonl(VXLAN_FLAGS); - vxh->vx_vni = htonl(vni << 8); - - __skb_push(skb, sizeof(*uh)); - skb_reset_transport_header(skb); - uh = udp_hdr(skb); - - uh->dest = dst_port; - uh->source = src_port; - - uh->len = htons(skb->len); - uh->check = 0; - - vxlan_set_owner(dev, skb); - - if (handle_offloads(skb)) - goto drop; tos = ip_tunnel_ecn_encap(tos, old_iph, skb); ttl = ttl ? 
: ip4_dst_hoplimit(&rt->dst); - err = iptunnel_xmit(dev_net(dev), rt, skb, fl4.saddr, dst, - IPPROTO_UDP, tos, ttl, df); + err = vxlan_xmit_skb(dev_net(dev), vxlan->vn_sock, rt, skb, + fl4.saddr, dst, tos, ttl, df, + src_port, dst_port, htonl(vni << 8)); + + if (err < 0) + goto rt_tx_error; iptunnel_xmit_stats(err, &dev->stats, dev->tstats); return; @@ -1270,6 +1285,8 @@ drop: dev->stats.tx_dropped++; goto tx_free; +rt_tx_error: + ip_rt_put(rt); tx_error: dev->stats.tx_errors++; tx_free: diff --git a/include/net/vxlan.h b/include/net/vxlan.h index 43de275..ad342e3 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -28,4 +28,12 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, bool no_share); void vxlan_sock_release(struct vxlan_sock *vs); + +int vxlan_xmit_skb(struct net *net, struct vxlan_sock *vs, + struct rtable *rt, struct sk_buff *skb, + __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, + __be16 src_port, __be16 dst_port, __be32 vni); + +__be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb); + #endif -- cgit v0.10.2 From 649c5b8bdd8d5c8c9c50882e3a789030fb99b75c Mon Sep 17 00:00:00 2001 From: Pravin B Shelar Date: Mon, 19 Aug 2013 11:23:22 -0700 Subject: vxlan: Improve vxlan headroom calculation. Rather than having static headroom calculation, adjust headroom according to target device. Signed-off-by: Pravin B Shelar Signed-off-by: David S. Miller diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index f3496e9..73e2557 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1130,6 +1130,7 @@ int vxlan_xmit_skb(struct net *net, struct vxlan_sock *vs, { struct vxlanhdr *vxh; struct udphdr *uh; + int min_headroom; int err; if (!skb->encapsulation) { @@ -1137,6 +1138,14 @@ int vxlan_xmit_skb(struct net *net, struct vxlan_sock *vs, skb->encapsulation = 1; } + min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len + + VXLAN_HLEN + sizeof(struct iphdr); + + /* Need space for new headers (invalidates iph ptr) */ + err = skb_cow_head(skb, min_headroom); + if (unlikely(err)) + return err; + vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); vxh->vx_flags = htonl(VXLAN_FLAGS); vxh->vx_vni = vni; @@ -1220,10 +1229,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, goto drop; } - /* Need space for new headers (invalidates iph ptr) */ - if (skb_cow_head(skb, VXLAN_HEADROOM)) - goto drop; - old_iph = ip_hdr(skb); ttl = vxlan->ttl; -- cgit v0.10.2 From 1eaa81785a70082213d40de14ec13520b5c6fff2 Mon Sep 17 00:00:00 2001 From: Pravin B Shelar Date: Mon, 19 Aug 2013 11:23:29 -0700 Subject: vxlan: Add tx-vlan offload support. Following patch allows transmit side vlan offload for vxlan devices. Signed-off-by: Pravin B Shelar Signed-off-by: David S. Miller diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 73e2557..b9401b5 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -1139,13 +1140,23 @@ int vxlan_xmit_skb(struct net *net, struct vxlan_sock *vs, } min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len - + VXLAN_HLEN + sizeof(struct iphdr); + + VXLAN_HLEN + sizeof(struct iphdr) + + (vlan_tx_tag_present(skb) ? 
VLAN_HLEN : 0); /* Need space for new headers (invalidates iph ptr) */ err = skb_cow_head(skb, min_headroom); if (unlikely(err)) return err; + if (vlan_tx_tag_present(skb)) { + if (WARN_ON(!__vlan_put_tag(skb, + skb->vlan_proto, + vlan_tx_tag_get(skb)))) + return -ENOMEM; + + skb->vlan_tci = 0; + } + vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); vxh->vx_flags = htonl(VXLAN_FLAGS); vxh->vx_vni = vni; @@ -1560,8 +1571,11 @@ static void vxlan_setup(struct net_device *dev) dev->features |= NETIF_F_RXCSUM; dev->features |= NETIF_F_GSO_SOFTWARE; + dev->vlan_features = dev->features; + dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM; dev->hw_features |= NETIF_F_GSO_SOFTWARE; + dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; -- cgit v0.10.2 From 58264848a5a7b91195f43c4729072e8cc980288d Mon Sep 17 00:00:00 2001 From: Pravin B Shelar Date: Mon, 19 Aug 2013 11:23:34 -0700 Subject: openvswitch: Add vxlan tunneling support. Following patch adds vxlan vport type for openvswitch using vxlan api. So now there is vxlan dependency for openvswitch. CC: Jesse Gross Signed-off-by: Pravin B Shelar Acked-by: Jesse Gross Signed-off-by: David S. Miller diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index c55efaa..52490b0 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -165,6 +165,7 @@ enum ovs_vport_type { OVS_VPORT_TYPE_NETDEV, /* network device */ OVS_VPORT_TYPE_INTERNAL, /* network device implemented by datapath */ OVS_VPORT_TYPE_GRE, /* GRE tunnel. */ + OVS_VPORT_TYPE_VXLAN, /* VXLAN tunnel. */ __OVS_VPORT_TYPE_MAX }; @@ -211,6 +212,16 @@ enum ovs_vport_attr { #define OVS_VPORT_ATTR_MAX (__OVS_VPORT_ATTR_MAX - 1) +/* OVS_VPORT_ATTR_OPTIONS attributes for tunnels. + */ +enum { + OVS_TUNNEL_ATTR_UNSPEC, + OVS_TUNNEL_ATTR_DST_PORT, /* 16-bit UDP port, used by L4 tunnels. */ + __OVS_TUNNEL_ATTR_MAX +}; + +#define OVS_TUNNEL_ATTR_MAX (__OVS_TUNNEL_ATTR_MAX - 1) + /* Flows. */ #define OVS_FLOW_FAMILY "ovs_flow" diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig index 27ee56b..bed30e6 100644 --- a/net/openvswitch/Kconfig +++ b/net/openvswitch/Kconfig @@ -40,3 +40,16 @@ config OPENVSWITCH_GRE Say N to exclude this support and reduce the binary size. If unsure, say Y. + +config OPENVSWITCH_VXLAN + bool "Open vSwitch VXLAN tunneling support" + depends on INET + depends on OPENVSWITCH + depends on VXLAN && !(OPENVSWITCH=y && VXLAN=m) + default y + ---help--- + If you say Y here, then the Open vSwitch will be able create vxlan vport. + + Say N to exclude this support and reduce the binary size. + + If unsure, say Y. diff --git a/net/openvswitch/Makefile b/net/openvswitch/Makefile index 01bddb2..82e4ee5 100644 --- a/net/openvswitch/Makefile +++ b/net/openvswitch/Makefile @@ -13,3 +13,7 @@ openvswitch-y := \ vport-gre.o \ vport-internal_dev.o \ vport-netdev.o + +ifneq ($(CONFIG_OPENVSWITCH_VXLAN),) +openvswitch-y += vport-vxlan.o +endif diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c new file mode 100644 index 0000000..36848bd --- /dev/null +++ b/net/openvswitch/vport-vxlan.c @@ -0,0 +1,204 @@ +/* + * Copyright (c) 2013 Nicira, Inc. + * Copyright (c) 2013 Cisco Systems, Inc. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "datapath.h" +#include "vport.h" + +/** + * struct vxlan_port - Keeps track of open UDP ports + * @vs: vxlan_sock created for the port. + * @name: vport name. + */ +struct vxlan_port { + struct vxlan_sock *vs; + char name[IFNAMSIZ]; +}; + +static inline struct vxlan_port *vxlan_vport(const struct vport *vport) +{ + return vport_priv(vport); +} + +/* Called with rcu_read_lock and BH disabled. */ +static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, __be32 vx_vni) +{ + struct ovs_key_ipv4_tunnel tun_key; + struct vport *vport = vs->data; + struct iphdr *iph; + __be64 key; + + /* Save outer tunnel values */ + iph = ip_hdr(skb); + key = cpu_to_be64(ntohl(vx_vni) >> 8); + ovs_flow_tun_key_init(&tun_key, iph, key, TUNNEL_KEY); + + ovs_vport_receive(vport, skb, &tun_key); +} + +static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb) +{ + struct vxlan_port *vxlan_port = vxlan_vport(vport); + __be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport; + + if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(dst_port))) + return -EMSGSIZE; + return 0; +} + +static void vxlan_tnl_destroy(struct vport *vport) +{ + struct vxlan_port *vxlan_port = vxlan_vport(vport); + + vxlan_sock_release(vxlan_port->vs); + + ovs_vport_deferred_free(vport); +} + +static struct vport *vxlan_tnl_create(const struct vport_parms *parms) +{ + struct net *net = ovs_dp_get_net(parms->dp); + struct nlattr *options = parms->options; + struct vxlan_port *vxlan_port; + struct vxlan_sock *vs; + struct vport *vport; + struct nlattr *a; + u16 dst_port; + int err; + + if (!options) { + err = -EINVAL; + goto error; + } + a = nla_find_nested(options, OVS_TUNNEL_ATTR_DST_PORT); + if (a && nla_len(a) == sizeof(u16)) { + dst_port = nla_get_u16(a); + } else { + /* Require destination port from userspace. 
*/ + err = -EINVAL; + goto error; + } + + vport = ovs_vport_alloc(sizeof(struct vxlan_port), + &ovs_vxlan_vport_ops, parms); + if (IS_ERR(vport)) + return vport; + + vxlan_port = vxlan_vport(vport); + strncpy(vxlan_port->name, parms->name, IFNAMSIZ); + + vs = vxlan_sock_add(net, htons(dst_port), vxlan_rcv, vport, true); + if (IS_ERR(vs)) { + ovs_vport_free(vport); + return (void *)vs; + } + vxlan_port->vs = vs; + + return vport; + +error: + return ERR_PTR(err); +} + +static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb) +{ + struct net *net = ovs_dp_get_net(vport->dp); + struct vxlan_port *vxlan_port = vxlan_vport(vport); + __be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport; + struct rtable *rt; + struct flowi4 fl; + __be16 src_port; + int port_min; + int port_max; + __be16 df; + int err; + + if (unlikely(!OVS_CB(skb)->tun_key)) { + err = -EINVAL; + goto error; + } + + /* Route lookup */ + memset(&fl, 0, sizeof(fl)); + fl.daddr = OVS_CB(skb)->tun_key->ipv4_dst; + fl.saddr = OVS_CB(skb)->tun_key->ipv4_src; + fl.flowi4_tos = RT_TOS(OVS_CB(skb)->tun_key->ipv4_tos); + fl.flowi4_mark = skb->mark; + fl.flowi4_proto = IPPROTO_UDP; + + rt = ip_route_output_key(net, &fl); + if (IS_ERR(rt)) { + err = PTR_ERR(rt); + goto error; + } + + df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? + htons(IP_DF) : 0; + + skb->local_df = 1; + + inet_get_local_port_range(&port_min, &port_max); + src_port = vxlan_src_port(port_min, port_max, skb); + + err = vxlan_xmit_skb(net, vxlan_port->vs, rt, skb, + fl.saddr, OVS_CB(skb)->tun_key->ipv4_dst, + OVS_CB(skb)->tun_key->ipv4_tos, + OVS_CB(skb)->tun_key->ipv4_ttl, df, + src_port, dst_port, + htonl(be64_to_cpu(OVS_CB(skb)->tun_key->tun_id) << 8)); + if (err < 0) + ip_rt_put(rt); +error: + return err; +} + +static const char *vxlan_get_name(const struct vport *vport) +{ + struct vxlan_port *vxlan_port = vxlan_vport(vport); + return vxlan_port->name; +} + +const struct vport_ops ovs_vxlan_vport_ops = { + .type = OVS_VPORT_TYPE_VXLAN, + .create = vxlan_tnl_create, + .destroy = vxlan_tnl_destroy, + .get_name = vxlan_get_name, + .get_options = vxlan_get_options, + .send = vxlan_tnl_send, +}; diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index d4c7fa0..d69e0c0 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c @@ -42,6 +42,9 @@ static const struct vport_ops *vport_ops_list[] = { #ifdef CONFIG_OPENVSWITCH_GRE &ovs_gre_vport_ops, #endif +#ifdef CONFIG_OPENVSWITCH_VXLAN + &ovs_vxlan_vport_ops, +#endif }; /* Protected by RCU read lock for reading, ovs_mutex for writing. */ diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h index 376045c..1a9fbce 100644 --- a/net/openvswitch/vport.h +++ b/net/openvswitch/vport.h @@ -199,6 +199,7 @@ void ovs_vport_record_error(struct vport *, enum vport_err_type err_type); extern const struct vport_ops ovs_netdev_vport_ops; extern const struct vport_ops ovs_internal_vport_ops; extern const struct vport_ops ovs_gre_vport_ops; +extern const struct vport_ops ovs_vxlan_vport_ops; static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len) -- cgit v0.10.2 From 15ca140f18534408cdf64d74e843e10cdca707fd Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Fri, 16 Aug 2013 19:07:13 -0400 Subject: qlcnic: Enable support for 844X adapter Signed-off-by: Manish Chopra Signed-off-by: Himanshu Madhani Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 7387354..9ae5d2e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -1972,9 +1972,11 @@ extern const struct ethtool_ops qlcnic_ethtool_failed_ops; __func__, ##_args); \ } while (0) -#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030 +#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020 +#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030 #define PCI_DEVICE_ID_QLOGIC_VF_QLE834X 0x8430 -#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020 +#define PCI_DEVICE_ID_QLOGIC_QLE844X 0x8040 +#define PCI_DEVICE_ID_QLOGIC_VF_QLE844X 0x8440 static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter) { @@ -1988,6 +1990,8 @@ static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter) bool status; status = ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) || + (device == PCI_DEVICE_ID_QLOGIC_QLE844X) || + (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) || (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) ? true : false; return status; @@ -2001,7 +2005,11 @@ static inline bool qlcnic_sriov_pf_check(struct qlcnic_adapter *adapter) static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter) { unsigned short device = adapter->pdev->device; + bool status; + + status = ((device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) || + (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X)) ? true : false; - return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false; + return status; } #endif /* __QLCNIC_H_ */ diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index f807f3b..cec0908 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -147,10 +147,7 @@ static inline u8 qlcnic_mac_hash(u64 mac) static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter, u16 handle, u8 ring_id) { - unsigned short device = adapter->pdev->device; - - if ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) || - (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) + if (qlcnic_83xx_check(adapter)) return handle | (ring_id << 15); else return handle; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index a780b73..1f97a73 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -100,6 +100,8 @@ static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = { ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X), ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X), ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X), + ENTRY(PCI_DEVICE_ID_QLOGIC_QLE844X), + ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE844X), {0,} }; @@ -146,6 +148,11 @@ static const u32 qlcnic_reg_tbl[] = { static const struct qlcnic_board_info qlcnic_boards[] = { { PCI_VENDOR_ID_QLOGIC, + PCI_DEVICE_ID_QLOGIC_QLE844X, + 0x0, + 0x0, + "8400 series 10GbE Converged Network Adapter (TCP/IP Networking)" }, + { PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_QLE834X, PCI_VENDOR_ID_QLOGIC, 0x24e, @@ -829,7 +836,9 @@ static void qlcnic_get_bar_length(u32 dev_id, ulong *bar) *bar = QLCNIC_82XX_BAR0_LENGTH; break; case PCI_DEVICE_ID_QLOGIC_QLE834X: + case PCI_DEVICE_ID_QLOGIC_QLE844X: case PCI_DEVICE_ID_QLOGIC_VF_QLE834X: + case PCI_DEVICE_ID_QLOGIC_VF_QLE844X: *bar = QLCNIC_83XX_BAR0_LENGTH; break; default: @@ -2048,9 +2057,11 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ahw->reg_tbl = (u32 *) qlcnic_reg_tbl; break; case PCI_DEVICE_ID_QLOGIC_QLE834X: + case 
PCI_DEVICE_ID_QLOGIC_QLE844X: qlcnic_83xx_register_map(ahw); break; case PCI_DEVICE_ID_QLOGIC_VF_QLE834X: + case PCI_DEVICE_ID_QLOGIC_VF_QLE844X: qlcnic_sriov_vf_register_map(ahw); break; default: -- cgit v0.10.2 From 58945e1bd6e7ed8d3637c655ebdfdc34fa996456 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Fri, 16 Aug 2013 19:07:14 -0400 Subject: qlcnic: Add PVID support for 84xx adapters o 84xx adapters support VLAN stripping for PVID. Packets don't have VLAN tag inserted in case of PVID. So packet should follow non vlan path. o Use capability bit to set PVID mode. Signed-off-by: Manish Chopra Signed-off-by: Himanshu Madhani Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index dd22ef3..b89b074 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -404,6 +404,7 @@ enum qlcnic_83xx_states { #define QLC_83XX_MAX_MC_COUNT 38 #define QLC_83XX_MAX_UC_COUNT 4096 +#define QLC_83XX_PVID_STRIP_CAPABILITY BIT_22 #define QLC_83XX_GET_FUNC_MODE_FROM_NPAR_INFO(val) (val & 0x80000000) #define QLC_83XX_GET_LRO_CAPABILITY(val) (val & 0x20) #define QLC_83XX_GET_LSO_CAPABILITY(val) (val & 0x40) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index dc24979..b2fbefc 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -398,10 +398,14 @@ int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *adapter, } static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter, - struct qlcnic_cmd_args *cmd) + struct qlcnic_cmd_args *cmd, u32 cap) { - adapter->rx_pvid = (cmd->rsp.arg[1] >> 16) & 0xffff; - adapter->flags &= ~QLCNIC_TAGGING_ENABLED; + if (cap & QLC_83XX_PVID_STRIP_CAPABILITY) { + adapter->rx_pvid = 0; + } else { + adapter->rx_pvid = (cmd->rsp.arg[1] >> 16) & 0xffff; + adapter->flags &= ~QLCNIC_TAGGING_ENABLED; + } return 0; } @@ -432,12 +436,14 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter, return 0; } -static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter) +static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter, + struct qlcnic_info *info) { struct qlcnic_sriov *sriov = adapter->ahw->sriov; struct qlcnic_cmd_args cmd; - int ret; + int ret, cap; + cap = info->capabilities; ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL); if (ret) return ret; @@ -453,7 +459,7 @@ static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter) ret = qlcnic_sriov_set_guest_vlan_mode(adapter, &cmd); break; case QLC_PVID_MODE: - ret = qlcnic_sriov_set_pvid_mode(adapter, &cmd); + ret = qlcnic_sriov_set_pvid_mode(adapter, &cmd, cap); break; } } @@ -476,7 +482,7 @@ static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter) if (err) return -EIO; - err = qlcnic_sriov_get_vf_acl(adapter); + err = qlcnic_sriov_get_vf_acl(adapter, &nic_info); if (err) return err; -- cgit v0.10.2 From 77bead466cfb0bc352e424eacdd17700aca851b3 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Fri, 16 Aug 2013 19:07:15 -0400 Subject: qlcnic: Loopback Inter Driver Communication AEN handler o Loopback initiator function drivers should process loopback time extend AEN. These AENs are triggered by the loopback time extend mailbox command issued by the target function drivers. 
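
The mechanism described above can be summarized with a small sketch before the diff. This is not the driver's code: every identifier below (lb_state, wait_for_lb_completion, POLL_INTERVAL_MS, BASE_WAIT_COUNT) is hypothetical and the constants are assumed values. The idea is simply that the AEN handler records how many extra seconds the firmware requested, and the polling loop converts those seconds into additional iterations before giving up.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

#define POLL_INTERVAL_MS	500	/* assumed polling period, in milliseconds */
#define BASE_WAIT_COUNT		30	/* assumed default iteration budget */

/* Hypothetical state shared between the AEN handler and the waiter. */
struct lb_state {
	bool completed;		/* set when the completion AEN arrives */
	u32 extend_seconds;	/* set by the time-extend AEN handler  */
};

/* Poll until the loopback operation completes, honoring time-extend
 * requests from firmware by enlarging the wait budget instead of
 * timing out early.
 */
static int wait_for_lb_completion(struct lb_state *st)
{
	u32 max_wait = BASE_WAIT_COUNT;
	u32 loop = 0;

	while (!st->completed) {
		msleep(POLL_INTERVAL_MS);

		if (st->extend_seconds) {
			/* convert requested seconds into extra iterations */
			max_wait += st->extend_seconds * 1000 / POLL_INTERVAL_MS;
			st->extend_seconds = 0;
		}

		if (loop++ > max_wait)
			return -ETIMEDOUT;
	}

	return 0;
}

The patch below applies the same arithmetic to the existing QLC_83XX_LB_WAIT_COUNT budget via qlcnic_extend_lb_idc_cmpltn_wait(), recording the requested seconds in ahw->extend_lb_time and using QLC_83XX_LB_MSLEEP_COUNT as the per-iteration interval.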
Signed-off-by: Manish Chopra Signed-off-by: Himanshu Madhani Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 9ae5d2e..86dd695 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -467,6 +467,7 @@ struct qlcnic_hardware_context { u32 mbox_aen[QLC_83XX_MBX_AEN_CNT]; u32 mbox_reg[4]; struct qlcnic_mailbox *mailbox; + u8 extend_lb_time; }; struct qlcnic_adapter_stats { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index e53caaa..c5daf78 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -860,9 +860,9 @@ static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter, void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter) { + struct qlcnic_hardware_context *ahw = adapter->ahw; u32 event[QLC_83XX_MBX_AEN_CNT]; int i; - struct qlcnic_hardware_context *ahw = adapter->ahw; for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++) event[i] = readl(QLCNIC_MBX_FW(ahw, i)); @@ -882,6 +882,7 @@ void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter) &adapter->idc_aen_work, 0); break; case QLCNIC_MBX_TIME_EXTEND_EVENT: + ahw->extend_lb_time = event[1] >> 8 & 0xf; break; case QLCNIC_MBX_BC_EVENT: qlcnic_sriov_handle_bc_event(adapter, event[1]); @@ -1706,13 +1707,28 @@ fail_diag_alloc: return ret; } +static void qlcnic_extend_lb_idc_cmpltn_wait(struct qlcnic_adapter *adapter, + u32 *max_wait_count) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + int temp; + + netdev_info(adapter->netdev, "Recieved loopback IDC time extend event for 0x%x seconds\n", + ahw->extend_lb_time); + temp = ahw->extend_lb_time * 1000; + *max_wait_count += temp / QLC_83XX_LB_MSLEEP_COUNT; + ahw->extend_lb_time = 0; +} + int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct net_device *netdev = adapter->netdev; + u32 config, max_wait_count; int status = 0, loop = 0; - u32 config; + ahw->extend_lb_time = 0; + max_wait_count = QLC_83XX_LB_WAIT_COUNT; status = qlcnic_83xx_get_port_config(adapter); if (status) return status; @@ -1754,9 +1770,14 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); return -EBUSY; } - if (loop++ > QLC_83XX_LB_WAIT_COUNT) { - netdev_err(netdev, - "Did not receive IDC completion AEN\n"); + + if (ahw->extend_lb_time) + qlcnic_extend_lb_idc_cmpltn_wait(adapter, + &max_wait_count); + + if (loop++ > max_wait_count) { + netdev_err(netdev, "%s: Did not receive loopback IDC completion AEN\n", + __func__); clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); qlcnic_83xx_clear_lb_mode(adapter, mode); return -ETIMEDOUT; @@ -1771,10 +1792,12 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode) { struct qlcnic_hardware_context *ahw = adapter->ahw; + u32 config = ahw->port_config, max_wait_count; struct net_device *netdev = adapter->netdev; int status = 0, loop = 0; - u32 config = ahw->port_config; + ahw->extend_lb_time = 0; + max_wait_count = QLC_83XX_LB_WAIT_COUNT; set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); if (mode == QLCNIC_ILB_MODE) ahw->port_config &= ~QLC_83XX_CFG_LOOPBACK_HSS; @@ -1802,9 +1825,13 @@ int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 
mode) return -EBUSY; } - if (loop++ > QLC_83XX_LB_WAIT_COUNT) { - netdev_err(netdev, - "Did not receive IDC completion AEN\n"); + if (ahw->extend_lb_time) + qlcnic_extend_lb_idc_cmpltn_wait(adapter, + &max_wait_count); + + if (loop++ > max_wait_count) { + netdev_err(netdev, "%s: Did not receive loopback IDC completion AEN\n", + __func__); clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); return -ETIMEDOUT; } -- cgit v0.10.2 From fef349ce37b2edafea34a73d4e8a719214fc6ffc Mon Sep 17 00:00:00 2001 From: Pratik Pujar Date: Fri, 16 Aug 2013 19:07:16 -0400 Subject: qlcnic: Add support for 84xx adapters to load firmware from file o Use appropriate firmware image file name based on device IDs. Signed-off-by: Pratik Pujar Signed-off-by: Himanshu Madhani Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index b89b074..d4c58c6 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -84,9 +84,11 @@ /* Firmware image definitions */ #define QLC_83XX_BOOTLOADER_FLASH_ADDR 0x10000 #define QLC_83XX_FW_FILE_NAME "83xx_fw.bin" +#define QLC_84XX_FW_FILE_NAME "84xx_fw.bin" #define QLC_83XX_BOOT_FROM_FLASH 0 #define QLC_83XX_BOOT_FROM_FILE 0x12345678 +#define QLC_FW_FILE_NAME_LEN 20 #define QLC_83XX_MAX_RESET_SEQ_ENTRIES 16 #define QLC_83XX_MBX_POST_BC_OP 0x1 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index c97e2e0..f23e667 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -1948,12 +1948,36 @@ static void qlcnic_83xx_init_hw(struct qlcnic_adapter *p_dev) dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__); } +static inline void qlcnic_83xx_get_fw_file_name(struct qlcnic_adapter *adapter, + char *file_name) +{ + struct pci_dev *pdev = adapter->pdev; + + memset(file_name, 0, QLC_FW_FILE_NAME_LEN); + + switch (pdev->device) { + case PCI_DEVICE_ID_QLOGIC_QLE834X: + strncpy(file_name, QLC_83XX_FW_FILE_NAME, + QLC_FW_FILE_NAME_LEN); + break; + case PCI_DEVICE_ID_QLOGIC_QLE844X: + strncpy(file_name, QLC_84XX_FW_FILE_NAME, + QLC_FW_FILE_NAME_LEN); + break; + default: + dev_err(&pdev->dev, "%s: Invalid device id\n", + __func__); + } +} + static int qlcnic_83xx_load_fw_image_from_host(struct qlcnic_adapter *adapter) { + char fw_file_name[QLC_FW_FILE_NAME_LEN]; int err = -EIO; - if (request_firmware(&adapter->ahw->fw_info.fw, - QLC_83XX_FW_FILE_NAME, &(adapter->pdev->dev))) { + qlcnic_83xx_get_fw_file_name(adapter, fw_file_name); + if (request_firmware(&adapter->ahw->fw_info.fw, fw_file_name, + &(adapter->pdev->dev))) { dev_err(&adapter->pdev->dev, "No file FW image, loading flash FW image.\n"); QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID, -- cgit v0.10.2 From 4cffa13d2d6c746bcbe460facaa6a9f22e9cda1f Mon Sep 17 00:00:00 2001 From: Himanshu Madhani Date: Fri, 16 Aug 2013 19:07:17 -0400 Subject: qlcnic: Update version to 5.3.47 Signed-off-by: Himanshu Madhani Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 86dd695..3dcc666 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -36,9 +36,9 @@ #include "qlcnic_83xx_hw.h" #define _QLCNIC_LINUX_MAJOR 5 -#define _QLCNIC_LINUX_MINOR 2 -#define _QLCNIC_LINUX_SUBVERSION 46 -#define QLCNIC_LINUX_VERSIONID "5.2.46" +#define _QLCNIC_LINUX_MINOR 3 +#define _QLCNIC_LINUX_SUBVERSION 47 +#define QLCNIC_LINUX_VERSIONID "5.3.47" #define QLCNIC_DRV_IDC_VER 0x01 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) -- cgit v0.10.2 From 84ce1ddfefc3d5a8af5ede6fe16546c143117616 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Fri, 16 Aug 2013 21:59:54 +0200 Subject: 6lowpan: init ipv6hdr buffer to zero This patch simplify the handling to set fields inside of struct ipv6hdr to zero. Instead of setting some memory regions with memset to zero we initialize the whole ipv6hdr to zero. This is a simplification for parsing the 6lowpan header for the upcomming patches. Signed-off-by: Alexander Aring Reviewed-by: Werner Almesberger Signed-off-by: David S. Miller diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c index 3b9d5f2..92429db 100644 --- a/net/ieee802154/6lowpan.c +++ b/net/ieee802154/6lowpan.c @@ -223,10 +223,6 @@ lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr, if (prefcount > 0) memcpy(ipaddr, prefix, prefcount); - if (prefcount + postcount < 16) - memset(&ipaddr->s6_addr[prefcount], 0, - 16 - (prefcount + postcount)); - if (postcount > 0) { memcpy(&ipaddr->s6_addr[16 - postcount], skb->data, postcount); skb_pull(skb, postcount); @@ -723,7 +719,7 @@ frame_err: static int lowpan_process_data(struct sk_buff *skb) { - struct ipv6hdr hdr; + struct ipv6hdr hdr = {}; u8 tmp, iphc0, iphc1, num_context = 0; u8 *_saddr, *_daddr; int err; @@ -868,8 +864,6 @@ lowpan_process_data(struct sk_buff *skb) hdr.priority = ((tmp >> 2) & 0x0f); hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30); - hdr.flow_lbl[1] = 0; - hdr.flow_lbl[2] = 0; break; /* * Flow Label carried in-line @@ -885,10 +879,6 @@ lowpan_process_data(struct sk_buff *skb) break; /* Traffic Class and Flow Label are elided */ case 3: /* 11b */ - hdr.priority = 0; - hdr.flow_lbl[0] = 0; - hdr.flow_lbl[1] = 0; - hdr.flow_lbl[2] = 0; break; default: break; -- cgit v0.10.2 From 31afe1f73e46221650acfcb411e6949f4a8f7571 Mon Sep 17 00:00:00 2001 From: David Hauweele Date: Fri, 16 Aug 2013 21:59:55 +0200 Subject: 6lowpan: Fix fragmentation with link-local compressed addresses When a new 6lowpan fragment is received, a skbuff is allocated for the reassembled packet. However when a 6lowpan packet compresses link-local addresses based on link-layer addresses, the processing function relies on the skb mac control block to find the related link-layer address. This patch copies the control block from the first fragment into the newly allocated skb to keep a trace of the link-layer addresses in case of a link-local compressed address. Edit: small changes on comment issue Signed-off-by: David Hauweele Signed-off-by: Alexander Aring Reviewed-by: Werner Almesberger Signed-off-by: David S. 
Miller diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c index 92429db..632b3fd 100644 --- a/net/ieee802154/6lowpan.c +++ b/net/ieee802154/6lowpan.c @@ -698,6 +698,12 @@ lowpan_alloc_new_frame(struct sk_buff *skb, u16 len, u16 tag) skb_reserve(frame->skb, sizeof(struct ipv6hdr)); skb_put(frame->skb, frame->length); + /* copy the first control block to keep a + * trace of the link-layer addresses in case + * of a link-local compressed address + */ + memcpy(frame->skb->cb, skb->cb, sizeof(skb->cb)); + init_timer(&frame->timer); /* time out is the same as for ipv6 - 60 sec */ frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT; -- cgit v0.10.2 From 4666669fc3b83f0d01b08c6b17432563f2eedcba Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Fri, 16 Aug 2013 21:59:56 +0200 Subject: 6lowpan: introduce lowpan_fetch_skb function This patch adds a helper function to parse the ipv6 header to a 6lowpan header in stream. This function checks first if we can pull data with a specific length from a skb. If this seems to be okay, we copy skb data to a destination pointer and run skb_pull. Signed-off-by: Alexander Aring Reviewed-by: Werner Almesberger Signed-off-by: David S. Miller diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h index 4b8f917..a636545 100644 --- a/net/ieee802154/6lowpan.h +++ b/net/ieee802154/6lowpan.h @@ -230,4 +230,16 @@ dest = 16 bit inline */ #define LOWPAN_NHC_UDP_CS_P_11 0xF3 /* source & dest = 0xF0B + 4bit inline */ +static inline bool lowpan_fetch_skb(struct sk_buff *skb, + void *data, const unsigned int len) +{ + if (unlikely(!pskb_may_pull(skb, len))) + return true; + + skb_copy_from_linear_data(skb, data, len); + skb_pull(skb, len); + + return false; +} + #endif /* __6LOWPAN_H__ */ -- cgit v0.10.2 From 84c2e7bcf531a3881d602035243eda80e62d700a Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Fri, 16 Aug 2013 21:59:57 +0200 Subject: 6lowpan: add function to uncompress multicast addr Add function to uncompress multicast address. This function split the uncompress function for a multicast address in a seperate function. To uncompress a multicast address is different than a other non-multicasts addresses according to rfc6282. Signed-off-by: Alexander Aring Reviewed-by: Werner Almesberger Signed-off-by: David S. Miller diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c index 632b3fd..9aea7ce 100644 --- a/net/ieee802154/6lowpan.c +++ b/net/ieee802154/6lowpan.c @@ -88,15 +88,6 @@ static const u8 lowpan_unc_llconf[] = {0x0f, 0x28, 0x22, 0x20}; */ static const u8 lowpan_unc_ctxconf[] = {0x00, 0x88, 0x82, 0x80}; -/* - * Uncompression of ctx-base - * 0 -> 0 bits from packet - * 1 -> 2 bytes from prefix - bunch of zeroes 5 from packet - * 2 -> 2 bytes from prefix - zeroes + 3 from packet - * 3 -> 2 bytes from prefix - infer 1 bytes from lladdr - */ -static const u8 lowpan_unc_mxconf[] = {0x0f, 0x25, 0x23, 0x21}; - /* Link local prefix */ static const u8 lowpan_llprefix[] = {0xfe, 0x80}; @@ -240,6 +231,63 @@ lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr, return 0; } +/* Uncompress function for multicast destination address, + * when M bit is set. + */ +static int +lowpan_uncompress_multicast_daddr(struct sk_buff *skb, + struct in6_addr *ipaddr, + const u8 dam) +{ + bool fail; + + switch (dam) { + case LOWPAN_IPHC_DAM_00: + /* 00: 128 bits. The full address + * is carried in-line. + */ + fail = lowpan_fetch_skb(skb, ipaddr->s6_addr, 16); + break; + case LOWPAN_IPHC_DAM_01: + /* 01: 48 bits. 
The address takes + * the form ffXX::00XX:XXXX:XXXX. + */ + ipaddr->s6_addr[0] = 0xFF; + fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[1], 1); + fail |= lowpan_fetch_skb(skb, &ipaddr->s6_addr[11], 5); + break; + case LOWPAN_IPHC_DAM_10: + /* 10: 32 bits. The address takes + * the form ffXX::00XX:XXXX. + */ + ipaddr->s6_addr[0] = 0xFF; + fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[1], 1); + fail |= lowpan_fetch_skb(skb, &ipaddr->s6_addr[13], 3); + break; + case LOWPAN_IPHC_DAM_11: + /* 11: 8 bits. The address takes + * the form ff02::00XX. + */ + ipaddr->s6_addr[0] = 0xFF; + ipaddr->s6_addr[1] = 0x02; + fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[15], 1); + break; + default: + pr_debug("DAM value has a wrong value: 0x%x\n", dam); + return -EINVAL; + } + + if (fail) { + pr_debug("Failed to fetch skb data\n"); + return -EIO; + } + + lowpan_raw_dump_inline(NULL, "Reconstructed ipv6 multicast addr is:\n", + ipaddr->s6_addr, 16); + + return 0; +} + static void lowpan_compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb) { @@ -927,16 +975,8 @@ lowpan_process_data(struct sk_buff *skb) pr_debug("dest: context-based mcast compression\n"); /* TODO: implement this */ } else { - u8 prefix[] = {0xff, 0x02}; - - pr_debug("dest: non context-based mcast compression\n"); - if (0 < tmp && tmp < 3) { - if (lowpan_fetch_skb_u8(skb, &prefix[1])) - goto drop; - } - - err = lowpan_uncompress_addr(skb, &hdr.daddr, prefix, - lowpan_unc_mxconf[tmp], NULL); + err = lowpan_uncompress_multicast_daddr( + skb, &hdr.daddr, tmp); if (err) goto drop; } -- cgit v0.10.2 From ce2463b283a0cb63e0e4de5e7d971b4c92be542a Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Fri, 16 Aug 2013 21:59:58 +0200 Subject: 6lowpan: lowpan_uncompress_addr with address_mode This patch drops the pre and postcount calculation from the lowpan_uncompress_addr function.We use instead a switch/case over address_mode value. The original implementation has several bugs in this function and it was hard to decrypt how it works. To make it maintainable and fix these bugs this patch basically reimplements lowpan_uncompress_addr from scratch. A list of bugs we found in the current implementation: 1) Properly support uncompression of short-address based IPv6 addresses (instead of basically copying garbage) 2) Fix use and uncompression of long-addresses based IPv6 addresses 3) Add missing ff:fe00 in the case of SAM/DAM = 2 and M = 0 Signed-off-by: Alexander Aring Reviewed-by: Werner Almesberger Signed-off-by: David S. 
Miller diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c index 9aea7ce..5ef9157 100644 --- a/net/ieee802154/6lowpan.c +++ b/net/ieee802154/6lowpan.c @@ -67,30 +67,6 @@ static const u8 lowpan_ttl_values[] = {0, 1, 64, 255}; static LIST_HEAD(lowpan_devices); -/* - * Uncompression of linklocal: - * 0 -> 16 bytes from packet - * 1 -> 2 bytes from prefix - bunch of zeroes and 8 from packet - * 2 -> 2 bytes from prefix - zeroes + 2 from packet - * 3 -> 2 bytes from prefix - infer 8 bytes from lladdr - * - * NOTE: => the uncompress function does change 0xf to 0x10 - * NOTE: 0x00 => no-autoconfig => unspecified - */ -static const u8 lowpan_unc_llconf[] = {0x0f, 0x28, 0x22, 0x20}; - -/* - * Uncompression of ctx-based: - * 0 -> 0 bits from packet [unspecified / reserved] - * 1 -> 8 bytes from prefix - bunch of zeroes and 8 from packet - * 2 -> 8 bytes from prefix - zeroes + 2 from packet - * 3 -> 8 bytes from prefix - infer 8 bytes from lladdr - */ -static const u8 lowpan_unc_ctxconf[] = {0x00, 0x88, 0x82, 0x80}; - -/* Link local prefix */ -static const u8 lowpan_llprefix[] = {0xfe, 0x80}; - /* private device info */ struct lowpan_dev_info { struct net_device *real_dev; /* real WPAN device ptr */ @@ -182,51 +158,86 @@ lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift, const struct in6_addr *ipaddr, return rol8(val, shift); } -static void -lowpan_uip_ds6_set_addr_iid(struct in6_addr *ipaddr, unsigned char *lladdr) -{ - memcpy(&ipaddr->s6_addr[8], lladdr, IEEE802154_ADDR_LEN); - /* second bit-flip (Universe/Local) is done according RFC2464 */ - ipaddr->s6_addr[8] ^= 0x02; -} - /* - * Uncompress addresses based on a prefix and a postfix with zeroes in - * between. If the postfix is zero in length it will use the link address - * to configure the IP address (autoconf style). - * pref_post_count takes a byte where the first nibble specify prefix count - * and the second postfix count (NOTE: 15/0xf => 16 bytes copy). + * Uncompress address function for source and + * destination address(non-multicast). + * + * address_mode is sam value or dam value. */ static int -lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr, - u8 const *prefix, u8 pref_post_count, unsigned char *lladdr) +lowpan_uncompress_addr(struct sk_buff *skb, + struct in6_addr *ipaddr, + const u8 address_mode, + const struct ieee802154_addr *lladdr) { - u8 prefcount = pref_post_count >> 4; - u8 postcount = pref_post_count & 0x0f; - - /* full nibble 15 => 16 */ - prefcount = (prefcount == 15 ? 16 : prefcount); - postcount = (postcount == 15 ? 
16 : postcount); - - if (lladdr) - lowpan_raw_dump_inline(__func__, "linklocal address", - lladdr, IEEE802154_ADDR_LEN); - if (prefcount > 0) - memcpy(ipaddr, prefix, prefcount); - - if (postcount > 0) { - memcpy(&ipaddr->s6_addr[16 - postcount], skb->data, postcount); - skb_pull(skb, postcount); - } else if (prefcount > 0) { - if (lladdr == NULL) + bool fail; + + switch (address_mode) { + case LOWPAN_IPHC_ADDR_00: + /* for global link addresses */ + fail = lowpan_fetch_skb(skb, ipaddr->s6_addr, 16); + break; + case LOWPAN_IPHC_ADDR_01: + /* fe:80::XXXX:XXXX:XXXX:XXXX */ + ipaddr->s6_addr[0] = 0xFE; + ipaddr->s6_addr[1] = 0x80; + fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[8], 8); + break; + case LOWPAN_IPHC_ADDR_02: + /* fe:80::ff:fe00:XXXX */ + ipaddr->s6_addr[0] = 0xFE; + ipaddr->s6_addr[1] = 0x80; + ipaddr->s6_addr[11] = 0xFF; + ipaddr->s6_addr[12] = 0xFE; + fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[14], 2); + break; + case LOWPAN_IPHC_ADDR_03: + fail = false; + switch (lladdr->addr_type) { + case IEEE802154_ADDR_LONG: + /* fe:80::XXXX:XXXX:XXXX:XXXX + * \_________________/ + * hwaddr + */ + ipaddr->s6_addr[0] = 0xFE; + ipaddr->s6_addr[1] = 0x80; + memcpy(&ipaddr->s6_addr[8], lladdr->hwaddr, + IEEE802154_ADDR_LEN); + /* second bit-flip (Universe/Local) + * is done according RFC2464 + */ + ipaddr->s6_addr[8] ^= 0x02; + break; + case IEEE802154_ADDR_SHORT: + /* fe:80::ff:fe00:XXXX + * \__/ + * short_addr + * + * Universe/Local bit is zero. + */ + ipaddr->s6_addr[0] = 0xFE; + ipaddr->s6_addr[1] = 0x80; + ipaddr->s6_addr[11] = 0xFF; + ipaddr->s6_addr[12] = 0xFE; + ipaddr->s6_addr16[7] = htons(lladdr->short_addr); + break; + default: + pr_debug("Invalid addr_type set\n"); return -EINVAL; + } + break; + default: + pr_debug("Invalid address mode value: 0x%x\n", address_mode); + return -EINVAL; + } - /* no IID based configuration if no prefix and no data */ - lowpan_uip_ds6_set_addr_iid(ipaddr, lladdr); + if (fail) { + pr_debug("Failed to fetch skb data\n"); + return -EIO; } - pr_debug("uncompressing %d + %d => ", prefcount, postcount); - lowpan_raw_dump_inline(NULL, NULL, ipaddr->s6_addr, 16); + lowpan_raw_dump_inline(NULL, "Reconstructed ipv6 addr is:\n", + ipaddr->s6_addr, 16); return 0; } @@ -775,7 +786,7 @@ lowpan_process_data(struct sk_buff *skb) { struct ipv6hdr hdr = {}; u8 tmp, iphc0, iphc1, num_context = 0; - u8 *_saddr, *_daddr; + const struct ieee802154_addr *_saddr, *_daddr; int err; lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data, @@ -878,8 +889,8 @@ lowpan_process_data(struct sk_buff *skb) if (lowpan_fetch_skb_u8(skb, &iphc1)) goto drop; - _saddr = mac_cb(skb)->sa.hwaddr; - _daddr = mac_cb(skb)->da.hwaddr; + _saddr = &mac_cb(skb)->sa; + _daddr = &mac_cb(skb)->da; pr_debug("iphc0 = %02x, iphc1 = %02x\n", iphc0, iphc1); @@ -961,8 +972,7 @@ lowpan_process_data(struct sk_buff *skb) /* Source address uncompression */ pr_debug("source address stateless compression\n"); - err = lowpan_uncompress_addr(skb, &hdr.saddr, lowpan_llprefix, - lowpan_unc_llconf[tmp], skb->data); + err = lowpan_uncompress_addr(skb, &hdr.saddr, tmp, _saddr); if (err) goto drop; @@ -982,8 +992,7 @@ lowpan_process_data(struct sk_buff *skb) } } else { pr_debug("dest: stateless compression\n"); - err = lowpan_uncompress_addr(skb, &hdr.daddr, lowpan_llprefix, - lowpan_unc_llconf[tmp], skb->data); + err = lowpan_uncompress_addr(skb, &hdr.daddr, tmp, _daddr); if (err) goto drop; } diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h index a636545..2869c05 100644 --- 
a/net/ieee802154/6lowpan.h +++ b/net/ieee802154/6lowpan.h @@ -193,10 +193,12 @@ /* Values of fields within the IPHC encoding second byte */ #define LOWPAN_IPHC_CID 0x80 +#define LOWPAN_IPHC_ADDR_00 0x00 +#define LOWPAN_IPHC_ADDR_01 0x01 +#define LOWPAN_IPHC_ADDR_02 0x02 +#define LOWPAN_IPHC_ADDR_03 0x03 + #define LOWPAN_IPHC_SAC 0x40 -#define LOWPAN_IPHC_SAM_00 0x00 -#define LOWPAN_IPHC_SAM_01 0x10 -#define LOWPAN_IPHC_SAM_10 0x20 #define LOWPAN_IPHC_SAM 0x30 #define LOWPAN_IPHC_SAM_BIT 4 -- cgit v0.10.2 From 65d892c8ac8f131f668edd55db291a7c68f8338f Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Fri, 16 Aug 2013 21:59:59 +0200 Subject: 6lowpan: handle context based source address Handle context based address when an unspecified address is given. For other context based address we print a warning and drop the packet because we don't support it right now. Signed-off-by: Alexander Aring Reviewed-by: Werner Almesberger Signed-off-by: David S. Miller diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c index 5ef9157..c85e71e 100644 --- a/net/ieee802154/6lowpan.c +++ b/net/ieee802154/6lowpan.c @@ -242,6 +242,40 @@ lowpan_uncompress_addr(struct sk_buff *skb, return 0; } +/* Uncompress address function for source context + * based address(non-multicast). + */ +static int +lowpan_uncompress_context_based_src_addr(struct sk_buff *skb, + struct in6_addr *ipaddr, + const u8 sam) +{ + switch (sam) { + case LOWPAN_IPHC_ADDR_00: + /* unspec address :: + * Do nothing, address is already :: + */ + break; + case LOWPAN_IPHC_ADDR_01: + /* TODO */ + case LOWPAN_IPHC_ADDR_02: + /* TODO */ + case LOWPAN_IPHC_ADDR_03: + /* TODO */ + netdev_warn(skb->dev, "SAM value 0x%x not supported\n", sam); + return -EINVAL; + default: + pr_debug("Invalid sam value: 0x%x\n", sam); + return -EINVAL; + } + + lowpan_raw_dump_inline(NULL, + "Reconstructed context based ipv6 src addr is:\n", + ipaddr->s6_addr, 16); + + return 0; +} + /* Uncompress function for multicast destination address, * when M bit is set. */ @@ -970,9 +1004,18 @@ lowpan_process_data(struct sk_buff *skb) /* Extract SAM to the tmp variable */ tmp = ((iphc1 & LOWPAN_IPHC_SAM) >> LOWPAN_IPHC_SAM_BIT) & 0x03; - /* Source address uncompression */ - pr_debug("source address stateless compression\n"); - err = lowpan_uncompress_addr(skb, &hdr.saddr, tmp, _saddr); + if (iphc1 & LOWPAN_IPHC_SAC) { + /* Source address context based uncompression */ + pr_debug("SAC bit is set. Handle context based source address.\n"); + err = lowpan_uncompress_context_based_src_addr( + skb, &hdr.saddr, tmp); + } else { + /* Source address uncompression */ + pr_debug("source address stateless compression\n"); + err = lowpan_uncompress_addr(skb, &hdr.saddr, tmp, _saddr); + } + + /* Check on error of previous branch */ if (err) goto drop; -- cgit v0.10.2 From 92e2b4696202934b4cbf1d6b8fb8729e8d7202e1 Mon Sep 17 00:00:00 2001 From: Neel Patel Date: Fri, 16 Aug 2013 15:47:39 -0700 Subject: drivers/net: enic: Adding support for Cisco Low Latency NIC This patch, - Adds new firmware commands for the new Cisco Low Latency NIC (aka. USNIC). Signed-off-by: Neel Patel Signed-off-by: Nishank Trivedi Signed-off-by: Christian Benvenuti Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index 2e37c63..75e842d 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -33,7 +33,7 @@ #define DRV_NAME "enic" #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" #define DRV_VERSION "2.1.1.39" -#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc" +#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc" #define ENIC_BARS_MAX 6 diff --git a/drivers/net/ethernet/cisco/enic/enic_res.h b/drivers/net/ethernet/cisco/enic/enic_res.h index 25be273..69f60af 100644 --- a/drivers/net/ethernet/cisco/enic/enic_res.h +++ b/drivers/net/ethernet/cisco/enic/enic_res.h @@ -47,6 +47,9 @@ static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq, int offload_mode, int cq_entry, int sop, int eop, int loopback) { struct wq_enet_desc *desc = vnic_wq_next_desc(wq); + u8 desc_skip_cnt = 1; + u8 compressed_send = 0; + u64 wrid = 0; wq_enet_desc_enc(desc, (u64)dma_addr | VNIC_PADDR_TARGET, @@ -59,7 +62,8 @@ static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq, (u16)vlan_tag, (u8)loopback); - vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop); + vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop, desc_skip_cnt, + (u8)cq_entry, compressed_send, wrid); } static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq, @@ -120,6 +124,7 @@ static inline void enic_queue_rq_desc(struct vnic_rq *rq, dma_addr_t dma_addr, unsigned int len) { struct rq_enet_desc *desc = vnic_rq_next_desc(rq); + u64 wrid = 0; u8 type = os_buf_index ? RQ_ENET_TYPE_NOT_SOP : RQ_ENET_TYPE_ONLY_SOP; @@ -127,7 +132,7 @@ static inline void enic_queue_rq_desc(struct vnic_rq *rq, (u64)dma_addr | VNIC_PADDR_TARGET, type, (u16)len); - vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len); + vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len, wrid); } struct enic; diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h index 23d5552..b9a0d78 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h +++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h @@ -281,11 +281,25 @@ enum vnic_devcmd_cmd { * 0 if no VIF-CONFIG-INFO TLV was ever received. */ CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44), + /* INT13 API: (u64)a0=paddr to vnic_int13_params struct + * (u32)a1=INT13_CMD_xxx + */ + CMD_INT13_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 45), + + /* Set default vlan: + * in: (u16)a0=new default vlan + * (u16)a1=zero for overriding vlan with param a0, + * non-zero for resetting vlan to the default + * out: (u16)a0=old default vlan + */ + CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46), + /* init_prov_info2: * Variant of CMD_INIT_PROV_INFO, where it will not try to enable * the vnic until CMD_ENABLE2 is issued. 
* (u64)a0=paddr of vnic_devcmd_provinfo - * (u32)a1=sizeof provision info */ + * (u32)a1=sizeof provision info + */ CMD_INIT_PROV_INFO2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 47), /* enable2: @@ -339,16 +353,57 @@ enum vnic_devcmd_cmd { CMD_INTR_COAL_CONVERT = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 50), /* - * cmd_set_mac_addr - * set mac address + * Set the predefined mac address as default * in: * (u48)a0 = mac addr - * */ CMD_SET_MAC_ADDR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 55), + + /* Update the provisioning info of the given VIF + * (u64)a0=paddr of vnic_devcmd_provinfo + * (u32)a1=sizeof provision info + */ + CMD_PROV_INFO_UPDATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 56), + + /* Add a filter. + * in: (u64) a0= filter address + * (u32) a1= size of filter + * out: (u32) a0=filter identifier + */ + CMD_ADD_FILTER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 58), + + /* Delete a filter. + * in: (u32) a0=filter identifier + */ + CMD_DEL_FILTER = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 59), + + /* Enable a Queue Pair in User space NIC + * in: (u32) a0=Queue Pair number + * (u32) a1= command + */ + CMD_QP_ENABLE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 60), + + /* Disable a Queue Pair in User space NIC + * in: (u32) a0=Queue Pair number + * (u32) a1= command + */ + CMD_QP_DISABLE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 61), + + /* Stats dump Queue Pair in User space NIC + * in: (u32) a0=Queue Pair number + * (u64) a1=host buffer addr for status dump + * (u32) a2=length of the buffer + */ + CMD_QP_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 62), + + /* Clear stats for Queue Pair in User space NIC + * in: (u32) a0=Queue Pair number + */ + CMD_QP_STATS_CLEAR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 63), }; /* CMD_ENABLE2 flags */ +#define CMD_ENABLE2_STANDBY 0x0 #define CMD_ENABLE2_ACTIVE 0x1 /* flags for CMD_OPEN */ @@ -364,6 +419,9 @@ enum vnic_devcmd_cmd { #define CMD_PFILTER_PROMISCUOUS 0x08 #define CMD_PFILTER_ALL_MULTICAST 0x10 +/* Commands for CMD_QP_ENABLE/CM_QP_DISABLE */ +#define CMD_QP_RQWQ 0x0 + /* rewrite modes for CMD_IG_VLAN_REWRITE_MODE */ #define IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK 0 #define IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN 1 @@ -390,6 +448,7 @@ enum vnic_devcmd_error { ERR_EMAXRES = 10, ERR_ENOTSUPPORTED = 11, ERR_EINPROGRESS = 12, + ERR_MAX }; /* @@ -435,6 +494,115 @@ struct vnic_devcmd_provinfo { u8 data[0]; }; +/* These are used in flags field of different filters to denote + * valid fields used. 
+ */ +#define FILTER_FIELD_VALID(fld) (1 << (fld - 1)) + +#define FILTER_FIELDS_USNIC ( \ + FILTER_FIELD_VALID(1) | \ + FILTER_FIELD_VALID(2) | \ + FILTER_FIELD_VALID(3) | \ + FILTER_FIELD_VALID(4)) + +#define FILTER_FIELDS_IPV4_5TUPLE ( \ + FILTER_FIELD_VALID(1) | \ + FILTER_FIELD_VALID(2) | \ + FILTER_FIELD_VALID(3) | \ + FILTER_FIELD_VALID(4) | \ + FILTER_FIELD_VALID(5)) + +#define FILTER_FIELDS_MAC_VLAN ( \ + FILTER_FIELD_VALID(1) | \ + FILTER_FIELD_VALID(2)) + +#define FILTER_FIELD_USNIC_VLAN FILTER_FIELD_VALID(1) +#define FILTER_FIELD_USNIC_ETHTYPE FILTER_FIELD_VALID(2) +#define FILTER_FIELD_USNIC_PROTO FILTER_FIELD_VALID(3) +#define FILTER_FIELD_USNIC_ID FILTER_FIELD_VALID(4) + +struct filter_usnic_id { + u32 flags; + u16 vlan; + u16 ethtype; + u8 proto_version; + u32 usnic_id; +} __packed; + +#define FILTER_FIELD_5TUP_PROTO FILTER_FIELD_VALID(1) +#define FILTER_FIELD_5TUP_SRC_AD FILTER_FIELD_VALID(2) +#define FILTER_FIELD_5TUP_DST_AD FILTER_FIELD_VALID(3) +#define FILTER_FIELD_5TUP_SRC_PT FILTER_FIELD_VALID(4) +#define FILTER_FIELD_5TUP_DST_PT FILTER_FIELD_VALID(5) + +/* Enums for the protocol field. */ +enum protocol_e { + PROTO_UDP = 0, + PROTO_TCP = 1, +}; + +struct filter_ipv4_5tuple { + u32 flags; + u32 protocol; + u32 src_addr; + u32 dst_addr; + u16 src_port; + u16 dst_port; +} __packed; + +#define FILTER_FIELD_VMQ_VLAN FILTER_FIELD_VALID(1) +#define FILTER_FIELD_VMQ_MAC FILTER_FIELD_VALID(2) + +struct filter_mac_vlan { + u32 flags; + u16 vlan; + u8 mac_addr[6]; +} __packed; + +/* Specifies the filter_action type. */ +enum { + FILTER_ACTION_RQ_STEERING = 0, + FILTER_ACTION_MAX +}; + +struct filter_action { + u32 type; + union { + u32 rq_idx; + } u; +} __packed; + +/* Specifies the filter type. */ +enum filter_type { + FILTER_USNIC_ID = 0, + FILTER_IPV4_5TUPLE = 1, + FILTER_MAC_VLAN = 2, + FILTER_MAX +}; + +struct filter { + u32 type; + union { + struct filter_usnic_id usnic; + struct filter_ipv4_5tuple ipv4; + struct filter_mac_vlan mac_vlan; + } u; +} __packed; + +enum { + CLSF_TLV_FILTER = 0, + CLSF_TLV_ACTION = 1, +}; + +/* Maximum size of buffer to CMD_ADD_FILTER */ +#define FILTER_MAX_BUF_SIZE 100 + +struct filter_tlv { + u_int32_t type; + u_int32_t length; + u_int32_t val[0]; +}; + /* * Writing cmd register causes STAT_BUSY to get set in status register. * When cmd completes, STAT_BUSY will be cleared. 
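For illustration only (this sketch is not part of the patch): the filter and action structures above travel to firmware as a TLV-encoded buffer through CMD_ADD_FILTER, with a0 holding the buffer's bus address, a1 its length, and the assigned filter identifier coming back in a0. A minimal way a caller might pack such a buffer, assuming only the definitions from this header — the helper name, its parameters and the choice of a UDP 5-tuple are made up — could look like:

	/* Sketch: pack one IPv4 5-tuple classifier plus an RQ-steering
	 * action into the TLV layout consumed by CMD_ADD_FILTER.
	 * Relies on the vnic_devcmd.h definitions above; helper name
	 * and arguments are hypothetical, byte order of the address and
	 * port fields is whatever the firmware ABI expects.
	 */
	static size_t pack_ipv4_5tuple_filter(void *buf, u32 rq_idx, u32 saddr,
					      u32 daddr, u16 sport, u16 dport)
	{
		struct filter_tlv *tlv = buf;
		struct filter *flt;
		struct filter_action *act;

		memset(buf, 0, FILTER_MAX_BUF_SIZE);

		/* First TLV: the filter itself, an IPv4 5-tuple match */
		tlv->type = CLSF_TLV_FILTER;
		tlv->length = sizeof(struct filter);
		flt = (struct filter *)tlv->val;
		flt->type = FILTER_IPV4_5TUPLE;
		flt->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
		flt->u.ipv4.protocol = PROTO_UDP;
		flt->u.ipv4.src_addr = saddr;
		flt->u.ipv4.dst_addr = daddr;
		flt->u.ipv4.src_port = sport;
		flt->u.ipv4.dst_port = dport;

		/* Second TLV: steer matching packets to receive queue rq_idx */
		tlv = (struct filter_tlv *)((u8 *)tlv->val + tlv->length);
		tlv->type = CLSF_TLV_ACTION;
		tlv->length = sizeof(struct filter_action);
		act = (struct filter_action *)tlv->val;
		act->type = FILTER_ACTION_RQ_STEERING;
		act->u.rq_idx = rq_idx;

		/* Length to pass in a1 (48 bytes with these definitions,
		 * well under FILTER_MAX_BUF_SIZE) */
		return 2 * sizeof(struct filter_tlv) + sizeof(struct filter) +
		       sizeof(struct filter_action);
	}

The buffer itself would normally sit in DMA-coherent memory so its bus address can be passed in a0, and the USNIC driver would presumably issue the command through the enic_api_devcmd_proxy_by_index() interface added in the following enic patch.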
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c index 7e1488f..36a2ed6 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_rq.c +++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c @@ -30,12 +30,9 @@ static int vnic_rq_alloc_bufs(struct vnic_rq *rq) { struct vnic_rq_buf *buf; - struct vnic_dev *vdev; unsigned int i, j, count = rq->ring.desc_count; unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count); - vdev = rq->vdev; - for (i = 0; i < blks; i++) { rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC); if (!rq->bufs[i]) @@ -141,7 +138,7 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) { - u32 fetch_index; + u32 fetch_index = 0; /* Use current fetch_index as the ring starting point */ fetch_index = ioread32(&rq->ctrl->fetch_index); diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.h b/drivers/net/ethernet/cisco/enic/vnic_rq.h index 2056586..ee7bc95 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_rq.h +++ b/drivers/net/ethernet/cisco/enic/vnic_rq.h @@ -72,6 +72,7 @@ struct vnic_rq_buf { unsigned int len; unsigned int index; void *desc; + uint64_t wr_id; }; struct vnic_rq { @@ -110,7 +111,8 @@ static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq) static inline void vnic_rq_post(struct vnic_rq *rq, void *os_buf, unsigned int os_buf_index, - dma_addr_t dma_addr, unsigned int len) + dma_addr_t dma_addr, unsigned int len, + uint64_t wrid) { struct vnic_rq_buf *buf = rq->to_use; @@ -118,6 +120,7 @@ static inline void vnic_rq_post(struct vnic_rq *rq, buf->os_buf_index = os_buf_index; buf->dma_addr = dma_addr; buf->len = len; + buf->wr_id = wrid; buf = buf->next; rq->to_use = buf; diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c index 5e0d7a2..3e6b8d5 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_wq.c +++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c @@ -30,12 +30,9 @@ static int vnic_wq_alloc_bufs(struct vnic_wq *wq) { struct vnic_wq_buf *buf; - struct vnic_dev *vdev; unsigned int i, j, count = wq->ring.desc_count; unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count); - vdev = wq->vdev; - for (i = 0; i < blks; i++) { wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC); if (!wq->bufs[i]) diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h index 7dd937a..2c6c708 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_wq.h +++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h @@ -58,6 +58,10 @@ struct vnic_wq_buf { unsigned int index; int sop; void *desc; + uint64_t wr_id; /* Cookie */ + uint8_t cq_entry; /* Gets completion event from hw */ + uint8_t desc_skip_cnt; /* Num descs to occupy */ + uint8_t compressed_send; /* Both hdr and payload in one desc */ }; /* Break the vnic_wq_buf allocations into blocks of 32/64 entries */ @@ -102,14 +106,20 @@ static inline void *vnic_wq_next_desc(struct vnic_wq *wq) static inline void vnic_wq_post(struct vnic_wq *wq, void *os_buf, dma_addr_t dma_addr, - unsigned int len, int sop, int eop) + unsigned int len, int sop, int eop, + uint8_t desc_skip_cnt, uint8_t cq_entry, + uint8_t compressed_send, uint64_t wrid) { struct vnic_wq_buf *buf = wq->to_use; buf->sop = sop; + buf->cq_entry = cq_entry; + buf->compressed_send = compressed_send; + buf->desc_skip_cnt = desc_skip_cnt; buf->os_buf = eop ? 
os_buf : NULL; buf->dma_addr = dma_addr; buf->len = len; + buf->wr_id = wrid; buf = buf->next; if (eop) { @@ -123,7 +133,7 @@ static inline void vnic_wq_post(struct vnic_wq *wq, } wq->to_use = buf; - wq->ring.desc_avail--; + wq->ring.desc_avail -= desc_skip_cnt; } static inline void vnic_wq_service(struct vnic_wq *wq, -- cgit v0.10.2 From 0b038566c0ea9f8daceae879bb7ad748ab8c95f0 Mon Sep 17 00:00:00 2001 From: Neel Patel Date: Fri, 16 Aug 2013 15:47:40 -0700 Subject: drivers/net: enic: Add an interface for USNIC to interact with firmware This patch adds an interface for USNIC to proxy firmware commands through ENIC. Signed-off-by: Neel Patel Signed-off-by: Nishank Trivedi Signed-off-by: Christian Benvenuti Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/cisco/enic/Makefile b/drivers/net/ethernet/cisco/enic/Makefile index e52296d..239e1e4 100644 --- a/drivers/net/ethernet/cisco/enic/Makefile +++ b/drivers/net/ethernet/cisco/enic/Makefile @@ -2,5 +2,5 @@ obj-$(CONFIG_ENIC) := enic.o enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \ enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o \ - enic_ethtool.o + enic_ethtool.o enic_api.o diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index 75e842d..cacca29 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -96,6 +96,7 @@ struct enic { #ifdef CONFIG_PCI_IOV u16 num_vfs; #endif + spinlock_t enic_api_lock; struct enic_port_profile *pp; /* work queue cache line section */ diff --git a/drivers/net/ethernet/cisco/enic/enic_api.c b/drivers/net/ethernet/cisco/enic/enic_api.c new file mode 100644 index 0000000..e13efbd --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/enic_api.c @@ -0,0 +1,48 @@ +/** + * Copyright 2013 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include +#include + +#include "vnic_dev.h" +#include "vnic_devcmd.h" + +#include "enic_res.h" +#include "enic.h" +#include "enic_api.h" + +int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf, + enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait) +{ + int err; + struct enic *enic = netdev_priv(netdev); + struct vnic_dev *vdev = enic->vdev; + + spin_lock(&enic->enic_api_lock); + spin_lock(&enic->devcmd_lock); + + vnic_dev_cmd_proxy_by_index_start(vdev, vf); + err = vnic_dev_cmd(vdev, cmd, a0, a1, wait); + vnic_dev_cmd_proxy_end(vdev); + + spin_unlock(&enic->devcmd_lock); + spin_unlock(&enic->enic_api_lock); + + return err; +} +EXPORT_SYMBOL(enic_api_devcmd_proxy_by_index); diff --git a/drivers/net/ethernet/cisco/enic/enic_api.h b/drivers/net/ethernet/cisco/enic/enic_api.h new file mode 100644 index 0000000..6b9f925 --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/enic_api.h @@ -0,0 +1,30 @@ +/** + * Copyright 2013 Cisco Systems, Inc. 
All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef __ENIC_API_H__ +#define __ENIC_API_H__ + +#include + +#include "vnic_dev.h" +#include "vnic_devcmd.h" + +int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf, + enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait); + +#endif diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index b12b32b..7f8891b 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -1733,6 +1733,7 @@ static void enic_reset(struct work_struct *work) rtnl_lock(); + spin_lock(&enic->enic_api_lock); enic_dev_hang_notify(enic); enic_stop(enic->netdev); enic_dev_hang_reset(enic); @@ -1741,6 +1742,7 @@ static void enic_reset(struct work_struct *work) enic_set_rss_nic_cfg(enic); enic_dev_set_ig_vlan_rewrite_mode(enic); enic_open(enic->netdev); + spin_unlock(&enic->enic_api_lock); rtnl_unlock(); } @@ -2153,6 +2155,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) */ spin_lock_init(&enic->devcmd_lock); + spin_lock_init(&enic->enic_api_lock); /* * Set ingress vlan rewrite mode before vnic initialization -- cgit v0.10.2 From d765bb41fde311b57de32bdc9d17965debe8b7af Mon Sep 17 00:00:00 2001 From: Neel Patel Date: Fri, 16 Aug 2013 15:47:41 -0700 Subject: drivers/net: enic: Generate notification of hardware crash This patch generates a hardware crash notification (NETDEV_REBOOT) during reset. After a hardware crash, ENIC resets all its resources including queue pair filters programmed by USNIC. USNIC registers for this notification, and on receiving it, reprograms the queue pair filters. Signed-off-by: Neel Patel Signed-off-by: Nishank Trivedi Signed-off-by: Christian Benvenuti Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index cacca29..be16731 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -32,7 +32,7 @@ #define DRV_NAME "enic" #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" -#define DRV_VERSION "2.1.1.39" +#define DRV_VERSION "2.1.1.43" #define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc" #define ENIC_BARS_MAX 6 diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 7f8891b..bcf15b1 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -1743,6 +1743,7 @@ static void enic_reset(struct work_struct *work) enic_dev_set_ig_vlan_rewrite_mode(enic); enic_open(enic->netdev); spin_unlock(&enic->enic_api_lock); + call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev); rtnl_unlock(); } -- cgit v0.10.2 From e7fb06a1b4d1c5bdcbbf78f7c7e938835395bcdb Mon Sep 17 00:00:00 2001 From: Peter Wu Date: Sat, 17 Aug 2013 01:07:53 +0200 Subject: r8169,sis190: remove unnecessary length check The ethtool core will lower the requested length to the one returned by get_regs_len, therefore no additional check is needed in the get_regs function. Reported-by: Ben Hutchings Signed-off-by: Peter Wu Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index b5eb419..93ee49d 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1898,9 +1898,6 @@ static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, { struct rtl8169_private *tp = netdev_priv(dev); - if (regs->len > R8169_REGS_SIZE) - regs->len = R8169_REGS_SIZE; - rtl_lock_work(tp); memcpy_fromio(p, tp->mmio_addr, regs->len); rtl_unlock_work(tp); diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c index 02df089..ee18e6f 100644 --- a/drivers/net/ethernet/sis/sis190.c +++ b/drivers/net/ethernet/sis/sis190.c @@ -1770,9 +1770,6 @@ static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs, struct sis190_private *tp = netdev_priv(dev); unsigned long flags; - if (regs->len > SIS190_REGS_SIZE) - regs->len = SIS190_REGS_SIZE; - spin_lock_irqsave(&tp->lock, flags); memcpy_fromio(p, tp->mmio_addr, regs->len); spin_unlock_irqrestore(&tp->lock, flags); -- cgit v0.10.2 From dcdc7a5929842ca0df91eb50de9edebbb5e4e7d1 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Sat, 17 Aug 2013 07:32:09 +0800 Subject: vxlan: using kfree_rcu() to simplify the code The callback function of call_rcu() just calls a kfree(), so we can use kfree_rcu() instead of call_rcu() + callback function. Signed-off-by: Wei Yongjun Acked-by: Stephen Hemminger Signed-off-by: David S. 
Miller diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index b9401b5..3b21aca 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -542,12 +542,6 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan, return 0; } -static void vxlan_fdb_free_rdst(struct rcu_head *head) -{ - struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu); - kfree(rd); -} - static void vxlan_fdb_free(struct rcu_head *head) { struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu); @@ -687,7 +681,7 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], */ if (rd && !list_is_singular(&f->remotes)) { list_del_rcu(&rd->list); - call_rcu(&rd->rcu, vxlan_fdb_free_rdst); + kfree_rcu(rd, rcu); goto out; } -- cgit v0.10.2 From ba3542e15cf8b9f3dee4c64a383c7102c0d8fab3 Mon Sep 17 00:00:00 2001 From: dingtianhong Date: Sat, 17 Aug 2013 10:27:04 +0800 Subject: ipv6: convert the uses of ADBG and remove the superfluous parentheses Just follow the Joe Perches's opinion, it is a better way to fix the style errors. Suggested-by: Joe Perches Signed-off-by: Ding Tianhong Cc: Joe Perches Signed-off-by: David S. Miller diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index ad12f7c..3ca23e8 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -99,9 +99,9 @@ #define ACONF_DEBUG 2 #if ACONF_DEBUG >= 3 -#define ADBG(x) printk x +#define ADBG(fmt, ...) printk(fmt, ##__VA_ARGS__) #else -#define ADBG(x) +#define ADBG(fmt, ...) do { if (0) printk(fmt, ##__VA_ARGS__); } while (0) #endif #define INFINITY_LIFE_TIME 0xFFFFFFFF @@ -374,9 +374,9 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev) dev_hold(dev); if (snmp6_alloc_dev(ndev) < 0) { - ADBG((KERN_WARNING + ADBG(KERN_WARNING "%s: cannot allocate memory for statistics; dev=%s.\n", - __func__, dev->name)); + __func__, dev->name); neigh_parms_release(&nd_tbl, ndev->nd_parms); dev_put(dev); kfree(ndev); @@ -384,9 +384,9 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev) } if (snmp6_register_dev(ndev) < 0) { - ADBG((KERN_WARNING + ADBG(KERN_WARNING "%s: cannot create /proc/net/dev_snmp6/%s\n", - __func__, dev->name)); + __func__, dev->name); neigh_parms_release(&nd_tbl, ndev->nd_parms); ndev->dead = 1; in6_dev_finish_destroy(ndev); @@ -849,7 +849,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, /* Ignore adding duplicate addresses on an interface */ if (ipv6_chk_same_addr(dev_net(idev->dev), addr, idev->dev)) { - ADBG(("ipv6_add_addr: already assigned\n")); + ADBG("ipv6_add_addr: already assigned\n"); err = -EEXIST; goto out; } @@ -857,7 +857,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, ifa = kzalloc(sizeof(struct inet6_ifaddr), GFP_ATOMIC); if (ifa == NULL) { - ADBG(("ipv6_add_addr: malloc failed\n")); + ADBG("ipv6_add_addr: malloc failed\n"); err = -ENOBUFS; goto out; } @@ -2057,7 +2057,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao) pinfo = (struct prefix_info *) opt; if (len < sizeof(struct prefix_info)) { - ADBG(("addrconf: prefix option too short\n")); + ADBG("addrconf: prefix option too short\n"); return; } @@ -3637,8 +3637,8 @@ restart: if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX)) next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX; - ADBG((KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n", - now, next, next_sec, next_sched)); + ADBG(KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n", + now, next, next_sec, next_sched); addr_chk_timer.expires = next_sched; 
add_timer(&addr_chk_timer); -- cgit v0.10.2 From df8372ca747f6da9e8590775721d9363c1dfc87e Mon Sep 17 00:00:00 2001 From: dingtianhong Date: Sat, 17 Aug 2013 10:27:15 +0800 Subject: ipv6: fix checkpatch errors in net/ipv6/addrconf.c ERROR: code indent should use tabs where possible: fix 2. ERROR: do not use assignment in if condition: fix 5. Signed-off-by: Ding Tianhong Signed-off-by: David S. Miller diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 3ca23e8..21638d4 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1110,8 +1110,8 @@ retry: spin_unlock_bh(&ifp->lock); regen_advance = idev->cnf.regen_max_retry * - idev->cnf.dad_transmits * - idev->nd_parms->retrans_time / HZ; + idev->cnf.dad_transmits * + idev->nd_parms->retrans_time / HZ; write_unlock(&idev->lock); /* A temporary address is created only if this calculated Preferred @@ -2501,7 +2501,8 @@ static int inet6_addr_del(struct net *net, int ifindex, const struct in6_addr *p if (!dev) return -ENODEV; - if ((idev = __in6_dev_get(dev)) == NULL) + idev = __in6_dev_get(dev); + if (!idev) return -ENXIO; read_lock_bh(&idev->lock); @@ -2640,7 +2641,8 @@ static void init_loopback(struct net_device *dev) ASSERT_RTNL(); - if ((idev = ipv6_find_idev(dev)) == NULL) { + idev = ipv6_find_idev(dev); + if (!idev) { pr_debug("%s: add_dev failed\n", __func__); return; } @@ -2738,7 +2740,8 @@ static void addrconf_sit_config(struct net_device *dev) * our v4 addrs in the tunnel */ - if ((idev = ipv6_find_idev(dev)) == NULL) { + idev = ipv6_find_idev(dev); + if (!idev) { pr_debug("%s: add_dev failed\n", __func__); return; } @@ -2770,7 +2773,8 @@ static void addrconf_gre_config(struct net_device *dev) ASSERT_RTNL(); - if ((idev = ipv6_find_idev(dev)) == NULL) { + idev = ipv6_find_idev(dev); + if (!idev) { pr_debug("%s: add_dev failed\n", __func__); return; } @@ -2801,11 +2805,11 @@ static void ip6_tnl_add_linklocal(struct inet6_dev *idev) struct net *net = dev_net(idev->dev); /* first try to inherit the link-local address from the link device */ - if (idev->dev->iflink && - (link_dev = __dev_get_by_index(net, idev->dev->iflink))) { - if (!ipv6_inherit_linklocal(idev, link_dev)) + if (idev->dev->iflink) + link_dev = __dev_get_by_index(net, idev->dev->iflink); + if (link_dev && !ipv6_inherit_linklocal(idev, link_dev)) return; - } + /* then try to inherit it from any device */ for_each_netdev(net, link_dev) { if (!ipv6_inherit_linklocal(idev, link_dev)) -- cgit v0.10.2 From cb74e27356db43e3dca70ff4b915a488b5ad5545 Mon Sep 17 00:00:00 2001 From: Denis Kirjanov Date: Sat, 17 Aug 2013 09:08:48 +0400 Subject: sundance: Add netpoll support Signed-off-by: Denis Kirjanov Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c index 50d9c63..bf3bf6f 100644 --- a/drivers/net/ethernet/dlink/sundance.c +++ b/drivers/net/ethernet/dlink/sundance.c @@ -469,6 +469,17 @@ static void sundance_reset(struct net_device *dev, unsigned long reset_cmd) } } +#ifdef CONFIG_NET_POLL_CONTROLLER +static void sundance_poll_controller(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + + disable_irq(np->pci_dev->irq); + intr_handler(np->pci_dev->irq, dev); + enable_irq(np->pci_dev->irq); +} +#endif + static const struct net_device_ops netdev_ops = { .ndo_open = netdev_open, .ndo_stop = netdev_close, @@ -480,6 +491,9 @@ static const struct net_device_ops netdev_ops = { .ndo_change_mtu = change_mtu, .ndo_set_mac_address = sundance_set_mac_addr, .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = sundance_poll_controller, +#endif }; static int sundance_probe1(struct pci_dev *pdev, -- cgit v0.10.2 From 2bc9ff01f1f93da6c36b34dbc7ce7c42f717693e Mon Sep 17 00:00:00 2001 From: Denis Kirjanov Date: Sat, 17 Aug 2013 09:08:49 +0400 Subject: sundance: Add myself as a maintainer Signed-off-by: Denis Kirjanov Signed-off-by: David S. Miller diff --git a/MAINTAINERS b/MAINTAINERS index 1c6f9db..a83dd4f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7973,6 +7973,12 @@ F: arch/m68k/sun3*/ F: arch/m68k/include/asm/sun3* F: drivers/net/ethernet/i825xx/sun3* +SUNDANCE NETWORK DRIVER +M: Denis Kirjanov +L: netdev@vger.kernel.org +S: Maintained +F: drivers/net/ethernet/dlink/sundance.c + SUPERH M: Paul Mundt L: linux-sh@vger.kernel.org -- cgit v0.10.2 From 0aa857f83fe2674f973ad89ec7c0202f5c5d9554 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Sun, 18 Aug 2013 16:09:30 +0800 Subject: moxa: fix missing unlock on error in moxart_mac_start_xmit() Add the missing unlock before return from function moxart_mac_start_xmit() in the error handling case. Signed-off-by: Wei Yongjun Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index abd2c54..6eee686 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -314,6 +314,7 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) unsigned int len; unsigned int tx_head = priv->tx_head; u32 txdes1; + int ret = NETDEV_TX_BUSY; desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head); @@ -321,7 +322,7 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) if (readl(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) { net_dbg_ratelimited("no TX space for packet\n"); priv->stats.tx_dropped++; - return NETDEV_TX_BUSY; + goto out_unlock; } len = skb->len > TX_BUF_SIZE ? 
TX_BUF_SIZE : skb->len; @@ -330,7 +331,7 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) len, DMA_TO_DEVICE); if (dma_mapping_error(&ndev->dev, priv->tx_mapping[tx_head])) { netdev_err(ndev, "DMA mapping error\n"); - return NETDEV_TX_BUSY; + goto out_unlock; } priv->tx_len[tx_head] = len; @@ -360,10 +361,11 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) priv->tx_head = TX_NEXT(tx_head); ndev->trans_start = jiffies; - + ret = NETDEV_TX_OK; +out_unlock: spin_unlock_irq(&priv->txlock); - return NETDEV_TX_OK; + return ret; } static struct net_device_stats *moxart_mac_get_stats(struct net_device *ndev) -- cgit v0.10.2 From c196ce05cfdb7b5a8af12a3096fac9e880d59617 Mon Sep 17 00:00:00 2001 From: Anders Larsen Date: Sun, 18 Aug 2013 11:54:40 +0200 Subject: pch_gbe: ethtool cannot change parameters when link is down When attempting to change e.g. the advertising mask when the link is down ecmd->speed is -1 causing mii_ethtool_sset() to bail out. This bug bit when connecting to a gigabit switch through a 4-pin (industrial) cable, since link negotiation would not complete (both endpoints claimed to be gigabit-capable, but this is not possible with only 4 pins). Any attempt to fix this by setting autonegation to not offer 1000Mbps failed as the setting would not be accepted while the link was still down... Set ecmd->speed to SPEED_1000 to satisfy mii_ethtool_sset() (the actual value of ecmd->speed doesn't matter as long as it is valid, since a re-negotation is forced afterwards). Signed-off-by: Anders Larsen Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c index 1129db0..f0ceb89 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c @@ -118,6 +118,7 @@ static int pch_gbe_set_settings(struct net_device *netdev, * filled by get_settings() on a down link, speed is -1: */ if (speed == UINT_MAX) { speed = SPEED_1000; + ethtool_cmd_speed_set(ecmd, speed); ecmd->duplex = DUPLEX_FULL; } ret = mii_ethtool_sset(&adapter->mii, ecmd); -- cgit v0.10.2 From 397b41746333ad386d91d23ea0f79481320dcdcc Mon Sep 17 00:00:00 2001 From: Christoph Paasch Date: Sun, 18 Aug 2013 17:34:38 +0200 Subject: tcp: trivial: Remove nocache argument from tcp_v4_send_synack The nocache-argument was used in tcp_v4_send_synack as an argument to inet_csk_route_req. However, since ba3f7f04ef2b (ipv4: Kill FLOWI_FLAG_RT_NOCACHE and associated code.) this is no more used. This patch removes the unsued argument from tcp_v4_send_synack. Signed-off-by: Christoph Paasch Acked-by: Eric Dumazet Signed-off-by: David S. 
Miller diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 05a3d45..09d45d7 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -821,8 +821,7 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, */ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, struct request_sock *req, - u16 queue_mapping, - bool nocache) + u16 queue_mapping) { const struct inet_request_sock *ireq = inet_rsk(req); struct flowi4 fl4; @@ -852,7 +851,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req) { - int res = tcp_v4_send_synack(sk, NULL, req, 0, false); + int res = tcp_v4_send_synack(sk, NULL, req, 0); if (!res) TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); -- cgit v0.10.2 From 35596b2796713c6a9dc05759837fa9f0e156a200 Mon Sep 17 00:00:00 2001 From: Asias He Date: Mon, 19 Aug 2013 09:23:19 +0800 Subject: vhost: Include linux/uio.h instead of linux/socket.h memcpy_fromiovec is moved from net/core/iovec.c to lib/iovec.c. linux/uio.h provides the declaration for memcpy_fromiovec. Include linux/uio.h instead of inux/socket.h for it. Signed-off-by: Asias He Acked-by: Michael S. Tsirkin Signed-off-by: David S. Miller diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index e58cf00..448efe0 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -13,7 +13,7 @@ #include #include -#include /* memcpy_fromiovec */ +#include #include #include #include -- cgit v0.10.2 From 734d2725db879f3f6fcdc2b1d2a5deae105f5e95 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sun, 18 Aug 2013 19:08:07 -0700 Subject: ipv4: raise IP_MAX_MTU to theoretical limit As discussed last year [1], there is no compelling reason to limit IPv4 MTU to 0xFFF0, while real limit is 0xFFFF [1] : http://marc.info/?l=linux-netdev&m=135607247609434&w=2 Willem raised this issue again because some of our internal regression tests broke after lo mtu being set to 65536. IP_MTU reports 0xFFF0, and the test attempts to send a RAW datagram of mtu + 1 bytes, expecting the send() to fail, but it does not. Alexey raised interesting points about TCP MSS, that should be addressed in follow-up patches in TCP stack if needed, as someone could also set an odd mtu anyway. Signed-off-by: Eric Dumazet Cc: Alexey Kuznetsov Cc: Willem de Bruijn Signed-off-by: David S. Miller diff --git a/net/ipv4/route.c b/net/ipv4/route.c index e805481..727f436 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -112,7 +112,8 @@ #define RT_FL_TOS(oldflp4) \ ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)) -#define IP_MAX_MTU 0xFFF0 +/* IPv4 datagram length is stored into 16bit field (tot_len) */ +#define IP_MAX_MTU 0xFFFF #define RT_GC_TIMEOUT (300*HZ) @@ -1227,10 +1228,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst) mtu = 576; } - if (mtu > IP_MAX_MTU) - mtu = IP_MAX_MTU; - - return mtu; + return min_t(unsigned int, mtu, IP_MAX_MTU); } static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr) -- cgit v0.10.2 From 0dde80268ee0a5a1511935bdb9c547191d616aa9 Mon Sep 17 00:00:00 2001 From: Hyong-Youb Kim Date: Mon, 19 Aug 2013 02:02:19 -0700 Subject: myri10ge: Add support for ndo_busy_poll Add the ndo_busy_poll handler. Use skb_mark_napi_id to mark receive packets with the napi id. For each slice, use per-slice spinlock and state variable to ensure that only one handler processes receive pacekts. 
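As background, and not part of this driver patch: the new handler only runs when userspace opts into busy polling, either globally via the net.core.busy_read / net.core.busy_poll sysctls or per socket with the SO_BUSY_POLL option. A minimal userspace sketch, with an arbitrary 50 microsecond budget, could look like:

	/* Hypothetical userspace snippet: ask the kernel to busy-poll this
	 * socket's receive queue for up to 50us per blocking read, which is
	 * the path that ends up calling the driver's ndo_busy_poll handler.
	 * Raising the budget above the sysctl default needs CAP_NET_ADMIN.
	 */
	#include <sys/socket.h>

	#ifndef SO_BUSY_POLL
	#define SO_BUSY_POLL	46
	#endif

	static int enable_busy_poll(int fd)
	{
		int usecs = 50;

		return setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL,
				  &usecs, sizeof(usecs));
	}
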
Locking, statistics counters, and the ndo_busy_poll handler follow those in the ixgbe driver. So, credit goes to that driver's authors. Signed-off-by: Hyong-Youb Kim Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 66c0400..50a1d4a 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -74,6 +74,7 @@ #ifdef CONFIG_MTRR #include #endif +#include #include "myri10ge_mcp.h" #include "myri10ge_mcp_gen_header.h" @@ -194,6 +195,21 @@ struct myri10ge_slice_state { int cpu; __be32 __iomem *dca_tag; #endif +#ifdef CONFIG_NET_RX_BUSY_POLL + unsigned int state; +#define SLICE_STATE_IDLE 0 +#define SLICE_STATE_NAPI 1 /* NAPI owns this slice */ +#define SLICE_STATE_POLL 2 /* poll owns this slice */ +#define SLICE_LOCKED (SLICE_STATE_NAPI | SLICE_STATE_POLL) +#define SLICE_STATE_NAPI_YIELD 4 /* NAPI yielded this slice */ +#define SLICE_STATE_POLL_YIELD 8 /* poll yielded this slice */ +#define SLICE_USER_PEND (SLICE_STATE_POLL | SLICE_STATE_POLL_YIELD) + spinlock_t lock; + unsigned long lock_napi_yield; + unsigned long lock_poll_yield; + unsigned long busy_poll_miss; + unsigned long busy_poll_cnt; +#endif /* CONFIG_NET_RX_BUSY_POLL */ char irq_desc[32]; }; @@ -909,6 +925,92 @@ abort: return status; } +#ifdef CONFIG_NET_RX_BUSY_POLL +static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss) +{ + spin_lock_init(&ss->lock); + ss->state = SLICE_STATE_IDLE; +} + +static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss) +{ + int rc = true; + spin_lock(&ss->lock); + if ((ss->state & SLICE_LOCKED)) { + WARN_ON((ss->state & SLICE_STATE_NAPI)); + ss->state |= SLICE_STATE_NAPI_YIELD; + rc = false; + ss->lock_napi_yield++; + } else + ss->state = SLICE_STATE_NAPI; + spin_unlock(&ss->lock); + return rc; +} + +static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss) +{ + spin_lock(&ss->lock); + WARN_ON((ss->state & (SLICE_STATE_POLL | SLICE_STATE_NAPI_YIELD))); + ss->state = SLICE_STATE_IDLE; + spin_unlock(&ss->lock); +} + +static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss) +{ + int rc = true; + spin_lock_bh(&ss->lock); + if ((ss->state & SLICE_LOCKED)) { + ss->state |= SLICE_STATE_POLL_YIELD; + rc = false; + ss->lock_poll_yield++; + } else + ss->state |= SLICE_STATE_POLL; + spin_unlock_bh(&ss->lock); + return rc; +} + +static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss) +{ + spin_lock_bh(&ss->lock); + WARN_ON((ss->state & SLICE_STATE_NAPI)); + ss->state = SLICE_STATE_IDLE; + spin_unlock_bh(&ss->lock); +} + +static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss) +{ + WARN_ON(!(ss->state & SLICE_LOCKED)); + return (ss->state & SLICE_USER_PEND); +} +#else /* CONFIG_NET_RX_BUSY_POLL */ +static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss) +{ +} + +static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss) +{ + return false; +} + +static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss) +{ +} + +static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss) +{ + return false; +} + +static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss) +{ +} + +static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss) +{ + return false; +} +#endif + static int myri10ge_reset(struct myri10ge_priv *mgp) { struct myri10ge_cmd cmd; @@ -1300,6 +1402,8 @@ 
myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb) } } +#define MYRI10GE_HLEN 64 /* Bytes to copy from page to skb linear memory */ + static inline int myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum) { @@ -1311,6 +1415,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum) struct pci_dev *pdev = mgp->pdev; struct net_device *dev = mgp->dev; u8 *va; + bool polling; if (len <= mgp->small_bytes) { rx = &ss->rx_small; @@ -1325,7 +1430,15 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum) va = page_address(rx->info[idx].page) + rx->info[idx].page_offset; prefetch(va); - skb = napi_get_frags(&ss->napi); + /* When busy polling in user context, allocate skb and copy headers to + * skb's linear memory ourselves. When not busy polling, use the napi + * gro api. + */ + polling = myri10ge_ss_busy_polling(ss); + if (polling) + skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16); + else + skb = napi_get_frags(&ss->napi); if (unlikely(skb == NULL)) { ss->stats.rx_dropped++; for (i = 0, remainder = len; remainder > 0; i++) { @@ -1364,8 +1477,29 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum) } myri10ge_vlan_rx(mgp->dev, va, skb); skb_record_rx_queue(skb, ss - &mgp->ss[0]); + skb_mark_napi_id(skb, &ss->napi); + + if (polling) { + int hlen; + + /* myri10ge_vlan_rx might have moved the header, so compute + * length and address again. + */ + hlen = MYRI10GE_HLEN > skb->len ? skb->len : MYRI10GE_HLEN; + va = page_address(skb_frag_page(&rx_frags[0])) + + rx_frags[0].page_offset; + /* Copy header into the skb linear memory */ + skb_copy_to_linear_data(skb, va, hlen); + rx_frags[0].page_offset += hlen; + rx_frags[0].size -= hlen; + skb->data_len -= hlen; + skb->tail += hlen; + skb->protocol = eth_type_trans(skb, dev); + netif_receive_skb(skb); + } + else + napi_gro_frags(&ss->napi); - napi_gro_frags(&ss->napi); return 1; } @@ -1524,10 +1658,14 @@ static int myri10ge_poll(struct napi_struct *napi, int budget) if (ss->mgp->dca_enabled) myri10ge_update_dca(ss); #endif + /* Try later if the busy_poll handler is running. 
*/ + if (!myri10ge_ss_lock_napi(ss)) + return budget; /* process as many rx events as NAPI will allow */ work_done = myri10ge_clean_rx_done(ss, budget); + myri10ge_ss_unlock_napi(ss); if (work_done < budget) { napi_complete(napi); put_be32(htonl(3), ss->irq_claim); @@ -1535,6 +1673,34 @@ static int myri10ge_poll(struct napi_struct *napi, int budget) return work_done; } +#ifdef CONFIG_NET_RX_BUSY_POLL +static int myri10ge_busy_poll(struct napi_struct *napi) +{ + struct myri10ge_slice_state *ss = + container_of(napi, struct myri10ge_slice_state, napi); + struct myri10ge_priv *mgp = ss->mgp; + int work_done; + + /* Poll only when the link is up */ + if (mgp->link_state != MXGEFW_LINK_UP) + return LL_FLUSH_FAILED; + + if (!myri10ge_ss_lock_poll(ss)) + return LL_FLUSH_BUSY; + + /* Process a small number of packets */ + work_done = myri10ge_clean_rx_done(ss, 4); + if (work_done) + ss->busy_poll_cnt += work_done; + else + ss->busy_poll_miss++; + + myri10ge_ss_unlock_poll(ss); + + return work_done; +} +#endif /* CONFIG_NET_RX_BUSY_POLL */ + static irqreturn_t myri10ge_intr(int irq, void *arg) { struct myri10ge_slice_state *ss = arg; @@ -1742,6 +1908,10 @@ static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = { "tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done", "rx_small_cnt", "rx_big_cnt", "wake_queue", "stop_queue", "tx_linearized", +#ifdef CONFIG_NET_RX_BUSY_POLL + "rx_lock_napi_yield", "rx_lock_poll_yield", "rx_busy_poll_miss", + "rx_busy_poll_cnt", +#endif }; #define MYRI10GE_NET_STATS_LEN 21 @@ -1842,6 +2012,12 @@ myri10ge_get_ethtool_stats(struct net_device *netdev, data[i++] = (unsigned int)ss->tx.wake_queue; data[i++] = (unsigned int)ss->tx.stop_queue; data[i++] = (unsigned int)ss->tx.linearized; +#ifdef CONFIG_NET_RX_BUSY_POLL + data[i++] = ss->lock_napi_yield; + data[i++] = ss->lock_poll_yield; + data[i++] = ss->busy_poll_miss; + data[i++] = ss->busy_poll_cnt; +#endif } } @@ -2405,6 +2581,9 @@ static int myri10ge_open(struct net_device *dev) goto abort_with_rings; } + /* Initialize the slice spinlock and state used for polling */ + myri10ge_ss_init_lock(ss); + /* must happen prior to any irq */ napi_enable(&(ss)->napi); } @@ -2481,9 +2660,19 @@ static int myri10ge_close(struct net_device *dev) del_timer_sync(&mgp->watchdog_timer); mgp->running = MYRI10GE_ETH_STOPPING; + local_bh_disable(); /* myri10ge_ss_lock_napi needs bh disabled */ for (i = 0; i < mgp->num_slices; i++) { napi_disable(&mgp->ss[i].napi); + /* Lock the slice to prevent the busy_poll handler from + * accessing it. Later when we bring the NIC up, myri10ge_open + * resets the slice including this lock. + */ + while (!myri10ge_ss_lock_napi(&mgp->ss[i])) { + pr_info("Slice %d locked\n", i); + mdelay(1); + } } + local_bh_enable(); netif_carrier_off(dev); netif_tx_stop_all_queues(dev); @@ -3569,8 +3758,11 @@ static void myri10ge_free_slices(struct myri10ge_priv *mgp) ss->fw_stats, ss->fw_stats_bus); ss->fw_stats = NULL; } + napi_hash_del(&ss->napi); netif_napi_del(&ss->napi); } + /* Wait till napi structs are no longer used, and then free ss. 
*/ + synchronize_rcu(); kfree(mgp->ss); mgp->ss = NULL; } @@ -3606,6 +3798,7 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp) ss->dev = mgp->dev; netif_napi_add(ss->dev, &ss->napi, myri10ge_poll, myri10ge_napi_weight); + napi_hash_add(&ss->napi); } return 0; abort: @@ -3748,6 +3941,9 @@ static const struct net_device_ops myri10ge_netdev_ops = { .ndo_change_mtu = myri10ge_change_mtu, .ndo_set_rx_mode = myri10ge_set_multicast_list, .ndo_set_mac_address = myri10ge_set_mac_address, +#ifdef CONFIG_NET_RX_BUSY_POLL + .ndo_busy_poll = myri10ge_busy_poll, +#endif }; static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) -- cgit v0.10.2 From ea857f28ab5ca82508e70ffbc8cb296705133f01 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 19 Aug 2013 10:05:10 +0300 Subject: ipip: dereferencing an ERR_PTR in ip_tunnel_init_net() We need to move the dereference after the IS_ERR() check. Signed-off-by: Dan Carpenter Acked-by: Nicolas Dichtel Signed-off-by: David S. Miller diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index a4d9126..24549b4 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -854,14 +854,14 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, rtnl_lock(); itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms); - /* FB netdevice is special: we have one, and only one per netns. - * Allowing to move it to another netns is clearly unsafe. - */ - itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL; rtnl_unlock(); if (IS_ERR(itn->fb_tunnel_dev)) return PTR_ERR(itn->fb_tunnel_dev); + /* FB netdevice is special: we have one, and only one per netns. + * Allowing to move it to another netns is clearly unsafe. + */ + itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL; return 0; } -- cgit v0.10.2 From 06a64f91da72cb5827e2bedef2ead60a123fd66e Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 18 Aug 2013 02:15:42 +0400 Subject: SH7619: fix Ether support The 'sh_eth' driver's probe will crash as the platform code is hopelessly behind the platform data -- it passes PHY ID instead of 'struct sh_eth_plat_data *'. Strangely, both commit d88a3ea6fa4c (SH7619 add ethernet controler support) that added the platform code and commit 71557a37adb5 ([netdrvr] sh_eth: Add SH7619 support) were done at about the same time, yet the latter one added 'struct sh_eth_plat_data' and the platform code didn't ever get updated... Add the proper platform data and fix off-by-one memory resource end error, while at it... Signed-off-by: Sergei Shtylyov Signed-off-by: David S.
Miller diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c index bb11e19..bbadd48 100644 --- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c +++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -110,10 +111,17 @@ static struct platform_device scif2_device = { }, }; +static struct sh_eth_plat_data eth_platform_data = { + .phy = 1, + .edmac_endian = EDMAC_LITTLE_ENDIAN, + .register_type = SH_ETH_REG_FAST_SH3_SH2, + .phy_interace = PHY_INTERFACE_MODE_MII, +}; + static struct resource eth_resources[] = { [0] = { .start = 0xfb000000, - .end = 0xfb0001c8, + .end = 0xfb0001c7, .flags = IORESOURCE_MEM, }, [1] = { @@ -127,7 +135,7 @@ static struct platform_device eth_device = { .name = "sh7619-ether", .id = -1, .dev = { - .platform_data = (void *)1, + .platform_data = &eth_platform_data, }, .num_resources = ARRAY_SIZE(eth_resources), .resource = eth_resources, -- cgit v0.10.2 From bd61224b1cbec096694e89c4187119c8576fe186 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 18 Aug 2013 02:19:09 +0400 Subject: SolutionEngine7724: fix Ether support The Ether platform data is behind the declaration of 'struct sh_eth_plat_data' as it's lacking the initializers for the 'register_type' and 'phy_interface' fields -- it means they'll be implicitly and wrongly set to SH_ETH_REG_GIGABIT and PHY_INTERFACE_MODE_NA. Initialize the fields explicitly and fix off-by-one error in the Ether memory resource end, while at it... Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c index b70180e..aed4ca9 100644 --- a/arch/sh/boards/mach-se/7724/setup.c +++ b/arch/sh/boards/mach-se/7724/setup.c @@ -365,7 +365,7 @@ static struct platform_device keysc_device = { static struct resource sh_eth_resources[] = { [0] = { .start = SH_ETH_ADDR, - .end = SH_ETH_ADDR + 0x1FC, + .end = SH_ETH_ADDR + 0x1FC - 1, .flags = IORESOURCE_MEM, }, [1] = { @@ -377,6 +377,8 @@ static struct resource sh_eth_resources[] = { static struct sh_eth_plat_data sh_eth_plat = { .phy = 0x1f, /* SMSC LAN8187 */ .edmac_endian = EDMAC_LITTLE_ENDIAN, + .register_type = SH_ETH_REG_FAST_SH4, + .phy_interace = PHY_INTERFACE_MODE_MII, }; static struct platform_device sh_eth_device = { -- cgit v0.10.2 From a3153d8c41132d6fda080a4b3e53b7ee25d3c125 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 18 Aug 2013 03:11:28 +0400 Subject: sh_eth: get register layout from 'struct sh_eth_cpu_data' The register layout is a SoC characteristic, so it's wrong that it's stored in the otherwise board specific platform data. Add 'register_type' field to 'struct sh_eth_cpu_data', initialize it properly for each SoC, and read it from this structure instead of the platform data from now on... Signed-off-by: Sergei Shtylyov Signed-off-by: David S.
Miller diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 9e2afe8..c3570764 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -378,6 +378,8 @@ static struct sh_eth_cpu_data r8a777x_data = { .set_duplex = sh_eth_set_duplex, .set_rate = sh_eth_set_rate_r8a777x, + .register_type = SH_ETH_REG_FAST_RCAR, + .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, .eesipr_value = 0x01ff009f, @@ -398,6 +400,8 @@ static struct sh_eth_cpu_data r8a7790_data = { .set_duplex = sh_eth_set_duplex, .set_rate = sh_eth_set_rate_r8a777x, + .register_type = SH_ETH_REG_FAST_RCAR, + .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, .eesipr_value = 0x01ff009f, @@ -435,6 +439,8 @@ static struct sh_eth_cpu_data sh7724_data = { .set_duplex = sh_eth_set_duplex, .set_rate = sh_eth_set_rate_sh7724, + .register_type = SH_ETH_REG_FAST_SH4, + .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, .eesipr_value = 0x01ff009f, @@ -473,6 +479,8 @@ static struct sh_eth_cpu_data sh7757_data = { .set_duplex = sh_eth_set_duplex, .set_rate = sh_eth_set_rate_sh7757, + .register_type = SH_ETH_REG_FAST_SH4, + .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, .rmcr_value = 0x00000001, @@ -541,6 +549,8 @@ static struct sh_eth_cpu_data sh7757_data_giga = { .set_duplex = sh_eth_set_duplex, .set_rate = sh_eth_set_rate_giga, + .register_type = SH_ETH_REG_GIGABIT, + .ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, @@ -599,6 +609,8 @@ static struct sh_eth_cpu_data sh7734_data = { .set_duplex = sh_eth_set_duplex, .set_rate = sh_eth_set_rate_gether, + .register_type = SH_ETH_REG_GIGABIT, + .ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, @@ -626,6 +638,8 @@ static struct sh_eth_cpu_data sh7763_data = { .set_duplex = sh_eth_set_duplex, .set_rate = sh_eth_set_rate_gether, + .register_type = SH_ETH_REG_GIGABIT, + .ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, @@ -663,6 +677,8 @@ static struct sh_eth_cpu_data r8a7740_data = { .set_duplex = sh_eth_set_duplex, .set_rate = sh_eth_set_rate_gether, + .register_type = SH_ETH_REG_GIGABIT, + .ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, @@ -685,6 +701,8 @@ static struct sh_eth_cpu_data r8a7740_data = { }; static struct sh_eth_cpu_data sh7619_data = { + .register_type = SH_ETH_REG_FAST_SH3_SH2, + .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, .apr = 1, @@ -694,6 +712,8 @@ static struct sh_eth_cpu_data sh7619_data = { }; static struct sh_eth_cpu_data sh771x_data = { + .register_type = SH_ETH_REG_FAST_SH3_SH2, + .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, .tsu = 1, }; @@ -2643,10 +2663,10 @@ static int sh_eth_drv_probe(struct platform_device *pdev) mdp->edmac_endian = pd->edmac_endian; mdp->no_ether_link = pd->no_ether_link; mdp->ether_link_active_low = pd->ether_link_active_low; - mdp->reg_offset = sh_eth_get_register_offset(pd->register_type); /* set cpu data */ mdp->cd = (struct 
sh_eth_cpu_data *)id->driver_data; + mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type); sh_eth_set_default_cpu_data(mdp->cd); /* set function */ diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index da93f5c..1e78528 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -454,6 +454,7 @@ struct sh_eth_cpu_data { void (*set_rate)(struct net_device *ndev); /* mandatory initialize value */ + int register_type; unsigned long eesipr_value; /* optional initialize value */ -- cgit v0.10.2 From 8d3214c4e8c8be6efd8ec7a172239ebbd4deb04b Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 18 Aug 2013 03:13:26 +0400 Subject: sh_eth: remove 'register_type' field from 'struct sh_eth_plat_data' Now that the 'register_type' field of the 'sh_eth' driver's platform data is not used by the driver anymore, it's time to remove it and its initializers from the SH platform code. Also move *enum* declaring values for this field from to the local driver's header file as they're only needed by the driver itself now... Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c index c5be60d..3a6ffa2 100644 --- a/arch/arm/mach-shmobile/board-armadillo800eva.c +++ b/arch/arm/mach-shmobile/board-armadillo800eva.c @@ -358,7 +358,6 @@ static struct platform_device usbhsf_device = { static struct sh_eth_plat_data sh_eth_platdata = { .phy = 0x00, /* LAN8710A */ .edmac_endian = EDMAC_LITTLE_ENDIAN, - .register_type = SH_ETH_REG_GIGABIT, .phy_interface = PHY_INTERFACE_MODE_MII, }; diff --git a/arch/arm/mach-shmobile/board-bockw.c b/arch/arm/mach-shmobile/board-bockw.c index 3354a85..fa8885b 100644 --- a/arch/arm/mach-shmobile/board-bockw.c +++ b/arch/arm/mach-shmobile/board-bockw.c @@ -89,7 +89,6 @@ static struct sh_mobile_sdhi_info sdhi0_info = { static struct sh_eth_plat_data ether_platform_data __initdata = { .phy = 0x01, .edmac_endian = EDMAC_LITTLE_ENDIAN, - .register_type = SH_ETH_REG_FAST_RCAR, .phy_interface = PHY_INTERFACE_MODE_RMII, /* * Although the LINK signal is available on the board, it's connected to diff --git a/arch/sh/boards/board-espt.c b/arch/sh/boards/board-espt.c index 4d94dff..7291e2f 100644 --- a/arch/sh/boards/board-espt.c +++ b/arch/sh/boards/board-espt.c @@ -80,7 +80,6 @@ static struct resource sh_eth_resources[] = { static struct sh_eth_plat_data sh7763_eth_pdata = { .phy = 0, .edmac_endian = EDMAC_LITTLE_ENDIAN, - .register_type = SH_ETH_REG_GIGABIT, .phy_interface = PHY_INTERFACE_MODE_MII, }; diff --git a/arch/sh/boards/board-sh7757lcr.c b/arch/sh/boards/board-sh7757lcr.c index 4f114d1..25c5a93 100644 --- a/arch/sh/boards/board-sh7757lcr.c +++ b/arch/sh/boards/board-sh7757lcr.c @@ -77,7 +77,6 @@ static struct resource sh_eth0_resources[] = { static struct sh_eth_plat_data sh7757_eth0_pdata = { .phy = 1, .edmac_endian = EDMAC_LITTLE_ENDIAN, - .register_type = SH_ETH_REG_FAST_SH4, .set_mdio_gate = sh7757_eth_set_mdio_gate, }; @@ -106,7 +105,6 @@ static struct resource sh_eth1_resources[] = { static struct sh_eth_plat_data sh7757_eth1_pdata = { .phy = 1, .edmac_endian = EDMAC_LITTLE_ENDIAN, - .register_type = SH_ETH_REG_FAST_SH4, .set_mdio_gate = sh7757_eth_set_mdio_gate, }; @@ -151,7 +149,6 @@ static struct resource sh_eth_giga0_resources[] = { static struct sh_eth_plat_data sh7757_eth_giga0_pdata = { .phy = 18, .edmac_endian = EDMAC_LITTLE_ENDIAN, - .register_type = 
SH_ETH_REG_GIGABIT, .set_mdio_gate = sh7757_eth_giga_set_mdio_gate, .phy_interface = PHY_INTERFACE_MODE_RGMII_ID, }; @@ -186,7 +183,6 @@ static struct resource sh_eth_giga1_resources[] = { static struct sh_eth_plat_data sh7757_eth_giga1_pdata = { .phy = 19, .edmac_endian = EDMAC_LITTLE_ENDIAN, - .register_type = SH_ETH_REG_GIGABIT, .set_mdio_gate = sh7757_eth_giga_set_mdio_gate, .phy_interface = PHY_INTERFACE_MODE_RGMII_ID, }; diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c index 61fade0..a4f630f 100644 --- a/arch/sh/boards/mach-ecovec24/setup.c +++ b/arch/sh/boards/mach-ecovec24/setup.c @@ -159,7 +159,6 @@ static struct resource sh_eth_resources[] = { static struct sh_eth_plat_data sh_eth_plat = { .phy = 0x1f, /* SMSC LAN8700 */ .edmac_endian = EDMAC_LITTLE_ENDIAN, - .register_type = SH_ETH_REG_FAST_SH4, .phy_interface = PHY_INTERFACE_MODE_MII, .ether_link_active_low = 1 }; diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c index aed4ca9..e96e053 100644 --- a/arch/sh/boards/mach-se/7724/setup.c +++ b/arch/sh/boards/mach-se/7724/setup.c @@ -377,7 +377,6 @@ static struct resource sh_eth_resources[] = { static struct sh_eth_plat_data sh_eth_plat = { .phy = 0x1f, /* SMSC LAN8187 */ .edmac_endian = EDMAC_LITTLE_ENDIAN, - .register_type = SH_ETH_REG_FAST_SH4, .phy_interace = PHY_INTERFACE_MODE_MII, }; diff --git a/arch/sh/boards/mach-sh7763rdp/setup.c b/arch/sh/boards/mach-sh7763rdp/setup.c index 50ba481..2c8fb04 100644 --- a/arch/sh/boards/mach-sh7763rdp/setup.c +++ b/arch/sh/boards/mach-sh7763rdp/setup.c @@ -88,7 +88,6 @@ static struct resource sh_eth_resources[] = { static struct sh_eth_plat_data sh7763_eth_pdata = { .phy = 1, .edmac_endian = EDMAC_LITTLE_ENDIAN, - .register_type = SH_ETH_REG_GIGABIT, .phy_interface = PHY_INTERFACE_MODE_MII, }; diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c index bbadd48..1947281 100644 --- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c +++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c @@ -114,7 +114,6 @@ static struct platform_device scif2_device = { static struct sh_eth_plat_data eth_platform_data = { .phy = 1, .edmac_endian = EDMAC_LITTLE_ENDIAN, - .register_type = SH_ETH_REG_FAST_SH3_SH2, .phy_interace = PHY_INTERFACE_MODE_MII, }; diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 1e78528..a0db02c 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -157,6 +157,13 @@ enum { SH_ETH_MAX_REGISTER_OFFSET, }; +enum { + SH_ETH_REG_GIGABIT, + SH_ETH_REG_FAST_RCAR, + SH_ETH_REG_FAST_SH4, + SH_ETH_REG_FAST_SH3_SH2 +}; + /* Driver's parameters */ #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) #define SH4_SKB_RX_ALIGN 32 diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h index 6205eeb..90b5e30 100644 --- a/include/linux/sh_eth.h +++ b/include/linux/sh_eth.h @@ -5,17 +5,10 @@ #include enum {EDMAC_LITTLE_ENDIAN, EDMAC_BIG_ENDIAN}; -enum { - SH_ETH_REG_GIGABIT, - SH_ETH_REG_FAST_RCAR, - SH_ETH_REG_FAST_SH4, - SH_ETH_REG_FAST_SH3_SH2 -}; struct sh_eth_plat_data { int phy; int edmac_endian; - int register_type; phy_interface_t phy_interface; void (*set_mdio_gate)(void *addr); -- cgit v0.10.2 From a0e186003be7892fd75613a23aaafaf09f3611e6 Mon Sep 17 00:00:00 2001 From: Libo Chen Date: Mon, 19 Aug 2013 19:58:40 +0800 Subject: net: fsl_pq_mdio: use platform_{get,set}_drvdata() Use the wrapper functions for getting and setting the driver data using 
platform_device instead of using dev_{get,set}_drvdata() with &pdev->dev, so we can directly pass a struct platform_device. Signed-off-by: Libo Chen Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index c93a056..b7dcd41 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -409,7 +409,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) priv->regs = priv->map + data->mii_offset; new_bus->parent = &pdev->dev; - dev_set_drvdata(&pdev->dev, new_bus); + platform_set_drvdata(pdev, new_bus); if (data->get_tbipa) { for_each_child_of_node(np, tbi) { -- cgit v0.10.2 From 65d7e7ad056b78fba0d9c2cbcb371df3151ae236 Mon Sep 17 00:00:00 2001 From: Libo Chen Date: Mon, 19 Aug 2013 19:59:09 +0800 Subject: net: ucc_geth: use platform_{get,set}_drvdata() Use the wrapper functions for getting and setting the driver data using platform_device instead of using dev_{get,set}_drvdata() with &ofdev->dev, so we can directly pass a struct platform_device. Signed-off-by: Libo Chen Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 3c43dac..533885c 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3911,8 +3911,7 @@ static int ucc_geth_probe(struct platform_device* ofdev) static int ucc_geth_remove(struct platform_device* ofdev) { - struct device *device = &ofdev->dev; - struct net_device *dev = dev_get_drvdata(device); + struct net_device *dev = platform_get_drvdata(ofdev); struct ucc_geth_private *ugeth = netdev_priv(dev); unregister_netdev(dev); -- cgit v0.10.2 From 4014e36018c6bec7c826d3734e1ed68cb360c01d Mon Sep 17 00:00:00 2001 From: Libo Chen Date: Mon, 19 Aug 2013 19:59:16 +0800 Subject: net: fec_mpc52xx_phy: use platform_{get,set}_drvdata() Use the wrapper functions for getting and setting the driver data using platform_device instead of using dev_{get,set}_drvdata() with &of->dev, so we can directly pass a struct platform_device. Signed-off-by: Libo Chen Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c index 360a578..eb44797 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c @@ -123,8 +123,7 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of) static int mpc52xx_fec_mdio_remove(struct platform_device *of) { - struct device *dev = &of->dev; - struct mii_bus *bus = dev_get_drvdata(dev); + struct mii_bus *bus = platform_get_drvdata(of); struct mpc52xx_fec_mdio_priv *priv = bus->priv; mdiobus_unregister(bus); -- cgit v0.10.2 From 0549d95216710e52cd8d9cc5e399d11365435bed Mon Sep 17 00:00:00 2001 From: Libo Chen Date: Mon, 19 Aug 2013 19:59:25 +0800 Subject: net: sunbmac: use platform_{get,set}_drvdata() Use the wrapper functions for getting and setting the driver data using platform_device instead of using dev_{get,set}_drvdata() with &of->dev, so we can directly pass a struct platform_device. Signed-off-by: Libo Chen Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c index 0d43fa9..0a32ca5 100644 --- a/drivers/net/ethernet/sun/sunbmac.c +++ b/drivers/net/ethernet/sun/sunbmac.c @@ -1239,7 +1239,7 @@ static int bigmac_sbus_probe(struct platform_device *op) static int bigmac_sbus_remove(struct platform_device *op) { - struct bigmac *bp = dev_get_drvdata(&op->dev); + struct bigmac *bp = platform_get_drvdata(op); struct device *parent = op->dev.parent; struct net_device *net_dev = bp->dev; struct platform_device *qec_op; -- cgit v0.10.2 From 511c4e00f8fa5f7d20baabda8b9504af71b88a49 Mon Sep 17 00:00:00 2001 From: Libo Chen Date: Mon, 19 Aug 2013 19:59:36 +0800 Subject: net: sunhme: use platform_{get,set}_drvdata() Use the wrapper functions for getting and setting the driver data using platform_device instead of using dev_{get,set}_drvdata() with &of->dev, so we can directly pass a struct platform_device. Signed-off-by: Libo Chen Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 171f5b0..b90d311 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -3231,7 +3231,7 @@ static int hme_sbus_probe(struct platform_device *op) static int hme_sbus_remove(struct platform_device *op) { - struct happy_meal *hp = dev_get_drvdata(&op->dev); + struct happy_meal *hp = platform_get_drvdata(op); struct net_device *net_dev = hp->dev; unregister_netdev(net_dev); -- cgit v0.10.2 From 34e0184d98bd3a0e19b1d55bfdbd2186bfe5eca4 Mon Sep 17 00:00:00 2001 From: Libo Chen Date: Mon, 19 Aug 2013 20:00:25 +0800 Subject: net: xilinx_emaclite: use platform_{get,set}_drvdata() Use the wrapper functions for getting and setting the driver data using platform_device instead of using dev_{get,set}_drvdata() with &of_dev->dev, so we can directly pass a struct platform_device. Signed-off-by: Libo Chen Acked-by: Michal Simek Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index fd4dbda..7c1ccbc 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1230,8 +1230,7 @@ error: */ static int xemaclite_of_remove(struct platform_device *of_dev) { - struct device *dev = &of_dev->dev; - struct net_device *ndev = dev_get_drvdata(dev); + struct net_device *ndev = platform_get_drvdata(of_dev); struct net_local *lp = netdev_priv(ndev); -- cgit v0.10.2 From 84ce22df92f5cf9ecb76d53f4bf5f6d504684540 Mon Sep 17 00:00:00 2001 From: Libo Chen Date: Mon, 19 Aug 2013 20:00:37 +0800 Subject: net: davinci_mdio: use platform_{get,set}_drvdata() Use the wrapper functions for getting and setting the driver data using platform_device instead of using dev_{get,set}_drvdata() with &pdev->dev, so we can directly pass a struct platform_device. Signed-off-by: Libo Chen Acked-by: Mugunthan V N Acked-by: Lad, Prabhakar Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 16ddfc3..01b0cc5 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -421,8 +421,7 @@ bail_out: static int davinci_mdio_remove(struct platform_device *pdev) { - struct device *dev = &pdev->dev; - struct davinci_mdio_data *data = dev_get_drvdata(dev); + struct davinci_mdio_data *data = platform_get_drvdata(pdev); if (data->bus) { mdiobus_unregister(data->bus); -- cgit v0.10.2 From f607e059505cd3c3e96096d09f5882eb6421320a Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Mon, 19 Aug 2013 13:20:39 +0200 Subject: bcm63xx_enet: replace devm_request_and_ioremap by devm_ioremap_resource Use devm_ioremap_resource instead of devm_request_and_ioremap. This was done using the semantic patch scripts/coccinelle/api/devm_ioremap_resource.cocci The relevant call to platform_get_resource was manually moved down to the call to devm_ioremap_resource. Signed-off-by: Julia Lawall Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index b1bcd4b..89ff09e 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1747,11 +1747,10 @@ static int bcm_enet_probe(struct platform_device *pdev) if (!bcm_enet_shared_base[0]) return -ENODEV; - res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1); res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2); - if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx) + if (!res_irq || !res_irq_rx || !res_irq_tx) return -ENODEV; ret = 0; @@ -1767,9 +1766,10 @@ static int bcm_enet_probe(struct platform_device *pdev) if (ret) goto out; - priv->base = devm_request_and_ioremap(&pdev->dev, res_mem); - if (priv->base == NULL) { - ret = -ENOMEM; + res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(&pdev->dev, res_mem); + if (IS_ERR(priv->base)) { + ret = PTR_ERR(priv->base); goto out; } -- cgit v0.10.2 From 7eaa48a45ca74d8dbc65e7dedaa8c0736154fd5f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 20 Aug 2013 23:44:39 -0700 Subject: Revert "ipv6: fix checkpatch errors in net/ipv6/addrconf.c" This reverts commit df8372ca747f6da9e8590775721d9363c1dfc87e. These changes are buggy and make unintended semantic changes to ip6_tnl_add_linklocal(). Signed-off-by: David S. 
Miller diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 21638d4..3ca23e8 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1110,8 +1110,8 @@ retry: spin_unlock_bh(&ifp->lock); regen_advance = idev->cnf.regen_max_retry * - idev->cnf.dad_transmits * - idev->nd_parms->retrans_time / HZ; + idev->cnf.dad_transmits * + idev->nd_parms->retrans_time / HZ; write_unlock(&idev->lock); /* A temporary address is created only if this calculated Preferred @@ -2501,8 +2501,7 @@ static int inet6_addr_del(struct net *net, int ifindex, const struct in6_addr *p if (!dev) return -ENODEV; - idev = __in6_dev_get(dev); - if (!idev) + if ((idev = __in6_dev_get(dev)) == NULL) return -ENXIO; read_lock_bh(&idev->lock); @@ -2641,8 +2640,7 @@ static void init_loopback(struct net_device *dev) ASSERT_RTNL(); - idev = ipv6_find_idev(dev); - if (!idev) { + if ((idev = ipv6_find_idev(dev)) == NULL) { pr_debug("%s: add_dev failed\n", __func__); return; } @@ -2740,8 +2738,7 @@ static void addrconf_sit_config(struct net_device *dev) * our v4 addrs in the tunnel */ - idev = ipv6_find_idev(dev); - if (!idev) { + if ((idev = ipv6_find_idev(dev)) == NULL) { pr_debug("%s: add_dev failed\n", __func__); return; } @@ -2773,8 +2770,7 @@ static void addrconf_gre_config(struct net_device *dev) ASSERT_RTNL(); - idev = ipv6_find_idev(dev); - if (!idev) { + if ((idev = ipv6_find_idev(dev)) == NULL) { pr_debug("%s: add_dev failed\n", __func__); return; } @@ -2805,11 +2801,11 @@ static void ip6_tnl_add_linklocal(struct inet6_dev *idev) struct net *net = dev_net(idev->dev); /* first try to inherit the link-local address from the link device */ - if (idev->dev->iflink) - link_dev = __dev_get_by_index(net, idev->dev->iflink); - if (link_dev && !ipv6_inherit_linklocal(idev, link_dev)) + if (idev->dev->iflink && + (link_dev = __dev_get_by_index(net, idev->dev->iflink))) { + if (!ipv6_inherit_linklocal(idev, link_dev)) return; - + } /* then try to inherit it from any device */ for_each_netdev(net, link_dev) { if (!ipv6_inherit_linklocal(idev, link_dev)) -- cgit v0.10.2 From e837735ec406a347756e0ef3aea124a6d51ea1ba Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Tue, 20 Aug 2013 12:16:06 +0200 Subject: ip6_tunnel: ensure to always have a link local address When an Xin6 tunnel is set up, we check other netdevices to inherit the link-local address. If none is available, the interface will not have any link-local address. RFC4862 expects that each interface has a link local address. Now that this kind of tunnel supports x-netns, it's easy to fall into this case (by creating the tunnel in a netns where ethernet interfaces stand and then moving it to another netns where no ethernet interface is available). RFC4291, Appendix A suggests two methods: the first is the one currently implemented, the second is to generate a unique identifier, so that we can always generate the link-local address. Let's use eth_random_addr() to generate this interface identifier. I completely remove the previous method, hence for the whole life of the interface, the link-local address remains the same (previously, it depended on which ethernet interfaces were up when the tunnel interface was set up). Signed-off-by: Nicolas Dichtel Signed-off-by: David S.
Miller diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 3ca23e8..8549a5d 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1814,6 +1814,16 @@ static int addrconf_ifid_gre(u8 *eui, struct net_device *dev) return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr); } +static int addrconf_ifid_ip6tnl(u8 *eui, struct net_device *dev) +{ + memcpy(eui, dev->perm_addr, 3); + memcpy(eui + 5, dev->perm_addr + 3, 3); + eui[3] = 0xFF; + eui[4] = 0xFE; + eui[0] ^= 2; + return 0; +} + static int ipv6_generate_eui64(u8 *eui, struct net_device *dev) { switch (dev->type) { @@ -1832,6 +1842,8 @@ static int ipv6_generate_eui64(u8 *eui, struct net_device *dev) return addrconf_ifid_eui64(eui, dev); case ARPHRD_IEEE1394: return addrconf_ifid_ieee1394(eui, dev); + case ARPHRD_TUNNEL6: + return addrconf_ifid_ip6tnl(eui, dev); } return -1; } @@ -2709,7 +2721,8 @@ static void addrconf_dev_config(struct net_device *dev) (dev->type != ARPHRD_ARCNET) && (dev->type != ARPHRD_INFINIBAND) && (dev->type != ARPHRD_IEEE802154) && - (dev->type != ARPHRD_IEEE1394)) { + (dev->type != ARPHRD_IEEE1394) && + (dev->type != ARPHRD_TUNNEL6)) { /* Alas, we support only Ethernet autoconfiguration. */ return; } @@ -2795,44 +2808,6 @@ ipv6_inherit_linklocal(struct inet6_dev *idev, struct net_device *link_dev) return -1; } -static void ip6_tnl_add_linklocal(struct inet6_dev *idev) -{ - struct net_device *link_dev; - struct net *net = dev_net(idev->dev); - - /* first try to inherit the link-local address from the link device */ - if (idev->dev->iflink && - (link_dev = __dev_get_by_index(net, idev->dev->iflink))) { - if (!ipv6_inherit_linklocal(idev, link_dev)) - return; - } - /* then try to inherit it from any device */ - for_each_netdev(net, link_dev) { - if (!ipv6_inherit_linklocal(idev, link_dev)) - return; - } - pr_debug("init ip6-ip6: add_linklocal failed\n"); -} - -/* - * Autoconfigure tunnel with a link-local address so routing protocols, - * DHCPv6, MLD etc. 
can be run over the virtual link - */ - -static void addrconf_ip6_tnl_config(struct net_device *dev) -{ - struct inet6_dev *idev; - - ASSERT_RTNL(); - - idev = addrconf_add_dev(dev); - if (IS_ERR(idev)) { - pr_debug("init ip6-ip6: add_dev failed\n"); - return; - } - ip6_tnl_add_linklocal(idev); -} - static int addrconf_notify(struct notifier_block *this, unsigned long event, void *ptr) { @@ -2900,9 +2875,6 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, addrconf_gre_config(dev); break; #endif - case ARPHRD_TUNNEL6: - addrconf_ip6_tnl_config(dev); - break; case ARPHRD_LOOPBACK: init_loopback(dev); break; diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index cc3bb20..d6e00a3 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include @@ -1471,6 +1472,9 @@ static void ip6_tnl_dev_setup(struct net_device *dev) dev->flags |= IFF_NOARP; dev->addr_len = sizeof(struct in6_addr); dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + /* This perm addr will be used as interface identifier by IPv6 */ + dev->addr_assign_type = NET_ADDR_RANDOM; + eth_random_addr(dev->perm_addr); } -- cgit v0.10.2 From dbe34724c08ea25d39d31735120077013fbbb6fb Mon Sep 17 00:00:00 2001 From: Mugunthan V N Date: Mon, 19 Aug 2013 17:47:40 +0530 Subject: drivers: net: cpsw: remove platform data header file of cpsw CPSW driver no longer supports platform register as all the SoCs which has CPSW are supporting DT only booting, so moving cpsw.h header file from platform include to drivers/net/ethernet/ti Signed-off-by: Mugunthan V N Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 0fcf212..b4a85f5 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -34,9 +34,9 @@ #include #include -#include #include +#include "cpsw.h" #include "cpsw_ale.h" #include "cpts.h" #include "davinci_cpdma.h" diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h new file mode 100644 index 0000000..eb3e101 --- /dev/null +++ b/drivers/net/ethernet/ti/cpsw.h @@ -0,0 +1,42 @@ +/* Texas Instruments Ethernet Switch Driver + * + * Copyright (C) 2013 Texas Instruments + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef __CPSW_H__ +#define __CPSW_H__ + +#include + +struct cpsw_slave_data { + char phy_id[MII_BUS_ID_SIZE]; + int phy_if; + u8 mac_addr[ETH_ALEN]; + u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */ +}; + +struct cpsw_platform_data { + struct cpsw_slave_data *slave_data; + u32 ss_reg_ofs; /* Subsystem control register offset */ + u32 channels; /* number of cpdma channels (symmetric) */ + u32 slaves; /* number of slave cpgmac ports */ + u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */ + u32 cpts_clock_mult; /* convert input clock ticks to nanoseconds */ + u32 cpts_clock_shift; /* convert input clock ticks to nanoseconds */ + u32 ale_entries; /* ale table size */ + u32 bd_ram_size; /*buffer descriptor ram size */ + u32 rx_descs; /* Number of Rx Descriptios */ + u32 mac_control; /* Mac control register */ + u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode*/ + bool dual_emac; /* Enable Dual EMAC mode */ +}; + +#endif /* __CPSW_H__ */ diff --git a/include/linux/platform_data/cpsw.h b/include/linux/platform_data/cpsw.h deleted file mode 100644 index bb3cd58..0000000 --- a/include/linux/platform_data/cpsw.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Texas Instruments Ethernet Switch Driver - * - * Copyright (C) 2012 Texas Instruments - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ -#ifndef __CPSW_H__ -#define __CPSW_H__ - -#include - -struct cpsw_slave_data { - char phy_id[MII_BUS_ID_SIZE]; - int phy_if; - u8 mac_addr[ETH_ALEN]; - u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */ - -}; - -struct cpsw_platform_data { - u32 ss_reg_ofs; /* Subsystem control register offset */ - u32 channels; /* number of cpdma channels (symmetric) */ - u32 slaves; /* number of slave cpgmac ports */ - struct cpsw_slave_data *slave_data; - u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */ - u32 cpts_clock_mult; /* convert input clock ticks to nanoseconds */ - u32 cpts_clock_shift; /* convert input clock ticks to nanoseconds */ - u32 ale_entries; /* ale table size */ - u32 bd_ram_size; /*buffer descriptor ram size */ - u32 rx_descs; /* Number of Rx Descriptios */ - u32 mac_control; /* Mac control register */ - u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode*/ - bool dual_emac; /* Enable Dual EMAC mode */ -}; - -#endif /* __CPSW_H__ */ -- cgit v0.10.2 From d8a64420eb267bec9f4c99627deee4fa3bf4133e Mon Sep 17 00:00:00 2001 From: Matus Ujhelyi Date: Tue, 20 Aug 2013 07:59:38 +0200 Subject: net: cpsw: Add support for wake-on-lan for cpsw Some phy's can be configured to enable wake on lan (e.g. at803x or marvell 88E1318S). There is no way how to enable wol on CPSW with such connected phys. This patch adds this support. It is provided by calling the phy's related code. Tested on board with at8030x connected phy. Wol interrupt line is connected to GPIO0 on am335x. Signed-off-by: Matus Ujhelyi Acked-by: Mugunthan V N Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index b4a85f5..79974e3 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1640,6 +1640,29 @@ static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) return -EOPNOTSUPP; } +static void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + int slave_no = cpsw_slave_index(priv); + + wol->supported = 0; + wol->wolopts = 0; + + if (priv->slaves[slave_no].phy) + phy_ethtool_get_wol(priv->slaves[slave_no].phy, wol); +} + +static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + int slave_no = cpsw_slave_index(priv); + + if (priv->slaves[slave_no].phy) + return phy_ethtool_set_wol(priv->slaves[slave_no].phy, wol); + else + return -EOPNOTSUPP; +} + static const struct ethtool_ops cpsw_ethtool_ops = { .get_drvinfo = cpsw_get_drvinfo, .get_msglevel = cpsw_get_msglevel, @@ -1653,6 +1676,8 @@ static const struct ethtool_ops cpsw_ethtool_ops = { .get_sset_count = cpsw_get_sset_count, .get_strings = cpsw_get_strings, .get_ethtool_stats = cpsw_get_ethtool_stats, + .get_wol = cpsw_get_wol, + .set_wol = cpsw_set_wol, }; static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv, -- cgit v0.10.2 From 846989635b368c41efd9079ec6fe0e8d2845ec11 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 20 Aug 2013 12:22:00 +0200 Subject: net: ipv6: igmp6_event_query: use msecs_to_jiffies Use proper API functions to calculate jiffies from milliseconds and not the crude method of dividing HZ by a value. This ensures more accurate values even in the case of strange HZ values. While at it, also simplify code in the mlh2 case by using max(). Signed-off-by: Daniel Borkmann Acked-by: Hannes Frederic Sowa Signed-off-by: David S. Miller diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 6c76df9..57863e2 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -1150,9 +1150,7 @@ int igmp6_event_query(struct sk_buff *skb) int switchback; /* MLDv1 router present */ - /* Translate milliseconds to jiffies */ - max_delay = (ntohs(mld->mld_maxdelay)*HZ)/1000; - + max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay)); switchback = (idev->mc_qrv + 1) * max_delay; idev->mc_v1_seen = jiffies + switchback; @@ -1169,10 +1167,11 @@ int igmp6_event_query(struct sk_buff *skb) return -EINVAL; mlh2 = (struct mld2_query *)skb_transport_header(skb); - max_delay = (MLDV2_MRC(ntohs(mlh2->mld2q_mrc))*HZ)/1000; - if (!max_delay) - max_delay = 1; + + max_delay = max(msecs_to_jiffies(MLDV2_MRC(ntohs(mlh2->mld2q_mrc))), 1UL); + idev->mc_maxdelay = max_delay; + if (mlh2->mld2q_qrv) idev->mc_qrv = mlh2->mld2q_qrv; if (group_type == IPV6_ADDR_ANY) { /* general query */ -- cgit v0.10.2 From c2cef4e88808cc566956605a39a96080912c0566 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 20 Aug 2013 12:22:01 +0200 Subject: net: ipv6: minor: *_start_timer: rather use unsigned long For the functions mld_gq_start_timer(), mld_ifc_start_timer(), and mld_dad_start_timer(), rather use unsigned long than int as we operate only on unsigned values anyway. This seems more appropriate as there is no good reason to do type conversions to int, that could lead to future errors. Signed-off-by: Daniel Borkmann Acked-by: Hannes Frederic Sowa Signed-off-by: David S. 
Miller diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 57863e2..33f98d9 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -996,24 +996,24 @@ bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group, static void mld_gq_start_timer(struct inet6_dev *idev) { - int tv = net_random() % idev->mc_maxdelay; + unsigned long tv = net_random() % idev->mc_maxdelay; idev->mc_gq_running = 1; if (!mod_timer(&idev->mc_gq_timer, jiffies+tv+2)) in6_dev_hold(idev); } -static void mld_ifc_start_timer(struct inet6_dev *idev, int delay) +static void mld_ifc_start_timer(struct inet6_dev *idev, unsigned long delay) { - int tv = net_random() % delay; + unsigned long tv = net_random() % delay; if (!mod_timer(&idev->mc_ifc_timer, jiffies+tv+2)) in6_dev_hold(idev); } -static void mld_dad_start_timer(struct inet6_dev *idev, int delay) +static void mld_dad_start_timer(struct inet6_dev *idev, unsigned long delay) { - int tv = net_random() % delay; + unsigned long tv = net_random() % delay; if (!mod_timer(&idev->mc_dad_timer, jiffies+tv+2)) in6_dev_hold(idev); -- cgit v0.10.2 From 9fd0784164047583fa856c9a5aeda1d6c6fb0399 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 20 Aug 2013 12:22:02 +0200 Subject: net: ipv6: mcast: minor: use defines for rfc3810/8.1 lengths Instead of hard-coding length values, use a define to make it clear where those lengths come from. Signed-off-by: Daniel Borkmann Acked-by: Hannes Frederic Sowa Signed-off-by: David S. Miller diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 33f98d9..98ead2b 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -107,9 +107,12 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca, static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml, struct inet6_dev *idev); - #define MLD_QRV_DEFAULT 2 +/* RFC3810, 8.1 Query Version Distinctions */ +#define MLD_V1_QUERY_LEN 24 +#define MLD_V2_QUERY_LEN_MIN 28 + #define MLD_V1_SEEN(idev) (dev_net((idev)->dev)->ipv6.devconf_all->force_mld_version == 1 || \ (idev)->cnf.force_mld_version == 1 || \ ((idev)->mc_v1_seen && \ @@ -1146,7 +1149,7 @@ int igmp6_event_query(struct sk_buff *skb) !(group_type&IPV6_ADDR_MULTICAST)) return -EINVAL; - if (len == 24) { + if (len == MLD_V1_QUERY_LEN) { int switchback; /* MLDv1 router present */ @@ -1160,7 +1163,7 @@ int igmp6_event_query(struct sk_buff *skb) __in6_dev_put(idev); /* clear deleted report items */ mld_clear_delrec(idev); - } else if (len >= 28) { + } else if (len >= MLD_V2_QUERY_LEN_MIN) { int srcs_offset = sizeof(struct mld2_query) - sizeof(struct icmp6hdr); if (!pskb_may_pull(skb, srcs_offset)) -- cgit v0.10.2 From 954c396756e3d31985f7bc6a414a988a4736a7d0 Mon Sep 17 00:00:00 2001 From: Sean Cross Date: Wed, 21 Aug 2013 01:46:12 +0000 Subject: net/phy: micrel: Add OF configuration support for ksz9021 Some boards require custom PHY configuration, for example due to trace length differences. Add the ability to configure these registers in order to get the PHY to function on boards that need it. Because PHYs are auto-detected based on MDIO device IDs, allow PHY configuration to be specified in the parent Ethernet device node if no PHY device node is present. Signed-off-by: Sean Cross Signed-off-by: David S. 
Miller diff --git a/Documentation/devicetree/bindings/net/micrel-ksz9021.txt b/Documentation/devicetree/bindings/net/micrel-ksz9021.txt new file mode 100644 index 0000000..997a63f --- /dev/null +++ b/Documentation/devicetree/bindings/net/micrel-ksz9021.txt @@ -0,0 +1,49 @@ +Micrel KSZ9021 Gigabit Ethernet PHY + +Some boards require special tuning values, particularly when it comes to +clock delays. You can specify clock delay values by adding +micrel-specific properties to an Ethernet OF device node. + +All skew control options are specified in picoseconds. The minimum +value is 0, and the maximum value is 3000. + +Optional properties: + - rxc-skew-ps : Skew control of RXC pad + - rxdv-skew-ps : Skew control of RX CTL pad + - txc-skew-ps : Skew control of TXC pad + - txen-skew-ps : Skew control of TX_CTL pad + - rxd0-skew-ps : Skew control of RX data 0 pad + - rxd1-skew-ps : Skew control of RX data 1 pad + - rxd2-skew-ps : Skew control of RX data 2 pad + - rxd3-skew-ps : Skew control of RX data 3 pad + - txd0-skew-ps : Skew control of TX data 0 pad + - txd1-skew-ps : Skew control of TX data 1 pad + - txd2-skew-ps : Skew control of TX data 2 pad + - txd3-skew-ps : Skew control of TX data 3 pad + +Examples: + + /* Attach to an Ethernet device with autodetected PHY */ + &enet { + rxc-skew-ps = <3000>; + rxdv-skew-ps = <0>; + txc-skew-ps = <3000>; + txen-skew-ps = <0>; + status = "okay"; + }; + + /* Attach to an explicitly-specified PHY */ + mdio { + phy0: ethernet-phy@0 { + rxc-skew-ps = <3000>; + rxdv-skew-ps = <0>; + txc-skew-ps = <3000>; + txen-skew-ps = <0>; + reg = <0>; + }; + }; + ethernet@70000 { + status = "okay"; + phy = <&phy0>; + phy-mode = "rgmii-id"; + }; diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 9ca4945..c31aad0 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -25,6 +25,7 @@ #include #include #include +#include /* Operation Mode Strap Override */ #define MII_KSZPHY_OMSO 0x16 @@ -53,6 +54,20 @@ #define KS8737_CTRL_INT_ACTIVE_HIGH (1 << 14) #define KSZ8051_RMII_50MHZ_CLK (1 << 7) +/* Write/read to/from extended registers */ +#define MII_KSZPHY_EXTREG 0x0b +#define KSZPHY_EXTREG_WRITE 0x8000 + +#define MII_KSZPHY_EXTREG_WRITE 0x0c +#define MII_KSZPHY_EXTREG_READ 0x0d + +/* Extended registers */ +#define MII_KSZPHY_CLK_CONTROL_PAD_SKEW 0x104 +#define MII_KSZPHY_RX_DATA_PAD_SKEW 0x105 +#define MII_KSZPHY_TX_DATA_PAD_SKEW 0x106 + +#define PS_TO_REG 200 + static int ksz_config_flags(struct phy_device *phydev) { int regval; @@ -65,6 +80,20 @@ static int ksz_config_flags(struct phy_device *phydev) return 0; } +static int kszphy_extended_write(struct phy_device *phydev, + u32 regnum, u16 val) +{ + phy_write(phydev, MII_KSZPHY_EXTREG, KSZPHY_EXTREG_WRITE | regnum); + return phy_write(phydev, MII_KSZPHY_EXTREG_WRITE, val); +} + +static int kszphy_extended_read(struct phy_device *phydev, + u32 regnum) +{ + phy_write(phydev, MII_KSZPHY_EXTREG, regnum); + return phy_read(phydev, MII_KSZPHY_EXTREG_READ); +} + static int kszphy_ack_interrupt(struct phy_device *phydev) { /* bit[7..0] int status, which is a read and clear register. */ @@ -141,6 +170,78 @@ static int ks8051_config_init(struct phy_device *phydev) return rc < 0 ? 
rc : 0; } +static int ksz9021_load_values_from_of(struct phy_device *phydev, + struct device_node *of_node, u16 reg, + char *field1, char *field2, + char *field3, char *field4) +{ + int val1 = -1; + int val2 = -2; + int val3 = -3; + int val4 = -4; + int newval; + int matches = 0; + + if (!of_property_read_u32(of_node, field1, &val1)) + matches++; + + if (!of_property_read_u32(of_node, field2, &val2)) + matches++; + + if (!of_property_read_u32(of_node, field3, &val3)) + matches++; + + if (!of_property_read_u32(of_node, field4, &val4)) + matches++; + + if (!matches) + return 0; + + if (matches < 4) + newval = kszphy_extended_read(phydev, reg); + else + newval = 0; + + if (val1 != -1) + newval = ((newval & 0xfff0) | ((val1 / PS_TO_REG) & 0xf) << 0); + + if (val2 != -1) + newval = ((newval & 0xff0f) | ((val2 / PS_TO_REG) & 0xf) << 4); + + if (val3 != -1) + newval = ((newval & 0xf0ff) | ((val3 / PS_TO_REG) & 0xf) << 8); + + if (val4 != -1) + newval = ((newval & 0x0fff) | ((val4 / PS_TO_REG) & 0xf) << 12); + + return kszphy_extended_write(phydev, reg, newval); +} + +static int ksz9021_config_init(struct phy_device *phydev) +{ + struct device *dev = &phydev->dev; + struct device_node *of_node = dev->of_node; + + if (!of_node && dev->parent->of_node) + of_node = dev->parent->of_node; + + if (of_node) { + ksz9021_load_values_from_of(phydev, of_node, + MII_KSZPHY_CLK_CONTROL_PAD_SKEW, + "txen-skew-ps", "txc-skew-ps", + "rxdv-skew-ps", "rxc-skew-ps"); + ksz9021_load_values_from_of(phydev, of_node, + MII_KSZPHY_RX_DATA_PAD_SKEW, + "rxd0-skew-ps", "rxd1-skew-ps", + "rxd2-skew-ps", "rxd3-skew-ps"); + ksz9021_load_values_from_of(phydev, of_node, + MII_KSZPHY_TX_DATA_PAD_SKEW, + "txd0-skew-ps", "txd1-skew-ps", + "txd2-skew-ps", "txd3-skew-ps"); + } + return 0; +} + #define KSZ8873MLL_GLOBAL_CONTROL_4 0x06 #define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX (1 << 6) #define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED (1 << 4) @@ -281,7 +382,7 @@ static struct phy_driver ksphy_driver[] = { .name = "Micrel KSZ9021 Gigabit PHY", .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause), .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, - .config_init = kszphy_config_init, + .config_init = ksz9021_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, -- cgit v0.10.2 From 1ddff7da0faecffdcdeab3d981fb8241453cea44 Mon Sep 17 00:00:00 2001 From: Alexander Shiyan Date: Mon, 19 Aug 2013 15:39:19 +0400 Subject: can: mcp251x: Replace power callbacks with regulator API This patch replaces power callbacks to the regulator API. To improve the readability of the code, helper for the regulator enable/disable was added. 
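For readers less familiar with the consumer-side regulator API, the pattern the conversion relies on is small: look up named supplies with devm_regulator_get() and switch them with regulator_enable()/regulator_disable() at the points where the old callbacks used to be invoked. The sketch below is only an illustration of that pattern, not the patch itself; the function and variable names here are made up, while the "vdd" supply name and the -EPROBE_DEFER handling mirror what the driver change further down actually does.

#include <linux/err.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>

/* Illustrative sketch of the consumer-side regulator pattern; assumes a
 * board registered a "vdd" supply for this SPI device, for example via a
 * fixed-voltage regulator as in the zeus board change that is part of
 * this patch.
 */
static int example_supply_switch(struct regulator *reg, int on)
{
	if (IS_ERR(reg))	/* supply is optional: silently skip if absent */
		return 0;
	return on ? regulator_enable(reg) : regulator_disable(reg);
}

static int example_probe(struct spi_device *spi)
{
	struct regulator *power = devm_regulator_get(&spi->dev, "vdd");

	if (PTR_ERR(power) == -EPROBE_DEFER)
		return -EPROBE_DEFER;	/* regulator core not ready yet */

	return example_supply_switch(power, 1);	/* power the chip up */
}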
Acked-by: Haojian Zhuang Signed-off-by: Alexander Shiyan Signed-off-by: Marc Kleine-Budde diff --git a/arch/arm/mach-pxa/icontrol.c b/arch/arm/mach-pxa/icontrol.c index fe31bfc..c98511c 100644 --- a/arch/arm/mach-pxa/icontrol.c +++ b/arch/arm/mach-pxa/icontrol.c @@ -73,9 +73,6 @@ static struct pxa2xx_spi_chip mcp251x_chip_info4 = { static struct mcp251x_platform_data mcp251x_info = { .oscillator_frequency = 16E6, - .board_specific_setup = NULL, - .power_enable = NULL, - .transceiver_enable = NULL }; static struct spi_board_info mcp251x_board_info[] = { diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c index f5d4364..04a0aea 100644 --- a/arch/arm/mach-pxa/zeus.c +++ b/arch/arm/mach-pxa/zeus.c @@ -29,6 +29,8 @@ #include #include #include +#include +#include #include #include @@ -391,33 +393,34 @@ static struct pxa2xx_spi_master pxa2xx_spi_ssp3_master_info = { }; /* CAN bus on SPI */ -static int zeus_mcp2515_setup(struct spi_device *sdev) -{ - int err; - - err = gpio_request(ZEUS_CAN_SHDN_GPIO, "CAN shutdown"); - if (err) - return err; +static struct regulator_consumer_supply can_regulator_consumer = + REGULATOR_SUPPLY("vdd", "spi3.0"); - err = gpio_direction_output(ZEUS_CAN_SHDN_GPIO, 1); - if (err) { - gpio_free(ZEUS_CAN_SHDN_GPIO); - return err; - } +static struct regulator_init_data can_regulator_init_data = { + .constraints = { + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .consumer_supplies = &can_regulator_consumer, + .num_consumer_supplies = 1, +}; - return 0; -} +static struct fixed_voltage_config can_regulator_pdata = { + .supply_name = "CAN_SHDN", + .microvolts = 3300000, + .gpio = ZEUS_CAN_SHDN_GPIO, + .init_data = &can_regulator_init_data, +}; -static int zeus_mcp2515_transceiver_enable(int enable) -{ - gpio_set_value(ZEUS_CAN_SHDN_GPIO, !enable); - return 0; -} +static struct platform_device can_regulator_device = { + .name = "reg-fixed-volage", + .id = -1, + .dev = { + .platform_data = &can_regulator_pdata, + }, +}; static struct mcp251x_platform_data zeus_mcp2515_pdata = { .oscillator_frequency = 16*1000*1000, - .board_specific_setup = zeus_mcp2515_setup, - .power_enable = zeus_mcp2515_transceiver_enable, }; static struct spi_board_info zeus_spi_board_info[] = { @@ -516,6 +519,7 @@ static struct platform_device *zeus_devices[] __initdata = { &zeus_leds_device, &zeus_pcmcia_device, &zeus_max6369_device, + &can_regulator_device, }; /* AC'97 */ diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c index 8cda23b..d2c5488 100644 --- a/drivers/net/can/mcp251x.c +++ b/drivers/net/can/mcp251x.c @@ -37,9 +37,6 @@ * * static struct mcp251x_platform_data mcp251x_info = { * .oscillator_frequency = 8000000, - * .board_specific_setup = &mcp251x_setup, - * .power_enable = mcp251x_power_enable, - * .transceiver_enable = NULL, * }; * * static struct spi_board_info spi_board_info[] = { @@ -76,6 +73,7 @@ #include #include #include +#include /* SPI interface instruction set */ #define INSTRUCTION_WRITE 0x02 @@ -264,6 +262,8 @@ struct mcp251x_priv { #define AFTER_SUSPEND_POWER 4 #define AFTER_SUSPEND_RESTART 8 int restart_tx; + struct regulator *power; + struct regulator *transceiver; }; #define MCP251X_IS(_model) \ @@ -667,16 +667,25 @@ static int mcp251x_hw_probe(struct spi_device *spi) return (st1 == 0x80 && st2 == 0x07) ? 
1 : 0; } +static int mcp251x_power_enable(struct regulator *reg, int enable) +{ + if (IS_ERR(reg)) + return 0; + + if (enable) + return regulator_enable(reg); + else + return regulator_disable(reg); +} + static void mcp251x_open_clean(struct net_device *net) { struct mcp251x_priv *priv = netdev_priv(net); struct spi_device *spi = priv->spi; - struct mcp251x_platform_data *pdata = spi->dev.platform_data; free_irq(spi->irq, priv); mcp251x_hw_sleep(spi); - if (pdata->transceiver_enable) - pdata->transceiver_enable(0); + mcp251x_power_enable(priv->transceiver, 0); close_candev(net); } @@ -684,7 +693,6 @@ static int mcp251x_stop(struct net_device *net) { struct mcp251x_priv *priv = netdev_priv(net); struct spi_device *spi = priv->spi; - struct mcp251x_platform_data *pdata = spi->dev.platform_data; close_candev(net); @@ -704,8 +712,7 @@ static int mcp251x_stop(struct net_device *net) mcp251x_hw_sleep(spi); - if (pdata->transceiver_enable) - pdata->transceiver_enable(0); + mcp251x_power_enable(priv->transceiver, 0); priv->can.state = CAN_STATE_STOPPED; @@ -939,8 +946,7 @@ static int mcp251x_open(struct net_device *net) } mutex_lock(&priv->mcp_lock); - if (pdata->transceiver_enable) - pdata->transceiver_enable(1); + mcp251x_power_enable(priv->transceiver, 1); priv->force_quit = 0; priv->tx_skb = NULL; @@ -956,8 +962,7 @@ static int mcp251x_open(struct net_device *net) flags, DEVICE_NAME, priv); if (ret) { dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq); - if (pdata->transceiver_enable) - pdata->transceiver_enable(0); + mcp251x_power_enable(priv->transceiver, 0); close_candev(net); goto open_unlock; } @@ -1026,6 +1031,19 @@ static int mcp251x_can_probe(struct spi_device *spi) CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY; priv->model = spi_get_device_id(spi)->driver_data; priv->net = net; + + priv->power = devm_regulator_get(&spi->dev, "vdd"); + priv->transceiver = devm_regulator_get(&spi->dev, "xceiver"); + if ((PTR_ERR(priv->power) == -EPROBE_DEFER) || + (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) { + ret = -EPROBE_DEFER; + goto error_power; + } + + ret = mcp251x_power_enable(priv->power, 1); + if (ret) + goto error_power; + spi_set_drvdata(spi, priv); priv->spi = spi; @@ -1068,13 +1086,6 @@ static int mcp251x_can_probe(struct spi_device *spi) } } - if (pdata->power_enable) - pdata->power_enable(1); - - /* Call out to platform specific setup */ - if (pdata->board_specific_setup) - pdata->board_specific_setup(spi); - SET_NETDEV_DEV(net, &spi->dev); /* Configure the SPI bus */ @@ -1084,14 +1095,11 @@ static int mcp251x_can_probe(struct spi_device *spi) /* Here is OK to not lock the MCP, no one knows about it yet */ if (!mcp251x_hw_probe(spi)) { - dev_info(&spi->dev, "Probe failed\n"); + ret = -ENODEV; goto error_probe; } mcp251x_hw_sleep(spi); - if (pdata->transceiver_enable) - pdata->transceiver_enable(0); - ret = register_candev(net); if (ret) goto error_probe; @@ -1109,13 +1117,13 @@ error_rx_buf: if (!mcp251x_enable_dma) kfree(priv->spi_tx_buf); error_tx_buf: - free_candev(net); if (mcp251x_enable_dma) dma_free_coherent(&spi->dev, PAGE_SIZE, priv->spi_tx_buf, priv->spi_tx_dma); + mcp251x_power_enable(priv->power, 0); +error_power: + free_candev(net); error_alloc: - if (pdata->power_enable) - pdata->power_enable(0); dev_err(&spi->dev, "probe failed\n"); error_out: return ret; @@ -1123,12 +1131,10 @@ error_out: static int mcp251x_can_remove(struct spi_device *spi) { - struct mcp251x_platform_data *pdata = spi->dev.platform_data; struct mcp251x_priv *priv = spi_get_drvdata(spi); 
struct net_device *net = priv->net; unregister_candev(net); - free_candev(net); if (mcp251x_enable_dma) { dma_free_coherent(&spi->dev, PAGE_SIZE, @@ -1138,8 +1144,9 @@ static int mcp251x_can_remove(struct spi_device *spi) kfree(priv->spi_rx_buf); } - if (pdata->power_enable) - pdata->power_enable(0); + mcp251x_power_enable(priv->power, 0); + + free_candev(net); return 0; } @@ -1149,7 +1156,6 @@ static int mcp251x_can_remove(struct spi_device *spi) static int mcp251x_can_suspend(struct device *dev) { struct spi_device *spi = to_spi_device(dev); - struct mcp251x_platform_data *pdata = spi->dev.platform_data; struct mcp251x_priv *priv = spi_get_drvdata(spi); struct net_device *net = priv->net; @@ -1163,15 +1169,14 @@ static int mcp251x_can_suspend(struct device *dev) netif_device_detach(net); mcp251x_hw_sleep(spi); - if (pdata->transceiver_enable) - pdata->transceiver_enable(0); + mcp251x_power_enable(priv->transceiver, 0); priv->after_suspend = AFTER_SUSPEND_UP; } else { priv->after_suspend = AFTER_SUSPEND_DOWN; } - if (pdata->power_enable) { - pdata->power_enable(0); + if (!IS_ERR(priv->power)) { + regulator_disable(priv->power); priv->after_suspend |= AFTER_SUSPEND_POWER; } @@ -1181,16 +1186,14 @@ static int mcp251x_can_suspend(struct device *dev) static int mcp251x_can_resume(struct device *dev) { struct spi_device *spi = to_spi_device(dev); - struct mcp251x_platform_data *pdata = spi->dev.platform_data; struct mcp251x_priv *priv = spi_get_drvdata(spi); if (priv->after_suspend & AFTER_SUSPEND_POWER) { - pdata->power_enable(1); + mcp251x_power_enable(priv->power, 1); queue_work(priv->wq, &priv->restart_work); } else { if (priv->after_suspend & AFTER_SUSPEND_UP) { - if (pdata->transceiver_enable) - pdata->transceiver_enable(1); + mcp251x_power_enable(priv->transceiver, 1); queue_work(priv->wq, &priv->restart_work); } else { priv->after_suspend = 0; diff --git a/include/linux/can/platform/mcp251x.h b/include/linux/can/platform/mcp251x.h index 089fe43..8a27256 100644 --- a/include/linux/can/platform/mcp251x.h +++ b/include/linux/can/platform/mcp251x.h @@ -9,26 +9,15 @@ #include -/** +/* * struct mcp251x_platform_data - MCP251X SPI CAN controller platform data * @oscillator_frequency: - oscillator frequency in Hz * @irq_flags: - IRQF configuration flags - * @board_specific_setup: - called before probing the chip (power,reset) - * @transceiver_enable: - called to power on/off the transceiver - * @power_enable: - called to power on/off the mcp *and* the - * transceiver - * - * Please note that you should define power_enable or transceiver_enable or - * none of them. Defining both of them is no use. - * */ struct mcp251x_platform_data { unsigned long oscillator_frequency; unsigned long irq_flags; - int (*board_specific_setup)(struct spi_device *spi); - int (*transceiver_enable)(int enable); - int (*power_enable) (int enable); }; #endif /* __CAN_PLATFORM_MCP251X_H__ */ -- cgit v0.10.2 From ae5d589e5f9f3217656ada632869968178886ac6 Mon Sep 17 00:00:00 2001 From: Alexander Shiyan Date: Mon, 19 Aug 2013 15:39:20 +0400 Subject: can: mcp251x: Eliminate irq_flags from driver platform_data Flags is not used by boards, so remove this field from the driver platform_data. 
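The practical effect is that a board now passes nothing but the oscillator frequency, while the interrupt trigger is fixed inside the driver (IRQF_ONESHOT | IRQF_TRIGGER_FALLING, as requested in mcp251x_open() in the diff that follows). A hypothetical board snippet under this model could look like the sketch below; the modalias, bus number, chip select and frequencies are placeholders rather than values taken from any particular board.

#include <linux/spi/spi.h>
#include <linux/can/platform/mcp251x.h>

static struct mcp251x_platform_data example_mcp251x_info = {
	.oscillator_frequency = 16000000,	/* the only remaining field */
};

static struct spi_board_info example_spi_board_info[] __initdata = {
	{
		.modalias	= "mcp2515",
		.platform_data	= &example_mcp251x_info,
		.max_speed_hz	= 10000000,
		.bus_num	= 1,
		.chip_select	= 0,
		/* .irq would be filled in by the board, e.g. via gpio_to_irq() */
	},
};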
Signed-off-by: Alexander Shiyan Signed-off-by: Marc Kleine-Budde diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c index d2c5488..ec84ce1 100644 --- a/drivers/net/can/mcp251x.c +++ b/drivers/net/can/mcp251x.c @@ -935,8 +935,7 @@ static int mcp251x_open(struct net_device *net) { struct mcp251x_priv *priv = netdev_priv(net); struct spi_device *spi = priv->spi; - struct mcp251x_platform_data *pdata = spi->dev.platform_data; - unsigned long flags; + unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_FALLING; int ret; ret = open_candev(net); @@ -952,12 +951,6 @@ static int mcp251x_open(struct net_device *net) priv->tx_skb = NULL; priv->tx_len = 0; - flags = IRQF_ONESHOT; - if (pdata->irq_flags) - flags |= pdata->irq_flags; - else - flags |= IRQF_TRIGGER_FALLING; - ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist, flags, DEVICE_NAME, priv); if (ret) { diff --git a/include/linux/can/platform/mcp251x.h b/include/linux/can/platform/mcp251x.h index 8a27256..dc029db 100644 --- a/include/linux/can/platform/mcp251x.h +++ b/include/linux/can/platform/mcp251x.h @@ -12,12 +12,10 @@ /* * struct mcp251x_platform_data - MCP251X SPI CAN controller platform data * @oscillator_frequency: - oscillator frequency in Hz - * @irq_flags: - IRQF configuration flags */ struct mcp251x_platform_data { unsigned long oscillator_frequency; - unsigned long irq_flags; }; #endif /* __CAN_PLATFORM_MCP251X_H__ */ -- cgit v0.10.2 From b1ef05a5a20afe50d4188a5b59579bd140758cf0 Mon Sep 17 00:00:00 2001 From: Alexander Shiyan Date: Mon, 19 Aug 2013 15:39:21 +0400 Subject: can: mcp251x: Allow tuning SPI mode and limit maximal SPI speed Patch allow to use different mode settings for SPI (MODE3 for example) and limit maximal speed according to IC datasheet. Signed-off-by: Alexander Shiyan Signed-off-by: Marc Kleine-Budde diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c index ec84ce1..fe7dd69 100644 --- a/drivers/net/can/mcp251x.c +++ b/drivers/net/can/mcp251x.c @@ -1082,7 +1082,11 @@ static int mcp251x_can_probe(struct spi_device *spi) SET_NETDEV_DEV(net, &spi->dev); /* Configure the SPI bus */ - spi->mode = SPI_MODE_0; + spi->mode = spi->mode ? : SPI_MODE_0; + if (mcp251x_is_2510(spi)) + spi->max_speed_hz = spi->max_speed_hz ? : 5 * 1000 * 1000; + else + spi->max_speed_hz = spi->max_speed_hz ? : 10 * 1000 * 1000; spi->bits_per_word = 8; spi_setup(spi); -- cgit v0.10.2 From 5383825ca9a78b188341a288e45274166d2e4aea Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Thu, 13 Sep 2012 01:11:23 +0100 Subject: sfc: const-qualify source pointers for MMIO write functions Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h index 96759aee..39f6098 100644 --- a/drivers/net/ethernet/sfc/io.h +++ b/drivers/net/ethernet/sfc/io.h @@ -83,7 +83,7 @@ static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg) } /* Write a normal 128-bit CSR, locking as appropriate. */ -static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value, +static inline void efx_writeo(struct efx_nic *efx, const efx_oword_t *value, unsigned int reg) { unsigned long flags __attribute__ ((unused)); @@ -108,7 +108,7 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value, /* Write 64-bit SRAM through the supplied mapping, locking as appropriate. 
*/ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase, - efx_qword_t *value, unsigned int index) + const efx_qword_t *value, unsigned int index) { unsigned int addr = index * sizeof(*value); unsigned long flags __attribute__ ((unused)); @@ -129,7 +129,7 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase, } /* Write a 32-bit CSR or the last dword of a special 128-bit CSR */ -static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value, +static inline void efx_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned int reg) { netif_vdbg(efx, hw, efx->net_dev, @@ -190,8 +190,9 @@ static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value, } /* Write a 128-bit CSR forming part of a table */ -static inline void efx_writeo_table(struct efx_nic *efx, efx_oword_t *value, - unsigned int reg, unsigned int index) +static inline void +efx_writeo_table(struct efx_nic *efx, const efx_oword_t *value, + unsigned int reg, unsigned int index) { efx_writeo(efx, value, reg + index * sizeof(efx_oword_t)); } @@ -239,8 +240,9 @@ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value, /* Write a page-mapped 32-bit CSR (EVQ_RPTR or the high bits of * RX_DESC_UPD or TX_DESC_UPD) */ -static inline void _efx_writed_page(struct efx_nic *efx, efx_dword_t *value, - unsigned int reg, unsigned int page) +static inline void +_efx_writed_page(struct efx_nic *efx, const efx_dword_t *value, + unsigned int reg, unsigned int page) { efx_writed(efx, value, EFX_PAGED_REG(page, reg)); } @@ -256,7 +258,7 @@ static inline void _efx_writed_page(struct efx_nic *efx, efx_dword_t *value, * collector register. */ static inline void _efx_writed_page_locked(struct efx_nic *efx, - efx_dword_t *value, + const efx_dword_t *value, unsigned int reg, unsigned int page) { -- cgit v0.10.2 From e847b53e9e3c6f7818fe2fcd1082148423020ccc Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Thu, 13 Sep 2012 01:11:24 +0100 Subject: sfc: Use efx_mcdi_mon() to find efx_mcdi_mon structure from efx_nic This needs to be done before we separate MCDI from siena_nic_data. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c index 1d552f0..fe42c3b 100644 --- a/drivers/net/ethernet/sfc/mcdi_mon.c +++ b/drivers/net/ethernet/sfc/mcdi_mon.c @@ -400,8 +400,7 @@ fail: void efx_mcdi_mon_remove(struct efx_nic *efx) { - struct siena_nic_data *nic_data = efx->nic_data; - struct efx_mcdi_mon *hwmon = &nic_data->hwmon; + struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); unsigned int i; for (i = 0; i < hwmon->n_attrs; i++) -- cgit v0.10.2 From ba34dd3df73eaa2aa4fbb82a91aade3f3a910acb Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Fri, 2 Aug 2013 14:10:12 +0300 Subject: Bluetooth: use DIV_ROUND_UP in suitable places in btmrvl_sdio There are two places where DIV_ROUND_UP may be used. It makes code a bit clearer. 
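For reference, the equivalence behind the change below can be checked in isolation. The following standalone snippet is illustration only (DIV_ROUND_UP really lives in <linux/kernel.h>; it is re-defined here so the check compiles in userspace), using arbitrary example sizes:

/* Illustration only: DIV_ROUND_UP normally comes from <linux/kernel.h>;
 * it is re-defined here so the equivalence can be checked in userspace. */
#include <assert.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int txlen = 1000, blksz_dl = 256;	/* example values */

	/* open-coded form used before the patch */
	unsigned int open_coded = (txlen + blksz_dl - 1) / blksz_dl;
	/* macro form used after the patch */
	unsigned int with_macro = DIV_ROUND_UP(txlen, blksz_dl);

	assert(open_coded == with_macro);
	printf("%u blocks of %u bytes cover %u bytes\n",
	       with_macro, blksz_dl, txlen);
	return 0;
}
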
Signed-off-by: Andy Shevchenko Signed-off-by: Gustavo Padovan diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index 75c2626..00da6df 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -486,7 +486,7 @@ static int btmrvl_sdio_download_fw_w_helper(struct btmrvl_sdio_card *card) if (firmwarelen - offset < txlen) txlen = firmwarelen - offset; - tx_blocks = (txlen + blksz_dl - 1) / blksz_dl; + tx_blocks = DIV_ROUND_UP(txlen, blksz_dl); memcpy(fwbuf, &firmware[offset], txlen); } @@ -873,7 +873,7 @@ static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv, } blksz = SDIO_BLOCK_SIZE; - buf_block_len = (nb + blksz - 1) / blksz; + buf_block_len = DIV_ROUND_UP(nb, blksz); sdio_claim_host(card->func); -- cgit v0.10.2 From 9d225d2208a6f17da5987ff4e7710b9e805cb5d6 Mon Sep 17 00:00:00 2001 From: Johan Hedberg Date: Thu, 8 Aug 2013 14:53:56 +0300 Subject: Bluetooth: Fix getting SCO socket options in deferred state When a socket is in deferred state there does actually exist an underlying connection even though the connection state is not yet BT_CONNECTED. In the deferred state it should therefore be allowed to get socket options that usually depend on a connection, such as SCO_OPTIONS and SCO_CONNINFO. This patch fixes the behavior of some user space code that behaves as follows without it: $ sudo tools/btiotest -i 00:1B:DC:xx:xx:xx -d -s accept=2 reject=-1 discon=-1 defer=1 sec=0 update_sec=0 prio=0 voice=0x0000 Listening for SCO connections bt_io_get(OPT_DEST): getsockopt(SCO_OPTIONS): Transport endpoint is not connected (107) Accepting connection Successfully connected to 60:D8:19:xx:xx:xx. handle=43, class=000000 The conditions that the patch updates the if-statements to is taken from similar code in l2cap_sock.c which correctly handles the deferred state. Signed-off-by: Johan Hedberg Signed-off-by: Gustavo Padovan diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index e7bd4ee..2de7150 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -765,7 +765,9 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user switch (optname) { case SCO_OPTIONS: - if (sk->sk_state != BT_CONNECTED) { + if (sk->sk_state != BT_CONNECTED && + !(sk->sk_state == BT_CONNECT2 && + test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) { err = -ENOTCONN; break; } @@ -781,7 +783,9 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user break; case SCO_CONNINFO: - if (sk->sk_state != BT_CONNECTED) { + if (sk->sk_state != BT_CONNECTED && + !(sk->sk_state == BT_CONNECT2 && + test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) { err = -ENOTCONN; break; } -- cgit v0.10.2 From c7882cbd1151011ca8e6fb13530cd09eae1c39ee Mon Sep 17 00:00:00 2001 From: Marcel Holtmann Date: Tue, 13 Aug 2013 10:00:54 -0700 Subject: Bluetooth: Set different event mask for LE-only controllers In case of a Low Energy only controller it makes no sense to configure the full BR/EDR event mask. It will just enable events that can not be send anyway and there is no guarantee that such a controller will accept this value. Use event mask 0x90 0xe8 0x04 0x02 0x00 0x80 0x00 0x20 for LE-only controllers which enables the following events: Disconnection Complete Encryption Change Read Remote Version Information Complete Command Complete Command Status Hardware Error Number of Completed Packets Data Buffer Overflow Encryption Key Refresh Complete LE Meta This is according to Core Specification, Part E, Section 3. 
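The listed bits can be checked against the quoted mask with a small standalone program (illustration only, not kernel code). The final 0x20 in byte 7 is the LE Meta event, which this hunk does not touch; the assumption here is that it is enabled separately for LE-capable controllers elsewhere in hci_setup_event_mask().

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t events[8] = { 0 };

	events[0] |= 0x10;	/* Disconnection Complete */
	events[0] |= 0x80;	/* Encryption Change */
	events[1] |= 0x08;	/* Read Remote Version Information Complete */
	events[1] |= 0x20;	/* Command Complete */
	events[1] |= 0x40;	/* Command Status */
	events[1] |= 0x80;	/* Hardware Error */
	events[2] |= 0x04;	/* Number of Completed Packets */
	events[3] |= 0x02;	/* Data Buffer Overflow */
	events[5] |= 0x80;	/* Encryption Key Refresh Complete */
	events[7] |= 0x20;	/* LE Meta, assumed enabled elsewhere for LE */

	for (int i = 0; i < 8; i++)
		printf("0x%02x ", events[i]);
	printf("\n");	/* prints: 0x90 0xe8 0x04 0x02 0x00 0x80 0x00 0x20 */
	return 0;
}
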
Signed-off-by: Marcel Holtmann Signed-off-by: Gustavo Padovan diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index b821b19..8d9b87d 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -454,6 +454,18 @@ static void hci_setup_event_mask(struct hci_request *req) events[4] |= 0x04; /* Read Remote Extended Features Complete */ events[5] |= 0x08; /* Synchronous Connection Complete */ events[5] |= 0x10; /* Synchronous Connection Changed */ + } else { + /* Use a different default for LE-only devices */ + memset(events, 0, sizeof(events)); + events[0] |= 0x10; /* Disconnection Complete */ + events[0] |= 0x80; /* Encryption Change */ + events[1] |= 0x08; /* Read Remote Version Information Complete */ + events[1] |= 0x20; /* Command Complete */ + events[1] |= 0x40; /* Command Status */ + events[1] |= 0x80; /* Hardware Error */ + events[2] |= 0x04; /* Number of Completed Packets */ + events[3] |= 0x02; /* Data Buffer Overflow */ + events[5] |= 0x80; /* Encryption Key Refresh Complete */ } if (lmp_inq_rssi_capable(hdev)) -- cgit v0.10.2 From 396dc223dd36edd218650d042a07c5e61f022c5b Mon Sep 17 00:00:00 2001 From: Gianluca Anzolin Date: Mon, 29 Jul 2013 17:08:08 +0200 Subject: Bluetooth: Take proper tty_struct references In net/bluetooth/rfcomm/tty.c the struct tty_struct is used without taking references. This may lead to a use-after-free of the rfcomm tty. Fix this by taking references properly, using the tty_port_* helpers when possible. The raw assignments of dev->port.tty in rfcomm_tty_open/close are addressed in the later commit 'rfcomm: Implement .activate, .shutdown and .carrier_raised methods'. Signed-off-by: Gianluca Anzolin Reviewed-by: Peter Hurley Signed-off-by: Gustavo Padovan diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index b6e44ad..cd7ff37 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c @@ -333,10 +333,9 @@ static inline unsigned int rfcomm_room(struct rfcomm_dlc *dlc) static void rfcomm_wfree(struct sk_buff *skb) { struct rfcomm_dev *dev = (void *) skb->sk; - struct tty_struct *tty = dev->port.tty; atomic_sub(skb->truesize, &dev->wmem_alloc); - if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags) && tty) - tty_wakeup(tty); + if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags)) + tty_port_tty_wakeup(&dev->port); tty_port_put(&dev->port); } @@ -410,6 +409,7 @@ static int rfcomm_release_dev(void __user *arg) { struct rfcomm_dev_req req; struct rfcomm_dev *dev; + struct tty_struct *tty; if (copy_from_user(&req, arg, sizeof(req))) return -EFAULT; @@ -429,8 +429,11 @@ static int rfcomm_release_dev(void __user *arg) rfcomm_dlc_close(dev->dlc, 0); /* Shut down TTY synchronously before freeing rfcomm_dev */ - if (dev->port.tty) - tty_vhangup(dev->port.tty); + tty = tty_port_tty_get(&dev->port); + if (tty) { + tty_vhangup(tty); + tty_kref_put(tty); + } if (!test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) rfcomm_dev_del(dev); @@ -563,6 +566,7 @@ static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb) static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err) { struct rfcomm_dev *dev = dlc->owner; + struct tty_struct *tty; if (!dev) return; @@ -572,7 +576,8 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err) wake_up_interruptible(&dev->wait); if (dlc->state == BT_CLOSED) { - if (!dev->port.tty) { + tty = tty_port_tty_get(&dev->port); + if (!tty) { if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) { /* Drop DLC lock here to avoid deadlock * 1. 
rfcomm_dev_get will take rfcomm_dev_lock @@ -591,8 +596,10 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err) tty_port_put(&dev->port); rfcomm_dlc_lock(dlc); } - } else - tty_hangup(dev->port.tty); + } else { + tty_hangup(tty); + tty_kref_put(tty); + } } } @@ -604,10 +611,8 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig) BT_DBG("dlc %p dev %p v24_sig 0x%02x", dlc, dev, v24_sig); - if ((dev->modem_status & TIOCM_CD) && !(v24_sig & RFCOMM_V24_DV)) { - if (dev->port.tty && !C_CLOCAL(dev->port.tty)) - tty_hangup(dev->port.tty); - } + if ((dev->modem_status & TIOCM_CD) && !(v24_sig & RFCOMM_V24_DV)) + tty_port_tty_hangup(&dev->port, true); dev->modem_status = ((v24_sig & RFCOMM_V24_RTC) ? (TIOCM_DSR | TIOCM_DTR) : 0) | -- cgit v0.10.2 From ebe937f74b8a72cf3ceeae5c2194a160bb092901 Mon Sep 17 00:00:00 2001 From: Gianluca Anzolin Date: Mon, 29 Jul 2013 17:08:09 +0200 Subject: Bluetooth: Remove the device from the list in the destructor The current code removes the device from the device list in several places. Do it only in the destructor instead and in the error path of rfcomm_add_dev() if the device couldn't be initialized. Signed-off-by: Gianluca Anzolin Reviewed-by: Peter Hurley Signed-off-by: Gustavo Padovan diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index cd7ff37..9c0e142 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c @@ -76,13 +76,6 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig); /* ---- Device functions ---- */ -/* - * The reason this isn't actually a race, as you no doubt have a little voice - * screaming at you in your head, is that the refcount should never actually - * reach zero unless the device has already been taken off the list, in - * rfcomm_dev_del(). And if that's not true, we'll hit the BUG() in - * rfcomm_dev_destruct() anyway. - */ static void rfcomm_dev_destruct(struct tty_port *port) { struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port); @@ -90,10 +83,9 @@ static void rfcomm_dev_destruct(struct tty_port *port) BT_DBG("dev %p dlc %p", dev, dlc); - /* Refcount should only hit zero when called from rfcomm_dev_del() - which will have taken us off the list. Everything else are - refcounting bugs. 
*/ - BUG_ON(!list_empty(&dev->list)); + spin_lock(&rfcomm_dev_lock); + list_del(&dev->list); + spin_unlock(&rfcomm_dev_lock); rfcomm_dlc_lock(dlc); /* Detach DLC if it's owned by this dev */ @@ -282,7 +274,9 @@ out: dev->id, NULL); if (IS_ERR(dev->tty_dev)) { err = PTR_ERR(dev->tty_dev); + spin_lock(&rfcomm_dev_lock); list_del(&dev->list); + spin_unlock(&rfcomm_dev_lock); goto free; } @@ -315,10 +309,6 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev) } spin_unlock_irqrestore(&dev->port.lock, flags); - spin_lock(&rfcomm_dev_lock); - list_del_init(&dev->list); - spin_unlock(&rfcomm_dev_lock); - tty_port_put(&dev->port); } @@ -750,13 +740,8 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp) dev->port.tty = NULL; rfcomm_dlc_unlock(dev->dlc); - if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) { - spin_lock(&rfcomm_dev_lock); - list_del_init(&dev->list); - spin_unlock(&rfcomm_dev_lock); - + if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) tty_port_put(&dev->port); - } } else spin_unlock_irqrestore(&dev->port.lock, flags); -- cgit v0.10.2 From 54b926a1434e817ca84cb090f36b56763e192470 Mon Sep 17 00:00:00 2001 From: Gianluca Anzolin Date: Mon, 29 Jul 2013 17:08:10 +0200 Subject: Bluetooth: Move the tty initialization and cleanup out of open/close Move the tty_struct initialization from rfcomm_tty_open() to rfcomm_tty_install() and do the same for the cleanup moving the code from rfcomm_tty_close() to rfcomm_tty_cleanup(). Add also extra error handling in rfcomm_tty_install() because, unlike .open()/.close(), .cleanup() is not called if .install() fails. Signed-off-by: Gianluca Anzolin Reviewed-by: Peter Hurley Signed-off-by: Gustavo Padovan diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index 9c0e142..73dd615 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c @@ -633,49 +633,61 @@ static void rfcomm_tty_copy_pending(struct rfcomm_dev *dev) tty_flip_buffer_push(&dev->port); } -static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp) +/* do the reverse of install, clearing the tty fields and releasing the + * reference to tty_port + */ +static void rfcomm_tty_cleanup(struct tty_struct *tty) +{ + struct rfcomm_dev *dev = tty->driver_data; + + if (dev->tty_dev->parent) + device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST); + + /* Close DLC and dettach TTY */ + rfcomm_dlc_close(dev->dlc, 0); + + clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags); + + rfcomm_dlc_lock(dev->dlc); + tty->driver_data = NULL; + dev->port.tty = NULL; + rfcomm_dlc_unlock(dev->dlc); + + tty_port_put(&dev->port); +} + +/* we acquire the tty_port reference since it's here the tty is first used + * by setting the termios. We also populate the driver_data field and install + * the tty port + */ +static int rfcomm_tty_install(struct tty_driver *driver, struct tty_struct *tty) { DECLARE_WAITQUEUE(wait, current); struct rfcomm_dev *dev; struct rfcomm_dlc *dlc; - unsigned long flags; - int err, id; - - id = tty->index; + int err; - BT_DBG("tty %p id %d", tty, id); - - /* We don't leak this refcount. For reasons which are not entirely - clear, the TTY layer will call our ->close() method even if the - open fails. We decrease the refcount there, and decreasing it - here too would cause breakage. 
*/ - dev = rfcomm_dev_get(id); + dev = rfcomm_dev_get(tty->index); if (!dev) return -ENODEV; - BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst, - dev->channel, dev->port.count); - - spin_lock_irqsave(&dev->port.lock, flags); - if (++dev->port.count > 1) { - spin_unlock_irqrestore(&dev->port.lock, flags); - return 0; - } - spin_unlock_irqrestore(&dev->port.lock, flags); - dlc = dev->dlc; /* Attach TTY and open DLC */ - rfcomm_dlc_lock(dlc); tty->driver_data = dev; dev->port.tty = tty; rfcomm_dlc_unlock(dlc); set_bit(RFCOMM_TTY_ATTACHED, &dev->flags); + /* install the tty_port */ + err = tty_port_install(&dev->port, driver, tty); + if (err < 0) + goto error_no_dlc; + err = rfcomm_dlc_open(dlc, &dev->src, &dev->dst, dev->channel); if (err < 0) - return err; + goto error_no_dlc; /* Wait for DLC to connect */ add_wait_queue(&dev->wait, &wait); @@ -702,15 +714,45 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp) set_current_state(TASK_RUNNING); remove_wait_queue(&dev->wait, &wait); - if (err == 0) - device_move(dev->tty_dev, rfcomm_get_device(dev), - DPM_ORDER_DEV_AFTER_PARENT); + if (err < 0) + goto error_no_connection; + + device_move(dev->tty_dev, rfcomm_get_device(dev), + DPM_ORDER_DEV_AFTER_PARENT); + return 0; + +error_no_connection: + rfcomm_dlc_close(dlc, err); +error_no_dlc: + clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags); + tty_port_put(&dev->port); + return err; +} + +static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp) +{ + struct rfcomm_dev *dev = tty->driver_data; + unsigned long flags; + + BT_DBG("tty %p id %d", tty, tty->index); + BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst, + dev->channel, dev->port.count); + + spin_lock_irqsave(&dev->port.lock, flags); + dev->port.count++; + spin_unlock_irqrestore(&dev->port.lock, flags); + + /* + * FIXME: rfcomm should use proper flow control for + * received data. 
This hack will be unnecessary and can + * be removed when that's implemented + */ rfcomm_tty_copy_pending(dev); rfcomm_dlc_unthrottle(dev->dlc); - return err; + return 0; } static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp) @@ -727,25 +769,11 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp) spin_lock_irqsave(&dev->port.lock, flags); if (!--dev->port.count) { spin_unlock_irqrestore(&dev->port.lock, flags); - if (dev->tty_dev->parent) - device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST); - - /* Close DLC and dettach TTY */ - rfcomm_dlc_close(dev->dlc, 0); - - clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags); - - rfcomm_dlc_lock(dev->dlc); - tty->driver_data = NULL; - dev->port.tty = NULL; - rfcomm_dlc_unlock(dev->dlc); if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) tty_port_put(&dev->port); } else spin_unlock_irqrestore(&dev->port.lock, flags); - - tty_port_put(&dev->port); } static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) @@ -1118,6 +1146,8 @@ static const struct tty_operations rfcomm_ops = { .wait_until_sent = rfcomm_tty_wait_until_sent, .tiocmget = rfcomm_tty_tiocmget, .tiocmset = rfcomm_tty_tiocmset, + .install = rfcomm_tty_install, + .cleanup = rfcomm_tty_cleanup, }; int __init rfcomm_init_ttys(void) -- cgit v0.10.2 From cad348a17e170451ea8688b532a6ca3e98c63b60 Mon Sep 17 00:00:00 2001 From: Gianluca Anzolin Date: Mon, 29 Jul 2013 17:08:11 +0200 Subject: Bluetooth: Implement .activate, .shutdown and .carrier_raised methods Implement .activate, .shutdown and .carrier_raised methods of tty_port to manage the dlc, moving the code from rfcomm_tty_install() and rfcomm_tty_cleanup() functions. At the same time the tty .open()/.close() and .hangup() methods are changed to use the tty_port helpers that properly call the aforementioned tty_port methods. 
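For context, the tty_port helpers drive these hooks as follows: tty_port_open() calls .activate() when the port is not yet initialized (effectively the first open) and, for blocking opens, waits until .carrier_raised() returns non-zero, while the final tty_port_close() calls .shutdown(). A minimal, driver-agnostic skeleton (placeholder foo_* names, not part of this patch) looks like:

/* Skeleton only: shows the tty_port contract the rfcomm patch relies on. */
#include <linux/tty.h>
#include <linux/types.h>

struct foo_dev {
	struct tty_port port;
	bool link_up;		/* set once the lower layer is connected */
};

static int foo_activate(struct tty_port *port, struct tty_struct *tty)
{
	struct foo_dev *foo = container_of(port, struct foo_dev, port);

	/* a real driver would start the (possibly asynchronous)
	 * connection here and return 0 or -errno */
	foo->link_up = true;
	return 0;
}

static int foo_carrier_raised(struct tty_port *port)
{
	struct foo_dev *foo = container_of(port, struct foo_dev, port);

	return foo->link_up;	/* blocking open waits until non-zero */
}

static void foo_shutdown(struct tty_port *port)
{
	struct foo_dev *foo = container_of(port, struct foo_dev, port);

	foo->link_up = false;	/* tear the link down on last close */
}

static const struct tty_port_operations foo_port_ops = {
	.activate	= foo_activate,
	.carrier_raised	= foo_carrier_raised,
	.shutdown	= foo_shutdown,
};
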
Signed-off-by: Gianluca Anzolin Reviewed-by: Peter Hurley Signed-off-by: Gustavo Padovan diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index 73dd615..583f713 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c @@ -58,7 +58,6 @@ struct rfcomm_dev { uint modem_status; struct rfcomm_dlc *dlc; - wait_queue_head_t wait; struct device *tty_dev; @@ -104,8 +103,39 @@ static void rfcomm_dev_destruct(struct tty_port *port) module_put(THIS_MODULE); } +/* device-specific initialization: open the dlc */ +static int rfcomm_dev_activate(struct tty_port *port, struct tty_struct *tty) +{ + struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port); + + return rfcomm_dlc_open(dev->dlc, &dev->src, &dev->dst, dev->channel); +} + +/* we block the open until the dlc->state becomes BT_CONNECTED */ +static int rfcomm_dev_carrier_raised(struct tty_port *port) +{ + struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port); + + return (dev->dlc->state == BT_CONNECTED); +} + +/* device-specific cleanup: close the dlc */ +static void rfcomm_dev_shutdown(struct tty_port *port) +{ + struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port); + + if (dev->tty_dev->parent) + device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST); + + /* close the dlc */ + rfcomm_dlc_close(dev->dlc, 0); +} + static const struct tty_port_operations rfcomm_port_ops = { .destruct = rfcomm_dev_destruct, + .activate = rfcomm_dev_activate, + .shutdown = rfcomm_dev_shutdown, + .carrier_raised = rfcomm_dev_carrier_raised, }; static struct rfcomm_dev *__rfcomm_dev_get(int id) @@ -228,7 +258,6 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) tty_port_init(&dev->port); dev->port.ops = &rfcomm_port_ops; - init_waitqueue_head(&dev->wait); skb_queue_head_init(&dev->pending); @@ -563,9 +592,12 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err) BT_DBG("dlc %p dev %p err %d", dlc, dev, err); dev->err = err; - wake_up_interruptible(&dev->wait); + if (dlc->state == BT_CONNECTED) { + device_move(dev->tty_dev, rfcomm_get_device(dev), + DPM_ORDER_DEV_AFTER_PARENT); - if (dlc->state == BT_CLOSED) { + wake_up_interruptible(&dev->port.open_wait); + } else if (dlc->state == BT_CLOSED) { tty = tty_port_tty_get(&dev->port); if (!tty) { if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) { @@ -640,17 +672,10 @@ static void rfcomm_tty_cleanup(struct tty_struct *tty) { struct rfcomm_dev *dev = tty->driver_data; - if (dev->tty_dev->parent) - device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST); - - /* Close DLC and dettach TTY */ - rfcomm_dlc_close(dev->dlc, 0); - clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags); rfcomm_dlc_lock(dev->dlc); tty->driver_data = NULL; - dev->port.tty = NULL; rfcomm_dlc_unlock(dev->dlc); tty_port_put(&dev->port); @@ -662,7 +687,6 @@ static void rfcomm_tty_cleanup(struct tty_struct *tty) */ static int rfcomm_tty_install(struct tty_driver *driver, struct tty_struct *tty) { - DECLARE_WAITQUEUE(wait, current); struct rfcomm_dev *dev; struct rfcomm_dlc *dlc; int err; @@ -676,72 +700,30 @@ static int rfcomm_tty_install(struct tty_driver *driver, struct tty_struct *tty) /* Attach TTY and open DLC */ rfcomm_dlc_lock(dlc); tty->driver_data = dev; - dev->port.tty = tty; rfcomm_dlc_unlock(dlc); set_bit(RFCOMM_TTY_ATTACHED, &dev->flags); /* install the tty_port */ err = tty_port_install(&dev->port, driver, tty); - if (err < 0) - goto error_no_dlc; - - err = rfcomm_dlc_open(dlc, &dev->src, &dev->dst, dev->channel); - if (err < 0) - goto 
error_no_dlc; + if (err) + rfcomm_tty_cleanup(tty); - /* Wait for DLC to connect */ - add_wait_queue(&dev->wait, &wait); - while (1) { - set_current_state(TASK_INTERRUPTIBLE); - - if (dlc->state == BT_CLOSED) { - err = -dev->err; - break; - } - - if (dlc->state == BT_CONNECTED) - break; - - if (signal_pending(current)) { - err = -EINTR; - break; - } - - tty_unlock(tty); - schedule(); - tty_lock(tty); - } - set_current_state(TASK_RUNNING); - remove_wait_queue(&dev->wait, &wait); - - if (err < 0) - goto error_no_connection; - - device_move(dev->tty_dev, rfcomm_get_device(dev), - DPM_ORDER_DEV_AFTER_PARENT); - return 0; - -error_no_connection: - rfcomm_dlc_close(dlc, err); -error_no_dlc: - clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags); - tty_port_put(&dev->port); return err; } static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp) { struct rfcomm_dev *dev = tty->driver_data; - unsigned long flags; + int err; BT_DBG("tty %p id %d", tty, tty->index); BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst, dev->channel, dev->port.count); - spin_lock_irqsave(&dev->port.lock, flags); - dev->port.count++; - spin_unlock_irqrestore(&dev->port.lock, flags); + err = tty_port_open(&dev->port, tty, filp); + if (err) + return err; /* * FIXME: rfcomm should use proper flow control for @@ -758,7 +740,6 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp) static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp) { struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; - unsigned long flags; if (!dev) return; @@ -766,14 +747,10 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp) BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc, dev->port.count); - spin_lock_irqsave(&dev->port.lock, flags); - if (!--dev->port.count) { - spin_unlock_irqrestore(&dev->port.lock, flags); + tty_port_close(&dev->port, tty, filp); - if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) - tty_port_put(&dev->port); - } else - spin_unlock_irqrestore(&dev->port.lock, flags); + if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) + tty_port_put(&dev->port); } static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) @@ -1076,7 +1053,7 @@ static void rfcomm_tty_hangup(struct tty_struct *tty) if (!dev) return; - rfcomm_tty_flush_buffer(tty); + tty_port_hangup(&dev->port); if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) { if (rfcomm_dev_get(dev->id) == NULL) @@ -1166,7 +1143,7 @@ int __init rfcomm_init_ttys(void) rfcomm_tty_driver->subtype = SERIAL_TYPE_NORMAL; rfcomm_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV; rfcomm_tty_driver->init_termios = tty_std_termios; - rfcomm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; + rfcomm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL; rfcomm_tty_driver->init_termios.c_lflag &= ~ICANON; tty_set_operations(rfcomm_tty_driver, &rfcomm_ops); -- cgit v0.10.2 From ece3150dea382c7c961fe2604332ed3474960d25 Mon Sep 17 00:00:00 2001 From: Gianluca Anzolin Date: Mon, 29 Jul 2013 17:08:12 +0200 Subject: Bluetooth: Fix the reference counting of tty_port The tty_port can be released in two cases: when we get a HUP in the functions rfcomm_tty_hangup() and rfcomm_dev_state_change(). Or when the user releases the device in rfcomm_release_dev(). In these cases we set the flag RFCOMM_TTY_RELEASED so that no other function can get a reference to the tty_port. 
The use of !test_and_set_bit(RFCOMM_TTY_RELEASED) ensures that the 'initial' tty_port reference is only dropped once. The rfcomm_dev_del function is removed becase it isn't used anymore. Signed-off-by: Gianluca Anzolin Reviewed-by: Peter Hurley Signed-off-by: Gustavo Padovan diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index 583f713..3e078b7 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c @@ -324,23 +324,6 @@ free: return err; } -static void rfcomm_dev_del(struct rfcomm_dev *dev) -{ - unsigned long flags; - BT_DBG("dev %p", dev); - - BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags)); - - spin_lock_irqsave(&dev->port.lock, flags); - if (dev->port.count > 0) { - spin_unlock_irqrestore(&dev->port.lock, flags); - return; - } - spin_unlock_irqrestore(&dev->port.lock, flags); - - tty_port_put(&dev->port); -} - /* ---- Send buffer ---- */ static inline unsigned int rfcomm_room(struct rfcomm_dlc *dlc) { @@ -454,8 +437,9 @@ static int rfcomm_release_dev(void __user *arg) tty_kref_put(tty); } - if (!test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) - rfcomm_dev_del(dev); + if (!test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags)) + tty_port_put(&dev->port); + tty_port_put(&dev->port); return 0; } @@ -607,6 +591,9 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err) * rfcomm_dev_lock -> dlc lock * 2. tty_port_put will deadlock if it's * the last reference + * + * FIXME: when we release the lock anything + * could happen to dev, even its destruction */ rfcomm_dlc_unlock(dlc); if (rfcomm_dev_get(dev->id) == NULL) { @@ -614,7 +601,10 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err) return; } - rfcomm_dev_del(dev); + if (!test_and_set_bit(RFCOMM_TTY_RELEASED, + &dev->flags)) + tty_port_put(&dev->port); + tty_port_put(&dev->port); rfcomm_dlc_lock(dlc); } @@ -741,16 +731,10 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp) { struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; - if (!dev) - return; - BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc, dev->port.count); tty_port_close(&dev->port, tty, filp); - - if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) - tty_port_put(&dev->port); } static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) @@ -1050,17 +1034,11 @@ static void rfcomm_tty_hangup(struct tty_struct *tty) BT_DBG("tty %p dev %p", tty, dev); - if (!dev) - return; - tty_port_hangup(&dev->port); - if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) { - if (rfcomm_dev_get(dev->id) == NULL) - return; - rfcomm_dev_del(dev); + if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags) && + !test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags)) tty_port_put(&dev->port); - } } static int rfcomm_tty_tiocmget(struct tty_struct *tty) -- cgit v0.10.2 From ffe6b68cc5999a3f91a15b6667e69e14186e337d Mon Sep 17 00:00:00 2001 From: Gianluca Anzolin Date: Mon, 29 Jul 2013 17:08:13 +0200 Subject: Bluetooth: Purge the dlc->tx_queue to avoid circular dependency In rfcomm_tty_cleanup we purge the dlc->tx_queue which may contain socket buffers referencing the tty_port and thus preventing the tty_port destruction. 
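The once-only release described in the reference-counting patch above boils down to a small idiom; a kernel-style sketch (placeholder names, not part of either patch) is:

/* Whichever path sets the RELEASED bit first drops the initial
 * reference; any later caller sees the bit already set and skips it. */
#include <linux/bitops.h>
#include <linux/tty.h>

#define FOO_RELEASED	0	/* bit number, analogous to RFCOMM_TTY_RELEASED */

static void foo_release_initial_ref(struct tty_port *port, unsigned long *flags)
{
	if (!test_and_set_bit(FOO_RELEASED, flags))
		tty_port_put(port);	/* initial reference dropped exactly once */
}
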
Signed-off-by: Gianluca Anzolin Reviewed-by: Peter Hurley Signed-off-by: Gustavo Padovan diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index 3e078b7..6d126fa 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c @@ -668,6 +668,12 @@ static void rfcomm_tty_cleanup(struct tty_struct *tty) tty->driver_data = NULL; rfcomm_dlc_unlock(dev->dlc); + /* + * purge the dlc->tx_queue to avoid circular dependencies + * between dev and dlc + */ + skb_queue_purge(&dev->dlc->tx_queue); + tty_port_put(&dev->port); } -- cgit v0.10.2 From e660ed6c70370dae0887e2b5224d8b6c1e138120 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Dalleau?= Date: Mon, 19 Aug 2013 14:23:54 +0200 Subject: Bluetooth: Use hci_connect_sco directly MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit hci_connect is a super function for connecting hci protocols. But the voice_setting parameter (introduced in subsequent patches) is only needed by SCO and security requirements are not needed for SCO channels. Thus, it makes sense to have a separate function for SCO. Signed-off-by: Frédéric Dalleau Acked-by: Marcel Holtmann Signed-off-by: Johan Hedberg Signed-off-by: Gustavo Padovan diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index f77885e..307a192 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -584,6 +584,8 @@ struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle); struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 dst_type, __u8 sec_level, __u8 auth_type); +struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, + bdaddr_t *dst); int hci_conn_check_link_mode(struct hci_conn *conn); int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level); int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type); diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 6c7f363..5f1f448 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -560,13 +560,12 @@ static struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, return acl; } -static struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, - bdaddr_t *dst, u8 sec_level, u8 auth_type) +struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst) { struct hci_conn *acl; struct hci_conn *sco; - acl = hci_connect_acl(hdev, dst, sec_level, auth_type); + acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING); if (IS_ERR(acl)) return acl; @@ -612,9 +611,6 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, return hci_connect_le(hdev, dst, dst_type, sec_level, auth_type); case ACL_LINK: return hci_connect_acl(hdev, dst, sec_level, auth_type); - case SCO_LINK: - case ESCO_LINK: - return hci_connect_sco(hdev, type, dst, sec_level, auth_type); } return ERR_PTR(-EINVAL); diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 2de7150..ab2502c 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -176,8 +176,7 @@ static int sco_connect(struct sock *sk) else type = SCO_LINK; - hcon = hci_connect(hdev, type, dst, BDADDR_BREDR, BT_SECURITY_LOW, - HCI_AT_NO_BONDING); + hcon = hci_connect_sco(hdev, type, dst); if (IS_ERR(hcon)) { err = PTR_ERR(hcon); goto done; -- cgit v0.10.2 From 33f2404823f000f9b5fc570b0a08b1008e241578 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Dalleau?= Date: Mon, 19 Aug 2013 14:23:55 +0200 
Subject: Bluetooth: Remove unused mask parameter in sco_conn_defer_accept MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit From Bluetooth Core v4.0 specification, 7.1.8 Accept Connection Request Command "When accepting synchronous connection request, the Role parameter is not used and will be ignored by the BR/EDR Controller." Signed-off-by: Frédéric Dalleau Acked-by: Marcel Holtmann Signed-off-by: Johan Hedberg Signed-off-by: Gustavo Padovan diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index ab2502c..acdca68 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -651,7 +651,7 @@ static int sco_sock_sendmsg(struct kiocb *iocb, struct socket *sock, return err; } -static void sco_conn_defer_accept(struct hci_conn *conn, int mask) +static void sco_conn_defer_accept(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; @@ -663,11 +663,7 @@ static void sco_conn_defer_accept(struct hci_conn *conn, int mask) struct hci_cp_accept_conn_req cp; bacpy(&cp.bdaddr, &conn->dst); - - if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER)) - cp.role = 0x00; /* Become master */ - else - cp.role = 0x01; /* Remain slave */ + cp.role = 0x00; /* Ignored */ hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp); } else { @@ -697,7 +693,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock, if (sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { - sco_conn_defer_accept(pi->conn->hcon, 0); + sco_conn_defer_accept(pi->conn->hcon); sk->sk_state = BT_CONFIG; msg->msg_namelen = 0; -- cgit v0.10.2 From ad10b1a48754b1381582d96f070a39832e41382d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Dalleau?= Date: Mon, 19 Aug 2013 14:23:56 +0200 Subject: Bluetooth: Add Bluetooth socket voice option MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch extends the current Bluetooth socket options with BT_VOICE. This is intended to choose voice data type at runtime. It only applies to SCO sockets. Incoming connections shall be setup during deferred setup. Outgoing connections shall be setup before connect(). The desired setting is stored in the SCO socket info. This patch declares needed members, modifies getsockopt() and setsockopt(). 
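A userspace sketch of the intended use (illustration only, not part of the patch): request transparent data on an outgoing SCO socket before connect(). BT_VOICE and struct bt_voice are defined locally in case the installed BlueZ headers predate this change; the remote address is a placeholder and linking against libbluetooth is assumed for str2ba().

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/sco.h>

#ifndef BT_VOICE
#define BT_VOICE 11
struct bt_voice {
	uint16_t setting;
};
#define BT_VOICE_TRANSPARENT	0x0003
#define BT_VOICE_CVSD_16BIT	0x0060
#endif

int main(void)
{
	struct sockaddr_sco addr = { .sco_family = AF_BLUETOOTH };
	struct bt_voice voice = { .setting = BT_VOICE_TRANSPARENT };
	int sk;

	sk = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_SCO);
	if (sk < 0)
		return 1;

	/* must be done while the socket is still open/bound,
	 * i.e. before connect() */
	if (setsockopt(sk, SOL_BLUETOOTH, BT_VOICE, &voice, sizeof(voice)) < 0)
		perror("setsockopt(BT_VOICE)");

	str2ba("00:11:22:33:44:55", &addr.sco_bdaddr);	/* placeholder address */
	if (connect(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("connect");

	close(sk);
	return 0;
}
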
Signed-off-by: Frédéric Dalleau Acked-by: Marcel Holtmann Signed-off-by: Johan Hedberg Signed-off-by: Gustavo Padovan diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index 10eb9b3..10d43d8 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -107,6 +107,14 @@ struct bt_power { */ #define BT_CHANNEL_POLICY_AMP_PREFERRED 2 +#define BT_VOICE 11 +struct bt_voice { + __u16 setting; +}; + +#define BT_VOICE_TRANSPARENT 0x0003 +#define BT_VOICE_CVSD_16BIT 0x0060 + __printf(1, 2) int bt_info(const char *fmt, ...); __printf(1, 2) diff --git a/include/net/bluetooth/sco.h b/include/net/bluetooth/sco.h index 1e35c43..e252a31 100644 --- a/include/net/bluetooth/sco.h +++ b/include/net/bluetooth/sco.h @@ -73,6 +73,7 @@ struct sco_conn { struct sco_pinfo { struct bt_sock bt; __u32 flags; + __u16 setting; struct sco_conn *conn; }; diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index acdca68..678747e 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -416,6 +416,8 @@ static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, int pro sk->sk_protocol = proto; sk->sk_state = BT_OPEN; + sco_pi(sk)->setting = BT_VOICE_CVSD_16BIT; + setup_timer(&sk->sk_timer, sco_sock_timeout, (unsigned long)sk); bt_sock_link(&sco_sk_list, sk); @@ -709,7 +711,8 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock, static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; - int err = 0; + int len, err = 0; + struct bt_voice voice; u32 opt; BT_DBG("sk %p", sk); @@ -735,6 +738,31 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); break; + case BT_VOICE: + if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND && + sk->sk_state != BT_CONNECT2) { + err = -EINVAL; + break; + } + + voice.setting = sco_pi(sk)->setting; + + len = min_t(unsigned int, sizeof(voice), optlen); + if (copy_from_user((char *) &voice, optval, len)) { + err = -EFAULT; + break; + } + + /* Explicitly check for these values */ + if (voice.setting != BT_VOICE_TRANSPARENT && + voice.setting != BT_VOICE_CVSD_16BIT) { + err = -EINVAL; + break; + } + + sco_pi(sk)->setting = voice.setting; + break; + default: err = -ENOPROTOOPT; break; @@ -808,6 +836,7 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char { struct sock *sk = sock->sk; int len, err = 0; + struct bt_voice voice; BT_DBG("sk %p", sk); @@ -833,6 +862,15 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char break; + case BT_VOICE: + voice.setting = sco_pi(sk)->setting; + + len = min_t(unsigned int, len, sizeof(voice)); + if (copy_to_user(optval, (char *)&voice, len)) + err = -EFAULT; + + break; + default: err = -ENOPROTOOPT; break; -- cgit v0.10.2 From 5d4d62f6ca04c54ed6c84df6adf5427c52feda3e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Dalleau?= Date: Mon, 19 Aug 2013 14:23:57 +0200 Subject: Bluetooth: Add constants for SCO airmode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch defines constants for SCO airmode from SCO voice setting. It refers to Bluetooth Core V4.0 specification, Part E, Chap 6.12 which describe SCO voice setting format. 
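A quick standalone check (constants re-declared locally for illustration) that the BT_VOICE settings introduced earlier map onto these airmode values, since only the low two bits of the voice setting carry the air mode:

#include <assert.h>
#include <stdio.h>

#define SCO_AIRMODE_MASK	0x0003
#define SCO_AIRMODE_CVSD	0x0000
#define SCO_AIRMODE_TRANSP	0x0003

#define BT_VOICE_TRANSPARENT	0x0003
#define BT_VOICE_CVSD_16BIT	0x0060

int main(void)
{
	/* 0x0060 & 0x0003 == 0x0000 (CVSD), 0x0003 & 0x0003 == 0x0003 (transparent) */
	assert((BT_VOICE_CVSD_16BIT & SCO_AIRMODE_MASK) == SCO_AIRMODE_CVSD);
	assert((BT_VOICE_TRANSPARENT & SCO_AIRMODE_MASK) == SCO_AIRMODE_TRANSP);
	printf("voice setting 0x%04x -> CVSD, 0x%04x -> transparent\n",
	       BT_VOICE_CVSD_16BIT, BT_VOICE_TRANSPARENT);
	return 0;
}
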
Signed-off-by: Frédéric Dalleau Acked-by: Marcel Holtmann Signed-off-by: Johan Hedberg Signed-off-by: Gustavo Padovan diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 307a192..f403509 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1215,4 +1215,8 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8], u8 bdaddr_to_le(u8 bdaddr_type); +#define SCO_AIRMODE_MASK 0x0003 +#define SCO_AIRMODE_CVSD 0x0000 +#define SCO_AIRMODE_TRANSP 0x0003 + #endif /* __HCI_CORE_H */ -- cgit v0.10.2 From 2f69a82acf6f971a9e184dd32b24c79a14388a6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Dalleau?= Date: Mon, 19 Aug 2013 14:23:58 +0200 Subject: Bluetooth: Use voice setting in deferred SCO connection request MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When an incoming eSCO connection is requested, check the selected voice setting and reply appropriately. Voice setting should have been negotiated previously. For example, in case of HFP, the codec is negotiated using AT commands on the RFCOMM channel. This patch only changes replies for socket with deferred setup enabled. Signed-off-by: Frédéric Dalleau Signed-off-by: Johan Hedberg Acked-by: Marcel Holtmann Signed-off-by: Gustavo Padovan diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 678747e..b1016c8 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -653,7 +653,7 @@ static int sco_sock_sendmsg(struct kiocb *iocb, struct socket *sock, return err; } -static void sco_conn_defer_accept(struct hci_conn *conn) +static void sco_conn_defer_accept(struct hci_conn *conn, u16 setting) { struct hci_dev *hdev = conn->hdev; @@ -676,9 +676,21 @@ static void sco_conn_defer_accept(struct hci_conn *conn) cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40); cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40); - cp.max_latency = __constant_cpu_to_le16(0xffff); - cp.content_format = cpu_to_le16(hdev->voice_setting); - cp.retrans_effort = 0xff; + cp.content_format = cpu_to_le16(setting); + + switch (setting & SCO_AIRMODE_MASK) { + case SCO_AIRMODE_TRANSP: + if (conn->pkt_type & ESCO_2EV3) + cp.max_latency = __constant_cpu_to_le16(0x0008); + else + cp.max_latency = __constant_cpu_to_le16(0x000D); + cp.retrans_effort = 0x02; + break; + case SCO_AIRMODE_CVSD: + cp.max_latency = __constant_cpu_to_le16(0xffff); + cp.retrans_effort = 0xff; + break; + } hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp), &cp); @@ -695,7 +707,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock, if (sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { - sco_conn_defer_accept(pi->conn->hcon); + sco_conn_defer_accept(pi->conn->hcon, pi->setting); sk->sk_state = BT_CONFIG; msg->msg_namelen = 0; -- cgit v0.10.2 From 10c62ddc6f032c3a096401ca3ce7e5b2d5780859 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Dalleau?= Date: Mon, 19 Aug 2013 14:23:59 +0200 Subject: Bluetooth: Parameters for outgoing SCO connections MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In order to establish a transparent SCO connection, the correct settings must be specified in the Setup Synchronous Connection request. For that, a setting field is added to ACL connection data to set up the desired parameters. The patch also removes usage of hdev->voice_setting in CVSD connection and makes use of T2 parameters for transparent data. 
Signed-off-by: Frédéric Dalleau Signed-off-by: Johan Hedberg Acked-by: Marcel Holtmann Signed-off-by: Gustavo Padovan diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index f403509..61ca2ce 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -320,6 +320,7 @@ struct hci_conn { __u32 passkey_notify; __u8 passkey_entered; __u16 disc_timeout; + __u16 setting; unsigned long flags; __u8 remote_cap; @@ -584,8 +585,8 @@ struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle); struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 dst_type, __u8 sec_level, __u8 auth_type); -struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, - bdaddr_t *dst); +struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, + __u16 setting); int hci_conn_check_link_mode(struct hci_conn *conn); int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level); int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type); diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 5f1f448..c0e56a5 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -185,13 +185,24 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle) conn->attempt++; cp.handle = cpu_to_le16(handle); - cp.pkt_type = cpu_to_le16(conn->pkt_type); cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40); cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40); - cp.max_latency = __constant_cpu_to_le16(0xffff); - cp.voice_setting = cpu_to_le16(hdev->voice_setting); - cp.retrans_effort = 0xff; + cp.voice_setting = cpu_to_le16(conn->setting); + + switch (conn->setting & SCO_AIRMODE_MASK) { + case SCO_AIRMODE_TRANSP: + cp.pkt_type = __constant_cpu_to_le16(EDR_ESCO_MASK & + ~ESCO_2EV3); + cp.max_latency = __constant_cpu_to_le16(0x000d); + cp.retrans_effort = 0x02; + break; + case SCO_AIRMODE_CVSD: + cp.pkt_type = cpu_to_le16(conn->pkt_type); + cp.max_latency = __constant_cpu_to_le16(0xffff); + cp.retrans_effort = 0xff; + break; + } hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp); } @@ -560,7 +571,8 @@ static struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, return acl; } -struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst) +struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, + __u16 setting) { struct hci_conn *acl; struct hci_conn *sco; @@ -583,6 +595,8 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst) hci_conn_hold(sco); + sco->setting = setting; + if (acl->state == BT_CONNECTED && (sco->state == BT_OPEN || sco->state == BT_CLOSED)) { set_bit(HCI_CONN_POWER_SAVE, &acl->flags); diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index b1016c8..ed581b4 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -176,7 +176,7 @@ static int sco_connect(struct sock *sk) else type = SCO_LINK; - hcon = hci_connect_sco(hdev, type, dst); + hcon = hci_connect_sco(hdev, type, dst, sco_pi(sk)->setting); if (IS_ERR(hcon)) { err = PTR_ERR(hcon); goto done; -- cgit v0.10.2 From 07a5c61eda7f23883273f738edbf6caaebb71923 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Dalleau?= Date: Mon, 19 Aug 2013 14:24:00 +0200 Subject: Bluetooth: Add constants and macro declaration for transparent data MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch defines constants and macro for transparent data LMP features. 
It refers to Bluetooth Core V4.0 specification, Part C, Chap 3.3 which defines LMP feature mask. Signed-off-by: Frédéric Dalleau Acked-by: Marcel Holtmann Signed-off-by: Johan Hedberg Signed-off-by: Gustavo Padovan diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index a01fbb4..aaeaf09 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -238,6 +238,7 @@ enum { #define LMP_CVSD 0x01 #define LMP_PSCHEME 0x02 #define LMP_PCONTROL 0x04 +#define LMP_TRANSPARENT 0x08 #define LMP_RSSI_INQ 0x40 #define LMP_ESCO 0x80 diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 61ca2ce..b2bfab8 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -800,6 +800,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn); #define lmp_lsto_capable(dev) ((dev)->features[0][7] & LMP_LSTO) #define lmp_inq_tx_pwr_capable(dev) ((dev)->features[0][7] & LMP_INQ_TX_PWR) #define lmp_ext_feat_capable(dev) ((dev)->features[0][7] & LMP_EXTFEATURES) +#define lmp_transp_capable(dev) ((dev)->features[0][2] & LMP_TRANSPARENT) /* ----- Extended LMP capabilities ----- */ #define lmp_host_ssp_capable(dev) ((dev)->features[1][0] & LMP_HOST_SSP) -- cgit v0.10.2 From 79dc0087c33f06a8c35d8c9e37ea6307b790bc4c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Dalleau?= Date: Mon, 19 Aug 2013 14:24:01 +0200 Subject: Bluetooth: Prevent transparent SCO on older devices MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Older Bluetooth devices may not support Setup Synchronous Connection or SCO transparent data. This is indicated by the corresponding LMP feature bits. It is not possible to know if the adapter support these features before setting BT_VOICE option since the socket is not bound to an adapter. An adapter can also be added after the socket is created. The socket can be bound to an address before adapter is plugged in. Thus, on a such adapters, if user request BT_VOICE_TRANSPARENT, outgoing connections fail on connect() and returns -EOPNOTSUPP. Incoming connections do not fail. However, they should only be allowed depending on what was specified in Write_Voice_Settings command. EOPNOTSUPP is choosen because connect() system call is failing after selecting route but before any connection attempt. Signed-off-by: Frédéric Dalleau Signed-off-by: Johan Hedberg Acked-by: Marcel Holtmann Signed-off-by: Gustavo Padovan diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index ed581b4..96bd388 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -176,6 +176,12 @@ static int sco_connect(struct sock *sk) else type = SCO_LINK; + if (sco_pi(sk)->setting == BT_VOICE_TRANSPARENT && + (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev))) { + err = -EOPNOTSUPP; + goto done; + } + hcon = hci_connect_sco(hdev, type, dst, sco_pi(sk)->setting); if (IS_ERR(hcon)) { err = PTR_ERR(hcon); -- cgit v0.10.2 From 1a4c958cf9b1e159bc63d63b9e362904dd2c4ac3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Dalleau?= Date: Mon, 19 Aug 2013 14:24:02 +0200 Subject: Bluetooth: Handle specific error for SCO connection fallback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Synchronous Connection Complete event can return error "Connection Rejected due to Limited resources (0x10)". Handling this error is required for SCO connection fallback. This error happens when the server tried to accept the connection but failed to negotiate settings. 
This error code has been verified experimentally by sending a T2 request to a T1 only SCO listener. Client dump follows : < HCI Command (0x01|0x0028) plen 17 [hci0] 3.696064 Handle: 12 Transmit bandwidth: 8000 Receive bandwidth: 8000 Max latency: 13 Setting: 0x0003 Retransmission effort: Optimize for link quality (0x02) Packet type: 0x0380 > HCI Event (0x0f) plen 4 [hci0] 3.697034 Setup Synchronous Connection (0x01|0x0028) ncmd 1 Status: Success (0x00) > HCI Event (0x2c) plen 17 [hci0] 3.736059 Status: Connection Rejected due to Limited Resources (0x0d) Handle: 0 Address: xx:xx:xx:xx:xx:AB (OUI 70-F3-95) Link type: eSCO (0x02) Transmission interval: 0x0c Retransmission window: 0x06 RX packet length: 60 TX packet length: 60 Air mode: Transparent (0x03) Server dump follows : > HCI Event (0x04) plen 10 [hci0] 4.741513 Address: xx:xx:xx:xx:xx:D9 (OUI 20-68-9D) Class: 0x620100 Major class: Computer (desktop, notebook, PDA, organizers) Minor class: Uncategorized, code for device not assigned Networking (LAN, Ad hoc) Audio (Speaker, Microphone, Headset) Telephony (Cordless telephony, Modem, Headset) Link type: eSCO (0x02) < HCI Command (0x01|0x0029) plen 21 [hci0] 4.743269 Address: xx:xx:xx:xx:xx:D9 (OUI 20-68-9D) Transmit bandwidth: 8000 Receive bandwidth: 8000 Max latency: 13 Setting: 0x0003 Retransmission effort: Optimize for link quality (0x02) Packet type: 0x03c1 > HCI Event (0x0f) plen 4 [hci0] 4.745517 Accept Synchronous Connection (0x01|0x0029) ncmd 1 Status: Success (0x00) > HCI Event (0x2c) plen 17 [hci0] 4.749508 Status: Connection Rejected due to Limited Resources (0x0d) Handle: 0 Address: xx:xx:xx:xx:xx:D9 (OUI 20-68-9D) Link type: eSCO (0x02) Transmission interval: 0x0c Retransmission window: 0x06 RX packet length: 60 TX packet length: 60 Air mode: Transparent (0x03) Signed-off-by: Frédéric Dalleau Acked-by: Marcel Holtmann Signed-off-by: Gustavo Padovan diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 50e39f4..a8bb7bb 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -2904,6 +2904,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, hci_conn_add_sysfs(conn); break; + case 0x0d: /* Connection Rejected due to Limited Resources */ case 0x11: /* Unsupported Feature or Parameter Value */ case 0x1c: /* SCO interval rejected */ case 0x1a: /* Unsupported Remote Feature */ -- cgit v0.10.2 From 2dea632f9acad076370fe871d4ccc93868621403 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Dalleau?= Date: Mon, 19 Aug 2013 14:24:03 +0200 Subject: Bluetooth: Add SCO connection fallback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When initiating a transparent eSCO connection, make use of T2 settings at first try. T2 is the recommended settings from HFP 1.6 WideBand Speech. Upon connection failure, try T1 settings. When CVSD is requested and eSCO is supported, try to establish eSCO connection using S3 settings. If it fails, fallback in sequence to S2, S1, D1, D0 settings. To know which setting should be used, conn->attempt is used. It indicates the currently ongoing SCO connection attempt and can be used as the index for the fallback settings table. These setting and the fallback order are described in Bluetooth HFP 1.6 specification p. 101. 
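The retry logic can be illustrated in isolation: conn->attempt is 1-based, so attempt - 1 indexes the parameter table, and setup fails once the table is exhausted, which is what terminates the fallback loop driven from hci_sync_conn_complete_evt(). A standalone sketch (latency values copied from the CVSD table above, packet-type bits omitted to keep it self-contained):

#include <stdbool.h>
#include <stdio.h>

struct sco_param {
	const char *name;
	unsigned int max_latency;
};

static const struct sco_param sco_param_cvsd[] = {
	{ "S3", 0x000a }, { "S2", 0x0007 }, { "S1", 0x0007 },
	{ "D1", 0xffff }, { "D0", 0xffff },
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* mirrors hci_setup_sync(): returning false means "no settings left" */
static bool setup_sync(unsigned int attempt, const struct sco_param **param)
{
	if (attempt > ARRAY_SIZE(sco_param_cvsd))
		return false;
	*param = &sco_param_cvsd[attempt - 1];
	return true;
}

int main(void)
{
	const struct sco_param *p;

	for (unsigned int attempt = 1; setup_sync(attempt, &p); attempt++)
		printf("attempt %u: %s (max_latency 0x%04x)\n",
		       attempt, p->name, p->max_latency);
	return 0;
}
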
Signed-off-by: Frédéric Dalleau Signed-off-by: Johan Hedberg Signed-off-by: Gustavo Padovan diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index b2bfab8..3ede820 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -570,7 +570,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev, } void hci_disconnect(struct hci_conn *conn, __u8 reason); -void hci_setup_sync(struct hci_conn *conn, __u16 handle); +bool hci_setup_sync(struct hci_conn *conn, __u16 handle); void hci_sco_setup(struct hci_conn *conn, __u8 status); struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst); diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index c0e56a5..f081712 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -31,6 +31,24 @@ #include #include +struct sco_param { + u16 pkt_type; + u16 max_latency; +}; + +static const struct sco_param sco_param_cvsd[] = { + { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a }, /* S3 */ + { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007 }, /* S2 */ + { EDR_ESCO_MASK | ESCO_EV3, 0x0007 }, /* S1 */ + { EDR_ESCO_MASK | ESCO_HV3, 0xffff }, /* D1 */ + { EDR_ESCO_MASK | ESCO_HV1, 0xffff }, /* D0 */ +}; + +static const struct sco_param sco_param_wideband[] = { + { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d }, /* T2 */ + { EDR_ESCO_MASK | ESCO_EV3, 0x0008 }, /* T1 */ +}; + static void hci_le_create_connection(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; @@ -172,10 +190,11 @@ static void hci_add_sco(struct hci_conn *conn, __u16 handle) hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp); } -void hci_setup_sync(struct hci_conn *conn, __u16 handle) +bool hci_setup_sync(struct hci_conn *conn, __u16 handle) { struct hci_dev *hdev = conn->hdev; struct hci_cp_setup_sync_conn cp; + const struct sco_param *param; BT_DBG("hcon %p", conn); @@ -192,19 +211,28 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle) switch (conn->setting & SCO_AIRMODE_MASK) { case SCO_AIRMODE_TRANSP: - cp.pkt_type = __constant_cpu_to_le16(EDR_ESCO_MASK & - ~ESCO_2EV3); - cp.max_latency = __constant_cpu_to_le16(0x000d); + if (conn->attempt > ARRAY_SIZE(sco_param_wideband)) + return false; cp.retrans_effort = 0x02; + param = &sco_param_wideband[conn->attempt - 1]; break; case SCO_AIRMODE_CVSD: - cp.pkt_type = cpu_to_le16(conn->pkt_type); - cp.max_latency = __constant_cpu_to_le16(0xffff); - cp.retrans_effort = 0xff; + if (conn->attempt > ARRAY_SIZE(sco_param_cvsd)) + return false; + cp.retrans_effort = 0x01; + param = &sco_param_cvsd[conn->attempt - 1]; break; + default: + return false; } - hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp); + cp.pkt_type = __cpu_to_le16(param->pkt_type); + cp.max_latency = __cpu_to_le16(param->max_latency); + + if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0) + return false; + + return true; } void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index a8bb7bb..94aab73 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -2909,11 +2909,11 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, case 0x1c: /* SCO interval rejected */ case 0x1a: /* Unsupported Remote Feature */ case 0x1f: /* Unspecified error */ - if (conn->out && conn->attempt < 2) { + if (conn->out) { conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | (hdev->esco_type & EDR_ESCO_MASK); - hci_setup_sync(conn, conn->link->handle); - goto unlock; + if 
(hci_setup_sync(conn, conn->link->handle)) + goto unlock; } /* fall through */ -- cgit v0.10.2 From 9dd3a13b885fef7dca0d2f27517a50b5fb5097c6 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Thu, 13 Sep 2012 01:11:25 +0100 Subject: sfc: Move details of a Falcon bug workaround out of ethtool.c Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 1fc2145..4db37f7 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -709,7 +709,6 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev, struct efx_nic *efx = netdev_priv(net_dev); u8 wanted_fc, old_fc; u32 old_adv; - bool reset; int rc = 0; mutex_lock(&efx->mac_lock); @@ -732,24 +731,10 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev, goto out; } - /* TX flow control may automatically turn itself off if the - * link partner (intermittently) stops responding to pause - * frames. There isn't any indication that this has happened, - * so the best we do is leave it up to the user to spot this - * and fix it be cycling transmit flow control on this end. */ - reset = (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX); - if (EFX_WORKAROUND_11482(efx) && reset) { - if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) { - /* Recover by resetting the EM block */ - falcon_stop_nic_stats(efx); - falcon_drain_tx_fifo(efx); - falcon_reconfigure_xmac(efx); - falcon_start_nic_stats(efx); - } else { - /* Schedule a reset to recover */ - efx_schedule_reset(efx, RESET_TYPE_INVISIBLE); - } - } + /* Hook for Falcon bug 11482 workaround */ + if (efx->type->prepare_enable_fc_tx && + (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX)) + efx->type->prepare_enable_fc_tx(efx); old_adv = efx->link_advertising; old_fc = efx->wanted_fc; diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index 71998e7..6b6ac6d 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -497,7 +497,7 @@ static void falcon_reset_macs(struct efx_nic *efx) falcon_setup_xaui(efx); } -void falcon_drain_tx_fifo(struct efx_nic *efx) +static void falcon_drain_tx_fifo(struct efx_nic *efx) { efx_oword_t reg; @@ -678,6 +678,28 @@ static int falcon_reconfigure_port(struct efx_nic *efx) return 0; } +/* TX flow control may automatically turn itself off if the link + * partner (intermittently) stops responding to pause frames. There + * isn't any indication that this has happened, so the best we do is + * leave it up to the user to spot this and fix it by cycling transmit + * flow control on this end. 
+ */ + +static void falcon_a1_prepare_enable_fc_tx(struct efx_nic *efx) +{ + /* Schedule a reset to recover */ + efx_schedule_reset(efx, RESET_TYPE_INVISIBLE); +} + +static void falcon_b0_prepare_enable_fc_tx(struct efx_nic *efx) +{ + /* Recover by resetting the EM block */ + falcon_stop_nic_stats(efx); + falcon_drain_tx_fifo(efx); + falcon_reconfigure_xmac(efx); + falcon_start_nic_stats(efx); +} + /************************************************************************** * * PHY access via GMII @@ -1798,6 +1820,7 @@ const struct efx_nic_type falcon_a1_nic_type = { .set_id_led = falcon_set_id_led, .push_irq_moderation = falcon_push_irq_moderation, .reconfigure_port = falcon_reconfigure_port, + .prepare_enable_fc_tx = falcon_a1_prepare_enable_fc_tx, .reconfigure_mac = falcon_reconfigure_xmac, .check_mac_fault = falcon_xmac_check_fault, .get_wol = falcon_get_wol, @@ -1842,6 +1865,7 @@ const struct efx_nic_type falcon_b0_nic_type = { .set_id_led = falcon_set_id_led, .push_irq_moderation = falcon_push_irq_moderation, .reconfigure_port = falcon_reconfigure_port, + .prepare_enable_fc_tx = falcon_b0_prepare_enable_fc_tx, .reconfigure_mac = falcon_reconfigure_xmac, .check_mac_fault = falcon_xmac_check_fault, .get_wol = falcon_get_wol, diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index f4c7e6b..bdded38 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -946,6 +946,7 @@ static inline unsigned int efx_port_num(struct efx_nic *efx) * @set_id_led: Set state of identifying LED or revert to automatic function * @push_irq_moderation: Apply interrupt moderation value * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY + * @prepare_enable_fc_tx: Prepare MAC to enable pause frame TX (may be %NULL) * @reconfigure_mac: Push MAC address, MTU, flow control and filter settings * to the hardware. Serialised by the mac_lock. * @check_mac_fault: Check MAC fault state. True if fault present. 
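Aside: the refactor above replaces an inline EFX_WORKAROUND_11482 check with an optional per-NIC-type hook — generic ethtool code tests the function pointer and calls it only when TX flow control is being switched on. A minimal, self-contained C sketch of that pattern follows; all names in it are illustrative, not the sfc driver's own.

/* Illustrative sketch of an optional per-variant hook, modelled on the
 * prepare_enable_fc_tx pattern in the patch above.  Hypothetical names. */
#include <stdio.h>

struct nic;

struct nic_type {
	const char *name;
	/* May be NULL when the variant needs no preparation. */
	void (*prepare_enable_fc_tx)(struct nic *nic);
};

struct nic {
	const struct nic_type *type;
	unsigned int wanted_fc;		/* current flow-control flags */
};

#define FC_TX 0x1

static void variant_a_prepare(struct nic *nic)
{
	printf("%s: scheduling recovery reset\n", nic->type->name);
}

static const struct nic_type variant_a = {
	.name = "variant-a",
	.prepare_enable_fc_tx = variant_a_prepare,
};

static const struct nic_type variant_b = {
	.name = "variant-b",
	/* hook left NULL: nothing to do on this variant */
};

/* Generic code: call the hook only when TX pause goes from off to on. */
static void set_pauseparam(struct nic *nic, unsigned int wanted_fc)
{
	if (nic->type->prepare_enable_fc_tx &&
	    (wanted_fc & FC_TX) && !(nic->wanted_fc & FC_TX))
		nic->type->prepare_enable_fc_tx(nic);
	nic->wanted_fc = wanted_fc;
}

int main(void)
{
	struct nic a = { .type = &variant_a };
	struct nic b = { .type = &variant_b };

	set_pauseparam(&a, FC_TX);	/* hook runs */
	set_pauseparam(&b, FC_TX);	/* hook is NULL, silently skipped */
	return 0;
}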
@@ -995,6 +996,7 @@ struct efx_nic_type { void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode); void (*push_irq_moderation)(struct efx_channel *channel); int (*reconfigure_port)(struct efx_nic *efx); + void (*prepare_enable_fc_tx)(struct efx_nic *efx); int (*reconfigure_mac)(struct efx_nic *efx); bool (*check_mac_fault)(struct efx_nic *efx); void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol); diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index d63c299..a784363 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -298,7 +298,6 @@ extern void efx_nic_eventq_read_ack(struct efx_channel *channel); extern bool efx_nic_event_present(struct efx_channel *channel); /* MAC/PHY */ -extern void falcon_drain_tx_fifo(struct efx_nic *efx); extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx); extern bool falcon_xmac_check_fault(struct efx_nic *efx); extern int falcon_reconfigure_xmac(struct efx_nic *efx); diff --git a/drivers/net/ethernet/sfc/workarounds.h b/drivers/net/ethernet/sfc/workarounds.h index e4dd3a7..dff565a 100644 --- a/drivers/net/ethernet/sfc/workarounds.h +++ b/drivers/net/ethernet/sfc/workarounds.h @@ -30,8 +30,6 @@ /* TX_EV_PKT_ERR can be caused by a dangling TX descriptor * or a PCIe error (bug 11028) */ #define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS -/* Transmit flow control may get disabled */ -#define EFX_WORKAROUND_11482 EFX_WORKAROUND_FALCON_AB /* Truncated IPv4 packets can confuse the TX packet parser */ #define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB /* Legacy ISR read can return zero once */ -- cgit v0.10.2 From ab0115fc7dc429300fc6b728ab04ee832fbb945d Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Thu, 13 Sep 2012 01:11:31 +0100 Subject: sfc: Move more Falcon-specific code and definitions into falcon.c In particular, fold in the whole of falcon_xmac.c. Drop some entirely unused definitions. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile index 945bf06..84f9c96 100644 --- a/drivers/net/ethernet/sfc/Makefile +++ b/drivers/net/ethernet/sfc/Makefile @@ -1,5 +1,5 @@ sfc-y += efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \ - falcon_xmac.o mcdi_mac.o \ + mcdi_mac.o \ selftest.o ethtool.o qt202x_phy.o mdio_10g.o \ tenxpress.o txc43128_phy.o falcon_boards.o \ mcdi.o mcdi_phy.o mcdi_mon.o ptp.o diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index 6b6ac6d..5228500 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -26,10 +26,200 @@ #include "phy.h" #include "workarounds.h" #include "selftest.h" +#include "mdio_10g.h" /* Hardware control for SFC4000 (aka Falcon). 
*/ +/************************************************************************** + * + * MAC stats DMA format + * + ************************************************************************** + */ + +#define FALCON_MAC_STATS_SIZE 0x100 + +#define XgRxOctets_offset 0x0 +#define XgRxOctets_WIDTH 48 +#define XgRxOctetsOK_offset 0x8 +#define XgRxOctetsOK_WIDTH 48 +#define XgRxPkts_offset 0x10 +#define XgRxPkts_WIDTH 32 +#define XgRxPktsOK_offset 0x14 +#define XgRxPktsOK_WIDTH 32 +#define XgRxBroadcastPkts_offset 0x18 +#define XgRxBroadcastPkts_WIDTH 32 +#define XgRxMulticastPkts_offset 0x1C +#define XgRxMulticastPkts_WIDTH 32 +#define XgRxUnicastPkts_offset 0x20 +#define XgRxUnicastPkts_WIDTH 32 +#define XgRxUndersizePkts_offset 0x24 +#define XgRxUndersizePkts_WIDTH 32 +#define XgRxOversizePkts_offset 0x28 +#define XgRxOversizePkts_WIDTH 32 +#define XgRxJabberPkts_offset 0x2C +#define XgRxJabberPkts_WIDTH 32 +#define XgRxUndersizeFCSerrorPkts_offset 0x30 +#define XgRxUndersizeFCSerrorPkts_WIDTH 32 +#define XgRxDropEvents_offset 0x34 +#define XgRxDropEvents_WIDTH 32 +#define XgRxFCSerrorPkts_offset 0x38 +#define XgRxFCSerrorPkts_WIDTH 32 +#define XgRxAlignError_offset 0x3C +#define XgRxAlignError_WIDTH 32 +#define XgRxSymbolError_offset 0x40 +#define XgRxSymbolError_WIDTH 32 +#define XgRxInternalMACError_offset 0x44 +#define XgRxInternalMACError_WIDTH 32 +#define XgRxControlPkts_offset 0x48 +#define XgRxControlPkts_WIDTH 32 +#define XgRxPausePkts_offset 0x4C +#define XgRxPausePkts_WIDTH 32 +#define XgRxPkts64Octets_offset 0x50 +#define XgRxPkts64Octets_WIDTH 32 +#define XgRxPkts65to127Octets_offset 0x54 +#define XgRxPkts65to127Octets_WIDTH 32 +#define XgRxPkts128to255Octets_offset 0x58 +#define XgRxPkts128to255Octets_WIDTH 32 +#define XgRxPkts256to511Octets_offset 0x5C +#define XgRxPkts256to511Octets_WIDTH 32 +#define XgRxPkts512to1023Octets_offset 0x60 +#define XgRxPkts512to1023Octets_WIDTH 32 +#define XgRxPkts1024to15xxOctets_offset 0x64 +#define XgRxPkts1024to15xxOctets_WIDTH 32 +#define XgRxPkts15xxtoMaxOctets_offset 0x68 +#define XgRxPkts15xxtoMaxOctets_WIDTH 32 +#define XgRxLengthError_offset 0x6C +#define XgRxLengthError_WIDTH 32 +#define XgTxPkts_offset 0x80 +#define XgTxPkts_WIDTH 32 +#define XgTxOctets_offset 0x88 +#define XgTxOctets_WIDTH 48 +#define XgTxMulticastPkts_offset 0x90 +#define XgTxMulticastPkts_WIDTH 32 +#define XgTxBroadcastPkts_offset 0x94 +#define XgTxBroadcastPkts_WIDTH 32 +#define XgTxUnicastPkts_offset 0x98 +#define XgTxUnicastPkts_WIDTH 32 +#define XgTxControlPkts_offset 0x9C +#define XgTxControlPkts_WIDTH 32 +#define XgTxPausePkts_offset 0xA0 +#define XgTxPausePkts_WIDTH 32 +#define XgTxPkts64Octets_offset 0xA4 +#define XgTxPkts64Octets_WIDTH 32 +#define XgTxPkts65to127Octets_offset 0xA8 +#define XgTxPkts65to127Octets_WIDTH 32 +#define XgTxPkts128to255Octets_offset 0xAC +#define XgTxPkts128to255Octets_WIDTH 32 +#define XgTxPkts256to511Octets_offset 0xB0 +#define XgTxPkts256to511Octets_WIDTH 32 +#define XgTxPkts512to1023Octets_offset 0xB4 +#define XgTxPkts512to1023Octets_WIDTH 32 +#define XgTxPkts1024to15xxOctets_offset 0xB8 +#define XgTxPkts1024to15xxOctets_WIDTH 32 +#define XgTxPkts1519toMaxOctets_offset 0xBC +#define XgTxPkts1519toMaxOctets_WIDTH 32 +#define XgTxUndersizePkts_offset 0xC0 +#define XgTxUndersizePkts_WIDTH 32 +#define XgTxOversizePkts_offset 0xC4 +#define XgTxOversizePkts_WIDTH 32 +#define XgTxNonTcpUdpPkt_offset 0xC8 +#define XgTxNonTcpUdpPkt_WIDTH 16 +#define XgTxMacSrcErrPkt_offset 0xCC +#define XgTxMacSrcErrPkt_WIDTH 16 +#define 
XgTxIpSrcErrPkt_offset 0xD0 +#define XgTxIpSrcErrPkt_WIDTH 16 +#define XgDmaDone_offset 0xD4 +#define XgDmaDone_WIDTH 32 + +#define FALCON_STATS_NOT_DONE 0x00000000 +#define FALCON_STATS_DONE 0xffffffff + +#define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset) +#define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH) + +/* Retrieve statistic from statistics block */ +#define FALCON_STAT(efx, falcon_stat, efx_stat) do { \ + if (FALCON_STAT_WIDTH(falcon_stat) == 16) \ + (efx)->mac_stats.efx_stat += le16_to_cpu( \ + *((__force __le16 *) \ + (efx->stats_buffer.addr + \ + FALCON_STAT_OFFSET(falcon_stat)))); \ + else if (FALCON_STAT_WIDTH(falcon_stat) == 32) \ + (efx)->mac_stats.efx_stat += le32_to_cpu( \ + *((__force __le32 *) \ + (efx->stats_buffer.addr + \ + FALCON_STAT_OFFSET(falcon_stat)))); \ + else \ + (efx)->mac_stats.efx_stat += le64_to_cpu( \ + *((__force __le64 *) \ + (efx->stats_buffer.addr + \ + FALCON_STAT_OFFSET(falcon_stat)))); \ + } while (0) + +/************************************************************************** + * + * Non-volatile configuration + * + ************************************************************************** + */ + +/* Board configuration v2 (v1 is obsolete; later versions are compatible) */ +struct falcon_nvconfig_board_v2 { + __le16 nports; + u8 port0_phy_addr; + u8 port0_phy_type; + u8 port1_phy_addr; + u8 port1_phy_type; + __le16 asic_sub_revision; + __le16 board_revision; +} __packed; + +/* Board configuration v3 extra information */ +struct falcon_nvconfig_board_v3 { + __le32 spi_device_type[2]; +} __packed; + +/* Bit numbers for spi_device_type */ +#define SPI_DEV_TYPE_SIZE_LBN 0 +#define SPI_DEV_TYPE_SIZE_WIDTH 5 +#define SPI_DEV_TYPE_ADDR_LEN_LBN 6 +#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2 +#define SPI_DEV_TYPE_ERASE_CMD_LBN 8 +#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8 +#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16 +#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5 +#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24 +#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5 +#define SPI_DEV_TYPE_FIELD(type, field) \ + (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field))) + +#define FALCON_NVCONFIG_OFFSET 0x300 + +#define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C +struct falcon_nvconfig { + efx_oword_t ee_vpd_cfg_reg; /* 0x300 */ + u8 mac_address[2][8]; /* 0x310 */ + efx_oword_t pcie_sd_ctl0123_reg; /* 0x320 */ + efx_oword_t pcie_sd_ctl45_reg; /* 0x330 */ + efx_oword_t pcie_pcs_ctl_stat_reg; /* 0x340 */ + efx_oword_t hw_init_reg; /* 0x350 */ + efx_oword_t nic_stat_reg; /* 0x360 */ + efx_oword_t glb_ctl_reg; /* 0x370 */ + efx_oword_t srm_cfg_reg; /* 0x380 */ + efx_oword_t spare_reg; /* 0x390 */ + __le16 board_magic_num; /* 0x3A0 */ + __le16 board_struct_ver; + __le16 board_checksum; + struct falcon_nvconfig_board_v2 board_v2; + efx_oword_t ee_base_page_reg; /* 0x3B0 */ + struct falcon_nvconfig_board_v3 board_v3; /* 0x3C0 */ +} __packed; + +/*************************************************************************/ + static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method); +static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx); static const unsigned int /* "Large" EEPROM device: Atmel AT25640 or similar @@ -418,6 +608,351 @@ falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi, /************************************************************************** * + * XMAC operations + * + ************************************************************************** + */ + +/* Configure the XAUI driver that is an output from Falcon */ 
+static void falcon_setup_xaui(struct efx_nic *efx) +{ + efx_oword_t sdctl, txdrv; + + /* Move the XAUI into low power, unless there is no PHY, in + * which case the XAUI will have to drive a cable. */ + if (efx->phy_type == PHY_TYPE_NONE) + return; + + efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL); + EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF); + EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF); + EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF); + EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF); + EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF); + EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF); + EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF); + EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF); + efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL); + + EFX_POPULATE_OWORD_8(txdrv, + FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF, + FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF, + FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF, + FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF, + FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF, + FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF, + FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF, + FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF); + efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL); +} + +int falcon_reset_xaui(struct efx_nic *efx) +{ + struct falcon_nic_data *nic_data = efx->nic_data; + efx_oword_t reg; + int count; + + /* Don't fetch MAC statistics over an XMAC reset */ + WARN_ON(nic_data->stats_disable_count == 0); + + /* Start reset sequence */ + EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1); + efx_writeo(efx, &reg, FR_AB_XX_PWR_RST); + + /* Wait up to 10 ms for completion, then reinitialise */ + for (count = 0; count < 1000; count++) { + efx_reado(efx, &reg, FR_AB_XX_PWR_RST); + if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 && + EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) { + falcon_setup_xaui(efx); + return 0; + } + udelay(10); + } + netif_err(efx, hw, efx->net_dev, + "timed out waiting for XAUI/XGXS reset\n"); + return -ETIMEDOUT; +} + +static void falcon_ack_status_intr(struct efx_nic *efx) +{ + struct falcon_nic_data *nic_data = efx->nic_data; + efx_oword_t reg; + + if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx)) + return; + + /* We expect xgmii faults if the wireside link is down */ + if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up) + return; + + /* We can only use this interrupt to signal the negative edge of + * xaui_align [we have to poll the positive edge]. 
*/ + if (nic_data->xmac_poll_required) + return; + + efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK); +} + +static bool falcon_xgxs_link_ok(struct efx_nic *efx) +{ + efx_oword_t reg; + bool align_done, link_ok = false; + int sync_status; + + /* Read link status */ + efx_reado(efx, &reg, FR_AB_XX_CORE_STAT); + + align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE); + sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT); + if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES)) + link_ok = true; + + /* Clear link status ready for next read */ + EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES); + EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES); + EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES); + efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT); + + return link_ok; +} + +static bool falcon_xmac_link_ok(struct efx_nic *efx) +{ + /* + * Check MAC's XGXS link status except when using XGMII loopback + * which bypasses the XGXS block. + * If possible, check PHY's XGXS link status except when using + * MAC loopback. + */ + return (efx->loopback_mode == LOOPBACK_XGMII || + falcon_xgxs_link_ok(efx)) && + (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) || + LOOPBACK_INTERNAL(efx) || + efx_mdio_phyxgxs_lane_sync(efx)); +} + +static void falcon_reconfigure_xmac_core(struct efx_nic *efx) +{ + unsigned int max_frame_len; + efx_oword_t reg; + bool rx_fc = !!(efx->link_state.fc & EFX_FC_RX); + bool tx_fc = !!(efx->link_state.fc & EFX_FC_TX); + + /* Configure MAC - cut-thru mode is hard wired on */ + EFX_POPULATE_OWORD_3(reg, + FRF_AB_XM_RX_JUMBO_MODE, 1, + FRF_AB_XM_TX_STAT_EN, 1, + FRF_AB_XM_RX_STAT_EN, 1); + efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG); + + /* Configure TX */ + EFX_POPULATE_OWORD_6(reg, + FRF_AB_XM_TXEN, 1, + FRF_AB_XM_TX_PRMBL, 1, + FRF_AB_XM_AUTO_PAD, 1, + FRF_AB_XM_TXCRC, 1, + FRF_AB_XM_FCNTL, tx_fc, + FRF_AB_XM_IPG, 0x3); + efx_writeo(efx, &reg, FR_AB_XM_TX_CFG); + + /* Configure RX */ + EFX_POPULATE_OWORD_5(reg, + FRF_AB_XM_RXEN, 1, + FRF_AB_XM_AUTO_DEPAD, 0, + FRF_AB_XM_ACPT_ALL_MCAST, 1, + FRF_AB_XM_ACPT_ALL_UCAST, efx->promiscuous, + FRF_AB_XM_PASS_CRC_ERR, 1); + efx_writeo(efx, &reg, FR_AB_XM_RX_CFG); + + /* Set frame length */ + max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu); + EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len); + efx_writeo(efx, &reg, FR_AB_XM_RX_PARAM); + EFX_POPULATE_OWORD_2(reg, + FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len, + FRF_AB_XM_TX_JUMBO_MODE, 1); + efx_writeo(efx, &reg, FR_AB_XM_TX_PARAM); + + EFX_POPULATE_OWORD_2(reg, + FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */ + FRF_AB_XM_DIS_FCNTL, !rx_fc); + efx_writeo(efx, &reg, FR_AB_XM_FC); + + /* Set MAC address */ + memcpy(&reg, &efx->net_dev->dev_addr[0], 4); + efx_writeo(efx, &reg, FR_AB_XM_ADR_LO); + memcpy(&reg, &efx->net_dev->dev_addr[4], 2); + efx_writeo(efx, &reg, FR_AB_XM_ADR_HI); +} + +static void falcon_reconfigure_xgxs_core(struct efx_nic *efx) +{ + efx_oword_t reg; + bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS); + bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI); + bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII); + + /* XGXS block is flaky and will need to be reset if moving + * into our out of XGMII, XGXS or XAUI loopbacks. 
*/ + if (EFX_WORKAROUND_5147(efx)) { + bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback; + bool reset_xgxs; + + efx_reado(efx, &reg, FR_AB_XX_CORE_STAT); + old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN); + old_xgmii_loopback = + EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN); + + efx_reado(efx, &reg, FR_AB_XX_SD_CTL); + old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA); + + /* The PHY driver may have turned XAUI off */ + reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) || + (xaui_loopback != old_xaui_loopback) || + (xgmii_loopback != old_xgmii_loopback)); + + if (reset_xgxs) + falcon_reset_xaui(efx); + } + + efx_reado(efx, &reg, FR_AB_XX_CORE_STAT); + EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG, + (xgxs_loopback || xaui_loopback) ? + FFE_AB_XX_FORCE_SIG_ALL_LANES : 0); + EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback); + EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback); + efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT); + + efx_reado(efx, &reg, FR_AB_XX_SD_CTL); + EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback); + EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback); + EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback); + EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback); + efx_writeo(efx, &reg, FR_AB_XX_SD_CTL); +} + + +/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */ +static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries) +{ + bool mac_up = falcon_xmac_link_ok(efx); + + if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS || + efx_phy_mode_disabled(efx->phy_mode)) + /* XAUI link is expected to be down */ + return mac_up; + + falcon_stop_nic_stats(efx); + + while (!mac_up && tries) { + netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n"); + falcon_reset_xaui(efx); + udelay(200); + + mac_up = falcon_xmac_link_ok(efx); + --tries; + } + + falcon_start_nic_stats(efx); + + return mac_up; +} + +static bool falcon_xmac_check_fault(struct efx_nic *efx) +{ + return !falcon_xmac_link_ok_retry(efx, 5); +} + +static int falcon_reconfigure_xmac(struct efx_nic *efx) +{ + struct falcon_nic_data *nic_data = efx->nic_data; + + falcon_reconfigure_xgxs_core(efx); + falcon_reconfigure_xmac_core(efx); + + falcon_reconfigure_mac_wrapper(efx); + + nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5); + falcon_ack_status_intr(efx); + + return 0; +} + +static void falcon_update_stats_xmac(struct efx_nic *efx) +{ + struct efx_mac_stats *mac_stats = &efx->mac_stats; + + /* Update MAC stats from DMAed values */ + FALCON_STAT(efx, XgRxOctets, rx_bytes); + FALCON_STAT(efx, XgRxOctetsOK, rx_good_bytes); + FALCON_STAT(efx, XgRxPkts, rx_packets); + FALCON_STAT(efx, XgRxPktsOK, rx_good); + FALCON_STAT(efx, XgRxBroadcastPkts, rx_broadcast); + FALCON_STAT(efx, XgRxMulticastPkts, rx_multicast); + FALCON_STAT(efx, XgRxUnicastPkts, rx_unicast); + FALCON_STAT(efx, XgRxUndersizePkts, rx_lt64); + FALCON_STAT(efx, XgRxOversizePkts, rx_gtjumbo); + FALCON_STAT(efx, XgRxJabberPkts, rx_bad_gtjumbo); + FALCON_STAT(efx, XgRxUndersizeFCSerrorPkts, rx_bad_lt64); + FALCON_STAT(efx, XgRxDropEvents, rx_overflow); + FALCON_STAT(efx, XgRxFCSerrorPkts, rx_bad); + FALCON_STAT(efx, XgRxAlignError, rx_align_error); + FALCON_STAT(efx, XgRxSymbolError, rx_symbol_error); + FALCON_STAT(efx, XgRxInternalMACError, rx_internal_error); + FALCON_STAT(efx, XgRxControlPkts, rx_control); + FALCON_STAT(efx, XgRxPausePkts, rx_pause); + FALCON_STAT(efx, XgRxPkts64Octets, rx_64); + FALCON_STAT(efx, XgRxPkts65to127Octets, rx_65_to_127); +
FALCON_STAT(efx, XgRxPkts128to255Octets, rx_128_to_255); + FALCON_STAT(efx, XgRxPkts256to511Octets, rx_256_to_511); + FALCON_STAT(efx, XgRxPkts512to1023Octets, rx_512_to_1023); + FALCON_STAT(efx, XgRxPkts1024to15xxOctets, rx_1024_to_15xx); + FALCON_STAT(efx, XgRxPkts15xxtoMaxOctets, rx_15xx_to_jumbo); + FALCON_STAT(efx, XgRxLengthError, rx_length_error); + FALCON_STAT(efx, XgTxPkts, tx_packets); + FALCON_STAT(efx, XgTxOctets, tx_bytes); + FALCON_STAT(efx, XgTxMulticastPkts, tx_multicast); + FALCON_STAT(efx, XgTxBroadcastPkts, tx_broadcast); + FALCON_STAT(efx, XgTxUnicastPkts, tx_unicast); + FALCON_STAT(efx, XgTxControlPkts, tx_control); + FALCON_STAT(efx, XgTxPausePkts, tx_pause); + FALCON_STAT(efx, XgTxPkts64Octets, tx_64); + FALCON_STAT(efx, XgTxPkts65to127Octets, tx_65_to_127); + FALCON_STAT(efx, XgTxPkts128to255Octets, tx_128_to_255); + FALCON_STAT(efx, XgTxPkts256to511Octets, tx_256_to_511); + FALCON_STAT(efx, XgTxPkts512to1023Octets, tx_512_to_1023); + FALCON_STAT(efx, XgTxPkts1024to15xxOctets, tx_1024_to_15xx); + FALCON_STAT(efx, XgTxPkts1519toMaxOctets, tx_15xx_to_jumbo); + FALCON_STAT(efx, XgTxUndersizePkts, tx_lt64); + FALCON_STAT(efx, XgTxOversizePkts, tx_gtjumbo); + FALCON_STAT(efx, XgTxNonTcpUdpPkt, tx_non_tcpudp); + FALCON_STAT(efx, XgTxMacSrcErrPkt, tx_mac_src_error); + FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error); + + /* Update derived statistics */ + efx_update_diff_stat(&mac_stats->tx_good_bytes, + mac_stats->tx_bytes - mac_stats->tx_bad_bytes - + mac_stats->tx_control * 64); + efx_update_diff_stat(&mac_stats->rx_bad_bytes, + mac_stats->rx_bytes - mac_stats->rx_good_bytes - + mac_stats->rx_control * 64); +} + +static void falcon_poll_xmac(struct efx_nic *efx) +{ + struct falcon_nic_data *nic_data = efx->nic_data; + + if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up || + !nic_data->xmac_poll_required) + return; + + nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1); + falcon_ack_status_intr(efx); +} + +/************************************************************************** + * * MAC wrapper * ************************************************************************** @@ -529,7 +1064,7 @@ static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx) falcon_drain_tx_fifo(efx); } -void falcon_reconfigure_mac_wrapper(struct efx_nic *efx) +static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx) { struct efx_link_state *link_state = &efx->link_state; efx_oword_t reg; diff --git a/drivers/net/ethernet/sfc/falcon_xmac.c b/drivers/net/ethernet/sfc/falcon_xmac.c deleted file mode 100644 index 8333865..0000000 --- a/drivers/net/ethernet/sfc/falcon_xmac.c +++ /dev/null @@ -1,362 +0,0 @@ -/**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards - * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2006-2010 Solarflare Communications Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation, incorporated herein by reference. 
- */ - -#include -#include "net_driver.h" -#include "efx.h" -#include "nic.h" -#include "regs.h" -#include "io.h" -#include "mdio_10g.h" -#include "workarounds.h" - -/************************************************************************** - * - * MAC operations - * - *************************************************************************/ - -/* Configure the XAUI driver that is an output from Falcon */ -void falcon_setup_xaui(struct efx_nic *efx) -{ - efx_oword_t sdctl, txdrv; - - /* Move the XAUI into low power, unless there is no PHY, in - * which case the XAUI will have to drive a cable. */ - if (efx->phy_type == PHY_TYPE_NONE) - return; - - efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL); - EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF); - EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF); - EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF); - EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF); - EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF); - EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF); - EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF); - EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF); - efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL); - - EFX_POPULATE_OWORD_8(txdrv, - FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF, - FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF, - FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF, - FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF, - FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF, - FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF, - FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF, - FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF); - efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL); -} - -int falcon_reset_xaui(struct efx_nic *efx) -{ - struct falcon_nic_data *nic_data = efx->nic_data; - efx_oword_t reg; - int count; - - /* Don't fetch MAC statistics over an XMAC reset */ - WARN_ON(nic_data->stats_disable_count == 0); - - /* Start reset sequence */ - EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1); - efx_writeo(efx, ®, FR_AB_XX_PWR_RST); - - /* Wait up to 10 ms for completion, then reinitialise */ - for (count = 0; count < 1000; count++) { - efx_reado(efx, ®, FR_AB_XX_PWR_RST); - if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 && - EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) { - falcon_setup_xaui(efx); - return 0; - } - udelay(10); - } - netif_err(efx, hw, efx->net_dev, - "timed out waiting for XAUI/XGXS reset\n"); - return -ETIMEDOUT; -} - -static void falcon_ack_status_intr(struct efx_nic *efx) -{ - struct falcon_nic_data *nic_data = efx->nic_data; - efx_oword_t reg; - - if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx)) - return; - - /* We expect xgmii faults if the wireside link is down */ - if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up) - return; - - /* We can only use this interrupt to signal the negative edge of - * xaui_align [we have to poll the positive edge]. 
*/ - if (nic_data->xmac_poll_required) - return; - - efx_reado(efx, ®, FR_AB_XM_MGT_INT_MSK); -} - -static bool falcon_xgxs_link_ok(struct efx_nic *efx) -{ - efx_oword_t reg; - bool align_done, link_ok = false; - int sync_status; - - /* Read link status */ - efx_reado(efx, ®, FR_AB_XX_CORE_STAT); - - align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE); - sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT); - if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES)) - link_ok = true; - - /* Clear link status ready for next read */ - EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES); - EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES); - EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES); - efx_writeo(efx, ®, FR_AB_XX_CORE_STAT); - - return link_ok; -} - -static bool falcon_xmac_link_ok(struct efx_nic *efx) -{ - /* - * Check MAC's XGXS link status except when using XGMII loopback - * which bypasses the XGXS block. - * If possible, check PHY's XGXS link status except when using - * MAC loopback. - */ - return (efx->loopback_mode == LOOPBACK_XGMII || - falcon_xgxs_link_ok(efx)) && - (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) || - LOOPBACK_INTERNAL(efx) || - efx_mdio_phyxgxs_lane_sync(efx)); -} - -static void falcon_reconfigure_xmac_core(struct efx_nic *efx) -{ - unsigned int max_frame_len; - efx_oword_t reg; - bool rx_fc = !!(efx->link_state.fc & EFX_FC_RX); - bool tx_fc = !!(efx->link_state.fc & EFX_FC_TX); - - /* Configure MAC - cut-thru mode is hard wired on */ - EFX_POPULATE_OWORD_3(reg, - FRF_AB_XM_RX_JUMBO_MODE, 1, - FRF_AB_XM_TX_STAT_EN, 1, - FRF_AB_XM_RX_STAT_EN, 1); - efx_writeo(efx, ®, FR_AB_XM_GLB_CFG); - - /* Configure TX */ - EFX_POPULATE_OWORD_6(reg, - FRF_AB_XM_TXEN, 1, - FRF_AB_XM_TX_PRMBL, 1, - FRF_AB_XM_AUTO_PAD, 1, - FRF_AB_XM_TXCRC, 1, - FRF_AB_XM_FCNTL, tx_fc, - FRF_AB_XM_IPG, 0x3); - efx_writeo(efx, ®, FR_AB_XM_TX_CFG); - - /* Configure RX */ - EFX_POPULATE_OWORD_5(reg, - FRF_AB_XM_RXEN, 1, - FRF_AB_XM_AUTO_DEPAD, 0, - FRF_AB_XM_ACPT_ALL_MCAST, 1, - FRF_AB_XM_ACPT_ALL_UCAST, efx->promiscuous, - FRF_AB_XM_PASS_CRC_ERR, 1); - efx_writeo(efx, ®, FR_AB_XM_RX_CFG); - - /* Set frame length */ - max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu); - EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len); - efx_writeo(efx, ®, FR_AB_XM_RX_PARAM); - EFX_POPULATE_OWORD_2(reg, - FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len, - FRF_AB_XM_TX_JUMBO_MODE, 1); - efx_writeo(efx, ®, FR_AB_XM_TX_PARAM); - - EFX_POPULATE_OWORD_2(reg, - FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */ - FRF_AB_XM_DIS_FCNTL, !rx_fc); - efx_writeo(efx, ®, FR_AB_XM_FC); - - /* Set MAC address */ - memcpy(®, &efx->net_dev->dev_addr[0], 4); - efx_writeo(efx, ®, FR_AB_XM_ADR_LO); - memcpy(®, &efx->net_dev->dev_addr[4], 2); - efx_writeo(efx, ®, FR_AB_XM_ADR_HI); -} - -static void falcon_reconfigure_xgxs_core(struct efx_nic *efx) -{ - efx_oword_t reg; - bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS); - bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI); - bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII); - - /* XGXS block is flaky and will need to be reset if moving - * into our out of XGMII, XGXS or XAUI loopbacks. 
*/ - if (EFX_WORKAROUND_5147(efx)) { - bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback; - bool reset_xgxs; - - efx_reado(efx, ®, FR_AB_XX_CORE_STAT); - old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN); - old_xgmii_loopback = - EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN); - - efx_reado(efx, ®, FR_AB_XX_SD_CTL); - old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA); - - /* The PHY driver may have turned XAUI off */ - reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) || - (xaui_loopback != old_xaui_loopback) || - (xgmii_loopback != old_xgmii_loopback)); - - if (reset_xgxs) - falcon_reset_xaui(efx); - } - - efx_reado(efx, ®, FR_AB_XX_CORE_STAT); - EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG, - (xgxs_loopback || xaui_loopback) ? - FFE_AB_XX_FORCE_SIG_ALL_LANES : 0); - EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback); - EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback); - efx_writeo(efx, ®, FR_AB_XX_CORE_STAT); - - efx_reado(efx, ®, FR_AB_XX_SD_CTL); - EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback); - EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback); - EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback); - EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback); - efx_writeo(efx, ®, FR_AB_XX_SD_CTL); -} - - -/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */ -static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries) -{ - bool mac_up = falcon_xmac_link_ok(efx); - - if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS || - efx_phy_mode_disabled(efx->phy_mode)) - /* XAUI link is expected to be down */ - return mac_up; - - falcon_stop_nic_stats(efx); - - while (!mac_up && tries) { - netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n"); - falcon_reset_xaui(efx); - udelay(200); - - mac_up = falcon_xmac_link_ok(efx); - --tries; - } - - falcon_start_nic_stats(efx); - - return mac_up; -} - -bool falcon_xmac_check_fault(struct efx_nic *efx) -{ - return !falcon_xmac_link_ok_retry(efx, 5); -} - -int falcon_reconfigure_xmac(struct efx_nic *efx) -{ - struct falcon_nic_data *nic_data = efx->nic_data; - - falcon_reconfigure_xgxs_core(efx); - falcon_reconfigure_xmac_core(efx); - - falcon_reconfigure_mac_wrapper(efx); - - nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5); - falcon_ack_status_intr(efx); - - return 0; -} - -void falcon_update_stats_xmac(struct efx_nic *efx) -{ - struct efx_mac_stats *mac_stats = &efx->mac_stats; - - /* Update MAC stats from DMAed values */ - FALCON_STAT(efx, XgRxOctets, rx_bytes); - FALCON_STAT(efx, XgRxOctetsOK, rx_good_bytes); - FALCON_STAT(efx, XgRxPkts, rx_packets); - FALCON_STAT(efx, XgRxPktsOK, rx_good); - FALCON_STAT(efx, XgRxBroadcastPkts, rx_broadcast); - FALCON_STAT(efx, XgRxMulticastPkts, rx_multicast); - FALCON_STAT(efx, XgRxUnicastPkts, rx_unicast); - FALCON_STAT(efx, XgRxUndersizePkts, rx_lt64); - FALCON_STAT(efx, XgRxOversizePkts, rx_gtjumbo); - FALCON_STAT(efx, XgRxJabberPkts, rx_bad_gtjumbo); - FALCON_STAT(efx, XgRxUndersizeFCSerrorPkts, rx_bad_lt64); - FALCON_STAT(efx, XgRxDropEvents, rx_overflow); - FALCON_STAT(efx, XgRxFCSerrorPkts, rx_bad); - FALCON_STAT(efx, XgRxAlignError, rx_align_error); - FALCON_STAT(efx, XgRxSymbolError, rx_symbol_error); - FALCON_STAT(efx, XgRxInternalMACError, rx_internal_error); - FALCON_STAT(efx, XgRxControlPkts, rx_control); - FALCON_STAT(efx, XgRxPausePkts, rx_pause); - FALCON_STAT(efx, XgRxPkts64Octets, rx_64); - FALCON_STAT(efx, XgRxPkts65to127Octets, rx_65_to_127); - FALCON_STAT(efx, 
XgRxPkts128to255Octets, rx_128_to_255); - FALCON_STAT(efx, XgRxPkts256to511Octets, rx_256_to_511); - FALCON_STAT(efx, XgRxPkts512to1023Octets, rx_512_to_1023); - FALCON_STAT(efx, XgRxPkts1024to15xxOctets, rx_1024_to_15xx); - FALCON_STAT(efx, XgRxPkts15xxtoMaxOctets, rx_15xx_to_jumbo); - FALCON_STAT(efx, XgRxLengthError, rx_length_error); - FALCON_STAT(efx, XgTxPkts, tx_packets); - FALCON_STAT(efx, XgTxOctets, tx_bytes); - FALCON_STAT(efx, XgTxMulticastPkts, tx_multicast); - FALCON_STAT(efx, XgTxBroadcastPkts, tx_broadcast); - FALCON_STAT(efx, XgTxUnicastPkts, tx_unicast); - FALCON_STAT(efx, XgTxControlPkts, tx_control); - FALCON_STAT(efx, XgTxPausePkts, tx_pause); - FALCON_STAT(efx, XgTxPkts64Octets, tx_64); - FALCON_STAT(efx, XgTxPkts65to127Octets, tx_65_to_127); - FALCON_STAT(efx, XgTxPkts128to255Octets, tx_128_to_255); - FALCON_STAT(efx, XgTxPkts256to511Octets, tx_256_to_511); - FALCON_STAT(efx, XgTxPkts512to1023Octets, tx_512_to_1023); - FALCON_STAT(efx, XgTxPkts1024to15xxOctets, tx_1024_to_15xx); - FALCON_STAT(efx, XgTxPkts1519toMaxOctets, tx_15xx_to_jumbo); - FALCON_STAT(efx, XgTxUndersizePkts, tx_lt64); - FALCON_STAT(efx, XgTxOversizePkts, tx_gtjumbo); - FALCON_STAT(efx, XgTxNonTcpUdpPkt, tx_non_tcpudp); - FALCON_STAT(efx, XgTxMacSrcErrPkt, tx_mac_src_error); - FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error); - - /* Update derived statistics */ - efx_update_diff_stat(&mac_stats->tx_good_bytes, - mac_stats->tx_bytes - mac_stats->tx_bad_bytes - - mac_stats->tx_control * 64); - efx_update_diff_stat(&mac_stats->rx_bad_bytes, - mac_stats->rx_bytes - mac_stats->rx_good_bytes - - mac_stats->rx_control * 64); -} - -void falcon_poll_xmac(struct efx_nic *efx) -{ - struct falcon_nic_data *nic_data = efx->nic_data; - - if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up || - !nic_data->xmac_poll_required) - return; - - nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1); - falcon_ack_status_intr(efx); -} diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index a784363..0d38cc2 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -297,12 +297,6 @@ extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota); extern void efx_nic_eventq_read_ack(struct efx_channel *channel); extern bool efx_nic_event_present(struct efx_channel *channel); -/* MAC/PHY */ -extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx); -extern bool falcon_xmac_check_fault(struct efx_nic *efx); -extern int falcon_reconfigure_xmac(struct efx_nic *efx); -extern void falcon_update_stats_xmac(struct efx_nic *efx); - /* Some statistics are computed as A - B where A and B each increase * linearly with some hardware counter(s) and the counters are read * asynchronously. 
If the counters contributing to B are always read @@ -347,7 +341,6 @@ extern void siena_prepare_flush(struct efx_nic *efx); extern void siena_finish_flush(struct efx_nic *efx); extern void falcon_start_nic_stats(struct efx_nic *efx); extern void falcon_stop_nic_stats(struct efx_nic *efx); -extern void falcon_setup_xaui(struct efx_nic *efx); extern int falcon_reset_xaui(struct efx_nic *efx); extern void efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw); @@ -370,43 +363,9 @@ extern int efx_nic_test_registers(struct efx_nic *efx, extern size_t efx_nic_get_regs_len(struct efx_nic *efx); extern void efx_nic_get_regs(struct efx_nic *efx, void *buf); -/************************************************************************** - * - * Falcon MAC stats - * - ************************************************************************** - */ - -#define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset) -#define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH) - -/* Retrieve statistic from statistics block */ -#define FALCON_STAT(efx, falcon_stat, efx_stat) do { \ - if (FALCON_STAT_WIDTH(falcon_stat) == 16) \ - (efx)->mac_stats.efx_stat += le16_to_cpu( \ - *((__force __le16 *) \ - (efx->stats_buffer.addr + \ - FALCON_STAT_OFFSET(falcon_stat)))); \ - else if (FALCON_STAT_WIDTH(falcon_stat) == 32) \ - (efx)->mac_stats.efx_stat += le32_to_cpu( \ - *((__force __le32 *) \ - (efx->stats_buffer.addr + \ - FALCON_STAT_OFFSET(falcon_stat)))); \ - else \ - (efx)->mac_stats.efx_stat += le64_to_cpu( \ - *((__force __le64 *) \ - (efx->stats_buffer.addr + \ - FALCON_STAT_OFFSET(falcon_stat)))); \ - } while (0) - -#define FALCON_MAC_STATS_SIZE 0x100 - -#define MAC_DATA_LBN 0 -#define MAC_DATA_WIDTH 32 +#define EFX_MAX_FLUSH_TIME 5000 extern void efx_generate_event(struct efx_nic *efx, unsigned int evq, efx_qword_t *event); -extern void falcon_poll_xmac(struct efx_nic *efx); - #endif /* EFX_NIC_H */ diff --git a/drivers/net/ethernet/sfc/regs.h b/drivers/net/ethernet/sfc/regs.h index ade4c4d..27ad348d 100644 --- a/drivers/net/ethernet/sfc/regs.h +++ b/drivers/net/ethernet/sfc/regs.h @@ -2925,264 +2925,4 @@ #define FSF_AZ_DRV_GEN_EV_MAGIC_LBN 0 #define FSF_AZ_DRV_GEN_EV_MAGIC_WIDTH 32 -/************************************************************************** - * - * Falcon MAC stats - * - ************************************************************************** - * - */ - -#define GRxGoodOct_offset 0x0 -#define GRxGoodOct_WIDTH 48 -#define GRxBadOct_offset 0x8 -#define GRxBadOct_WIDTH 48 -#define GRxMissPkt_offset 0x10 -#define GRxMissPkt_WIDTH 32 -#define GRxFalseCRS_offset 0x14 -#define GRxFalseCRS_WIDTH 32 -#define GRxPausePkt_offset 0x18 -#define GRxPausePkt_WIDTH 32 -#define GRxBadPkt_offset 0x1C -#define GRxBadPkt_WIDTH 32 -#define GRxUcastPkt_offset 0x20 -#define GRxUcastPkt_WIDTH 32 -#define GRxMcastPkt_offset 0x24 -#define GRxMcastPkt_WIDTH 32 -#define GRxBcastPkt_offset 0x28 -#define GRxBcastPkt_WIDTH 32 -#define GRxGoodLt64Pkt_offset 0x2C -#define GRxGoodLt64Pkt_WIDTH 32 -#define GRxBadLt64Pkt_offset 0x30 -#define GRxBadLt64Pkt_WIDTH 32 -#define GRx64Pkt_offset 0x34 -#define GRx64Pkt_WIDTH 32 -#define GRx65to127Pkt_offset 0x38 -#define GRx65to127Pkt_WIDTH 32 -#define GRx128to255Pkt_offset 0x3C -#define GRx128to255Pkt_WIDTH 32 -#define GRx256to511Pkt_offset 0x40 -#define GRx256to511Pkt_WIDTH 32 -#define GRx512to1023Pkt_offset 0x44 -#define GRx512to1023Pkt_WIDTH 32 -#define GRx1024to15xxPkt_offset 0x48 -#define GRx1024to15xxPkt_WIDTH 32 -#define GRx15xxtoJumboPkt_offset 
0x4C -#define GRx15xxtoJumboPkt_WIDTH 32 -#define GRxGtJumboPkt_offset 0x50 -#define GRxGtJumboPkt_WIDTH 32 -#define GRxFcsErr64to15xxPkt_offset 0x54 -#define GRxFcsErr64to15xxPkt_WIDTH 32 -#define GRxFcsErr15xxtoJumboPkt_offset 0x58 -#define GRxFcsErr15xxtoJumboPkt_WIDTH 32 -#define GRxFcsErrGtJumboPkt_offset 0x5C -#define GRxFcsErrGtJumboPkt_WIDTH 32 -#define GTxGoodBadOct_offset 0x80 -#define GTxGoodBadOct_WIDTH 48 -#define GTxGoodOct_offset 0x88 -#define GTxGoodOct_WIDTH 48 -#define GTxSglColPkt_offset 0x90 -#define GTxSglColPkt_WIDTH 32 -#define GTxMultColPkt_offset 0x94 -#define GTxMultColPkt_WIDTH 32 -#define GTxExColPkt_offset 0x98 -#define GTxExColPkt_WIDTH 32 -#define GTxDefPkt_offset 0x9C -#define GTxDefPkt_WIDTH 32 -#define GTxLateCol_offset 0xA0 -#define GTxLateCol_WIDTH 32 -#define GTxExDefPkt_offset 0xA4 -#define GTxExDefPkt_WIDTH 32 -#define GTxPausePkt_offset 0xA8 -#define GTxPausePkt_WIDTH 32 -#define GTxBadPkt_offset 0xAC -#define GTxBadPkt_WIDTH 32 -#define GTxUcastPkt_offset 0xB0 -#define GTxUcastPkt_WIDTH 32 -#define GTxMcastPkt_offset 0xB4 -#define GTxMcastPkt_WIDTH 32 -#define GTxBcastPkt_offset 0xB8 -#define GTxBcastPkt_WIDTH 32 -#define GTxLt64Pkt_offset 0xBC -#define GTxLt64Pkt_WIDTH 32 -#define GTx64Pkt_offset 0xC0 -#define GTx64Pkt_WIDTH 32 -#define GTx65to127Pkt_offset 0xC4 -#define GTx65to127Pkt_WIDTH 32 -#define GTx128to255Pkt_offset 0xC8 -#define GTx128to255Pkt_WIDTH 32 -#define GTx256to511Pkt_offset 0xCC -#define GTx256to511Pkt_WIDTH 32 -#define GTx512to1023Pkt_offset 0xD0 -#define GTx512to1023Pkt_WIDTH 32 -#define GTx1024to15xxPkt_offset 0xD4 -#define GTx1024to15xxPkt_WIDTH 32 -#define GTx15xxtoJumboPkt_offset 0xD8 -#define GTx15xxtoJumboPkt_WIDTH 32 -#define GTxGtJumboPkt_offset 0xDC -#define GTxGtJumboPkt_WIDTH 32 -#define GTxNonTcpUdpPkt_offset 0xE0 -#define GTxNonTcpUdpPkt_WIDTH 16 -#define GTxMacSrcErrPkt_offset 0xE4 -#define GTxMacSrcErrPkt_WIDTH 16 -#define GTxIpSrcErrPkt_offset 0xE8 -#define GTxIpSrcErrPkt_WIDTH 16 -#define GDmaDone_offset 0xEC -#define GDmaDone_WIDTH 32 - -#define XgRxOctets_offset 0x0 -#define XgRxOctets_WIDTH 48 -#define XgRxOctetsOK_offset 0x8 -#define XgRxOctetsOK_WIDTH 48 -#define XgRxPkts_offset 0x10 -#define XgRxPkts_WIDTH 32 -#define XgRxPktsOK_offset 0x14 -#define XgRxPktsOK_WIDTH 32 -#define XgRxBroadcastPkts_offset 0x18 -#define XgRxBroadcastPkts_WIDTH 32 -#define XgRxMulticastPkts_offset 0x1C -#define XgRxMulticastPkts_WIDTH 32 -#define XgRxUnicastPkts_offset 0x20 -#define XgRxUnicastPkts_WIDTH 32 -#define XgRxUndersizePkts_offset 0x24 -#define XgRxUndersizePkts_WIDTH 32 -#define XgRxOversizePkts_offset 0x28 -#define XgRxOversizePkts_WIDTH 32 -#define XgRxJabberPkts_offset 0x2C -#define XgRxJabberPkts_WIDTH 32 -#define XgRxUndersizeFCSerrorPkts_offset 0x30 -#define XgRxUndersizeFCSerrorPkts_WIDTH 32 -#define XgRxDropEvents_offset 0x34 -#define XgRxDropEvents_WIDTH 32 -#define XgRxFCSerrorPkts_offset 0x38 -#define XgRxFCSerrorPkts_WIDTH 32 -#define XgRxAlignError_offset 0x3C -#define XgRxAlignError_WIDTH 32 -#define XgRxSymbolError_offset 0x40 -#define XgRxSymbolError_WIDTH 32 -#define XgRxInternalMACError_offset 0x44 -#define XgRxInternalMACError_WIDTH 32 -#define XgRxControlPkts_offset 0x48 -#define XgRxControlPkts_WIDTH 32 -#define XgRxPausePkts_offset 0x4C -#define XgRxPausePkts_WIDTH 32 -#define XgRxPkts64Octets_offset 0x50 -#define XgRxPkts64Octets_WIDTH 32 -#define XgRxPkts65to127Octets_offset 0x54 -#define XgRxPkts65to127Octets_WIDTH 32 -#define XgRxPkts128to255Octets_offset 0x58 -#define 
XgRxPkts128to255Octets_WIDTH 32 -#define XgRxPkts256to511Octets_offset 0x5C -#define XgRxPkts256to511Octets_WIDTH 32 -#define XgRxPkts512to1023Octets_offset 0x60 -#define XgRxPkts512to1023Octets_WIDTH 32 -#define XgRxPkts1024to15xxOctets_offset 0x64 -#define XgRxPkts1024to15xxOctets_WIDTH 32 -#define XgRxPkts15xxtoMaxOctets_offset 0x68 -#define XgRxPkts15xxtoMaxOctets_WIDTH 32 -#define XgRxLengthError_offset 0x6C -#define XgRxLengthError_WIDTH 32 -#define XgTxPkts_offset 0x80 -#define XgTxPkts_WIDTH 32 -#define XgTxOctets_offset 0x88 -#define XgTxOctets_WIDTH 48 -#define XgTxMulticastPkts_offset 0x90 -#define XgTxMulticastPkts_WIDTH 32 -#define XgTxBroadcastPkts_offset 0x94 -#define XgTxBroadcastPkts_WIDTH 32 -#define XgTxUnicastPkts_offset 0x98 -#define XgTxUnicastPkts_WIDTH 32 -#define XgTxControlPkts_offset 0x9C -#define XgTxControlPkts_WIDTH 32 -#define XgTxPausePkts_offset 0xA0 -#define XgTxPausePkts_WIDTH 32 -#define XgTxPkts64Octets_offset 0xA4 -#define XgTxPkts64Octets_WIDTH 32 -#define XgTxPkts65to127Octets_offset 0xA8 -#define XgTxPkts65to127Octets_WIDTH 32 -#define XgTxPkts128to255Octets_offset 0xAC -#define XgTxPkts128to255Octets_WIDTH 32 -#define XgTxPkts256to511Octets_offset 0xB0 -#define XgTxPkts256to511Octets_WIDTH 32 -#define XgTxPkts512to1023Octets_offset 0xB4 -#define XgTxPkts512to1023Octets_WIDTH 32 -#define XgTxPkts1024to15xxOctets_offset 0xB8 -#define XgTxPkts1024to15xxOctets_WIDTH 32 -#define XgTxPkts1519toMaxOctets_offset 0xBC -#define XgTxPkts1519toMaxOctets_WIDTH 32 -#define XgTxUndersizePkts_offset 0xC0 -#define XgTxUndersizePkts_WIDTH 32 -#define XgTxOversizePkts_offset 0xC4 -#define XgTxOversizePkts_WIDTH 32 -#define XgTxNonTcpUdpPkt_offset 0xC8 -#define XgTxNonTcpUdpPkt_WIDTH 16 -#define XgTxMacSrcErrPkt_offset 0xCC -#define XgTxMacSrcErrPkt_WIDTH 16 -#define XgTxIpSrcErrPkt_offset 0xD0 -#define XgTxIpSrcErrPkt_WIDTH 16 -#define XgDmaDone_offset 0xD4 -#define XgDmaDone_WIDTH 32 - -#define FALCON_STATS_NOT_DONE 0x00000000 -#define FALCON_STATS_DONE 0xffffffff - -/************************************************************************** - * - * Falcon non-volatile configuration - * - ************************************************************************** - */ - -/* Board configuration v2 (v1 is obsolete; later versions are compatible) */ -struct falcon_nvconfig_board_v2 { - __le16 nports; - u8 port0_phy_addr; - u8 port0_phy_type; - u8 port1_phy_addr; - u8 port1_phy_type; - __le16 asic_sub_revision; - __le16 board_revision; -} __packed; - -/* Board configuration v3 extra information */ -struct falcon_nvconfig_board_v3 { - __le32 spi_device_type[2]; -} __packed; - -/* Bit numbers for spi_device_type */ -#define SPI_DEV_TYPE_SIZE_LBN 0 -#define SPI_DEV_TYPE_SIZE_WIDTH 5 -#define SPI_DEV_TYPE_ADDR_LEN_LBN 6 -#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2 -#define SPI_DEV_TYPE_ERASE_CMD_LBN 8 -#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8 -#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16 -#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5 -#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24 -#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5 -#define SPI_DEV_TYPE_FIELD(type, field) \ - (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field))) - -#define FALCON_NVCONFIG_OFFSET 0x300 - -#define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C -struct falcon_nvconfig { - efx_oword_t ee_vpd_cfg_reg; /* 0x300 */ - u8 mac_address[2][8]; /* 0x310 */ - efx_oword_t pcie_sd_ctl0123_reg; /* 0x320 */ - efx_oword_t pcie_sd_ctl45_reg; /* 0x330 */ - efx_oword_t pcie_pcs_ctl_stat_reg; /* 0x340 */ - efx_oword_t hw_init_reg; /* 0x350 */ - 
efx_oword_t nic_stat_reg; /* 0x360 */ - efx_oword_t glb_ctl_reg; /* 0x370 */ - efx_oword_t srm_cfg_reg; /* 0x380 */ - efx_oword_t spare_reg; /* 0x390 */ - __le16 board_magic_num; /* 0x3A0 */ - __le16 board_struct_ver; - __le16 board_checksum; - struct falcon_nvconfig_board_v2 board_v2; - efx_oword_t ee_base_page_reg; /* 0x3B0 */ - struct falcon_nvconfig_board_v3 board_v3; /* 0x3C0 */ -} __packed; - #endif /* EFX_REGS_H */ -- cgit v0.10.2 From 59cfc479b2c8ba344c8497d5c913b6cba2ce3755 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Fri, 14 Sep 2012 17:30:10 +0100 Subject: sfc: Introduce and use MCDI_DECLARE_BUF macro MCDI_DECLARE_BUF declares a variable as an MCDI buffer of the requested length, adding any necessary padding. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 97dd8f18..d65b562 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -606,7 +606,7 @@ void efx_mcdi_process_event(struct efx_channel *channel, void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len) { - u8 outbuf[ALIGN(MC_CMD_GET_VERSION_OUT_LEN, 4)]; + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN); size_t outlength; const __le16 *ver_words; int rc; @@ -637,8 +637,8 @@ fail: int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, bool *was_attached) { - u8 inbuf[MC_CMD_DRV_ATTACH_IN_LEN]; - u8 outbuf[MC_CMD_DRV_ATTACH_OUT_LEN]; + MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN); size_t outlen; int rc; @@ -667,7 +667,7 @@ fail: int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, u16 *fw_subtype_list, u32 *capabilities) { - uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMAX]; + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX); size_t outlen, offset, i; int port_num = efx_port_num(efx); int rc; @@ -721,7 +721,7 @@ fail: int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq) { - u8 inbuf[MC_CMD_LOG_CTRL_IN_LEN]; + MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN); u32 dest = 0; int rc; @@ -749,7 +749,7 @@ fail: int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out) { - u8 outbuf[MC_CMD_NVRAM_TYPES_OUT_LEN]; + MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN); size_t outlen; int rc; @@ -777,8 +777,8 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, size_t *size_out, size_t *erase_size_out, bool *protected_out) { - u8 inbuf[MC_CMD_NVRAM_INFO_IN_LEN]; - u8 outbuf[MC_CMD_NVRAM_INFO_OUT_LEN]; + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN); size_t outlen; int rc; @@ -806,7 +806,7 @@ fail: int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type) { - u8 inbuf[MC_CMD_NVRAM_UPDATE_START_IN_LEN]; + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN); int rc; MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type); @@ -828,8 +828,9 @@ fail: int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, loff_t offset, u8 *buffer, size_t length) { - u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN]; - u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)]; + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN); + MCDI_DECLARE_BUF(outbuf, + MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)); size_t outlen; int rc; @@ -853,7 +854,8 @@ fail: int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, loff_t offset, const u8 *buffer, size_t length) { - u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)]; 
+ MCDI_DECLARE_BUF(inbuf, + MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)); int rc; MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type); @@ -879,7 +881,7 @@ fail: int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, loff_t offset, size_t length) { - u8 inbuf[MC_CMD_NVRAM_ERASE_IN_LEN]; + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN); int rc; MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type); @@ -902,7 +904,7 @@ fail: int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type) { - u8 inbuf[MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN]; + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN); int rc; MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type); @@ -923,8 +925,8 @@ fail: static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type) { - u8 inbuf[MC_CMD_NVRAM_TEST_IN_LEN]; - u8 outbuf[MC_CMD_NVRAM_TEST_OUT_LEN]; + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN); int rc; MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type); @@ -976,8 +978,8 @@ fail1: static int efx_mcdi_read_assertion(struct efx_nic *efx) { - u8 inbuf[MC_CMD_GET_ASSERTS_IN_LEN]; - u8 outbuf[MC_CMD_GET_ASSERTS_OUT_LEN]; + MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN); unsigned int flags, index, ofst; const char *reason; size_t outlen; @@ -1032,7 +1034,7 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx) static void efx_mcdi_exit_assertion(struct efx_nic *efx) { - u8 inbuf[MC_CMD_REBOOT_IN_LEN]; + MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN); /* If the MC is running debug firmware, it might now be * waiting for a debugger to attach, but we just want it to @@ -1062,7 +1064,7 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx) void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) { - u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN]; + MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN); int rc; BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF); @@ -1091,7 +1093,7 @@ int efx_mcdi_reset_port(struct efx_nic *efx) int efx_mcdi_reset_mc(struct efx_nic *efx) { - u8 inbuf[MC_CMD_REBOOT_IN_LEN]; + MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN); int rc; BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); @@ -1110,8 +1112,8 @@ int efx_mcdi_reset_mc(struct efx_nic *efx) static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type, const u8 *mac, int *id_out) { - u8 inbuf[MC_CMD_WOL_FILTER_SET_IN_LEN]; - u8 outbuf[MC_CMD_WOL_FILTER_SET_OUT_LEN]; + MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN); size_t outlen; int rc; @@ -1151,7 +1153,7 @@ efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out) int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out) { - u8 outbuf[MC_CMD_WOL_FILTER_GET_OUT_LEN]; + MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN); size_t outlen; int rc; @@ -1178,7 +1180,7 @@ fail: int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id) { - u8 inbuf[MC_CMD_WOL_FILTER_REMOVE_IN_LEN]; + MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN); int rc; MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id); diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 3ba2e5b..c881118 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -85,6 +85,9 @@ extern void efx_mcdi_process_event(struct efx_channel *channel, efx_qword_t *event); extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); 
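Aside: the MCDI_DECLARE_BUF() macro defined just below simply rounds the requested length up to a whole number of 32-bit words, so call sites no longer hand-pad with ALIGN() as efx_mcdi_print_fwver() did. A tiny standalone illustration of that rounding (kernel-style ALIGN; the buffer size used here is hypothetical, not an sfc constant):

/* Sketch of the padding behaviour, assuming a power-of-two alignment. */
#include <assert.h>

#define ALIGN(x, a)			(((x) + (a) - 1) & ~((a) - 1))
#define MCDI_DECLARE_BUF(_name, _len)	unsigned char _name[ALIGN(_len, 4)]

int main(void)
{
	MCDI_DECLARE_BUF(buf, 10);	/* 10-byte request padded to 12 bytes */

	assert(sizeof(buf) == 12);
	return 0;
}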
+#define MCDI_DECLARE_BUF(_name, _len) \ + u8 _name[ALIGN(_len, 4)] + #define MCDI_PTR2(_buf, _ofst) \ (((u8 *)_buf) + _ofst) #define MCDI_SET_DWORD2(_buf, _ofst, _value) \ diff --git a/drivers/net/ethernet/sfc/mcdi_mac.c b/drivers/net/ethernet/sfc/mcdi_mac.c index 1003f30..fafdc8e 100644 --- a/drivers/net/ethernet/sfc/mcdi_mac.c +++ b/drivers/net/ethernet/sfc/mcdi_mac.c @@ -15,7 +15,7 @@ int efx_mcdi_set_mac(struct efx_nic *efx) { u32 reject, fcntl; - u8 cmdbytes[MC_CMD_SET_MAC_IN_LEN]; + MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN); memcpy(cmdbytes + MC_CMD_SET_MAC_IN_ADDR_OFST, efx->net_dev->dev_addr, ETH_ALEN); @@ -55,7 +55,7 @@ int efx_mcdi_set_mac(struct efx_nic *efx) bool efx_mcdi_mac_check_fault(struct efx_nic *efx) { - u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); size_t outlength; int rc; @@ -75,7 +75,7 @@ bool efx_mcdi_mac_check_fault(struct efx_nic *efx) int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr, u32 dma_len, int enable, int clear) { - u8 inbuf[MC_CMD_MAC_STATS_IN_LEN]; + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); int rc; efx_dword_t *cmd_ptr; int period = enable ? 1000 : 0; @@ -115,16 +115,21 @@ fail: int efx_mcdi_mac_reconfigure(struct efx_nic *efx) { + MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MCAST_HASH_IN_LEN); int rc; + BUILD_BUG_ON(MC_CMD_SET_MCAST_HASH_IN_LEN != + MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST + + sizeof(efx->multicast_hash)); + WARN_ON(!mutex_is_locked(&efx->mac_lock)); rc = efx_mcdi_set_mac(efx); if (rc != 0) return rc; + memcpy(MCDI_PTR(inbuf, SET_MCAST_HASH_IN_HASH0), + efx->multicast_hash.byte, sizeof(efx->multicast_hash)); return efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH, - efx->multicast_hash.byte, - sizeof(efx->multicast_hash), - NULL, 0, NULL); + inbuf, sizeof(inbuf), NULL, 0, NULL); } diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c index fe42c3b..4e8a138 100644 --- a/drivers/net/ethernet/sfc/mcdi_mon.c +++ b/drivers/net/ethernet/sfc/mcdi_mon.c @@ -92,7 +92,7 @@ struct efx_mcdi_mon_attribute { static int efx_mcdi_mon_update(struct efx_nic *efx) { struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); - u8 inbuf[MC_CMD_READ_SENSORS_IN_LEN]; + MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_SENSORS_IN_LEN); int rc; MCDI_SET_DWORD(inbuf, READ_SENSORS_IN_DMA_ADDR_LO, @@ -236,7 +236,7 @@ int efx_mcdi_mon_probe(struct efx_nic *efx) { struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); unsigned int n_attrs, n_temp = 0, n_cool = 0, n_in = 0; - u8 outbuf[MC_CMD_SENSOR_INFO_OUT_LENMAX]; + MCDI_DECLARE_BUF(outbuf, MC_CMD_SENSOR_INFO_OUT_LENMAX); size_t outlen; char name[12]; u32 mask; diff --git a/drivers/net/ethernet/sfc/mcdi_phy.c b/drivers/net/ethernet/sfc/mcdi_phy.c index 13cb40f..37b6ed9 100644 --- a/drivers/net/ethernet/sfc/mcdi_phy.c +++ b/drivers/net/ethernet/sfc/mcdi_phy.c @@ -36,7 +36,7 @@ struct efx_mcdi_phy_data { static int efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg) { - u8 outbuf[MC_CMD_GET_PHY_CFG_OUT_LEN]; + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_CFG_OUT_LEN); size_t outlen; int rc; @@ -78,7 +78,7 @@ static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities, u32 flags, u32 loopback_mode, u32 loopback_speed) { - u8 inbuf[MC_CMD_SET_LINK_IN_LEN]; + MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN); int rc; BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0); @@ -102,7 +102,7 @@ fail: static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes) { - u8 outbuf[MC_CMD_GET_LOOPBACK_MODES_OUT_LEN]; + MCDI_DECLARE_BUF(outbuf, 
MC_CMD_GET_LOOPBACK_MODES_OUT_LEN); size_t outlen; int rc; @@ -129,8 +129,8 @@ int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus, unsigned int prtad, unsigned int devad, u16 addr, u16 *value_out, u32 *status_out) { - u8 inbuf[MC_CMD_MDIO_READ_IN_LEN]; - u8 outbuf[MC_CMD_MDIO_READ_OUT_LEN]; + MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN); size_t outlen; int rc; @@ -157,8 +157,8 @@ int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus, unsigned int prtad, unsigned int devad, u16 addr, u16 value, u32 *status_out) { - u8 inbuf[MC_CMD_MDIO_WRITE_IN_LEN]; - u8 outbuf[MC_CMD_MDIO_WRITE_OUT_LEN]; + MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_WRITE_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_WRITE_OUT_LEN); size_t outlen; int rc; @@ -307,7 +307,7 @@ static u32 mcdi_to_ethtool_media(u32 media) static int efx_mcdi_phy_probe(struct efx_nic *efx) { struct efx_mcdi_phy_data *phy_data; - u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); u32 caps; int rc; @@ -472,7 +472,7 @@ void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa) static bool efx_mcdi_phy_poll(struct efx_nic *efx) { struct efx_link_state old_state = efx->link_state; - u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); int rc; WARN_ON(!mutex_is_locked(&efx->mac_lock)); @@ -507,7 +507,7 @@ static void efx_mcdi_phy_remove(struct efx_nic *efx) static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) { struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; - u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); int rc; ecmd->supported = @@ -579,7 +579,7 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec static int efx_mcdi_phy_test_alive(struct efx_nic *efx) { - u8 outbuf[MC_CMD_GET_PHY_STATE_OUT_LEN]; + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_STATE_OUT_LEN); size_t outlen; int rc; @@ -744,8 +744,8 @@ static const char *efx_mcdi_phy_test_name(struct efx_nic *efx, static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx, struct ethtool_eeprom *ee, u8 *data) { - u8 outbuf[MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX]; - u8 inbuf[MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN]; + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX); + MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN); size_t outlen; int rc; unsigned int payload_len; diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index b495394..f79f6fb 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -311,7 +311,7 @@ static int efx_phc_enable(struct ptp_clock_info *ptp, /* Enable MCDI PTP support. 
*/ static int efx_ptp_enable(struct efx_nic *efx) { - u8 inbuf[MC_CMD_PTP_IN_ENABLE_LEN]; + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN); MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE); MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE, @@ -329,7 +329,7 @@ static int efx_ptp_enable(struct efx_nic *efx) */ static int efx_ptp_disable(struct efx_nic *efx) { - u8 inbuf[MC_CMD_PTP_IN_DISABLE_LEN]; + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_DISABLE_LEN); MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE); return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), @@ -518,7 +518,7 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf, static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings) { struct efx_ptp_data *ptp = efx->ptp_data; - u8 synch_buf[MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX]; + MCDI_DECLARE_BUF(synch_buf, MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX); size_t response_length; int rc; unsigned long timeout; @@ -569,7 +569,7 @@ static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb) int rc = -EIO; /* MCDI driver requires word aligned lengths */ size_t len = ALIGN(MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len), 4); - u8 txtime[MC_CMD_PTP_OUT_TRANSMIT_LEN]; + MCDI_DECLARE_BUF(txtime, MC_CMD_PTP_OUT_TRANSMIT_LEN); MCDI_SET_DWORD(txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT); MCDI_SET_DWORD(txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len); @@ -1359,7 +1359,7 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) struct efx_ptp_data, phc_clock_info); struct efx_nic *efx = ptp_data->channel->efx; - u8 inadj[MC_CMD_PTP_IN_ADJUST_LEN]; + MCDI_DECLARE_BUF(inadj, MC_CMD_PTP_IN_ADJUST_LEN); s64 adjustment_ns; int rc; @@ -1394,7 +1394,7 @@ static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) phc_clock_info); struct efx_nic *efx = ptp_data->channel->efx; struct timespec delta_ts = ns_to_timespec(delta); - u8 inbuf[MC_CMD_PTP_IN_ADJUST_LEN]; + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ADJUST_LEN); MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_FREQ_LO, 0); @@ -1411,8 +1411,8 @@ static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts) struct efx_ptp_data, phc_clock_info); struct efx_nic *efx = ptp_data->channel->efx; - u8 inbuf[MC_CMD_PTP_IN_READ_NIC_TIME_LEN]; - u8 outbuf[MC_CMD_PTP_OUT_READ_NIC_TIME_LEN]; + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_READ_NIC_TIME_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_READ_NIC_TIME_LEN); int rc; MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME); diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c index 90f8d16..8e46fac 100644 --- a/drivers/net/ethernet/sfc/siena_sriov.c +++ b/drivers/net/ethernet/sfc/siena_sriov.c @@ -197,8 +197,8 @@ static unsigned abs_index(struct efx_vf *vf, unsigned index) static int efx_sriov_cmd(struct efx_nic *efx, bool enable, unsigned *vi_scale_out, unsigned *vf_total_out) { - u8 inbuf[MC_CMD_SRIOV_IN_LEN]; - u8 outbuf[MC_CMD_SRIOV_OUT_LEN]; + MCDI_DECLARE_BUF(inbuf, MC_CMD_SRIOV_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_SRIOV_OUT_LEN); unsigned vi_scale, vf_total; size_t outlen; int rc; -- cgit v0.10.2 From 30af691066dccba9d5e44c6656030faec25458be Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Fri, 14 Sep 2012 17:30:44 +0100 Subject: sfc: Rationalise MCDI buffer accessors Add _MCDI_DWORD() which yields an lvalue for the given dword field and change MCDI_DWORD(), MCDI_SET_DWORD() and MCDI_QWORD() to use it. 
Fold the rather trivial MCDI_PTR2() into MCDI_PTR() and _MCDI_DWORD(). Remove MCDI_SET_DWORD2() and MCDI_QWORD2(). MCDI_DWORD2() should also go, but it still has one user which we'll get rid of later. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index c881118..03fb6c0 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -87,34 +87,25 @@ extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); #define MCDI_DECLARE_BUF(_name, _len) \ u8 _name[ALIGN(_len, 4)] +#define MCDI_PTR(_buf, _field) \ + ((u8 *)(_buf) + MC_CMD_ ## _field ## _OFST) +#define _MCDI_DWORD(_buf, _field) \ + ((efx_dword_t *)MCDI_PTR(_buf, _field)) + +#define MCDI_DWORD2(_buf, _ofst) \ + EFX_DWORD_FIELD(*(efx_dword_t *)((u8 *)(_buf) + (_ofst)), EFX_DWORD_0) + +#define MCDI_SET_DWORD(_buf, _field, _value) \ + EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value) +#define MCDI_DWORD(_buf, _field) \ + EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0) +#define MCDI_QWORD(_buf, _field) \ + (EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], EFX_DWORD_0) | \ + (u64)EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], EFX_DWORD_0) << 32) -#define MCDI_PTR2(_buf, _ofst) \ - (((u8 *)_buf) + _ofst) -#define MCDI_SET_DWORD2(_buf, _ofst, _value) \ - EFX_POPULATE_DWORD_1(*((efx_dword_t *)MCDI_PTR2(_buf, _ofst)), \ - EFX_DWORD_0, _value) -#define MCDI_DWORD2(_buf, _ofst) \ - EFX_DWORD_FIELD(*((efx_dword_t *)MCDI_PTR2(_buf, _ofst)), \ - EFX_DWORD_0) -#define MCDI_QWORD2(_buf, _ofst) \ - EFX_QWORD_FIELD64(*((efx_qword_t *)MCDI_PTR2(_buf, _ofst)), \ - EFX_QWORD_0) - -#define MCDI_PTR(_buf, _ofst) \ - MCDI_PTR2(_buf, MC_CMD_ ## _ofst ## _OFST) #define MCDI_ARRAY_PTR(_buf, _field, _type, _index) \ - MCDI_PTR2(_buf, \ - MC_CMD_ ## _field ## _OFST + \ - (_index) * MC_CMD_ ## _type ## _TYPEDEF_LEN) -#define MCDI_SET_DWORD(_buf, _ofst, _value) \ - MCDI_SET_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST, _value) -#define MCDI_DWORD(_buf, _ofst) \ - MCDI_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST) -#define MCDI_QWORD(_buf, _ofst) \ - MCDI_QWORD2(_buf, MC_CMD_ ## _ofst ## _OFST) - -#define MCDI_EVENT_FIELD(_ev, _field) \ - EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field) + (MCDI_PTR(_buf, _field) + \ + (_index) * MC_CMD_ ## _type ## _TYPEDEF_LEN) #define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \ EFX_EXTRACT_DWORD( \ *((efx_dword_t *) \ @@ -124,6 +115,9 @@ extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _LBN & 0x1f) + \ MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _WIDTH - 1) +#define MCDI_EVENT_FIELD(_ev, _field) \ + EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field) + extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len); extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, bool *was_attached_out); -- cgit v0.10.2 From a649dfcb48ea60490d9bb06fbeb65baaf59524e3 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Fri, 14 Sep 2012 17:30:58 +0100 Subject: sfc: Fill out the set of MCDI accessors We need to access arrays of 16-bit words and 32-bit dwords in MCDI buffers based on the MCDI protocol definitions. We should also be able to read and write fields within structures, without specifying an array index each time. So add MCDI_FIELD() and make MCDI_ARRAY_FIELD() use it. Also add MCDI_SET_FIELD(). 
Split MCDI_ARRAY_PTR() into MCDI_ARRAY_STRUCT_PTR() and _MCDI_ARRAY_PTR(), which are currently identical but will diverge in later changes. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 03fb6c0..969ecc0 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -87,8 +87,10 @@ extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); #define MCDI_DECLARE_BUF(_name, _len) \ u8 _name[ALIGN(_len, 4)] +#define _MCDI_PTR(_buf, _offset) \ + ((u8 *)(_buf) + (_offset)) #define MCDI_PTR(_buf, _field) \ - ((u8 *)(_buf) + MC_CMD_ ## _field ## _OFST) + _MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST) #define _MCDI_DWORD(_buf, _field) \ ((efx_dword_t *)MCDI_PTR(_buf, _field)) @@ -102,18 +104,38 @@ extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); #define MCDI_QWORD(_buf, _field) \ (EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], EFX_DWORD_0) | \ (u64)EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], EFX_DWORD_0) << 32) +#define MCDI_FIELD(_ptr, _type, _field) \ + EFX_EXTRACT_DWORD( \ + *(efx_dword_t *) \ + _MCDI_PTR(_ptr, MC_CMD_ ## _type ## _ ## _field ## _OFST & ~3),\ + MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f, \ + (MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f) + \ + MC_CMD_ ## _type ## _ ## _field ## _WIDTH - 1) -#define MCDI_ARRAY_PTR(_buf, _field, _type, _index) \ +#define _MCDI_ARRAY_PTR(_buf, _field, _index) \ (MCDI_PTR(_buf, _field) + \ - (_index) * MC_CMD_ ## _type ## _TYPEDEF_LEN) + (_index) * MC_CMD_ ## _field ## _LEN) +#define MCDI_DECLARE_STRUCT_PTR(_name) \ + u8 *_name +#define MCDI_ARRAY_STRUCT_PTR _MCDI_ARRAY_PTR +#define MCDI_VAR_ARRAY_LEN(_len, _field) \ + min_t(size_t, MC_CMD_ ## _field ## _MAXNUM, \ + ((_len) - MC_CMD_ ## _field ## _OFST) / MC_CMD_ ## _field ## _LEN) +#define MCDI_ARRAY_WORD(_buf, _field, _index) \ + (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \ + le16_to_cpu(*(__force const __le16 *) \ + _MCDI_ARRAY_PTR(_buf, _field, _index))) +#define _MCDI_ARRAY_DWORD(_buf, _field, _index) \ + (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 4) + \ + (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index)) +#define MCDI_SET_ARRAY_DWORD(_buf, _field, _index, _value) \ + EFX_SET_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), \ + EFX_DWORD_0, _value) +#define MCDI_ARRAY_DWORD(_buf, _field, _index) \ + EFX_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), EFX_DWORD_0) #define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \ - EFX_EXTRACT_DWORD( \ - *((efx_dword_t *) \ - (MCDI_ARRAY_PTR(_buf, _field1, _type, _index) + \ - (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _OFST & ~3))), \ - MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _LBN & 0x1f, \ - (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _LBN & 0x1f) + \ - MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _WIDTH - 1) + MCDI_FIELD(MCDI_ARRAY_STRUCT_PTR(_buf, _field1, _index), \ + _type ## _TYPEDEF, _field2) #define MCDI_EVENT_FIELD(_ev, _field) \ EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field) -- cgit v0.10.2 From d0c2ee99e54c0fd76938236e863ad7d3992f044f Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Tue, 20 Aug 2013 15:47:12 +0100 Subject: sfc: Introduce and use MCDI_CTL_SDU_LEN_MAX_V1 macro for Siena-specific code The MCDI version 2 protocol supports larger payloads, but will not be implemented on Siena. 
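For illustration, a minimal sketch of the intended split (mirroring the siena_sriov.c hunk that follows): strictly Siena-specific code bounds its requests by the v1 limit by name, so it is unaffected if the generic MCDI_CTL_SDU_LEN_MAX is later raised for MCDIv2-capable NICs.

    	used = MC_CMD_MEMCPY_IN_LEN(count);
    	if (WARN_ON(used > MCDI_CTL_SDU_LEN_MAX_V1))	/* Siena: v1 limit */
    		return -ENOBUFS;

    	/* Allocate room for the largest v1 request */
    	inbuf = kzalloc(MCDI_CTL_SDU_LEN_MAX_V1, GFP_KERNEL);
    	if (inbuf == NULL)
    		return -ENOMEM;

Generic code paths keep the unsuffixed name and pick up the larger limit automatically once MCDIv2 support is added.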
Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index c5c9747..5f2846c 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -100,7 +100,9 @@ #define MCDI_HEADER_XFLAGS_EVREQ 0x01 /* Maximum number of payload bytes */ -#define MCDI_CTL_SDU_LEN_MAX 0xfc +#define MCDI_CTL_SDU_LEN_MAX_V1 0xfc + +#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V1 /* The MC can generate events for two reasons: * - To complete a shared memory request if XFLAGS_EVREQ was set diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c index 8e46fac..c376e90 100644 --- a/drivers/net/ethernet/sfc/siena_sriov.c +++ b/drivers/net/ethernet/sfc/siena_sriov.c @@ -248,11 +248,11 @@ static int efx_sriov_memcpy(struct efx_nic *efx, struct efx_memcpy_req *req, mb(); /* Finish writing source/reading dest before DMA starts */ used = MC_CMD_MEMCPY_IN_LEN(count); - if (WARN_ON(used > MCDI_CTL_SDU_LEN_MAX)) + if (WARN_ON(used > MCDI_CTL_SDU_LEN_MAX_V1)) return -ENOBUFS; /* Allocate room for the largest request */ - inbuf = kzalloc(MCDI_CTL_SDU_LEN_MAX, GFP_KERNEL); + inbuf = kzalloc(MCDI_CTL_SDU_LEN_MAX_V1, GFP_KERNEL); if (inbuf == NULL) return -ENOMEM; @@ -270,7 +270,8 @@ static int efx_sriov_memcpy(struct efx_nic *efx, struct efx_memcpy_req *req, from_lo = (u32)req->from_addr; from_hi = (u32)(req->from_addr >> 32); } else { - if (WARN_ON(used + req->length > MCDI_CTL_SDU_LEN_MAX)) { + if (WARN_ON(used + req->length > + MCDI_CTL_SDU_LEN_MAX_V1)) { rc = -ENOBUFS; goto out; } -- cgit v0.10.2 From c5bb0e9891ba1f7c871adc09d9ef727e1c0c1c1e Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Fri, 14 Sep 2012 17:31:33 +0100 Subject: sfc: Use proper macros to declare and access MCDI arrays A few functions are using heap buffers; change them to use stack buffers as we really don't need to resort to the heap for a 252 byte buffer in process context. MC_CMD_MEMCPY is quite weird in that it can use inline data placed in the request buffer after the array of records. Thus there are two variable-length arrays and we can't use the normal accessors for the second. So we have to use _MCDI_PTR() in efx_sriov_memcpy(). Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index d65b562..d6d1ff1 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -668,7 +668,7 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, u16 *fw_subtype_list, u32 *capabilities) { MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX); - size_t outlen, offset, i; + size_t outlen, i; int port_num = efx_port_num(efx); int rc; @@ -684,22 +684,21 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, goto fail; } - offset = (port_num) - ? MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST - : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST; if (mac_address) - memcpy(mac_address, outbuf + offset, ETH_ALEN); + memcpy(mac_address, + port_num ? + MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) : + MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0), + ETH_ALEN); if (fw_subtype_list) { - /* Byte-swap and truncate or zero-pad as necessary */ - offset = MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST; for (i = 0; - i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; - i++) { - fw_subtype_list[i] = - (offset + 2 <= outlen) ? 
- le16_to_cpup((__le16 *)(outbuf + offset)) : 0; - offset += 2; - } + i < MCDI_VAR_ARRAY_LEN(outlen, + GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST); + i++) + fw_subtype_list[i] = MCDI_ARRAY_WORD( + outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i); + for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++) + fw_subtype_list[i] = 0; } if (capabilities) { if (port_num) @@ -980,7 +979,7 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx) { MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN); MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN); - unsigned int flags, index, ofst; + unsigned int flags, index; const char *reason; size_t outlen; int retry; @@ -1022,12 +1021,13 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx) MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS)); /* Print out the registers */ - ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST; - for (index = 1; index < 32; index++) { - netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", index, - MCDI_DWORD2(outbuf, ofst)); - ofst += sizeof(efx_dword_t); - } + for (index = 0; + index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM; + index++) + netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", + 1 + index, + MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS, + index)); return 0; } @@ -1201,34 +1201,31 @@ int efx_mcdi_flush_rxqs(struct efx_nic *efx) { struct efx_channel *channel; struct efx_rx_queue *rx_queue; - __le32 *qid; + MCDI_DECLARE_BUF(inbuf, + MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS)); int rc, count; BUILD_BUG_ON(EFX_MAX_CHANNELS > MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM); - qid = kmalloc(EFX_MAX_CHANNELS * sizeof(*qid), GFP_KERNEL); - if (qid == NULL) - return -ENOMEM; - count = 0; efx_for_each_channel(channel, efx) { efx_for_each_channel_rx_queue(rx_queue, channel) { if (rx_queue->flush_pending) { rx_queue->flush_pending = false; atomic_dec(&efx->rxq_flush_pending); - qid[count++] = cpu_to_le32( - efx_rx_queue_index(rx_queue)); + MCDI_SET_ARRAY_DWORD( + inbuf, FLUSH_RX_QUEUES_IN_QID_OFST, + count, efx_rx_queue_index(rx_queue)); + count++; } } } - rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)qid, - count * sizeof(*qid), NULL, 0, NULL); + rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf, + MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL); WARN_ON(rc < 0); - kfree(qid); - return rc; } diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 969ecc0..f8ab64f 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -94,9 +94,6 @@ extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); #define _MCDI_DWORD(_buf, _field) \ ((efx_dword_t *)MCDI_PTR(_buf, _field)) -#define MCDI_DWORD2(_buf, _ofst) \ - EFX_DWORD_FIELD(*(efx_dword_t *)((u8 *)(_buf) + (_ofst)), EFX_DWORD_0) - #define MCDI_SET_DWORD(_buf, _field, _value) \ EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value) #define MCDI_DWORD(_buf, _field) \ diff --git a/drivers/net/ethernet/sfc/mcdi_mac.c b/drivers/net/ethernet/sfc/mcdi_mac.c index fafdc8e..cf16bf1 100644 --- a/drivers/net/ethernet/sfc/mcdi_mac.c +++ b/drivers/net/ethernet/sfc/mcdi_mac.c @@ -17,7 +17,7 @@ int efx_mcdi_set_mac(struct efx_nic *efx) u32 reject, fcntl; MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN); - memcpy(cmdbytes + MC_CMD_SET_MAC_IN_ADDR_OFST, + memcpy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR), efx->net_dev->dev_addr, ETH_ALEN); MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU, diff --git a/drivers/net/ethernet/sfc/mcdi_phy.c b/drivers/net/ethernet/sfc/mcdi_phy.c index 37b6ed9..86c0d21 
100644 --- a/drivers/net/ethernet/sfc/mcdi_phy.c +++ b/drivers/net/ethernet/sfc/mcdi_phy.c @@ -615,17 +615,15 @@ static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode, unsigned int retry, i, count = 0; size_t outlen; u32 status; - u8 *buf, *ptr; + MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_SFT9001_LEN); + u8 *ptr; int rc; - buf = kzalloc(0x100, GFP_KERNEL); - if (buf == NULL) - return -ENOMEM; - BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0); - MCDI_SET_DWORD(buf, START_BIST_IN_TYPE, bist_mode); - rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST, buf, MC_CMD_START_BIST_IN_LEN, - NULL, 0, NULL); + MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_mode); + rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST, + inbuf, MC_CMD_START_BIST_IN_LEN, NULL, 0, NULL); if (rc) goto out; @@ -633,11 +631,11 @@ static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode, for (retry = 0; retry < 100; ++retry) { BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0); rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0, - buf, 0x100, &outlen); + outbuf, sizeof(outbuf), &outlen); if (rc) goto out; - status = MCDI_DWORD(buf, POLL_BIST_OUT_RESULT); + status = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT); if (status != MC_CMD_POLL_BIST_RUNNING) goto finished; @@ -654,7 +652,7 @@ finished: if (efx->phy_type == PHY_TYPE_SFT9001B && (bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT || bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) { - ptr = MCDI_PTR(buf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A); + ptr = MCDI_PTR(outbuf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A); if (status == MC_CMD_POLL_BIST_PASSED && outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) { for (i = 0; i < 8; i++) { @@ -668,8 +666,6 @@ finished: rc = count; out: - kfree(buf); - return rc; } @@ -785,8 +781,7 @@ static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx, space_remaining : payload_len; memcpy(user_data, - outbuf + page_off + - MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST, + MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA) + page_off, to_copy); space_remaining -= to_copy; diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index f79f6fb..ec2cf4d 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -294,8 +294,7 @@ struct efx_ptp_data { struct work_struct pps_work; struct workqueue_struct *pps_workwq; bool nic_ts_enabled; - u8 txbuf[ALIGN(MC_CMD_PTP_IN_TRANSMIT_LEN( - MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM), 4)]; + MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX); struct efx_ptp_timeset timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM]; }; @@ -396,7 +395,8 @@ static void efx_ptp_send_times(struct efx_nic *efx, } /* Read a timeset from the MC's results and partial process. */ -static void efx_ptp_read_timeset(u8 *data, struct efx_ptp_timeset *timeset) +static void efx_ptp_read_timeset(MCDI_DECLARE_STRUCT_PTR(data), + struct efx_ptp_timeset *timeset) { unsigned start_ns, end_ns; @@ -425,12 +425,14 @@ static void efx_ptp_read_timeset(u8 *data, struct efx_ptp_timeset *timeset) * busy. A number of readings are taken so that, hopefully, at least one good * synchronisation will be seen in the results. 
*/ -static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf, - size_t response_length, - const struct pps_event_time *last_time) +static int +efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf), + size_t response_length, + const struct pps_event_time *last_time) { - unsigned number_readings = (response_length / - MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN); + unsigned number_readings = + MCDI_VAR_ARRAY_LEN(response_length, + PTP_OUT_SYNCHRONIZE_TIMESET); unsigned i; unsigned total; unsigned ngood = 0; @@ -447,8 +449,10 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf, * appera to be erroneous. */ for (i = 0; i < number_readings; i++) { - efx_ptp_read_timeset(synch_buf, &ptp->timeset[i]); - synch_buf += MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN; + efx_ptp_read_timeset( + MCDI_ARRAY_STRUCT_PTR(synch_buf, + PTP_OUT_SYNCHRONIZE_TIMESET, i), + &ptp->timeset[i]); } /* Find the last good host-MC synchronization result. The MC times @@ -564,15 +568,15 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings) /* Transmit a PTP packet, via the MCDI interface, to the wire. */ static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb) { - u8 *txbuf = efx->ptp_data->txbuf; + struct efx_ptp_data *ptp_data = efx->ptp_data; struct skb_shared_hwtstamps timestamps; int rc = -EIO; /* MCDI driver requires word aligned lengths */ size_t len = ALIGN(MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len), 4); MCDI_DECLARE_BUF(txtime, MC_CMD_PTP_OUT_TRANSMIT_LEN); - MCDI_SET_DWORD(txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT); - MCDI_SET_DWORD(txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len); + MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT); + MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len); if (skb_shinfo(skb)->nr_frags != 0) { rc = skb_linearize(skb); if (rc != 0) @@ -585,9 +589,10 @@ static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb) goto fail; } skb_copy_from_linear_data(skb, - &txbuf[MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST], + MCDI_PTR(ptp_data->txbuf, + PTP_IN_TRANSMIT_PACKET), len); - rc = efx_mcdi_rpc(efx, MC_CMD_PTP, txbuf, len, txtime, + rc = efx_mcdi_rpc(efx, MC_CMD_PTP, ptp_data->txbuf, len, txtime, sizeof(txtime), &len); if (rc != 0) goto fail; diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c index c376e90..198044f 100644 --- a/drivers/net/ethernet/sfc/siena_sriov.c +++ b/drivers/net/ethernet/sfc/siena_sriov.c @@ -240,25 +240,22 @@ static void efx_sriov_usrev(struct efx_nic *efx, bool enabled) static int efx_sriov_memcpy(struct efx_nic *efx, struct efx_memcpy_req *req, unsigned int count) { - u8 *inbuf, *record; - unsigned int used; + MCDI_DECLARE_BUF(inbuf, MCDI_CTL_SDU_LEN_MAX_V1); + MCDI_DECLARE_STRUCT_PTR(record); + unsigned int index, used; u32 from_rid, from_hi, from_lo; int rc; mb(); /* Finish writing source/reading dest before DMA starts */ - used = MC_CMD_MEMCPY_IN_LEN(count); - if (WARN_ON(used > MCDI_CTL_SDU_LEN_MAX_V1)) + if (WARN_ON(count > MC_CMD_MEMCPY_IN_RECORD_MAXNUM)) return -ENOBUFS; + used = MC_CMD_MEMCPY_IN_LEN(count); - /* Allocate room for the largest request */ - inbuf = kzalloc(MCDI_CTL_SDU_LEN_MAX_V1, GFP_KERNEL); - if (inbuf == NULL) - return -ENOMEM; - - record = inbuf; - MCDI_SET_DWORD(record, MEMCPY_IN_RECORD, count); - while (count-- > 0) { + for (index = 0; index < count; index++) { + record = MCDI_ARRAY_STRUCT_PTR(inbuf, MEMCPY_IN_RECORD, index); + MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_NUM_RECORDS, + count); 
MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_RID, req->to_rid); MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO, @@ -279,7 +276,8 @@ static int efx_sriov_memcpy(struct efx_nic *efx, struct efx_memcpy_req *req, from_rid = MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE; from_lo = used; from_hi = 0; - memcpy(inbuf + used, req->from_buf, req->length); + memcpy(_MCDI_PTR(inbuf, used), req->from_buf, + req->length); used += req->length; } @@ -292,13 +290,10 @@ static int efx_sriov_memcpy(struct efx_nic *efx, struct efx_memcpy_req *req, req->length); ++req; - record += MC_CMD_MEMCPY_IN_RECORD_LEN; } rc = efx_mcdi_rpc(efx, MC_CMD_MEMCPY, inbuf, used, NULL, 0, NULL); out: - kfree(inbuf); - mb(); /* Don't write source/read dest before DMA is complete */ return rc; @@ -685,16 +680,12 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf) unsigned vf_offset = EFX_VI_BASE + vf->index * efx_vf_size(efx); unsigned timeout = HZ; unsigned index, rxqs_count; - __le32 *rxqs; + MCDI_DECLARE_BUF(inbuf, MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX); int rc; BUILD_BUG_ON(VF_MAX_RX_QUEUES > MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM); - rxqs = kmalloc(count * sizeof(*rxqs), GFP_KERNEL); - if (rxqs == NULL) - return VFDI_RC_ENOMEM; - rtnl_lock(); siena_prepare_flush(efx); rtnl_unlock(); @@ -709,14 +700,19 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf) vf_offset + index); efx_writeo(efx, ®, FR_AZ_TX_FLUSH_DESCQ); } - if (test_bit(index, vf->rxq_mask)) - rxqs[rxqs_count++] = cpu_to_le32(vf_offset + index); + if (test_bit(index, vf->rxq_mask)) { + MCDI_SET_ARRAY_DWORD( + inbuf, FLUSH_RX_QUEUES_IN_QID_OFST, + rxqs_count, vf_offset + index); + rxqs_count++; + } } atomic_set(&vf->rxq_retry_count, 0); while (timeout && (vf->rxq_count || vf->txq_count)) { - rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)rxqs, - rxqs_count * sizeof(*rxqs), NULL, 0, NULL); + rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf, + MC_CMD_FLUSH_RX_QUEUES_IN_LEN(rxqs_count), + NULL, 0, NULL); WARN_ON(rc < 0); timeout = wait_event_timeout(vf->flush_waitq, @@ -726,8 +722,10 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf) for (index = 0; index < count; ++index) { if (test_and_clear_bit(index, vf->rxq_retry_mask)) { atomic_dec(&vf->rxq_retry_count); - rxqs[rxqs_count++] = - cpu_to_le32(vf_offset + index); + MCDI_SET_ARRAY_DWORD( + inbuf, FLUSH_RX_QUEUES_IN_QID_OFST, + rxqs_count, vf_offset + index); + rxqs_count++; } } } @@ -750,7 +748,6 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf) } efx_sriov_bufs(efx, vf->buftbl_base, NULL, EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx)); - kfree(rxqs); efx_vfdi_flush_clear(vf); vf->evq0_count = 0; -- cgit v0.10.2 From 9528b9219348e0a013f4b587958a8ba9c96d7e20 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Fri, 14 Sep 2012 17:31:41 +0100 Subject: sfc: Ensure MCDI buffers, but not lengths, are dword aligned We currently require that MCDI request and response lengths are multiples of 4 bytes, because we will copy dwords in and out of shared memory and we want to be sure we won't read or write out of bounds. But all we really need to know is that there is sufficient padding for that. Also, we should ensure that buffers are dword-aligned, as on some architectures misaligned access will result in data corruption or a crash. Change the buffer type to array-of-efx_dword_t and remove the requirement that the lengths are multiples of 4. 
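As a sketch of the effect on callers (condensed from the ptp.c hunk further down): the MCDI_DECLARE_BUF() declaration now supplies the dword alignment and trailing padding, so the caller passes the exact, possibly unaligned request length instead of rounding it up itself.

    	MCDI_DECLARE_BUF(txtime, MC_CMD_PTP_OUT_TRANSMIT_LEN);

    	/* Previously: len = ALIGN(MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len), 4);
    	 * now the unaligned length is passed straight to the MCDI layer.
    	 */
    	rc = efx_mcdi_rpc(efx, MC_CMD_PTP,
    			  ptp_data->txbuf, MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len),
    			  txtime, sizeof(txtime), &len);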
Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index d6d1ff1..3f55b16 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -67,7 +67,7 @@ void efx_mcdi_init(struct efx_nic *efx) } static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, - const u8 *inbuf, size_t inlen) + const efx_dword_t *inbuf, size_t inlen) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); @@ -75,9 +75,10 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, unsigned int i; efx_dword_t hdr; u32 xflags, seqno; + unsigned int inlen_dw = DIV_ROUND_UP(inlen, 4); BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); - BUG_ON(inlen & 3 || inlen >= MC_SMEM_PDU_LEN); + BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V1); seqno = mcdi->seqno & SEQ_MASK; xflags = 0; @@ -94,8 +95,8 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, efx_writed(efx, &hdr, pdu); - for (i = 0; i < inlen; i += 4) - _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i); + for (i = 0; i < inlen_dw; i++) + efx_writed(efx, &inbuf[i], pdu + 4 + 4 * i); /* Ensure the payload is written out before the header */ wmb(); @@ -104,17 +105,19 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, _efx_writed(efx, (__force __le32) 0x45789abc, doorbell); } -static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) +static void +efx_mcdi_copyout(struct efx_nic *efx, efx_dword_t *outbuf, size_t outlen) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); + unsigned int outlen_dw = DIV_ROUND_UP(outlen, 4); int i; BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); - BUG_ON(outlen & 3 || outlen >= MC_SMEM_PDU_LEN); + BUG_ON(outlen > MCDI_CTL_SDU_LEN_MAX_V1); - for (i = 0; i < outlen; i += 4) - *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i); + for (i = 0; i < outlen_dw; i++) + efx_readd(efx, &outbuf[i], pdu + 4 + 4 * i); } static int efx_mcdi_poll(struct efx_nic *efx) @@ -328,7 +331,8 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno, } int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, - const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen, + const efx_dword_t *inbuf, size_t inlen, + efx_dword_t *outbuf, size_t outlen, size_t *outlen_actual) { efx_mcdi_rpc_start(efx, cmd, inbuf, inlen); @@ -336,8 +340,8 @@ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, outbuf, outlen, outlen_actual); } -void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, const u8 *inbuf, - size_t inlen) +void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, + const efx_dword_t *inbuf, size_t inlen) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); @@ -354,7 +358,8 @@ void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, const u8 *inbuf, } int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, - u8 *outbuf, size_t outlen, size_t *outlen_actual) + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); int rc; @@ -393,7 +398,7 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, if (rc == 0) { efx_mcdi_copyout(efx, outbuf, - min(outlen, mcdi->resplen + 3) & ~0x3); + min(outlen, mcdi->resplen)); if (outlen_actual != NULL) *outlen_actual = resplen; } else if (cmd == MC_CMD_REBOOT && rc == -EIO) diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index f8ab64f..28657a1 100644 --- 
a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -67,16 +67,18 @@ struct efx_mcdi_mon { extern void efx_mcdi_init(struct efx_nic *efx); -extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const u8 *inbuf, - size_t inlen, u8 *outbuf, size_t outlen, +extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_dword_t *outbuf, size_t outlen, size_t *outlen_actual); extern void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, - const u8 *inbuf, size_t inlen); + const efx_dword_t *inbuf, size_t inlen); extern int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, - u8 *outbuf, size_t outlen, + efx_dword_t *outbuf, size_t outlen, size_t *outlen_actual); + extern int efx_mcdi_poll_reboot(struct efx_nic *efx); extern void efx_mcdi_mode_poll(struct efx_nic *efx); extern void efx_mcdi_mode_event(struct efx_nic *efx); @@ -85,14 +87,21 @@ extern void efx_mcdi_process_event(struct efx_channel *channel, efx_qword_t *event); extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); +/* We expect that 16- and 32-bit fields in MCDI requests and responses + * are appropriately aligned. Also, on Siena we must copy to the MC + * shared memory strictly 32 bits at a time, so add any necessary + * padding. + */ #define MCDI_DECLARE_BUF(_name, _len) \ - u8 _name[ALIGN(_len, 4)] + efx_dword_t _name[DIV_ROUND_UP(_len, 4)] #define _MCDI_PTR(_buf, _offset) \ ((u8 *)(_buf) + (_offset)) #define MCDI_PTR(_buf, _field) \ _MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST) +#define _MCDI_CHECK_ALIGN(_ofst, _align) \ + ((_ofst) + BUILD_BUG_ON_ZERO((_ofst) & (_align - 1))) #define _MCDI_DWORD(_buf, _field) \ - ((efx_dword_t *)MCDI_PTR(_buf, _field)) + ((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2)) #define MCDI_SET_DWORD(_buf, _field, _value) \ EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value) @@ -109,22 +118,23 @@ extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); (MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f) + \ MC_CMD_ ## _type ## _ ## _field ## _WIDTH - 1) -#define _MCDI_ARRAY_PTR(_buf, _field, _index) \ - (MCDI_PTR(_buf, _field) + \ - (_index) * MC_CMD_ ## _field ## _LEN) +#define _MCDI_ARRAY_PTR(_buf, _field, _index, _align) \ + (_MCDI_PTR(_buf, _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, _align))\ + + (_index) * _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _LEN, _align)) #define MCDI_DECLARE_STRUCT_PTR(_name) \ - u8 *_name -#define MCDI_ARRAY_STRUCT_PTR _MCDI_ARRAY_PTR + efx_dword_t *_name +#define MCDI_ARRAY_STRUCT_PTR(_buf, _field, _index) \ + ((efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4)) #define MCDI_VAR_ARRAY_LEN(_len, _field) \ min_t(size_t, MC_CMD_ ## _field ## _MAXNUM, \ ((_len) - MC_CMD_ ## _field ## _OFST) / MC_CMD_ ## _field ## _LEN) #define MCDI_ARRAY_WORD(_buf, _field, _index) \ (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \ le16_to_cpu(*(__force const __le16 *) \ - _MCDI_ARRAY_PTR(_buf, _field, _index))) + _MCDI_ARRAY_PTR(_buf, _field, _index, 2))) #define _MCDI_ARRAY_DWORD(_buf, _field, _index) \ (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 4) + \ - (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index)) + (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4)) #define MCDI_SET_ARRAY_DWORD(_buf, _field, _index, _value) \ EFX_SET_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), \ EFX_DWORD_0, _value) diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index ec2cf4d..5612021 100644 --- 
a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -571,9 +571,8 @@ static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb) struct efx_ptp_data *ptp_data = efx->ptp_data; struct skb_shared_hwtstamps timestamps; int rc = -EIO; - /* MCDI driver requires word aligned lengths */ - size_t len = ALIGN(MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len), 4); MCDI_DECLARE_BUF(txtime, MC_CMD_PTP_OUT_TRANSMIT_LEN); + size_t len; MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT); MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len); @@ -591,9 +590,10 @@ static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb) skb_copy_from_linear_data(skb, MCDI_PTR(ptp_data->txbuf, PTP_IN_TRANSMIT_PACKET), - len); - rc = efx_mcdi_rpc(efx, MC_CMD_PTP, ptp_data->txbuf, len, txtime, - sizeof(txtime), &len); + skb->len); + rc = efx_mcdi_rpc(efx, MC_CMD_PTP, + ptp_data->txbuf, MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len), + txtime, sizeof(txtime), &len); if (rc != 0) goto fail; -- cgit v0.10.2 From 338f74df399d652788cf3bab247257ae90419c7d Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Wed, 10 Oct 2012 23:20:17 +0100 Subject: sfc: Add and use MCDI_SET_QWORD() and MCDI_SET_ARRAY_QWORD() No need to keep open-coding the assignment of high and low dwords. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 28657a1..899f094 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -88,9 +88,9 @@ extern void efx_mcdi_process_event(struct efx_channel *channel, extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); /* We expect that 16- and 32-bit fields in MCDI requests and responses - * are appropriately aligned. Also, on Siena we must copy to the MC - * shared memory strictly 32 bits at a time, so add any necessary - * padding. + * are appropriately aligned, but 64-bit fields are only + * 32-bit-aligned. Also, on Siena we must copy to the MC shared + * memory strictly 32 bits at a time, so add any necessary padding. 
*/ #define MCDI_DECLARE_BUF(_name, _len) \ efx_dword_t _name[DIV_ROUND_UP(_len, 4)] @@ -107,6 +107,13 @@ extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value) #define MCDI_DWORD(_buf, _field) \ EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0) +#define MCDI_SET_QWORD(_buf, _field, _value) \ + do { \ + EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \ + EFX_DWORD_0, (u32)(_value)); \ + EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1], \ + EFX_DWORD_0, (u64)(_value) >> 32); \ + } while (0) #define MCDI_QWORD(_buf, _field) \ (EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], EFX_DWORD_0) | \ (u64)EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], EFX_DWORD_0) << 32) @@ -140,6 +147,16 @@ extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); EFX_DWORD_0, _value) #define MCDI_ARRAY_DWORD(_buf, _field, _index) \ EFX_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), EFX_DWORD_0) +#define _MCDI_ARRAY_QWORD(_buf, _field, _index) \ + (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 8) + \ + (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4)) +#define MCDI_SET_ARRAY_QWORD(_buf, _field, _index, _value) \ + do { \ + EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[0],\ + EFX_DWORD_0, (u32)(_value)); \ + EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[1],\ + EFX_DWORD_0, (u64)(_value) >> 32); \ + } while (0) #define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \ MCDI_FIELD(MCDI_ARRAY_STRUCT_PTR(_buf, _field1, _index), \ _type ## _TYPEDEF, _field2) diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c index 4e8a138..3179b2b 100644 --- a/drivers/net/ethernet/sfc/mcdi_mon.c +++ b/drivers/net/ethernet/sfc/mcdi_mon.c @@ -95,10 +95,8 @@ static int efx_mcdi_mon_update(struct efx_nic *efx) MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_SENSORS_IN_LEN); int rc; - MCDI_SET_DWORD(inbuf, READ_SENSORS_IN_DMA_ADDR_LO, - hwmon->dma_buf.dma_addr & 0xffffffff); - MCDI_SET_DWORD(inbuf, READ_SENSORS_IN_DMA_ADDR_HI, - (u64)hwmon->dma_buf.dma_addr >> 32); + MCDI_SET_QWORD(inbuf, READ_SENSORS_IN_DMA_ADDR, + hwmon->dma_buf.dma_addr); rc = efx_mcdi_rpc(efx, MC_CMD_READ_SENSORS, inbuf, sizeof(inbuf), NULL, 0, NULL); diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 5612021..d96bfc4 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -533,10 +533,8 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings) MCDI_SET_DWORD(synch_buf, PTP_IN_OP, MC_CMD_PTP_OP_SYNCHRONIZE); MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_NUMTIMESETS, num_readings); - MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR_LO, - (u32)ptp->start.dma_addr); - MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR_HI, - (u32)((u64)ptp->start.dma_addr >> 32)); + MCDI_SET_QWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR, + ptp->start.dma_addr); /* Clear flag that signals MC ready */ ACCESS_ONCE(*start) = 0; @@ -1378,9 +1376,7 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) (PPB_EXTRA_BITS + MAX_PPB_BITS)); MCDI_SET_DWORD(inadj, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); - MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_FREQ_LO, (u32)adjustment_ns); - MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_FREQ_HI, - (u32)(adjustment_ns >> 32)); + MCDI_SET_QWORD(inadj, PTP_IN_ADJUST_FREQ, adjustment_ns); MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_SECONDS, 0); MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_NANOSECONDS, 0); rc = 
efx_mcdi_rpc(efx, MC_CMD_PTP, inadj, sizeof(inadj), @@ -1402,8 +1398,7 @@ static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ADJUST_LEN); MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); - MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_FREQ_LO, 0); - MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_FREQ_HI, 0); + MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, 0); MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec); MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec); return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c index 198044f..2587d30 100644 --- a/drivers/net/ethernet/sfc/siena_sriov.c +++ b/drivers/net/ethernet/sfc/siena_sriov.c @@ -243,7 +243,8 @@ static int efx_sriov_memcpy(struct efx_nic *efx, struct efx_memcpy_req *req, MCDI_DECLARE_BUF(inbuf, MCDI_CTL_SDU_LEN_MAX_V1); MCDI_DECLARE_STRUCT_PTR(record); unsigned int index, used; - u32 from_rid, from_hi, from_lo; + u64 from_addr; + u32 from_rid; int rc; mb(); /* Finish writing source/reading dest before DMA starts */ @@ -258,14 +259,11 @@ static int efx_sriov_memcpy(struct efx_nic *efx, struct efx_memcpy_req *req, count); MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_RID, req->to_rid); - MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO, - (u32)req->to_addr); - MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI, - (u32)(req->to_addr >> 32)); + MCDI_SET_QWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR, + req->to_addr); if (req->from_buf == NULL) { from_rid = req->from_rid; - from_lo = (u32)req->from_addr; - from_hi = (u32)(req->from_addr >> 32); + from_addr = req->from_addr; } else { if (WARN_ON(used + req->length > MCDI_CTL_SDU_LEN_MAX_V1)) { @@ -274,18 +272,15 @@ static int efx_sriov_memcpy(struct efx_nic *efx, struct efx_memcpy_req *req, } from_rid = MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE; - from_lo = used; - from_hi = 0; + from_addr = used; memcpy(_MCDI_PTR(inbuf, used), req->from_buf, req->length); used += req->length; } MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_RID, from_rid); - MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO, - from_lo); - MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI, - from_hi); + MCDI_SET_QWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR, + from_addr); MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_LENGTH, req->length); -- cgit v0.10.2 From 6bff861dc798ed36da0cb80f0201da2fe9d9dc69 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Tue, 18 Sep 2012 02:33:52 +0100 Subject: sfc: Move siena_reset_hw() and siena_map_reset_reason() into MCDI module These implementations should work for EF10 too. Rename them accordingly. 
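For reference, a condensed view of how the renamed helpers get wired up (taken from the siena.c hunks below; unrelated members elided):

    	const struct efx_nic_type siena_a0_nic_type = {
    		/* ... */
    		.map_reset_reason = efx_mcdi_map_reset_reason,
    		.map_reset_flags  = siena_map_reset_flags,
    		.reset            = efx_mcdi_reset,
    		/* ... */
    	};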
Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 3f55b16..4781e02 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -1087,7 +1087,7 @@ void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) __func__, rc); } -int efx_mcdi_reset_port(struct efx_nic *efx) +static int efx_mcdi_reset_port(struct efx_nic *efx) { int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL); if (rc) @@ -1096,7 +1096,7 @@ int efx_mcdi_reset_port(struct efx_nic *efx) return rc; } -int efx_mcdi_reset_mc(struct efx_nic *efx) +static int efx_mcdi_reset_mc(struct efx_nic *efx) { MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN); int rc; @@ -1114,6 +1114,26 @@ int efx_mcdi_reset_mc(struct efx_nic *efx) return rc; } +enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason) +{ + return RESET_TYPE_RECOVER_OR_ALL; +} + +int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method) +{ + int rc; + + /* Recover from a failed assertion pre-reset */ + rc = efx_mcdi_handle_assertion(efx); + if (rc) + return rc; + + if (method == RESET_TYPE_WORLD) + return efx_mcdi_reset_mc(efx); + else + return efx_mcdi_reset_port(efx); +} + static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type, const u8 *mac, int *id_out) { diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 899f094..e62dc04 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -190,8 +190,6 @@ extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx, extern int efx_mcdi_nvram_test_all(struct efx_nic *efx); extern int efx_mcdi_handle_assertion(struct efx_nic *efx); extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); -extern int efx_mcdi_reset_port(struct efx_nic *efx); -extern int efx_mcdi_reset_mc(struct efx_nic *efx); extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out); extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out); @@ -203,6 +201,8 @@ extern int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr, u32 dma_len, int enable, int clear); extern int efx_mcdi_mac_reconfigure(struct efx_nic *efx); extern bool efx_mcdi_mac_check_fault(struct efx_nic *efx); +extern enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason); +extern int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method); #ifdef CONFIG_SFC_MCDI_MON extern int efx_mcdi_mon_probe(struct efx_nic *efx); diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 8c91775..6babc8e 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -30,7 +30,6 @@ /* Hardware control for SFC9000 family including SFL9021 (aka Siena). */ static void siena_init_wol(struct efx_nic *efx); -static int siena_reset_hw(struct efx_nic *efx, enum reset_type method); static void siena_push_irq_moderation(struct efx_channel *channel) @@ -178,7 +177,7 @@ static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) /* Reset the chip immediately so that it is completely * quiescent regardless of what any VF driver does. */ - rc = siena_reset_hw(efx, reset_method); + rc = efx_mcdi_reset(efx, reset_method); if (rc) goto out; @@ -187,7 +186,7 @@ static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) ARRAY_SIZE(siena_register_tests)) ? 
-1 : 1; - rc = siena_reset_hw(efx, reset_method); + rc = efx_mcdi_reset(efx, reset_method); out: rc2 = efx_reset_up(efx, reset_method, rc == 0); return rc ? rc : rc2; @@ -200,11 +199,6 @@ out: ************************************************************************** */ -static enum reset_type siena_map_reset_reason(enum reset_type reason) -{ - return RESET_TYPE_RECOVER_OR_ALL; -} - static int siena_map_reset_flags(u32 *flags) { enum { @@ -230,21 +224,6 @@ static int siena_map_reset_flags(u32 *flags) return -EINVAL; } -static int siena_reset_hw(struct efx_nic *efx, enum reset_type method) -{ - int rc; - - /* Recover from a failed assertion pre-reset */ - rc = efx_mcdi_handle_assertion(efx); - if (rc) - return rc; - - if (method == RESET_TYPE_WORLD) - return efx_mcdi_reset_mc(efx); - else - return efx_mcdi_reset_port(efx); -} - #ifdef CONFIG_EEH /* When a PCI device is isolated from the bus, a subsequent MMIO read is * required for the kernel EEH mechanisms to notice. As the Solarflare driver @@ -327,7 +306,7 @@ static int siena_probe_nic(struct efx_nic *efx) "Host already registered with MCPU\n"); /* Now we can reset the NIC */ - rc = siena_reset_hw(efx, RESET_TYPE_ALL); + rc = efx_mcdi_reset(efx, RESET_TYPE_ALL); if (rc) { netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n"); goto fail3; @@ -458,7 +437,7 @@ static void siena_remove_nic(struct efx_nic *efx) efx_nic_free_buffer(efx, &efx->irq_status); - siena_reset_hw(efx, RESET_TYPE_ALL); + efx_mcdi_reset(efx, RESET_TYPE_ALL); /* Relinquish the device back to the BMC */ efx_mcdi_drv_attach(efx, false, NULL); @@ -688,9 +667,9 @@ const struct efx_nic_type siena_a0_nic_type = { #else .monitor = NULL, #endif - .map_reset_reason = siena_map_reset_reason, + .map_reset_reason = efx_mcdi_map_reset_reason, .map_reset_flags = siena_map_reset_flags, - .reset = siena_reset_hw, + .reset = efx_mcdi_reset, .probe_port = siena_probe_port, .remove_port = siena_remove_port, .prepare_flush = siena_prepare_flush, -- cgit v0.10.2 From 319ec6444d723f9f01b96728c163d7eaf75e24d7 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Mon, 8 Oct 2012 16:56:18 +0100 Subject: sfc: Move efx_mcdi_mac_reconfigure() to siena.c and rename EF10 does not include a multicast hash filter, so this function is specific to Siena. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index e62dc04..73bc76e 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -199,7 +199,6 @@ extern int efx_mcdi_flush_rxqs(struct efx_nic *efx); extern int efx_mcdi_set_mac(struct efx_nic *efx); extern int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr, u32 dma_len, int enable, int clear); -extern int efx_mcdi_mac_reconfigure(struct efx_nic *efx); extern bool efx_mcdi_mac_check_fault(struct efx_nic *efx); extern enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason); extern int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method); diff --git a/drivers/net/ethernet/sfc/mcdi_mac.c b/drivers/net/ethernet/sfc/mcdi_mac.c index cf16bf1..1b84b90 100644 --- a/drivers/net/ethernet/sfc/mcdi_mac.c +++ b/drivers/net/ethernet/sfc/mcdi_mac.c @@ -112,24 +112,3 @@ fail: __func__, enable ? 
"enable" : "disable", rc); return rc; } - -int efx_mcdi_mac_reconfigure(struct efx_nic *efx) -{ - MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MCAST_HASH_IN_LEN); - int rc; - - BUILD_BUG_ON(MC_CMD_SET_MCAST_HASH_IN_LEN != - MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST + - sizeof(efx->multicast_hash)); - - WARN_ON(!mutex_is_locked(&efx->mac_lock)); - - rc = efx_mcdi_set_mac(efx); - if (rc != 0) - return rc; - - memcpy(MCDI_PTR(inbuf, SET_MCAST_HASH_IN_HASH0), - efx->multicast_hash.byte, sizeof(efx->multicast_hash)); - return efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH, - inbuf, sizeof(inbuf), NULL, 0, NULL); -} diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 6babc8e..1febedb 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -577,6 +577,27 @@ static void siena_stop_nic_stats(struct efx_nic *efx) efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0); } +static int siena_mac_reconfigure(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MCAST_HASH_IN_LEN); + int rc; + + BUILD_BUG_ON(MC_CMD_SET_MCAST_HASH_IN_LEN != + MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST + + sizeof(efx->multicast_hash)); + + WARN_ON(!mutex_is_locked(&efx->mac_lock)); + + rc = efx_mcdi_set_mac(efx); + if (rc != 0) + return rc; + + memcpy(MCDI_PTR(inbuf, SET_MCAST_HASH_IN_HASH0), + efx->multicast_hash.byte, sizeof(efx->multicast_hash)); + return efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH, + inbuf, sizeof(inbuf), NULL, 0, NULL); +} + /************************************************************************** * * Wake on LAN @@ -679,7 +700,7 @@ const struct efx_nic_type siena_a0_nic_type = { .stop_stats = siena_stop_nic_stats, .set_id_led = efx_mcdi_set_id_led, .push_irq_moderation = siena_push_irq_moderation, - .reconfigure_mac = efx_mcdi_mac_reconfigure, + .reconfigure_mac = siena_mac_reconfigure, .check_mac_fault = efx_mcdi_mac_check_fault, .reconfigure_port = efx_mcdi_phy_reconfigure, .get_wol = siena_get_wol, -- cgit v0.10.2 From 43f775b2fa759a9c126a911f999f35aeb0fbbb84 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Tue, 18 Sep 2012 02:33:54 +0100 Subject: sfc: Collect all MCDI port functions into mcdi_port.c Collect together MCDI port functions from mcdi.c, mcdi_mac.c, mcdi_phy.c and siena.c. Rename the 'siena' functions accordingly. 
Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile index 84f9c96..5b31d8a 100644 --- a/drivers/net/ethernet/sfc/Makefile +++ b/drivers/net/ethernet/sfc/Makefile @@ -1,8 +1,7 @@ sfc-y += efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \ - mcdi_mac.o \ selftest.o ethtool.o qt202x_phy.o mdio_10g.o \ tenxpress.o txc43128_phy.o falcon_boards.o \ - mcdi.o mcdi_phy.o mcdi_mon.o ptp.o + mcdi.o mcdi_port.o mcdi_mon.o ptp.o sfc-$(CONFIG_SFC_MTD) += mtd.o sfc-$(CONFIG_SFC_SRIOV) += siena_sriov.o diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 4781e02..5132aba 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -514,36 +514,6 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc) spin_unlock(&mcdi->iface_lock); } -static unsigned int efx_mcdi_event_link_speed[] = { - [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100, - [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000, - [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000, -}; - - -static void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev) -{ - u32 flags, fcntl, speed, lpa; - - speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED); - EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed)); - speed = efx_mcdi_event_link_speed[speed]; - - flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS); - fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL); - lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP); - - /* efx->link_state is only modified by efx_mcdi_phy_get_link(), - * which is only run after flushing the event queues. Therefore, it - * is safe to modify the link state outside of the mac_lock here. - */ - efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl); - - efx_mcdi_phy_check_fcntl(efx, lpa); - - efx_link_status_changed(efx); -} - /* Called from falcon_process_eventq for MCDI events */ void efx_mcdi_process_event(struct efx_channel *channel, efx_qword_t *event) diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 73bc76e..c8f2cd5 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -196,9 +196,14 @@ extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out); extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id); extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx); extern int efx_mcdi_flush_rxqs(struct efx_nic *efx); +extern int efx_mcdi_port_probe(struct efx_nic *efx); +extern void efx_mcdi_port_remove(struct efx_nic *efx); +extern int efx_mcdi_port_reconfigure(struct efx_nic *efx); +extern void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev); extern int efx_mcdi_set_mac(struct efx_nic *efx); -extern int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr, - u32 dma_len, int enable, int clear); +#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1)) +extern void efx_mcdi_mac_start_stats(struct efx_nic *efx); +extern void efx_mcdi_mac_stop_stats(struct efx_nic *efx); extern bool efx_mcdi_mac_check_fault(struct efx_nic *efx); extern enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason); extern int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method); diff --git a/drivers/net/ethernet/sfc/mcdi_mac.c b/drivers/net/ethernet/sfc/mcdi_mac.c deleted file mode 100644 index 1b84b90..0000000 --- a/drivers/net/ethernet/sfc/mcdi_mac.c +++ /dev/null @@ -1,114 +0,0 @@ 
-/**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards - * Copyright 2009-2010 Solarflare Communications Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation, incorporated herein by reference. - */ - -#include "net_driver.h" -#include "efx.h" -#include "mcdi.h" -#include "mcdi_pcol.h" - -int efx_mcdi_set_mac(struct efx_nic *efx) -{ - u32 reject, fcntl; - MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN); - - memcpy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR), - efx->net_dev->dev_addr, ETH_ALEN); - - MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU, - EFX_MAX_FRAME_LEN(efx->net_dev->mtu)); - MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0); - - /* The MCDI command provides for controlling accept/reject - * of broadcast packets too, but the driver doesn't currently - * expose this. */ - reject = (efx->promiscuous) ? 0 : - (1 << MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN); - MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_REJECT, reject); - - switch (efx->wanted_fc) { - case EFX_FC_RX | EFX_FC_TX: - fcntl = MC_CMD_FCNTL_BIDIR; - break; - case EFX_FC_RX: - fcntl = MC_CMD_FCNTL_RESPOND; - break; - default: - fcntl = MC_CMD_FCNTL_OFF; - break; - } - if (efx->wanted_fc & EFX_FC_AUTO) - fcntl = MC_CMD_FCNTL_AUTO; - if (efx->fc_disable) - fcntl = MC_CMD_FCNTL_OFF; - - MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl); - - return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes), - NULL, 0, NULL); -} - -bool efx_mcdi_mac_check_fault(struct efx_nic *efx) -{ - MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); - size_t outlength; - int rc; - - BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); - - rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, - outbuf, sizeof(outbuf), &outlength); - if (rc) { - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", - __func__, rc); - return true; - } - - return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0; -} - -int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr, - u32 dma_len, int enable, int clear) -{ - MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); - int rc; - efx_dword_t *cmd_ptr; - int period = enable ? 1000 : 0; - u32 addr_hi; - u32 addr_lo; - - BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0); - - addr_lo = ((u64)dma_addr) >> 0; - addr_hi = ((u64)dma_addr) >> 32; - - MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_LO, addr_lo); - MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_HI, addr_hi); - cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD); - EFX_POPULATE_DWORD_7(*cmd_ptr, - MC_CMD_MAC_STATS_IN_DMA, !!enable, - MC_CMD_MAC_STATS_IN_CLEAR, clear, - MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE, 1, - MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE, !!enable, - MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR, 0, - MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT, 1, - MC_CMD_MAC_STATS_IN_PERIOD_MS, period); - MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); - - rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), - NULL, 0, NULL); - if (rc) - goto fail; - - return 0; - -fail: - netif_err(efx, hw, efx->net_dev, "%s: %s failed rc=%d\n", - __func__, enable ? 
"enable" : "disable", rc); - return rc; -} diff --git a/drivers/net/ethernet/sfc/mcdi_phy.c b/drivers/net/ethernet/sfc/mcdi_phy.c deleted file mode 100644 index 86c0d21..0000000 --- a/drivers/net/ethernet/sfc/mcdi_phy.c +++ /dev/null @@ -1,825 +0,0 @@ -/**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards - * Copyright 2009-2010 Solarflare Communications Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation, incorporated herein by reference. - */ - -/* - * Driver for PHY related operations via MCDI. - */ - -#include -#include "efx.h" -#include "phy.h" -#include "mcdi.h" -#include "mcdi_pcol.h" -#include "nic.h" -#include "selftest.h" - -struct efx_mcdi_phy_data { - u32 flags; - u32 type; - u32 supported_cap; - u32 channel; - u32 port; - u32 stats_mask; - u8 name[20]; - u32 media; - u32 mmd_mask; - u8 revision[20]; - u32 forced_cap; -}; - -static int -efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg) -{ - MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_CFG_OUT_LEN); - size_t outlen; - int rc; - - BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_IN_LEN != 0); - BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_OUT_NAME_LEN != sizeof(cfg->name)); - - rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_CFG, NULL, 0, - outbuf, sizeof(outbuf), &outlen); - if (rc) - goto fail; - - if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) { - rc = -EIO; - goto fail; - } - - cfg->flags = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_FLAGS); - cfg->type = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_TYPE); - cfg->supported_cap = - MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_SUPPORTED_CAP); - cfg->channel = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_CHANNEL); - cfg->port = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_PRT); - cfg->stats_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_STATS_MASK); - memcpy(cfg->name, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_NAME), - sizeof(cfg->name)); - cfg->media = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MEDIA_TYPE); - cfg->mmd_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MMD_MASK); - memcpy(cfg->revision, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_REVISION), - sizeof(cfg->revision)); - - return 0; - -fail: - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); - return rc; -} - -static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities, - u32 flags, u32 loopback_mode, - u32 loopback_speed) -{ - MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN); - int rc; - - BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0); - - MCDI_SET_DWORD(inbuf, SET_LINK_IN_CAP, capabilities); - MCDI_SET_DWORD(inbuf, SET_LINK_IN_FLAGS, flags); - MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE, loopback_mode); - MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, loopback_speed); - - rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf), - NULL, 0, NULL); - if (rc) - goto fail; - - return 0; - -fail: - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); - return rc; -} - -static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes) -{ - MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LOOPBACK_MODES_OUT_LEN); - size_t outlen; - int rc; - - rc = efx_mcdi_rpc(efx, MC_CMD_GET_LOOPBACK_MODES, NULL, 0, - outbuf, sizeof(outbuf), &outlen); - if (rc) - goto fail; - - if (outlen < MC_CMD_GET_LOOPBACK_MODES_OUT_LEN) { - rc = -EIO; - goto fail; - } - - *loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_OUT_SUGGESTED); - - return 0; - -fail: - 
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); - return rc; -} - -int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus, - unsigned int prtad, unsigned int devad, u16 addr, - u16 *value_out, u32 *status_out) -{ - MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN); - MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN); - size_t outlen; - int rc; - - MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, bus); - MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad); - MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad); - MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr); - - rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), &outlen); - if (rc) - goto fail; - - *value_out = (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE); - *status_out = MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS); - return 0; - -fail: - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); - return rc; -} - -int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus, - unsigned int prtad, unsigned int devad, u16 addr, - u16 value, u32 *status_out) -{ - MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_WRITE_IN_LEN); - MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_WRITE_OUT_LEN); - size_t outlen; - int rc; - - MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, bus); - MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad); - MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad); - MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr); - MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_VALUE, value); - - rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), &outlen); - if (rc) - goto fail; - - *status_out = MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS); - return 0; - -fail: - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); - return rc; -} - -static u32 mcdi_to_ethtool_cap(u32 media, u32 cap) -{ - u32 result = 0; - - switch (media) { - case MC_CMD_MEDIA_KX4: - result |= SUPPORTED_Backplane; - if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) - result |= SUPPORTED_1000baseKX_Full; - if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) - result |= SUPPORTED_10000baseKX4_Full; - break; - - case MC_CMD_MEDIA_XFP: - case MC_CMD_MEDIA_SFP_PLUS: - result |= SUPPORTED_FIBRE; - break; - - case MC_CMD_MEDIA_BASE_T: - result |= SUPPORTED_TP; - if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN)) - result |= SUPPORTED_10baseT_Half; - if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN)) - result |= SUPPORTED_10baseT_Full; - if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN)) - result |= SUPPORTED_100baseT_Half; - if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN)) - result |= SUPPORTED_100baseT_Full; - if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN)) - result |= SUPPORTED_1000baseT_Half; - if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) - result |= SUPPORTED_1000baseT_Full; - if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) - result |= SUPPORTED_10000baseT_Full; - break; - } - - if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN)) - result |= SUPPORTED_Pause; - if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) - result |= SUPPORTED_Asym_Pause; - if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) - result |= SUPPORTED_Autoneg; - - return result; -} - -static u32 ethtool_to_mcdi_cap(u32 cap) -{ - u32 result = 0; - - if (cap & SUPPORTED_10baseT_Half) - result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN); - if (cap & SUPPORTED_10baseT_Full) - result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN); - if (cap & SUPPORTED_100baseT_Half) - result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN); - if (cap & SUPPORTED_100baseT_Full) - result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN); - if (cap & SUPPORTED_1000baseT_Half) - result 
|= (1 << MC_CMD_PHY_CAP_1000HDX_LBN); - if (cap & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseKX_Full)) - result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN); - if (cap & (SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full)) - result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN); - if (cap & SUPPORTED_Pause) - result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN); - if (cap & SUPPORTED_Asym_Pause) - result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN); - if (cap & SUPPORTED_Autoneg) - result |= (1 << MC_CMD_PHY_CAP_AN_LBN); - - return result; -} - -static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx) -{ - struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; - enum efx_phy_mode mode, supported; - u32 flags; - - /* TODO: Advertise the capabilities supported by this PHY */ - supported = 0; - if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN)) - supported |= PHY_MODE_TX_DISABLED; - if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN)) - supported |= PHY_MODE_LOW_POWER; - if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN)) - supported |= PHY_MODE_OFF; - - mode = efx->phy_mode & supported; - - flags = 0; - if (mode & PHY_MODE_TX_DISABLED) - flags |= (1 << MC_CMD_SET_LINK_IN_TXDIS_LBN); - if (mode & PHY_MODE_LOW_POWER) - flags |= (1 << MC_CMD_SET_LINK_IN_LOWPOWER_LBN); - if (mode & PHY_MODE_OFF) - flags |= (1 << MC_CMD_SET_LINK_IN_POWEROFF_LBN); - - return flags; -} - -static u32 mcdi_to_ethtool_media(u32 media) -{ - switch (media) { - case MC_CMD_MEDIA_XAUI: - case MC_CMD_MEDIA_CX4: - case MC_CMD_MEDIA_KX4: - return PORT_OTHER; - - case MC_CMD_MEDIA_XFP: - case MC_CMD_MEDIA_SFP_PLUS: - return PORT_FIBRE; - - case MC_CMD_MEDIA_BASE_T: - return PORT_TP; - - default: - return PORT_OTHER; - } -} - -static int efx_mcdi_phy_probe(struct efx_nic *efx) -{ - struct efx_mcdi_phy_data *phy_data; - MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); - u32 caps; - int rc; - - /* Initialise and populate phy_data */ - phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); - if (phy_data == NULL) - return -ENOMEM; - - rc = efx_mcdi_get_phy_cfg(efx, phy_data); - if (rc != 0) - goto fail; - - /* Read initial link advertisement */ - BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); - rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, - outbuf, sizeof(outbuf), NULL); - if (rc) - goto fail; - - /* Fill out nic state */ - efx->phy_data = phy_data; - efx->phy_type = phy_data->type; - - efx->mdio_bus = phy_data->channel; - efx->mdio.prtad = phy_data->port; - efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22); - efx->mdio.mode_support = 0; - if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22)) - efx->mdio.mode_support |= MDIO_SUPPORTS_C22; - if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22)) - efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; - - caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP); - if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN)) - efx->link_advertising = - mcdi_to_ethtool_cap(phy_data->media, caps); - else - phy_data->forced_cap = caps; - - /* Assert that we can map efx -> mcdi loopback modes */ - BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE); - BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA); - BUILD_BUG_ON(LOOPBACK_GMAC != MC_CMD_LOOPBACK_GMAC); - BUILD_BUG_ON(LOOPBACK_XGMII != MC_CMD_LOOPBACK_XGMII); - BUILD_BUG_ON(LOOPBACK_XGXS != MC_CMD_LOOPBACK_XGXS); - BUILD_BUG_ON(LOOPBACK_XAUI != MC_CMD_LOOPBACK_XAUI); - BUILD_BUG_ON(LOOPBACK_GMII != MC_CMD_LOOPBACK_GMII); - BUILD_BUG_ON(LOOPBACK_SGMII != MC_CMD_LOOPBACK_SGMII); - BUILD_BUG_ON(LOOPBACK_XGBR != MC_CMD_LOOPBACK_XGBR); - 
BUILD_BUG_ON(LOOPBACK_XFI != MC_CMD_LOOPBACK_XFI); - BUILD_BUG_ON(LOOPBACK_XAUI_FAR != MC_CMD_LOOPBACK_XAUI_FAR); - BUILD_BUG_ON(LOOPBACK_GMII_FAR != MC_CMD_LOOPBACK_GMII_FAR); - BUILD_BUG_ON(LOOPBACK_SGMII_FAR != MC_CMD_LOOPBACK_SGMII_FAR); - BUILD_BUG_ON(LOOPBACK_XFI_FAR != MC_CMD_LOOPBACK_XFI_FAR); - BUILD_BUG_ON(LOOPBACK_GPHY != MC_CMD_LOOPBACK_GPHY); - BUILD_BUG_ON(LOOPBACK_PHYXS != MC_CMD_LOOPBACK_PHYXS); - BUILD_BUG_ON(LOOPBACK_PCS != MC_CMD_LOOPBACK_PCS); - BUILD_BUG_ON(LOOPBACK_PMAPMD != MC_CMD_LOOPBACK_PMAPMD); - BUILD_BUG_ON(LOOPBACK_XPORT != MC_CMD_LOOPBACK_XPORT); - BUILD_BUG_ON(LOOPBACK_XGMII_WS != MC_CMD_LOOPBACK_XGMII_WS); - BUILD_BUG_ON(LOOPBACK_XAUI_WS != MC_CMD_LOOPBACK_XAUI_WS); - BUILD_BUG_ON(LOOPBACK_XAUI_WS_FAR != MC_CMD_LOOPBACK_XAUI_WS_FAR); - BUILD_BUG_ON(LOOPBACK_XAUI_WS_NEAR != MC_CMD_LOOPBACK_XAUI_WS_NEAR); - BUILD_BUG_ON(LOOPBACK_GMII_WS != MC_CMD_LOOPBACK_GMII_WS); - BUILD_BUG_ON(LOOPBACK_XFI_WS != MC_CMD_LOOPBACK_XFI_WS); - BUILD_BUG_ON(LOOPBACK_XFI_WS_FAR != MC_CMD_LOOPBACK_XFI_WS_FAR); - BUILD_BUG_ON(LOOPBACK_PHYXS_WS != MC_CMD_LOOPBACK_PHYXS_WS); - - rc = efx_mcdi_loopback_modes(efx, &efx->loopback_modes); - if (rc != 0) - goto fail; - /* The MC indicates that LOOPBACK_NONE is a valid loopback mode, - * but by convention we don't */ - efx->loopback_modes &= ~(1 << LOOPBACK_NONE); - - /* Set the initial link mode */ - efx_mcdi_phy_decode_link( - efx, &efx->link_state, - MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED), - MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS), - MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL)); - - /* Default to Autonegotiated flow control if the PHY supports it */ - efx->wanted_fc = EFX_FC_RX | EFX_FC_TX; - if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) - efx->wanted_fc |= EFX_FC_AUTO; - efx_link_set_wanted_fc(efx, efx->wanted_fc); - - return 0; - -fail: - kfree(phy_data); - return rc; -} - -int efx_mcdi_phy_reconfigure(struct efx_nic *efx) -{ - struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; - u32 caps = (efx->link_advertising ? - ethtool_to_mcdi_cap(efx->link_advertising) : - phy_cfg->forced_cap); - - return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx), - efx->loopback_mode, 0); -} - -void efx_mcdi_phy_decode_link(struct efx_nic *efx, - struct efx_link_state *link_state, - u32 speed, u32 flags, u32 fcntl) -{ - switch (fcntl) { - case MC_CMD_FCNTL_AUTO: - WARN_ON(1); /* This is not a link mode */ - link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX; - break; - case MC_CMD_FCNTL_BIDIR: - link_state->fc = EFX_FC_TX | EFX_FC_RX; - break; - case MC_CMD_FCNTL_RESPOND: - link_state->fc = EFX_FC_RX; - break; - default: - WARN_ON(1); - case MC_CMD_FCNTL_OFF: - link_state->fc = 0; - break; - } - - link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN)); - link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN)); - link_state->speed = speed; -} - -/* Verify that the forced flow control settings (!EFX_FC_AUTO) are - * supported by the link partner. 
Warn the user if this isn't the case - */ -void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa) -{ - struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; - u32 rmtadv; - - /* The link partner capabilities are only relevant if the - * link supports flow control autonegotiation */ - if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) - return; - - /* If flow control autoneg is supported and enabled, then fine */ - if (efx->wanted_fc & EFX_FC_AUTO) - return; - - rmtadv = 0; - if (lpa & (1 << MC_CMD_PHY_CAP_PAUSE_LBN)) - rmtadv |= ADVERTISED_Pause; - if (lpa & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) - rmtadv |= ADVERTISED_Asym_Pause; - - if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause) - netif_err(efx, link, efx->net_dev, - "warning: link partner doesn't support pause frames"); -} - -static bool efx_mcdi_phy_poll(struct efx_nic *efx) -{ - struct efx_link_state old_state = efx->link_state; - MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); - int rc; - - WARN_ON(!mutex_is_locked(&efx->mac_lock)); - - BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); - - rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, - outbuf, sizeof(outbuf), NULL); - if (rc) { - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", - __func__, rc); - efx->link_state.up = false; - } else { - efx_mcdi_phy_decode_link( - efx, &efx->link_state, - MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED), - MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS), - MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL)); - } - - return !efx_link_state_equal(&efx->link_state, &old_state); -} - -static void efx_mcdi_phy_remove(struct efx_nic *efx) -{ - struct efx_mcdi_phy_data *phy_data = efx->phy_data; - - efx->phy_data = NULL; - kfree(phy_data); -} - -static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) -{ - struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; - MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); - int rc; - - ecmd->supported = - mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap); - ecmd->advertising = efx->link_advertising; - ethtool_cmd_speed_set(ecmd, efx->link_state.speed); - ecmd->duplex = efx->link_state.fd; - ecmd->port = mcdi_to_ethtool_media(phy_cfg->media); - ecmd->phy_address = phy_cfg->port; - ecmd->transceiver = XCVR_INTERNAL; - ecmd->autoneg = !!(efx->link_advertising & ADVERTISED_Autoneg); - ecmd->mdio_support = (efx->mdio.mode_support & - (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22)); - - BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); - rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, - outbuf, sizeof(outbuf), NULL); - if (rc) { - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", - __func__, rc); - return; - } - ecmd->lp_advertising = - mcdi_to_ethtool_cap(phy_cfg->media, - MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP)); -} - -static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) -{ - struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; - u32 caps; - int rc; - - if (ecmd->autoneg) { - caps = (ethtool_to_mcdi_cap(ecmd->advertising) | - 1 << MC_CMD_PHY_CAP_AN_LBN); - } else if (ecmd->duplex) { - switch (ethtool_cmd_speed(ecmd)) { - case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break; - case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break; - case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break; - case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break; - default: return -EINVAL; - } - } else { - switch (ethtool_cmd_speed(ecmd)) { - case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break; - case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break; - case 1000: caps = 1 
<< MC_CMD_PHY_CAP_1000HDX_LBN; break; - default: return -EINVAL; - } - } - - rc = efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx), - efx->loopback_mode, 0); - if (rc) - return rc; - - if (ecmd->autoneg) { - efx_link_set_advertising( - efx, ecmd->advertising | ADVERTISED_Autoneg); - phy_cfg->forced_cap = 0; - } else { - efx_link_set_advertising(efx, 0); - phy_cfg->forced_cap = caps; - } - return 0; -} - -static int efx_mcdi_phy_test_alive(struct efx_nic *efx) -{ - MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_STATE_OUT_LEN); - size_t outlen; - int rc; - - BUILD_BUG_ON(MC_CMD_GET_PHY_STATE_IN_LEN != 0); - - rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0, - outbuf, sizeof(outbuf), &outlen); - if (rc) - return rc; - - if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN) - return -EIO; - if (MCDI_DWORD(outbuf, GET_PHY_STATE_OUT_STATE) != MC_CMD_PHY_STATE_OK) - return -EINVAL; - - return 0; -} - -static const char *const mcdi_sft9001_cable_diag_names[] = { - "cable.pairA.length", - "cable.pairB.length", - "cable.pairC.length", - "cable.pairD.length", - "cable.pairA.status", - "cable.pairB.status", - "cable.pairC.status", - "cable.pairD.status", -}; - -static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode, - int *results) -{ - unsigned int retry, i, count = 0; - size_t outlen; - u32 status; - MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN); - MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_SFT9001_LEN); - u8 *ptr; - int rc; - - BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0); - MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_mode); - rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST, - inbuf, MC_CMD_START_BIST_IN_LEN, NULL, 0, NULL); - if (rc) - goto out; - - /* Wait up to 10s for BIST to finish */ - for (retry = 0; retry < 100; ++retry) { - BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0); - rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0, - outbuf, sizeof(outbuf), &outlen); - if (rc) - goto out; - - status = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT); - if (status != MC_CMD_POLL_BIST_RUNNING) - goto finished; - - msleep(100); - } - - rc = -ETIMEDOUT; - goto out; - -finished: - results[count++] = (status == MC_CMD_POLL_BIST_PASSED) ? 1 : -1; - - /* SFT9001 specific cable diagnostics output */ - if (efx->phy_type == PHY_TYPE_SFT9001B && - (bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT || - bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) { - ptr = MCDI_PTR(outbuf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A); - if (status == MC_CMD_POLL_BIST_PASSED && - outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) { - for (i = 0; i < 8; i++) { - results[count + i] = - EFX_DWORD_FIELD(((efx_dword_t *)ptr)[i], - EFX_DWORD_0); - } - } - count += 8; - } - rc = count; - -out: - return rc; -} - -static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results, - unsigned flags) -{ - struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; - u32 mode; - int rc; - - if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_LBN)) { - rc = efx_mcdi_bist(efx, MC_CMD_PHY_BIST, results); - if (rc < 0) - return rc; - - results += rc; - } - - /* If we support both LONG and SHORT, then run each in response to - * break or not. 
Otherwise, run the one we support */ - mode = 0; - if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN)) { - if ((flags & ETH_TEST_FL_OFFLINE) && - (phy_cfg->flags & - (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN))) - mode = MC_CMD_PHY_BIST_CABLE_LONG; - else - mode = MC_CMD_PHY_BIST_CABLE_SHORT; - } else if (phy_cfg->flags & - (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN)) - mode = MC_CMD_PHY_BIST_CABLE_LONG; - - if (mode != 0) { - rc = efx_mcdi_bist(efx, mode, results); - if (rc < 0) - return rc; - results += rc; - } - - return 0; -} - -static const char *efx_mcdi_phy_test_name(struct efx_nic *efx, - unsigned int index) -{ - struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; - - if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_LBN)) { - if (index == 0) - return "bist"; - --index; - } - - if (phy_cfg->flags & ((1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN) | - (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN))) { - if (index == 0) - return "cable"; - --index; - - if (efx->phy_type == PHY_TYPE_SFT9001B) { - if (index < ARRAY_SIZE(mcdi_sft9001_cable_diag_names)) - return mcdi_sft9001_cable_diag_names[index]; - index -= ARRAY_SIZE(mcdi_sft9001_cable_diag_names); - } - } - - return NULL; -} - -#define SFP_PAGE_SIZE 128 -#define SFP_NUM_PAGES 2 -static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx, - struct ethtool_eeprom *ee, u8 *data) -{ - MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX); - MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN); - size_t outlen; - int rc; - unsigned int payload_len; - unsigned int space_remaining = ee->len; - unsigned int page; - unsigned int page_off; - unsigned int to_copy; - u8 *user_data = data; - - BUILD_BUG_ON(SFP_PAGE_SIZE * SFP_NUM_PAGES != ETH_MODULE_SFF_8079_LEN); - - page_off = ee->offset % SFP_PAGE_SIZE; - page = ee->offset / SFP_PAGE_SIZE; - - while (space_remaining && (page < SFP_NUM_PAGES)) { - MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page); - - rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_MEDIA_INFO, - inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), - &outlen); - if (rc) - return rc; - - if (outlen < (MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST + - SFP_PAGE_SIZE)) - return -EIO; - - payload_len = MCDI_DWORD(outbuf, - GET_PHY_MEDIA_INFO_OUT_DATALEN); - if (payload_len != SFP_PAGE_SIZE) - return -EIO; - - /* Copy as much as we can into data */ - payload_len -= page_off; - to_copy = (space_remaining < payload_len) ? 
- space_remaining : payload_len; - - memcpy(user_data, - MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA) + page_off, - to_copy); - - space_remaining -= to_copy; - user_data += to_copy; - page_off = 0; - page++; - } - - return 0; -} - -static int efx_mcdi_phy_get_module_info(struct efx_nic *efx, - struct ethtool_modinfo *modinfo) -{ - struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; - - switch (phy_cfg->media) { - case MC_CMD_MEDIA_SFP_PLUS: - modinfo->type = ETH_MODULE_SFF_8079; - modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; - return 0; - default: - return -EOPNOTSUPP; - } -} - -const struct efx_phy_operations efx_mcdi_phy_ops = { - .probe = efx_mcdi_phy_probe, - .init = efx_port_dummy_op_int, - .reconfigure = efx_mcdi_phy_reconfigure, - .poll = efx_mcdi_phy_poll, - .fini = efx_port_dummy_op_void, - .remove = efx_mcdi_phy_remove, - .get_settings = efx_mcdi_phy_get_settings, - .set_settings = efx_mcdi_phy_set_settings, - .test_alive = efx_mcdi_phy_test_alive, - .run_tests = efx_mcdi_phy_run_tests, - .test_name = efx_mcdi_phy_test_name, - .get_module_eeprom = efx_mcdi_phy_get_module_eeprom, - .get_module_info = efx_mcdi_phy_get_module_info, -}; diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c new file mode 100644 index 0000000..8f31e3d --- /dev/null +++ b/drivers/net/ethernet/sfc/mcdi_port.c @@ -0,0 +1,1010 @@ +/**************************************************************************** + * Driver for Solarflare Solarstorm network controllers and boards + * Copyright 2009-2010 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +/* + * Driver for PHY related operations via MCDI. 
+ */ + +#include +#include "efx.h" +#include "phy.h" +#include "mcdi.h" +#include "mcdi_pcol.h" +#include "nic.h" +#include "selftest.h" + +struct efx_mcdi_phy_data { + u32 flags; + u32 type; + u32 supported_cap; + u32 channel; + u32 port; + u32 stats_mask; + u8 name[20]; + u32 media; + u32 mmd_mask; + u8 revision[20]; + u32 forced_cap; +}; + +static int +efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_CFG_OUT_LEN); + size_t outlen; + int rc; + + BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_IN_LEN != 0); + BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_OUT_NAME_LEN != sizeof(cfg->name)); + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_CFG, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + + if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) { + rc = -EIO; + goto fail; + } + + cfg->flags = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_FLAGS); + cfg->type = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_TYPE); + cfg->supported_cap = + MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_SUPPORTED_CAP); + cfg->channel = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_CHANNEL); + cfg->port = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_PRT); + cfg->stats_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_STATS_MASK); + memcpy(cfg->name, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_NAME), + sizeof(cfg->name)); + cfg->media = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MEDIA_TYPE); + cfg->mmd_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MMD_MASK); + memcpy(cfg->revision, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_REVISION), + sizeof(cfg->revision)); + + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + +static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities, + u32 flags, u32 loopback_mode, + u32 loopback_speed) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN); + int rc; + + BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0); + + MCDI_SET_DWORD(inbuf, SET_LINK_IN_CAP, capabilities); + MCDI_SET_DWORD(inbuf, SET_LINK_IN_FLAGS, flags); + MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE, loopback_mode); + MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, loopback_speed); + + rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf), + NULL, 0, NULL); + if (rc) + goto fail; + + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + +static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LOOPBACK_MODES_OUT_LEN); + size_t outlen; + int rc; + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_LOOPBACK_MODES, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + + if (outlen < MC_CMD_GET_LOOPBACK_MODES_OUT_LEN) { + rc = -EIO; + goto fail; + } + + *loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_OUT_SUGGESTED); + + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + +static int efx_mcdi_mdio_read(struct net_device *net_dev, + int prtad, int devad, u16 addr) +{ + struct efx_nic *efx = netdev_priv(net_dev); + MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, efx->mdio_bus); + MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad); + MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad); + MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr); + + rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + + if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) != 
+ MC_CMD_MDIO_STATUS_GOOD) + return -EIO; + + return (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE); + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + +static int efx_mcdi_mdio_write(struct net_device *net_dev, + int prtad, int devad, u16 addr, u16 value) +{ + struct efx_nic *efx = netdev_priv(net_dev); + MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_WRITE_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_WRITE_OUT_LEN); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, efx->mdio_bus); + MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad); + MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad); + MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr); + MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_VALUE, value); + + rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + + if (MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS) != + MC_CMD_MDIO_STATUS_GOOD) + return -EIO; + + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + +static u32 mcdi_to_ethtool_cap(u32 media, u32 cap) +{ + u32 result = 0; + + switch (media) { + case MC_CMD_MEDIA_KX4: + result |= SUPPORTED_Backplane; + if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) + result |= SUPPORTED_1000baseKX_Full; + if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) + result |= SUPPORTED_10000baseKX4_Full; + break; + + case MC_CMD_MEDIA_XFP: + case MC_CMD_MEDIA_SFP_PLUS: + result |= SUPPORTED_FIBRE; + break; + + case MC_CMD_MEDIA_BASE_T: + result |= SUPPORTED_TP; + if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN)) + result |= SUPPORTED_10baseT_Half; + if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN)) + result |= SUPPORTED_10baseT_Full; + if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN)) + result |= SUPPORTED_100baseT_Half; + if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN)) + result |= SUPPORTED_100baseT_Full; + if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN)) + result |= SUPPORTED_1000baseT_Half; + if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) + result |= SUPPORTED_1000baseT_Full; + if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) + result |= SUPPORTED_10000baseT_Full; + break; + } + + if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN)) + result |= SUPPORTED_Pause; + if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) + result |= SUPPORTED_Asym_Pause; + if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) + result |= SUPPORTED_Autoneg; + + return result; +} + +static u32 ethtool_to_mcdi_cap(u32 cap) +{ + u32 result = 0; + + if (cap & SUPPORTED_10baseT_Half) + result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN); + if (cap & SUPPORTED_10baseT_Full) + result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN); + if (cap & SUPPORTED_100baseT_Half) + result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN); + if (cap & SUPPORTED_100baseT_Full) + result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN); + if (cap & SUPPORTED_1000baseT_Half) + result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN); + if (cap & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseKX_Full)) + result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN); + if (cap & (SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full)) + result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN); + if (cap & SUPPORTED_Pause) + result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN); + if (cap & SUPPORTED_Asym_Pause) + result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN); + if (cap & SUPPORTED_Autoneg) + result |= (1 << MC_CMD_PHY_CAP_AN_LBN); + + return result; +} + +static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + enum efx_phy_mode mode, supported; + u32 flags; + + 
/* TODO: Advertise the capabilities supported by this PHY */ + supported = 0; + if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN)) + supported |= PHY_MODE_TX_DISABLED; + if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN)) + supported |= PHY_MODE_LOW_POWER; + if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN)) + supported |= PHY_MODE_OFF; + + mode = efx->phy_mode & supported; + + flags = 0; + if (mode & PHY_MODE_TX_DISABLED) + flags |= (1 << MC_CMD_SET_LINK_IN_TXDIS_LBN); + if (mode & PHY_MODE_LOW_POWER) + flags |= (1 << MC_CMD_SET_LINK_IN_LOWPOWER_LBN); + if (mode & PHY_MODE_OFF) + flags |= (1 << MC_CMD_SET_LINK_IN_POWEROFF_LBN); + + return flags; +} + +static u32 mcdi_to_ethtool_media(u32 media) +{ + switch (media) { + case MC_CMD_MEDIA_XAUI: + case MC_CMD_MEDIA_CX4: + case MC_CMD_MEDIA_KX4: + return PORT_OTHER; + + case MC_CMD_MEDIA_XFP: + case MC_CMD_MEDIA_SFP_PLUS: + return PORT_FIBRE; + + case MC_CMD_MEDIA_BASE_T: + return PORT_TP; + + default: + return PORT_OTHER; + } +} + +static void efx_mcdi_phy_decode_link(struct efx_nic *efx, + struct efx_link_state *link_state, + u32 speed, u32 flags, u32 fcntl) +{ + switch (fcntl) { + case MC_CMD_FCNTL_AUTO: + WARN_ON(1); /* This is not a link mode */ + link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX; + break; + case MC_CMD_FCNTL_BIDIR: + link_state->fc = EFX_FC_TX | EFX_FC_RX; + break; + case MC_CMD_FCNTL_RESPOND: + link_state->fc = EFX_FC_RX; + break; + default: + WARN_ON(1); + case MC_CMD_FCNTL_OFF: + link_state->fc = 0; + break; + } + + link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN)); + link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN)); + link_state->speed = speed; +} + +static int efx_mcdi_phy_probe(struct efx_nic *efx) +{ + struct efx_mcdi_phy_data *phy_data; + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); + u32 caps; + int rc; + + /* Initialise and populate phy_data */ + phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); + if (phy_data == NULL) + return -ENOMEM; + + rc = efx_mcdi_get_phy_cfg(efx, phy_data); + if (rc != 0) + goto fail; + + /* Read initial link advertisement */ + BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); + rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, + outbuf, sizeof(outbuf), NULL); + if (rc) + goto fail; + + /* Fill out nic state */ + efx->phy_data = phy_data; + efx->phy_type = phy_data->type; + + efx->mdio_bus = phy_data->channel; + efx->mdio.prtad = phy_data->port; + efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22); + efx->mdio.mode_support = 0; + if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22)) + efx->mdio.mode_support |= MDIO_SUPPORTS_C22; + if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22)) + efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; + + caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP); + if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN)) + efx->link_advertising = + mcdi_to_ethtool_cap(phy_data->media, caps); + else + phy_data->forced_cap = caps; + + /* Assert that we can map efx -> mcdi loopback modes */ + BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE); + BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA); + BUILD_BUG_ON(LOOPBACK_GMAC != MC_CMD_LOOPBACK_GMAC); + BUILD_BUG_ON(LOOPBACK_XGMII != MC_CMD_LOOPBACK_XGMII); + BUILD_BUG_ON(LOOPBACK_XGXS != MC_CMD_LOOPBACK_XGXS); + BUILD_BUG_ON(LOOPBACK_XAUI != MC_CMD_LOOPBACK_XAUI); + BUILD_BUG_ON(LOOPBACK_GMII != MC_CMD_LOOPBACK_GMII); + BUILD_BUG_ON(LOOPBACK_SGMII != MC_CMD_LOOPBACK_SGMII); + BUILD_BUG_ON(LOOPBACK_XGBR != 
MC_CMD_LOOPBACK_XGBR); + BUILD_BUG_ON(LOOPBACK_XFI != MC_CMD_LOOPBACK_XFI); + BUILD_BUG_ON(LOOPBACK_XAUI_FAR != MC_CMD_LOOPBACK_XAUI_FAR); + BUILD_BUG_ON(LOOPBACK_GMII_FAR != MC_CMD_LOOPBACK_GMII_FAR); + BUILD_BUG_ON(LOOPBACK_SGMII_FAR != MC_CMD_LOOPBACK_SGMII_FAR); + BUILD_BUG_ON(LOOPBACK_XFI_FAR != MC_CMD_LOOPBACK_XFI_FAR); + BUILD_BUG_ON(LOOPBACK_GPHY != MC_CMD_LOOPBACK_GPHY); + BUILD_BUG_ON(LOOPBACK_PHYXS != MC_CMD_LOOPBACK_PHYXS); + BUILD_BUG_ON(LOOPBACK_PCS != MC_CMD_LOOPBACK_PCS); + BUILD_BUG_ON(LOOPBACK_PMAPMD != MC_CMD_LOOPBACK_PMAPMD); + BUILD_BUG_ON(LOOPBACK_XPORT != MC_CMD_LOOPBACK_XPORT); + BUILD_BUG_ON(LOOPBACK_XGMII_WS != MC_CMD_LOOPBACK_XGMII_WS); + BUILD_BUG_ON(LOOPBACK_XAUI_WS != MC_CMD_LOOPBACK_XAUI_WS); + BUILD_BUG_ON(LOOPBACK_XAUI_WS_FAR != MC_CMD_LOOPBACK_XAUI_WS_FAR); + BUILD_BUG_ON(LOOPBACK_XAUI_WS_NEAR != MC_CMD_LOOPBACK_XAUI_WS_NEAR); + BUILD_BUG_ON(LOOPBACK_GMII_WS != MC_CMD_LOOPBACK_GMII_WS); + BUILD_BUG_ON(LOOPBACK_XFI_WS != MC_CMD_LOOPBACK_XFI_WS); + BUILD_BUG_ON(LOOPBACK_XFI_WS_FAR != MC_CMD_LOOPBACK_XFI_WS_FAR); + BUILD_BUG_ON(LOOPBACK_PHYXS_WS != MC_CMD_LOOPBACK_PHYXS_WS); + + rc = efx_mcdi_loopback_modes(efx, &efx->loopback_modes); + if (rc != 0) + goto fail; + /* The MC indicates that LOOPBACK_NONE is a valid loopback mode, + * but by convention we don't */ + efx->loopback_modes &= ~(1 << LOOPBACK_NONE); + + /* Set the initial link mode */ + efx_mcdi_phy_decode_link( + efx, &efx->link_state, + MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED), + MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS), + MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL)); + + /* Default to Autonegotiated flow control if the PHY supports it */ + efx->wanted_fc = EFX_FC_RX | EFX_FC_TX; + if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) + efx->wanted_fc |= EFX_FC_AUTO; + efx_link_set_wanted_fc(efx, efx->wanted_fc); + + return 0; + +fail: + kfree(phy_data); + return rc; +} + +int efx_mcdi_port_reconfigure(struct efx_nic *efx) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + u32 caps = (efx->link_advertising ? + ethtool_to_mcdi_cap(efx->link_advertising) : + phy_cfg->forced_cap); + + return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx), + efx->loopback_mode, 0); +} + +/* Verify that the forced flow control settings (!EFX_FC_AUTO) are + * supported by the link partner. 
Warn the user if this isn't the case + */ +static void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + u32 rmtadv; + + /* The link partner capabilities are only relevant if the + * link supports flow control autonegotiation */ + if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) + return; + + /* If flow control autoneg is supported and enabled, then fine */ + if (efx->wanted_fc & EFX_FC_AUTO) + return; + + rmtadv = 0; + if (lpa & (1 << MC_CMD_PHY_CAP_PAUSE_LBN)) + rmtadv |= ADVERTISED_Pause; + if (lpa & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) + rmtadv |= ADVERTISED_Asym_Pause; + + if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause) + netif_err(efx, link, efx->net_dev, + "warning: link partner doesn't support pause frames"); +} + +static bool efx_mcdi_phy_poll(struct efx_nic *efx) +{ + struct efx_link_state old_state = efx->link_state; + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); + int rc; + + WARN_ON(!mutex_is_locked(&efx->mac_lock)); + + BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, + outbuf, sizeof(outbuf), NULL); + if (rc) { + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", + __func__, rc); + efx->link_state.up = false; + } else { + efx_mcdi_phy_decode_link( + efx, &efx->link_state, + MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED), + MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS), + MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL)); + } + + return !efx_link_state_equal(&efx->link_state, &old_state); +} + +static void efx_mcdi_phy_remove(struct efx_nic *efx) +{ + struct efx_mcdi_phy_data *phy_data = efx->phy_data; + + efx->phy_data = NULL; + kfree(phy_data); +} + +static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); + int rc; + + ecmd->supported = + mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap); + ecmd->advertising = efx->link_advertising; + ethtool_cmd_speed_set(ecmd, efx->link_state.speed); + ecmd->duplex = efx->link_state.fd; + ecmd->port = mcdi_to_ethtool_media(phy_cfg->media); + ecmd->phy_address = phy_cfg->port; + ecmd->transceiver = XCVR_INTERNAL; + ecmd->autoneg = !!(efx->link_advertising & ADVERTISED_Autoneg); + ecmd->mdio_support = (efx->mdio.mode_support & + (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22)); + + BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); + rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, + outbuf, sizeof(outbuf), NULL); + if (rc) { + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", + __func__, rc); + return; + } + ecmd->lp_advertising = + mcdi_to_ethtool_cap(phy_cfg->media, + MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP)); +} + +static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + u32 caps; + int rc; + + if (ecmd->autoneg) { + caps = (ethtool_to_mcdi_cap(ecmd->advertising) | + 1 << MC_CMD_PHY_CAP_AN_LBN); + } else if (ecmd->duplex) { + switch (ethtool_cmd_speed(ecmd)) { + case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break; + case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break; + case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break; + case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break; + default: return -EINVAL; + } + } else { + switch (ethtool_cmd_speed(ecmd)) { + case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break; + case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break; + case 1000: 
caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break; + default: return -EINVAL; + } + } + + rc = efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx), + efx->loopback_mode, 0); + if (rc) + return rc; + + if (ecmd->autoneg) { + efx_link_set_advertising( + efx, ecmd->advertising | ADVERTISED_Autoneg); + phy_cfg->forced_cap = 0; + } else { + efx_link_set_advertising(efx, 0); + phy_cfg->forced_cap = caps; + } + return 0; +} + +static int efx_mcdi_phy_test_alive(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_STATE_OUT_LEN); + size_t outlen; + int rc; + + BUILD_BUG_ON(MC_CMD_GET_PHY_STATE_IN_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + + if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN) + return -EIO; + if (MCDI_DWORD(outbuf, GET_PHY_STATE_OUT_STATE) != MC_CMD_PHY_STATE_OK) + return -EINVAL; + + return 0; +} + +static const char *const mcdi_sft9001_cable_diag_names[] = { + "cable.pairA.length", + "cable.pairB.length", + "cable.pairC.length", + "cable.pairD.length", + "cable.pairA.status", + "cable.pairB.status", + "cable.pairC.status", + "cable.pairD.status", +}; + +static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode, + int *results) +{ + unsigned int retry, i, count = 0; + size_t outlen; + u32 status; + MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_SFT9001_LEN); + u8 *ptr; + int rc; + + BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0); + MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_mode); + rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST, + inbuf, MC_CMD_START_BIST_IN_LEN, NULL, 0, NULL); + if (rc) + goto out; + + /* Wait up to 10s for BIST to finish */ + for (retry = 0; retry < 100; ++retry) { + BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0); + rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto out; + + status = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT); + if (status != MC_CMD_POLL_BIST_RUNNING) + goto finished; + + msleep(100); + } + + rc = -ETIMEDOUT; + goto out; + +finished: + results[count++] = (status == MC_CMD_POLL_BIST_PASSED) ? 1 : -1; + + /* SFT9001 specific cable diagnostics output */ + if (efx->phy_type == PHY_TYPE_SFT9001B && + (bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT || + bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) { + ptr = MCDI_PTR(outbuf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A); + if (status == MC_CMD_POLL_BIST_PASSED && + outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) { + for (i = 0; i < 8; i++) { + results[count + i] = + EFX_DWORD_FIELD(((efx_dword_t *)ptr)[i], + EFX_DWORD_0); + } + } + count += 8; + } + rc = count; + +out: + return rc; +} + +static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results, + unsigned flags) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + u32 mode; + int rc; + + if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_LBN)) { + rc = efx_mcdi_bist(efx, MC_CMD_PHY_BIST, results); + if (rc < 0) + return rc; + + results += rc; + } + + /* If we support both LONG and SHORT, then run each in response to + * break or not. 
Otherwise, run the one we support */ + mode = 0; + if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN)) { + if ((flags & ETH_TEST_FL_OFFLINE) && + (phy_cfg->flags & + (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN))) + mode = MC_CMD_PHY_BIST_CABLE_LONG; + else + mode = MC_CMD_PHY_BIST_CABLE_SHORT; + } else if (phy_cfg->flags & + (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN)) + mode = MC_CMD_PHY_BIST_CABLE_LONG; + + if (mode != 0) { + rc = efx_mcdi_bist(efx, mode, results); + if (rc < 0) + return rc; + results += rc; + } + + return 0; +} + +static const char *efx_mcdi_phy_test_name(struct efx_nic *efx, + unsigned int index) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + + if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_LBN)) { + if (index == 0) + return "bist"; + --index; + } + + if (phy_cfg->flags & ((1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN) | + (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN))) { + if (index == 0) + return "cable"; + --index; + + if (efx->phy_type == PHY_TYPE_SFT9001B) { + if (index < ARRAY_SIZE(mcdi_sft9001_cable_diag_names)) + return mcdi_sft9001_cable_diag_names[index]; + index -= ARRAY_SIZE(mcdi_sft9001_cable_diag_names); + } + } + + return NULL; +} + +#define SFP_PAGE_SIZE 128 +#define SFP_NUM_PAGES 2 +static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx, + struct ethtool_eeprom *ee, u8 *data) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX); + MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN); + size_t outlen; + int rc; + unsigned int payload_len; + unsigned int space_remaining = ee->len; + unsigned int page; + unsigned int page_off; + unsigned int to_copy; + u8 *user_data = data; + + BUILD_BUG_ON(SFP_PAGE_SIZE * SFP_NUM_PAGES != ETH_MODULE_SFF_8079_LEN); + + page_off = ee->offset % SFP_PAGE_SIZE; + page = ee->offset / SFP_PAGE_SIZE; + + while (space_remaining && (page < SFP_NUM_PAGES)) { + MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page); + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_MEDIA_INFO, + inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), + &outlen); + if (rc) + return rc; + + if (outlen < (MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST + + SFP_PAGE_SIZE)) + return -EIO; + + payload_len = MCDI_DWORD(outbuf, + GET_PHY_MEDIA_INFO_OUT_DATALEN); + if (payload_len != SFP_PAGE_SIZE) + return -EIO; + + /* Copy as much as we can into data */ + payload_len -= page_off; + to_copy = (space_remaining < payload_len) ? 
+ space_remaining : payload_len; + + memcpy(user_data, + MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA) + page_off, + to_copy); + + space_remaining -= to_copy; + user_data += to_copy; + page_off = 0; + page++; + } + + return 0; +} + +static int efx_mcdi_phy_get_module_info(struct efx_nic *efx, + struct ethtool_modinfo *modinfo) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + + switch (phy_cfg->media) { + case MC_CMD_MEDIA_SFP_PLUS: + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + return 0; + default: + return -EOPNOTSUPP; + } +} + +static const struct efx_phy_operations efx_mcdi_phy_ops = { + .probe = efx_mcdi_phy_probe, + .init = efx_port_dummy_op_int, + .reconfigure = efx_mcdi_port_reconfigure, + .poll = efx_mcdi_phy_poll, + .fini = efx_port_dummy_op_void, + .remove = efx_mcdi_phy_remove, + .get_settings = efx_mcdi_phy_get_settings, + .set_settings = efx_mcdi_phy_set_settings, + .test_alive = efx_mcdi_phy_test_alive, + .run_tests = efx_mcdi_phy_run_tests, + .test_name = efx_mcdi_phy_test_name, + .get_module_eeprom = efx_mcdi_phy_get_module_eeprom, + .get_module_info = efx_mcdi_phy_get_module_info, +}; + +static unsigned int efx_mcdi_event_link_speed[] = { + [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100, + [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000, + [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000, +}; + +void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev) +{ + u32 flags, fcntl, speed, lpa; + + speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED); + EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed)); + speed = efx_mcdi_event_link_speed[speed]; + + flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS); + fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL); + lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP); + + /* efx->link_state is only modified by efx_mcdi_phy_get_link(), + * which is only run after flushing the event queues. Therefore, it + * is safe to modify the link state outside of the mac_lock here. + */ + efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl); + + efx_mcdi_phy_check_fcntl(efx, lpa); + + efx_link_status_changed(efx); +} + +int efx_mcdi_set_mac(struct efx_nic *efx) +{ + u32 reject, fcntl; + MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN); + + BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0); + + memcpy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR), + efx->net_dev->dev_addr, ETH_ALEN); + + MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU, + EFX_MAX_FRAME_LEN(efx->net_dev->mtu)); + MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0); + + /* The MCDI command provides for controlling accept/reject + * of broadcast packets too, but the driver doesn't currently + * expose this. */ + reject = (efx->promiscuous) ? 
0 : + (1 << MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN); + MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_REJECT, reject); + + switch (efx->wanted_fc) { + case EFX_FC_RX | EFX_FC_TX: + fcntl = MC_CMD_FCNTL_BIDIR; + break; + case EFX_FC_RX: + fcntl = MC_CMD_FCNTL_RESPOND; + break; + default: + fcntl = MC_CMD_FCNTL_OFF; + break; + } + if (efx->wanted_fc & EFX_FC_AUTO) + fcntl = MC_CMD_FCNTL_AUTO; + if (efx->fc_disable) + fcntl = MC_CMD_FCNTL_OFF; + + MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl); + + return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes), + NULL, 0, NULL); +} + +bool efx_mcdi_mac_check_fault(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); + size_t outlength; + int rc; + + BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, + outbuf, sizeof(outbuf), &outlength); + if (rc) { + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", + __func__, rc); + return true; + } + + return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0; +} + +static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr, + u32 dma_len, int enable, int clear) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); + int rc; + efx_dword_t *cmd_ptr; + int period = enable ? 1000 : 0; + + BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0); + + MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, dma_addr); + cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD); + EFX_POPULATE_DWORD_7(*cmd_ptr, + MC_CMD_MAC_STATS_IN_DMA, !!enable, + MC_CMD_MAC_STATS_IN_CLEAR, clear, + MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE, 1, + MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE, !!enable, + MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR, 0, + MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT, 1, + MC_CMD_MAC_STATS_IN_PERIOD_MS, period); + MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); + + rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), + NULL, 0, NULL); + if (rc) + goto fail; + + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: %s failed rc=%d\n", + __func__, enable ? 
"enable" : "disable", rc); + return rc; +} + +void efx_mcdi_mac_start_stats(struct efx_nic *efx) +{ + __le64 *dma_stats = efx->stats_buffer.addr; + + dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID; + + efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, + MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0); +} + +void efx_mcdi_mac_stop_stats(struct efx_nic *efx) +{ + efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0); +} + +int efx_mcdi_port_probe(struct efx_nic *efx) +{ + int rc; + + /* Hook in PHY operations table */ + efx->phy_op = &efx_mcdi_phy_ops; + + /* Set up MDIO structure for PHY */ + efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; + efx->mdio.mdio_read = efx_mcdi_mdio_read; + efx->mdio.mdio_write = efx_mcdi_mdio_write; + + /* Fill out MDIO structure, loopback modes, and initial link state */ + rc = efx->phy_op->probe(efx); + if (rc != 0) + return rc; + + /* Allocate buffer for stats */ + rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer, + MC_CMD_MAC_NSTATS * sizeof(u64)); + if (rc) + return rc; + netif_dbg(efx, probe, efx->net_dev, + "stats buffer at %llx (virt %p phys %llx)\n", + (u64)efx->stats_buffer.dma_addr, + efx->stats_buffer.addr, + (u64)virt_to_phys(efx->stats_buffer.addr)); + + efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1); + + return 0; +} + +void efx_mcdi_port_remove(struct efx_nic *efx) +{ + efx->phy_op->remove(efx); + efx_nic_free_buffer(efx, &efx->stats_buffer); +} diff --git a/drivers/net/ethernet/sfc/phy.h b/drivers/net/ethernet/sfc/phy.h index 11d148c..4f6eb81 100644 --- a/drivers/net/ethernet/sfc/phy.h +++ b/drivers/net/ethernet/sfc/phy.h @@ -47,21 +47,4 @@ extern const struct efx_phy_operations falcon_txc_phy_ops; extern void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir); extern void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val); -/**************************************************************************** - * Siena managed PHYs - */ -extern const struct efx_phy_operations efx_mcdi_phy_ops; - -extern int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus, - unsigned int prtad, unsigned int devad, - u16 addr, u16 *value_out, u32 *status_out); -extern int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus, - unsigned int prtad, unsigned int devad, - u16 addr, u16 value, u32 *status_out); -extern void efx_mcdi_phy_decode_link(struct efx_nic *efx, - struct efx_link_state *link_state, - u32 speed, u32 flags, u32 fcntl); -extern int efx_mcdi_phy_reconfigure(struct efx_nic *efx); -extern void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa); - #endif diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 1febedb..b5d5568 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -51,81 +51,6 @@ static void siena_push_irq_moderation(struct efx_channel *channel) channel->channel); } -static int siena_mdio_write(struct net_device *net_dev, - int prtad, int devad, u16 addr, u16 value) -{ - struct efx_nic *efx = netdev_priv(net_dev); - uint32_t status; - int rc; - - rc = efx_mcdi_mdio_write(efx, efx->mdio_bus, prtad, devad, - addr, value, &status); - if (rc) - return rc; - if (status != MC_CMD_MDIO_STATUS_GOOD) - return -EIO; - - return 0; -} - -static int siena_mdio_read(struct net_device *net_dev, - int prtad, int devad, u16 addr) -{ - struct efx_nic *efx = netdev_priv(net_dev); - uint16_t value; - uint32_t status; - int rc; - - rc = efx_mcdi_mdio_read(efx, efx->mdio_bus, prtad, devad, - addr, &value, 
&status); - if (rc) - return rc; - if (status != MC_CMD_MDIO_STATUS_GOOD) - return -EIO; - - return (int)value; -} - -/* This call is responsible for hooking in the MAC and PHY operations */ -static int siena_probe_port(struct efx_nic *efx) -{ - int rc; - - /* Hook in PHY operations table */ - efx->phy_op = &efx_mcdi_phy_ops; - - /* Set up MDIO structure for PHY */ - efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; - efx->mdio.mdio_read = siena_mdio_read; - efx->mdio.mdio_write = siena_mdio_write; - - /* Fill out MDIO structure, loopback modes, and initial link state */ - rc = efx->phy_op->probe(efx); - if (rc != 0) - return rc; - - /* Allocate buffer for stats */ - rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer, - MC_CMD_MAC_NSTATS * sizeof(u64)); - if (rc) - return rc; - netif_dbg(efx, probe, efx->net_dev, - "stats buffer at %llx (virt %p phys %llx)\n", - (u64)efx->stats_buffer.dma_addr, - efx->stats_buffer.addr, - (u64)virt_to_phys(efx->stats_buffer.addr)); - - efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1); - - return 0; -} - -static void siena_remove_port(struct efx_nic *efx) -{ - efx->phy_op->remove(efx); - efx_nic_free_buffer(efx, &efx->stats_buffer); -} - void siena_prepare_flush(struct efx_nic *efx) { if (efx->fc_disable++ == 0) @@ -447,8 +372,6 @@ static void siena_remove_nic(struct efx_nic *efx) efx->nic_data = NULL; } -#define STATS_GENERATION_INVALID ((__force __le64)(-1)) - static int siena_try_update_nic_stats(struct efx_nic *efx) { __le64 *dma_stats; @@ -459,7 +382,7 @@ static int siena_try_update_nic_stats(struct efx_nic *efx) dma_stats = efx->stats_buffer.addr; generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; - if (generation_end == STATS_GENERATION_INVALID) + if (generation_end == EFX_MC_STATS_GENERATION_INVALID) return 0; rmb(); @@ -562,21 +485,6 @@ static void siena_update_nic_stats(struct efx_nic *efx) /* Use the old values instead */ } -static void siena_start_nic_stats(struct efx_nic *efx) -{ - __le64 *dma_stats = efx->stats_buffer.addr; - - dma_stats[MC_CMD_MAC_GENERATION_END] = STATS_GENERATION_INVALID; - - efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, - MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0); -} - -static void siena_stop_nic_stats(struct efx_nic *efx) -{ - efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0); -} - static int siena_mac_reconfigure(struct efx_nic *efx) { MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MCAST_HASH_IN_LEN); @@ -691,18 +599,18 @@ const struct efx_nic_type siena_a0_nic_type = { .map_reset_reason = efx_mcdi_map_reset_reason, .map_reset_flags = siena_map_reset_flags, .reset = efx_mcdi_reset, - .probe_port = siena_probe_port, - .remove_port = siena_remove_port, + .probe_port = efx_mcdi_port_probe, + .remove_port = efx_mcdi_port_remove, .prepare_flush = siena_prepare_flush, .finish_flush = siena_finish_flush, .update_stats = siena_update_nic_stats, - .start_stats = siena_start_nic_stats, - .stop_stats = siena_stop_nic_stats, + .start_stats = efx_mcdi_mac_start_stats, + .stop_stats = efx_mcdi_mac_stop_stats, .set_id_led = efx_mcdi_set_id_led, .push_irq_moderation = siena_push_irq_moderation, .reconfigure_mac = siena_mac_reconfigure, .check_mac_fault = efx_mcdi_mac_check_fault, - .reconfigure_port = efx_mcdi_phy_reconfigure, + .reconfigure_port = efx_mcdi_port_reconfigure, .get_wol = siena_get_wol, .set_wol = siena_set_wol, .resume_wol = siena_init_wol, -- cgit v0.10.2 From f073dde03b3e8d11050d82f52caaf75fd924e069 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Tue, 18 Sep 2012 02:33:55 +0100 Subject: 
sfc: Make efx_mcdi_init() call efx_mcdi_handle_assertion() This should probably be done during MCDI initialisation for any NIC. Change efx_mcdi_init() to return an error code. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 5132aba..2b9ef28 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -50,13 +50,10 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) return &nic_data->mcdi; } -void efx_mcdi_init(struct efx_nic *efx) +int efx_mcdi_init(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi; - if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) - return; - mcdi = efx_mcdi(efx); init_waitqueue_head(&mcdi->wq); spin_lock_init(&mcdi->iface_lock); @@ -64,6 +61,9 @@ void efx_mcdi_init(struct efx_nic *efx) mcdi->mode = MCDI_MODE_POLL; (void) efx_mcdi_poll_reboot(efx); + + /* Recover from a failed assertion before probing */ + return efx_mcdi_handle_assertion(efx); } static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index c8f2cd5..6c58dbd 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -65,7 +65,7 @@ struct efx_mcdi_mon { unsigned int n_attrs; }; -extern void efx_mcdi_init(struct efx_nic *efx); +extern int efx_mcdi_init(struct efx_nic *efx); extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf, size_t inlen, diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index b5d5568..3dca771 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -210,10 +210,7 @@ static int siena_probe_nic(struct efx_nic *efx) efx_reado(efx, ®, FR_AZ_CS_DEBUG); efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; - efx_mcdi_init(efx); - - /* Recover from a failed assertion before probing */ - rc = efx_mcdi_handle_assertion(efx); + rc = efx_mcdi_init(efx); if (rc) goto fail1; -- cgit v0.10.2 From f3ad50034425692965a0f2becdd9c45ecb45cd66 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Tue, 18 Sep 2012 02:33:56 +0100 Subject: sfc: Make MCDI independent of Siena Move the lowest layer (transport) of the current MCDI code to per-NIC-type operations. Introduce a new structure and efx_nic member for MCDI-specific data. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 2b9ef28..d7d3a8a 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -24,13 +24,6 @@ #define MCDI_RPC_TIMEOUT (10 * HZ) -#define MCDI_PDU(efx) \ - (efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST) -#define MCDI_DOORBELL(efx) \ - (efx_port_num(efx) ? MC_SMEM_P1_DOORBELL_OFST : MC_SMEM_P0_DOORBELL_OFST) -#define MCDI_STATUS(efx) \ - (efx_port_num(efx) ? MC_SMEM_P1_STATUS_OFST : MC_SMEM_P0_STATUS_OFST) - /* A reboot/assertion causes the MCDI status word to be set after the * command word is set or a REBOOT event is sent. If we notice a reboot * via these mechanisms then wait 10ms for the status word to be set. 
*/ @@ -44,16 +37,18 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) { - struct siena_nic_data *nic_data; - EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0); - nic_data = efx->nic_data; - return &nic_data->mcdi; + EFX_BUG_ON_PARANOID(!efx->mcdi); + return &efx->mcdi->iface; } int efx_mcdi_init(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi; + efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL); + if (!efx->mcdi) + return -ENOMEM; + mcdi = efx_mcdi(efx); init_waitqueue_head(&mcdi->wq); spin_lock_init(&mcdi->iface_lock); @@ -66,16 +61,19 @@ int efx_mcdi_init(struct efx_nic *efx) return efx_mcdi_handle_assertion(efx); } +void efx_mcdi_fini(struct efx_nic *efx) +{ + BUG_ON(efx->mcdi && + atomic_read(&efx->mcdi->iface.state) != MCDI_STATE_QUIESCENT); + kfree(efx->mcdi); +} + static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf, size_t inlen) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); - unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); - unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx); - unsigned int i; efx_dword_t hdr; u32 xflags, seqno; - unsigned int inlen_dw = DIV_ROUND_UP(inlen, 4); BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V1); @@ -93,31 +91,18 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, MCDI_HEADER_SEQ, seqno, MCDI_HEADER_XFLAGS, xflags); - efx_writed(efx, &hdr, pdu); - - for (i = 0; i < inlen_dw; i++) - efx_writed(efx, &inbuf[i], pdu + 4 + 4 * i); - - /* Ensure the payload is written out before the header */ - wmb(); - - /* ring the doorbell with a distinctive value */ - _efx_writed(efx, (__force __le32) 0x45789abc, doorbell); + efx->type->mcdi_request(efx, &hdr, 4, inbuf, inlen); } static void efx_mcdi_copyout(struct efx_nic *efx, efx_dword_t *outbuf, size_t outlen) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); - unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); - unsigned int outlen_dw = DIV_ROUND_UP(outlen, 4); - int i; BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); BUG_ON(outlen > MCDI_CTL_SDU_LEN_MAX_V1); - for (i = 0; i < outlen_dw; i++) - efx_readd(efx, &outbuf[i], pdu + 4 + 4 * i); + efx->type->mcdi_read_response(efx, outbuf, 4, outlen); } static int efx_mcdi_poll(struct efx_nic *efx) @@ -125,7 +110,6 @@ static int efx_mcdi_poll(struct efx_nic *efx) struct efx_mcdi_iface *mcdi = efx_mcdi(efx); unsigned long time, finish; unsigned int respseq, respcmd, error; - unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); unsigned int rc, spins; efx_dword_t reg; @@ -152,19 +136,14 @@ static int efx_mcdi_poll(struct efx_nic *efx) time = jiffies; rmb(); - efx_readd(efx, ®, pdu); - - /* All 1's indicates that shared memory is in reset (and is - * not a valid header). 
Wait for it to come out reset before - * completing the command */ - if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) != 0xffffffff && - EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE)) + if (efx->type->mcdi_poll_response(efx)) break; if (time_after(time, finish)) return -ETIMEDOUT; } + efx->type->mcdi_read_response(efx, ®, 0, 4); mcdi->resplen = EFX_DWORD_FIELD(reg, MCDI_HEADER_DATALEN); respseq = EFX_DWORD_FIELD(reg, MCDI_HEADER_SEQ); respcmd = EFX_DWORD_FIELD(reg, MCDI_HEADER_CODE); @@ -179,7 +158,7 @@ static int efx_mcdi_poll(struct efx_nic *efx) respseq, mcdi->seqno); rc = EIO; } else if (error) { - efx_readd(efx, ®, pdu + 4); + efx->type->mcdi_read_response(efx, ®, 4, 4); switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { #define TRANSLATE_ERROR(name) \ case MC_CMD_ERR_ ## name: \ @@ -215,17 +194,13 @@ out: */ int efx_mcdi_poll_reboot(struct efx_nic *efx) { - unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx); - efx_dword_t reg; - uint32_t value; - - if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) - return false; + int rc; - efx_readd(efx, ®, addr); - value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); + if (!efx->mcdi) + return 0; - if (value == 0) + rc = efx->type->mcdi_poll_reboot(efx); + if (!rc) return 0; /* MAC statistics have been cleared on the NIC; clear our copy @@ -233,13 +208,7 @@ int efx_mcdi_poll_reboot(struct efx_nic *efx) */ memset(&efx->mac_stats, 0, sizeof(efx->mac_stats)); - EFX_ZERO_DWORD(reg); - efx_writed(efx, ®, addr); - - if (value == MC_STATUS_DWORD_ASSERT) - return -EINTR; - else - return -EIO; + return rc; } static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi) @@ -345,8 +314,6 @@ void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); - BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0); - efx_mcdi_acquire(mcdi); /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */ @@ -364,8 +331,6 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, struct efx_mcdi_iface *mcdi = efx_mcdi(efx); int rc; - BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0); - if (mcdi->mode == MCDI_MODE_POLL) rc = efx_mcdi_poll(efx); else @@ -426,7 +391,7 @@ void efx_mcdi_mode_poll(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi; - if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) + if (!efx->mcdi) return; mcdi = efx_mcdi(efx); @@ -450,7 +415,7 @@ void efx_mcdi_mode_event(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi; - if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) + if (!efx->mcdi) return; mcdi = efx_mcdi(efx); diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 6c58dbd..0bfed2a 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -65,7 +65,28 @@ struct efx_mcdi_mon { unsigned int n_attrs; }; +/** + * struct efx_mcdi_data - extra state for NICs that implement MCDI + * @iface: Interface/protocol state + * @hwmon: Hardware monitor state + */ +struct efx_mcdi_data { + struct efx_mcdi_iface iface; +#ifdef CONFIG_SFC_MCDI_MON + struct efx_mcdi_mon hwmon; +#endif +}; + +#ifdef CONFIG_SFC_MCDI_MON +static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx) +{ + EFX_BUG_ON_PARANOID(!efx->mcdi); + return &efx->mcdi->hwmon; +} +#endif + extern int efx_mcdi_init(struct efx_nic *efx); +extern void efx_mcdi_fini(struct efx_nic *efx); extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf, size_t inlen, diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index bdded38..fb9361f 100644 --- a/drivers/net/ethernet/sfc/net_driver.h 
+++ b/drivers/net/ethernet/sfc/net_driver.h @@ -718,6 +718,7 @@ struct vfdi_status; * @selftest_work: Work item for asynchronous self-test * @mtd_list: List of MTDs attached to the NIC * @nic_data: Hardware dependent state + * @mcdi: Management-Controller-to-Driver Interface state * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode, * efx_monitor() and efx_reconfigure_port() * @port_enabled: Port enabled indicator. @@ -847,6 +848,7 @@ struct efx_nic { #endif void *nic_data; + struct efx_mcdi_data *mcdi; struct mutex mac_lock; struct work_struct mac_work; @@ -956,6 +958,17 @@ static inline unsigned int efx_port_num(struct efx_nic *efx) * @test_chip: Test registers. Should use efx_nic_test_registers(), and is * expected to reset the NIC. * @test_nvram: Test validity of NVRAM contents + * @mcdi_request: Send an MCDI request with the given header and SDU. + * The SDU length may be any value from 0 up to the protocol- + * defined maximum, but its buffer will be padded to a multiple + * of 4 bytes. + * @mcdi_poll_response: Test whether an MCDI response is available. + * @mcdi_read_response: Read the MCDI response PDU. The offset will + * be a multiple of 4. The length may not be, but the buffer + * will be padded so it is safe to round up. + * @mcdi_poll_reboot: Test whether the MCDI has rebooted. If so, + * return an appropriate error code for aborting any current + * request; otherwise return 0. * @revision: Hardware architecture revision * @mem_map_size: Memory BAR mapped size * @txd_ptr_tbl_base: TX descriptor ring base address @@ -1004,6 +1017,13 @@ struct efx_nic_type { void (*resume_wol)(struct efx_nic *efx); int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests); int (*test_nvram)(struct efx_nic *efx); + void (*mcdi_request)(struct efx_nic *efx, + const efx_dword_t *hdr, size_t hdr_len, + const efx_dword_t *sdu, size_t sdu_len); + bool (*mcdi_poll_response)(struct efx_nic *efx); + void (*mcdi_read_response)(struct efx_nic *efx, efx_dword_t *pdu, + size_t pdu_offset, size_t pdu_len); + int (*mcdi_poll_reboot)(struct efx_nic *efx); int revision; unsigned int mem_map_size; diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 0d38cc2..c699203 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -140,28 +140,12 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx) /** * struct siena_nic_data - Siena NIC state - * @mcdi: Management-Controller-to-Driver Interface * @wol_filter_id: Wake-on-LAN packet filter id - * @hwmon: Hardware monitor state */ struct siena_nic_data { - struct efx_mcdi_iface mcdi; int wol_filter_id; -#ifdef CONFIG_SFC_MCDI_MON - struct efx_mcdi_mon hwmon; -#endif }; -#ifdef CONFIG_SFC_MCDI_MON -static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx) -{ - struct siena_nic_data *nic_data; - EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0); - nic_data = efx->nic_data; - return &nic_data->hwmon; -} -#endif - /* * On the SFC9000 family each port is associated with 1 PCI physical * function (PF) handled by sfc and a configurable number of virtual diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 3dca771..73b511a 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -274,6 +274,7 @@ fail4: fail3: efx_mcdi_drv_attach(efx, false, NULL); fail2: + efx_mcdi_fini(efx); fail1: kfree(efx->nic_data); return rc; @@ -367,6 +368,8 @@ static void siena_remove_nic(struct efx_nic *efx) /* Tear down the 
private nic state */ kfree(efx->nic_data); efx->nic_data = NULL; + + efx_mcdi_fini(efx); } static int siena_try_update_nic_stats(struct efx_nic *efx) @@ -574,6 +577,89 @@ static void siena_init_wol(struct efx_nic *efx) } } +/************************************************************************** + * + * MCDI + * + ************************************************************************** + */ + +#define MCDI_PDU(efx) \ + (efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST) +#define MCDI_DOORBELL(efx) \ + (efx_port_num(efx) ? MC_SMEM_P1_DOORBELL_OFST : MC_SMEM_P0_DOORBELL_OFST) +#define MCDI_STATUS(efx) \ + (efx_port_num(efx) ? MC_SMEM_P1_STATUS_OFST : MC_SMEM_P0_STATUS_OFST) + +static void siena_mcdi_request(struct efx_nic *efx, + const efx_dword_t *hdr, size_t hdr_len, + const efx_dword_t *sdu, size_t sdu_len) +{ + unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); + unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx); + unsigned int i; + unsigned int inlen_dw = DIV_ROUND_UP(sdu_len, 4); + + EFX_BUG_ON_PARANOID(hdr_len != 4); + + efx_writed(efx, hdr, pdu); + + for (i = 0; i < inlen_dw; i++) + efx_writed(efx, &sdu[i], pdu + hdr_len + 4 * i); + + /* Ensure the request is written out before the doorbell */ + wmb(); + + /* ring the doorbell with a distinctive value */ + _efx_writed(efx, (__force __le32) 0x45789abc, doorbell); +} + +static bool siena_mcdi_poll_response(struct efx_nic *efx) +{ + unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); + efx_dword_t hdr; + + efx_readd(efx, &hdr, pdu); + + /* All 1's indicates that shared memory is in reset (and is + * not a valid hdr). Wait for it to come out reset before + * completing the command + */ + return EFX_DWORD_FIELD(hdr, EFX_DWORD_0) != 0xffffffff && + EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE); +} + +static void siena_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf, + size_t offset, size_t outlen) +{ + unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); + unsigned int outlen_dw = DIV_ROUND_UP(outlen, 4); + int i; + + for (i = 0; i < outlen_dw; i++) + efx_readd(efx, &outbuf[i], pdu + offset + 4 * i); +} + +static int siena_mcdi_poll_reboot(struct efx_nic *efx) +{ + unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx); + efx_dword_t reg; + u32 value; + + efx_readd(efx, ®, addr); + value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); + + if (value == 0) + return 0; + + EFX_ZERO_DWORD(reg); + efx_writed(efx, ®, addr); + + if (value == MC_STATUS_DWORD_ASSERT) + return -EINTR; + else + return -EIO; +} /************************************************************************** * @@ -613,6 +699,10 @@ const struct efx_nic_type siena_a0_nic_type = { .resume_wol = siena_init_wol, .test_chip = siena_test_chip, .test_nvram = efx_mcdi_nvram_test_all, + .mcdi_request = siena_mcdi_request, + .mcdi_poll_response = siena_mcdi_poll_response, + .mcdi_read_response = siena_mcdi_read_response, + .mcdi_poll_reboot = siena_mcdi_poll_reboot, .revision = EFX_REV_SIENA_A0, .mem_map_size = (FR_CZ_MC_TREG_SMEM + -- cgit v0.10.2 From 0d19a540beb78493cd5acb7428760af0dc1ea154 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Tue, 18 Sep 2012 21:59:52 +0100 Subject: sfc: Add GFP flags to efx_nic_alloc_buffer() and make most callers allow blocking Most call sites for efx_nic_alloc_buffer() are part of the probe or reconfiguration paths and can allocate with GFP_KERNEL. A few others should use GFP_NOIO (I think). Only one is in atomic context and must use the current GFP_ATOMIC. 
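[Illustrative sketch, not part of the patch: the efx_nic_alloc_buffer() prototype below is the one introduced by this diff, and the two wrapper functions are hypothetical callers added only to show how a call site now states its own allocation context; the flags are simply forwarded to dma_alloc_coherent() together with __GFP_ZERO.]

/* Hypothetical callers sketching the new convention; only the
 * efx_nic_alloc_buffer() prototype is taken from the patch itself.
 */
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len, gfp_t gfp_flags);

static int example_probe_alloc(struct efx_nic *efx, struct efx_buffer *buf)
{
	/* Probe/reconfigure paths may sleep, so plain GFP_KERNEL is fine. */
	return efx_nic_alloc_buffer(efx, buf, EFX_PAGE_SIZE, GFP_KERNEL);
}

static int example_atomic_alloc(struct efx_nic *efx, struct efx_buffer *buf)
{
	/* The TSO header path runs in atomic context and keeps GFP_ATOMIC. */
	return efx_nic_alloc_buffer(efx, buf, PAGE_SIZE, GFP_ATOMIC);
}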
Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index 5228500..395d89d 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -1418,7 +1418,7 @@ static int falcon_probe_port(struct efx_nic *efx) /* Allocate buffer for stats */ rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer, - FALCON_MAC_STATS_SIZE); + FALCON_MAC_STATS_SIZE, GFP_KERNEL); if (rc) return rc; netif_dbg(efx, probe, efx->net_dev, @@ -2035,7 +2035,8 @@ static int falcon_probe_nic(struct efx_nic *efx) } /* Allocate memory for INT_KER */ - rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t)); + rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t), + GFP_KERNEL); if (rc) goto fail4; BUG_ON(efx->irq_status.dma_addr & 0x0f); diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c index 3179b2b..958c73f 100644 --- a/drivers/net/ethernet/sfc/mcdi_mon.c +++ b/drivers/net/ethernet/sfc/mcdi_mon.c @@ -261,7 +261,7 @@ int efx_mcdi_mon_probe(struct efx_nic *efx) return -EIO; rc = efx_nic_alloc_buffer(efx, &hwmon->dma_buf, - 4 * MC_CMD_SENSOR_ENTRY_MAXNUM); + 4 * MC_CMD_SENSOR_ENTRY_MAXNUM, GFP_KERNEL); if (rc) return rc; diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c index 8f31e3d..91df43f 100644 --- a/drivers/net/ethernet/sfc/mcdi_port.c +++ b/drivers/net/ethernet/sfc/mcdi_port.c @@ -989,7 +989,7 @@ int efx_mcdi_port_probe(struct efx_nic *efx) /* Allocate buffer for stats */ rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer, - MC_CMD_MAC_NSTATS * sizeof(u64)); + MC_CMD_MAC_NSTATS * sizeof(u64), GFP_KERNEL); if (rc) return rc; netif_dbg(efx, probe, efx->net_dev, diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index 56ed3bc..efe2773 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -303,11 +303,11 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) **************************************************************************/ int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, - unsigned int len) + unsigned int len, gfp_t gfp_flags) { buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, &buffer->dma_addr, - GFP_ATOMIC | __GFP_ZERO); + gfp_flags | __GFP_ZERO); if (!buffer->addr) return -ENOMEM; buffer->len = len; diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index c699203..9120e8b 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -332,7 +332,7 @@ extern void efx_nic_init_common(struct efx_nic *efx); extern void efx_nic_push_rx_indir_table(struct efx_nic *efx); int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, - unsigned int len); + unsigned int len, gfp_t gfp_flags); void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer); /* Tests */ diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index d96bfc4..56a8b88 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -875,7 +875,7 @@ static int efx_ptp_probe_channel(struct efx_channel *channel) if (!efx->ptp_data) return -ENOMEM; - rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int)); + rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int), GFP_KERNEL); if (rc != 0) goto fail1; diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 73b511a..f0ae262 100644 --- 
a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -237,7 +237,8 @@ static int siena_probe_nic(struct efx_nic *efx) siena_init_wol(efx); /* Allocate memory for INT_KER */ - rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t)); + rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t), + GFP_KERNEL); if (rc) goto fail4; BUG_ON(efx->irq_status.dma_addr & 0x0f); diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c index 2587d30..6258e7f 100644 --- a/drivers/net/ethernet/sfc/siena_sriov.c +++ b/drivers/net/ethernet/sfc/siena_sriov.c @@ -997,7 +997,7 @@ static void efx_sriov_reset_vf_work(struct work_struct *work) struct efx_nic *efx = vf->efx; struct efx_buffer buf; - if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE)) { + if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) { efx_sriov_reset_vf(vf, &buf); efx_nic_free_buffer(efx, &buf); } @@ -1241,7 +1241,8 @@ static int efx_sriov_vfs_init(struct efx_nic *efx) pci_domain_nr(pci_dev->bus), pci_dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn)); - rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE); + rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE, + GFP_KERNEL); if (rc) goto fail; @@ -1273,7 +1274,8 @@ int efx_sriov_init(struct efx_nic *efx) if (rc) goto fail_cmd; - rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status)); + rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status), + GFP_KERNEL); if (rc) goto fail_status; vfdi_status = efx->vfdi_status.addr; @@ -1528,7 +1530,7 @@ void efx_sriov_reset(struct efx_nic *efx) efx_sriov_usrev(efx, true); (void)efx_sriov_cmd(efx, true, NULL, NULL); - if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE)) + if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) return; for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) { diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 5e090e5..c0d4040 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -708,7 +708,8 @@ static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue, TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET; if (unlikely(!page_buf->addr) && - efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE)) + efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE, + GFP_ATOMIC)) return NULL; result = (u8 *)page_buf->addr + offset; -- cgit v0.10.2 From caa7558655d5ae1997c6d52f87b59f8ec6f2c3af Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Wed, 19 Sep 2012 00:31:42 +0100 Subject: sfc: Make struct efx_special_buffer less special On EF10, the firmware is in charge of allocating buffer table entries. Change struct efx_special_buffer to use a struct efx_buffer member, so that it can be used with efx_nic_{alloc,free}_buffer() in that case. 
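[Illustrative sketch, not part of the patch: after this change a special buffer embeds a generic struct efx_buffer, so code reaches the mapping through the embedded member. The accessors below are hypothetical and only show the new field path used throughout the diff.]

/* Hypothetical accessors; the struct layout is the one introduced above. */
static inline void *efx_special_buffer_vaddr(struct efx_special_buffer *sb)
{
	return sb->buf.addr;		/* was sb->addr before this patch */
}

static inline dma_addr_t efx_special_buffer_dma(struct efx_special_buffer *sb)
{
	return sb->buf.dma_addr;	/* was sb->dma_addr */
}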
Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index fb9361f..ea64cd8 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -93,21 +93,36 @@ struct efx_ptp_data; struct efx_self_tests; /** - * struct efx_special_buffer - An Efx special buffer - * @addr: CPU base address of the buffer + * struct efx_buffer - A general-purpose DMA buffer + * @addr: host base address of the buffer * @dma_addr: DMA base address of the buffer * @len: Buffer length, in bytes - * @index: Buffer index within controller;s buffer table - * @entries: Number of buffer table entries * - * Special buffers are used for the event queues and the TX and RX - * descriptor queues for each channel. They are *not* used for the - * actual transmit and receive buffers. + * The NIC uses these buffers for its interrupt status registers and + * MAC stats dumps. */ -struct efx_special_buffer { +struct efx_buffer { void *addr; dma_addr_t dma_addr; unsigned int len; +}; + +/** + * struct efx_special_buffer - DMA buffer entered into buffer table + * @buf: Standard &struct efx_buffer + * @index: Buffer index within controller;s buffer table + * @entries: Number of buffer table entries + * + * The NIC has a buffer table that maps buffers of size %EFX_BUF_SIZE. + * Event and descriptor rings are addressed via one or more buffer + * table entries (and so can be physically non-contiguous, although we + * currently do not take advantage of that). On Falcon and Siena we + * have to take care of allocating and initialising the entries + * ourselves. On later hardware this is managed by the firmware and + * @index and @entries are left as 0. + */ +struct efx_special_buffer { + struct efx_buffer buf; unsigned int index; unsigned int entries; }; @@ -325,22 +340,6 @@ struct efx_rx_queue { unsigned int slow_fill_count; }; -/** - * struct efx_buffer - An Efx general-purpose buffer - * @addr: host base address of the buffer - * @dma_addr: DMA base address of the buffer - * @len: Buffer length, in bytes - * - * The NIC uses these buffers for its interrupt status registers and - * MAC stats dumps. 
- */ -struct efx_buffer { - void *addr; - dma_addr_t dma_addr; - unsigned int len; -}; - - enum efx_rx_alloc_method { RX_ALLOC_METHOD_AUTO = 0, RX_ALLOC_METHOD_SKB = 1, diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index efe2773..372891c 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -93,7 +93,7 @@ static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value, static inline efx_qword_t *efx_event(struct efx_channel *channel, unsigned int index) { - return ((efx_qword_t *) (channel->eventq.addr)) + + return ((efx_qword_t *) (channel->eventq.buf.addr)) + (index & channel->eventq_mask); } @@ -196,12 +196,12 @@ efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) dma_addr_t dma_addr; int i; - EFX_BUG_ON_PARANOID(!buffer->addr); + EFX_BUG_ON_PARANOID(!buffer->buf.addr); /* Write buffer descriptors to NIC */ for (i = 0; i < buffer->entries; i++) { index = buffer->index + i; - dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE); + dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE); netif_dbg(efx, probe, efx->net_dev, "mapping special buffer %d at %llx\n", index, (unsigned long long)dma_addr); @@ -250,13 +250,10 @@ static int efx_alloc_special_buffer(struct efx_nic *efx, { len = ALIGN(len, EFX_BUF_SIZE); - buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, - &buffer->dma_addr, GFP_KERNEL); - if (!buffer->addr) + if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL)) return -ENOMEM; - buffer->len = len; buffer->entries = len / EFX_BUF_SIZE; - BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1)); + BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1)); /* Select new buffer ID */ buffer->index = efx->next_buffer_table; @@ -270,8 +267,8 @@ static int efx_alloc_special_buffer(struct efx_nic *efx, "allocating special buffers %d-%d at %llx+%x " "(virt %p phys %llx)\n", buffer->index, buffer->index + buffer->entries - 1, - (u64)buffer->dma_addr, len, - buffer->addr, (u64)virt_to_phys(buffer->addr)); + (u64)buffer->buf.dma_addr, len, + buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr)); return 0; } @@ -279,19 +276,17 @@ static int efx_alloc_special_buffer(struct efx_nic *efx, static void efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) { - if (!buffer->addr) + if (!buffer->buf.addr) return; netif_dbg(efx, hw, efx->net_dev, "deallocating special buffers %d-%d at %llx+%x " "(virt %p phys %llx)\n", buffer->index, buffer->index + buffer->entries - 1, - (u64)buffer->dma_addr, buffer->len, - buffer->addr, (u64)virt_to_phys(buffer->addr)); + (u64)buffer->buf.dma_addr, buffer->buf.len, + buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr)); - dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr, - buffer->dma_addr); - buffer->addr = NULL; + efx_nic_free_buffer(efx, &buffer->buf); buffer->entries = 0; } @@ -335,7 +330,7 @@ void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer) static inline efx_qword_t * efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) { - return ((efx_qword_t *) (tx_queue->txd.addr)) + index; + return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index; } /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ @@ -534,7 +529,7 @@ void efx_nic_remove_tx(struct efx_tx_queue *tx_queue) static inline efx_qword_t * efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) { - return ((efx_qword_t *) (rx_queue->rxd.addr)) + index; + return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index; 
} /* This creates an entry in the RX descriptor queue */ @@ -1415,7 +1410,7 @@ void efx_nic_init_eventq(struct efx_channel *channel) efx_init_special_buffer(efx, &channel->eventq); /* Fill event queue with all ones (i.e. empty events) */ - memset(channel->eventq.addr, 0xff, channel->eventq.len); + memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len); /* Push event queue to card */ EFX_POPULATE_OWORD_3(reg, -- cgit v0.10.2 From 8b8a95a11aa985b7f6f6df8a0ffa597e56ff8310 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Tue, 18 Sep 2012 01:57:07 +0100 Subject: sfc: Rename Falcon-architecture register definitions The EF10 architecture has a very different register layout from previous controllers, so we'll use separate files for the two sets of register definitions. Use 'farch' as an abbreviation for Falcon-architecture. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index 395d89d..f87710e 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -21,7 +21,7 @@ #include "efx.h" #include "spi.h" #include "nic.h" -#include "regs.h" +#include "farch_regs.h" #include "io.h" #include "phy.h" #include "workarounds.h" diff --git a/drivers/net/ethernet/sfc/farch_regs.h b/drivers/net/ethernet/sfc/farch_regs.h new file mode 100644 index 0000000..00ef17a --- /dev/null +++ b/drivers/net/ethernet/sfc/farch_regs.h @@ -0,0 +1,2928 @@ +/**************************************************************************** + * Driver for Solarflare Solarstorm network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2010 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_FARCH_REGS_H +#define EFX_FARCH_REGS_H + +/* + * Falcon hardware architecture definitions have a name prefix following + * the format: + * + * F<type>_<min_rev><max_rev>_<name> + * + * The following <type> strings are used: + * + * MMIO register MC register Host memory structure + * ------------------------------------------------------------- + * Address R MCR + * Bitfield RF MCRF SF + * Enumerator FE MCFE SE + * + * <min_rev> is the first revision to which the definition applies: + * + * A: Falcon A1 (SFC4000AB) + * B: Falcon B0 (SFC4000BA) + * C: Siena A0 (SFL9021AA) + * + * If the definition has been changed or removed in later revisions + * then <max_rev> is the last revision to which the definition applies; + * otherwise it is "Z".
+ */ + +/************************************************************************** + * + * Falcon/Siena registers and descriptors + * + ************************************************************************** + */ + +/* ADR_REGION_REG: Address region register */ +#define FR_AZ_ADR_REGION 0x00000000 +#define FRF_AZ_ADR_REGION3_LBN 96 +#define FRF_AZ_ADR_REGION3_WIDTH 18 +#define FRF_AZ_ADR_REGION2_LBN 64 +#define FRF_AZ_ADR_REGION2_WIDTH 18 +#define FRF_AZ_ADR_REGION1_LBN 32 +#define FRF_AZ_ADR_REGION1_WIDTH 18 +#define FRF_AZ_ADR_REGION0_LBN 0 +#define FRF_AZ_ADR_REGION0_WIDTH 18 + +/* INT_EN_REG_KER: Kernel driver Interrupt enable register */ +#define FR_AZ_INT_EN_KER 0x00000010 +#define FRF_AZ_KER_INT_LEVE_SEL_LBN 8 +#define FRF_AZ_KER_INT_LEVE_SEL_WIDTH 6 +#define FRF_AZ_KER_INT_CHAR_LBN 4 +#define FRF_AZ_KER_INT_CHAR_WIDTH 1 +#define FRF_AZ_KER_INT_KER_LBN 3 +#define FRF_AZ_KER_INT_KER_WIDTH 1 +#define FRF_AZ_DRV_INT_EN_KER_LBN 0 +#define FRF_AZ_DRV_INT_EN_KER_WIDTH 1 + +/* INT_EN_REG_CHAR: Char Driver interrupt enable register */ +#define FR_BZ_INT_EN_CHAR 0x00000020 +#define FRF_BZ_CHAR_INT_LEVE_SEL_LBN 8 +#define FRF_BZ_CHAR_INT_LEVE_SEL_WIDTH 6 +#define FRF_BZ_CHAR_INT_CHAR_LBN 4 +#define FRF_BZ_CHAR_INT_CHAR_WIDTH 1 +#define FRF_BZ_CHAR_INT_KER_LBN 3 +#define FRF_BZ_CHAR_INT_KER_WIDTH 1 +#define FRF_BZ_DRV_INT_EN_CHAR_LBN 0 +#define FRF_BZ_DRV_INT_EN_CHAR_WIDTH 1 + +/* INT_ADR_REG_KER: Interrupt host address for Kernel driver */ +#define FR_AZ_INT_ADR_KER 0x00000030 +#define FRF_AZ_NORM_INT_VEC_DIS_KER_LBN 64 +#define FRF_AZ_NORM_INT_VEC_DIS_KER_WIDTH 1 +#define FRF_AZ_INT_ADR_KER_LBN 0 +#define FRF_AZ_INT_ADR_KER_WIDTH 64 + +/* INT_ADR_REG_CHAR: Interrupt host address for Char driver */ +#define FR_BZ_INT_ADR_CHAR 0x00000040 +#define FRF_BZ_NORM_INT_VEC_DIS_CHAR_LBN 64 +#define FRF_BZ_NORM_INT_VEC_DIS_CHAR_WIDTH 1 +#define FRF_BZ_INT_ADR_CHAR_LBN 0 +#define FRF_BZ_INT_ADR_CHAR_WIDTH 64 + +/* INT_ACK_KER: Kernel interrupt acknowledge register */ +#define FR_AA_INT_ACK_KER 0x00000050 +#define FRF_AA_INT_ACK_KER_FIELD_LBN 0 +#define FRF_AA_INT_ACK_KER_FIELD_WIDTH 32 + +/* INT_ISR0_REG: Function 0 Interrupt Acknowledge Status register */ +#define FR_BZ_INT_ISR0 0x00000090 +#define FRF_BZ_INT_ISR_REG_LBN 0 +#define FRF_BZ_INT_ISR_REG_WIDTH 64 + +/* HW_INIT_REG: Hardware initialization register */ +#define FR_AZ_HW_INIT 0x000000c0 +#define FRF_BB_BDMRD_CPLF_FULL_LBN 124 +#define FRF_BB_BDMRD_CPLF_FULL_WIDTH 1 +#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_LBN 121 +#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_WIDTH 3 +#define FRF_CZ_TX_MRG_TAGS_LBN 120 +#define FRF_CZ_TX_MRG_TAGS_WIDTH 1 +#define FRF_AB_TRGT_MASK_ALL_LBN 100 +#define FRF_AB_TRGT_MASK_ALL_WIDTH 1 +#define FRF_AZ_DOORBELL_DROP_LBN 92 +#define FRF_AZ_DOORBELL_DROP_WIDTH 8 +#define FRF_AB_TX_RREQ_MASK_EN_LBN 76 +#define FRF_AB_TX_RREQ_MASK_EN_WIDTH 1 +#define FRF_AB_PE_EIDLE_DIS_LBN 75 +#define FRF_AB_PE_EIDLE_DIS_WIDTH 1 +#define FRF_AA_FC_BLOCKING_EN_LBN 45 +#define FRF_AA_FC_BLOCKING_EN_WIDTH 1 +#define FRF_BZ_B2B_REQ_EN_LBN 45 +#define FRF_BZ_B2B_REQ_EN_WIDTH 1 +#define FRF_AA_B2B_REQ_EN_LBN 44 +#define FRF_AA_B2B_REQ_EN_WIDTH 1 +#define FRF_BB_FC_BLOCKING_EN_LBN 44 +#define FRF_BB_FC_BLOCKING_EN_WIDTH 1 +#define FRF_AZ_POST_WR_MASK_LBN 40 +#define FRF_AZ_POST_WR_MASK_WIDTH 4 +#define FRF_AZ_TLP_TC_LBN 34 +#define FRF_AZ_TLP_TC_WIDTH 3 +#define FRF_AZ_TLP_ATTR_LBN 32 +#define FRF_AZ_TLP_ATTR_WIDTH 2 +#define FRF_AB_INTB_VEC_LBN 24 +#define FRF_AB_INTB_VEC_WIDTH 5 +#define FRF_AB_INTA_VEC_LBN 16 +#define FRF_AB_INTA_VEC_WIDTH 5 
+#define FRF_AZ_WD_TIMER_LBN 8 +#define FRF_AZ_WD_TIMER_WIDTH 8 +#define FRF_AZ_US_DISABLE_LBN 5 +#define FRF_AZ_US_DISABLE_WIDTH 1 +#define FRF_AZ_TLP_EP_LBN 4 +#define FRF_AZ_TLP_EP_WIDTH 1 +#define FRF_AZ_ATTR_SEL_LBN 3 +#define FRF_AZ_ATTR_SEL_WIDTH 1 +#define FRF_AZ_TD_SEL_LBN 1 +#define FRF_AZ_TD_SEL_WIDTH 1 +#define FRF_AZ_TLP_TD_LBN 0 +#define FRF_AZ_TLP_TD_WIDTH 1 + +/* EE_SPI_HCMD_REG: SPI host command register */ +#define FR_AB_EE_SPI_HCMD 0x00000100 +#define FRF_AB_EE_SPI_HCMD_CMD_EN_LBN 31 +#define FRF_AB_EE_SPI_HCMD_CMD_EN_WIDTH 1 +#define FRF_AB_EE_WR_TIMER_ACTIVE_LBN 28 +#define FRF_AB_EE_WR_TIMER_ACTIVE_WIDTH 1 +#define FRF_AB_EE_SPI_HCMD_SF_SEL_LBN 24 +#define FRF_AB_EE_SPI_HCMD_SF_SEL_WIDTH 1 +#define FRF_AB_EE_SPI_HCMD_DABCNT_LBN 16 +#define FRF_AB_EE_SPI_HCMD_DABCNT_WIDTH 5 +#define FRF_AB_EE_SPI_HCMD_READ_LBN 15 +#define FRF_AB_EE_SPI_HCMD_READ_WIDTH 1 +#define FRF_AB_EE_SPI_HCMD_DUBCNT_LBN 12 +#define FRF_AB_EE_SPI_HCMD_DUBCNT_WIDTH 2 +#define FRF_AB_EE_SPI_HCMD_ADBCNT_LBN 8 +#define FRF_AB_EE_SPI_HCMD_ADBCNT_WIDTH 2 +#define FRF_AB_EE_SPI_HCMD_ENC_LBN 0 +#define FRF_AB_EE_SPI_HCMD_ENC_WIDTH 8 + +/* USR_EV_CFG: User Level Event Configuration register */ +#define FR_CZ_USR_EV_CFG 0x00000100 +#define FRF_CZ_USREV_DIS_LBN 16 +#define FRF_CZ_USREV_DIS_WIDTH 1 +#define FRF_CZ_DFLT_EVQ_LBN 0 +#define FRF_CZ_DFLT_EVQ_WIDTH 10 + +/* EE_SPI_HADR_REG: SPI host address register */ +#define FR_AB_EE_SPI_HADR 0x00000110 +#define FRF_AB_EE_SPI_HADR_DUBYTE_LBN 24 +#define FRF_AB_EE_SPI_HADR_DUBYTE_WIDTH 8 +#define FRF_AB_EE_SPI_HADR_ADR_LBN 0 +#define FRF_AB_EE_SPI_HADR_ADR_WIDTH 24 + +/* EE_SPI_HDATA_REG: SPI host data register */ +#define FR_AB_EE_SPI_HDATA 0x00000120 +#define FRF_AB_EE_SPI_HDATA3_LBN 96 +#define FRF_AB_EE_SPI_HDATA3_WIDTH 32 +#define FRF_AB_EE_SPI_HDATA2_LBN 64 +#define FRF_AB_EE_SPI_HDATA2_WIDTH 32 +#define FRF_AB_EE_SPI_HDATA1_LBN 32 +#define FRF_AB_EE_SPI_HDATA1_WIDTH 32 +#define FRF_AB_EE_SPI_HDATA0_LBN 0 +#define FRF_AB_EE_SPI_HDATA0_WIDTH 32 + +/* EE_BASE_PAGE_REG: Expansion ROM base mirror register */ +#define FR_AB_EE_BASE_PAGE 0x00000130 +#define FRF_AB_EE_EXPROM_MASK_LBN 16 +#define FRF_AB_EE_EXPROM_MASK_WIDTH 13 +#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_LBN 0 +#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_WIDTH 13 + +/* EE_VPD_CFG0_REG: SPI/VPD configuration register 0 */ +#define FR_AB_EE_VPD_CFG0 0x00000140 +#define FRF_AB_EE_SF_FASTRD_EN_LBN 127 +#define FRF_AB_EE_SF_FASTRD_EN_WIDTH 1 +#define FRF_AB_EE_SF_CLOCK_DIV_LBN 120 +#define FRF_AB_EE_SF_CLOCK_DIV_WIDTH 7 +#define FRF_AB_EE_VPD_WIP_POLL_LBN 119 +#define FRF_AB_EE_VPD_WIP_POLL_WIDTH 1 +#define FRF_AB_EE_EE_CLOCK_DIV_LBN 112 +#define FRF_AB_EE_EE_CLOCK_DIV_WIDTH 7 +#define FRF_AB_EE_EE_WR_TMR_VALUE_LBN 96 +#define FRF_AB_EE_EE_WR_TMR_VALUE_WIDTH 16 +#define FRF_AB_EE_VPDW_LENGTH_LBN 80 +#define FRF_AB_EE_VPDW_LENGTH_WIDTH 15 +#define FRF_AB_EE_VPDW_BASE_LBN 64 +#define FRF_AB_EE_VPDW_BASE_WIDTH 15 +#define FRF_AB_EE_VPD_WR_CMD_EN_LBN 56 +#define FRF_AB_EE_VPD_WR_CMD_EN_WIDTH 8 +#define FRF_AB_EE_VPD_BASE_LBN 32 +#define FRF_AB_EE_VPD_BASE_WIDTH 24 +#define FRF_AB_EE_VPD_LENGTH_LBN 16 +#define FRF_AB_EE_VPD_LENGTH_WIDTH 15 +#define FRF_AB_EE_VPD_AD_SIZE_LBN 8 +#define FRF_AB_EE_VPD_AD_SIZE_WIDTH 5 +#define FRF_AB_EE_VPD_ACCESS_ON_LBN 5 +#define FRF_AB_EE_VPD_ACCESS_ON_WIDTH 1 +#define FRF_AB_EE_VPD_ACCESS_BLOCK_LBN 4 +#define FRF_AB_EE_VPD_ACCESS_BLOCK_WIDTH 1 +#define FRF_AB_EE_VPD_DEV_SF_SEL_LBN 2 +#define FRF_AB_EE_VPD_DEV_SF_SEL_WIDTH 1 +#define FRF_AB_EE_VPD_EN_AD9_MODE_LBN 1 +#define 
FRF_AB_EE_VPD_EN_AD9_MODE_WIDTH 1 +#define FRF_AB_EE_VPD_EN_LBN 0 +#define FRF_AB_EE_VPD_EN_WIDTH 1 + +/* EE_VPD_SW_CNTL_REG: VPD access SW control register */ +#define FR_AB_EE_VPD_SW_CNTL 0x00000150 +#define FRF_AB_EE_VPD_CYCLE_PENDING_LBN 31 +#define FRF_AB_EE_VPD_CYCLE_PENDING_WIDTH 1 +#define FRF_AB_EE_VPD_CYC_WRITE_LBN 28 +#define FRF_AB_EE_VPD_CYC_WRITE_WIDTH 1 +#define FRF_AB_EE_VPD_CYC_ADR_LBN 0 +#define FRF_AB_EE_VPD_CYC_ADR_WIDTH 15 + +/* EE_VPD_SW_DATA_REG: VPD access SW data register */ +#define FR_AB_EE_VPD_SW_DATA 0x00000160 +#define FRF_AB_EE_VPD_CYC_DAT_LBN 0 +#define FRF_AB_EE_VPD_CYC_DAT_WIDTH 32 + +/* PBMX_DBG_IADDR_REG: Capture Module address register */ +#define FR_CZ_PBMX_DBG_IADDR 0x000001f0 +#define FRF_CZ_PBMX_DBG_IADDR_LBN 0 +#define FRF_CZ_PBMX_DBG_IADDR_WIDTH 32 + +/* PCIE_CORE_INDIRECT_REG: Indirect Access to PCIE Core registers */ +#define FR_BB_PCIE_CORE_INDIRECT 0x000001f0 +#define FRF_BB_PCIE_CORE_TARGET_DATA_LBN 32 +#define FRF_BB_PCIE_CORE_TARGET_DATA_WIDTH 32 +#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_LBN 15 +#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_WIDTH 1 +#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_LBN 0 +#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_WIDTH 12 + +/* PBMX_DBG_IDATA_REG: Capture Module data register */ +#define FR_CZ_PBMX_DBG_IDATA 0x000001f8 +#define FRF_CZ_PBMX_DBG_IDATA_LBN 0 +#define FRF_CZ_PBMX_DBG_IDATA_WIDTH 64 + +/* NIC_STAT_REG: NIC status register */ +#define FR_AB_NIC_STAT 0x00000200 +#define FRF_BB_AER_DIS_LBN 34 +#define FRF_BB_AER_DIS_WIDTH 1 +#define FRF_BB_EE_STRAP_EN_LBN 31 +#define FRF_BB_EE_STRAP_EN_WIDTH 1 +#define FRF_BB_EE_STRAP_LBN 24 +#define FRF_BB_EE_STRAP_WIDTH 4 +#define FRF_BB_REVISION_ID_LBN 17 +#define FRF_BB_REVISION_ID_WIDTH 7 +#define FRF_AB_ONCHIP_SRAM_LBN 16 +#define FRF_AB_ONCHIP_SRAM_WIDTH 1 +#define FRF_AB_SF_PRST_LBN 9 +#define FRF_AB_SF_PRST_WIDTH 1 +#define FRF_AB_EE_PRST_LBN 8 +#define FRF_AB_EE_PRST_WIDTH 1 +#define FRF_AB_ATE_MODE_LBN 3 +#define FRF_AB_ATE_MODE_WIDTH 1 +#define FRF_AB_STRAP_PINS_LBN 0 +#define FRF_AB_STRAP_PINS_WIDTH 3 + +/* GPIO_CTL_REG: GPIO control register */ +#define FR_AB_GPIO_CTL 0x00000210 +#define FRF_AB_GPIO_OUT3_LBN 112 +#define FRF_AB_GPIO_OUT3_WIDTH 16 +#define FRF_AB_GPIO_IN3_LBN 104 +#define FRF_AB_GPIO_IN3_WIDTH 8 +#define FRF_AB_GPIO_PWRUP_VALUE3_LBN 96 +#define FRF_AB_GPIO_PWRUP_VALUE3_WIDTH 8 +#define FRF_AB_GPIO_OUT2_LBN 80 +#define FRF_AB_GPIO_OUT2_WIDTH 16 +#define FRF_AB_GPIO_IN2_LBN 72 +#define FRF_AB_GPIO_IN2_WIDTH 8 +#define FRF_AB_GPIO_PWRUP_VALUE2_LBN 64 +#define FRF_AB_GPIO_PWRUP_VALUE2_WIDTH 8 +#define FRF_AB_GPIO15_OEN_LBN 63 +#define FRF_AB_GPIO15_OEN_WIDTH 1 +#define FRF_AB_GPIO14_OEN_LBN 62 +#define FRF_AB_GPIO14_OEN_WIDTH 1 +#define FRF_AB_GPIO13_OEN_LBN 61 +#define FRF_AB_GPIO13_OEN_WIDTH 1 +#define FRF_AB_GPIO12_OEN_LBN 60 +#define FRF_AB_GPIO12_OEN_WIDTH 1 +#define FRF_AB_GPIO11_OEN_LBN 59 +#define FRF_AB_GPIO11_OEN_WIDTH 1 +#define FRF_AB_GPIO10_OEN_LBN 58 +#define FRF_AB_GPIO10_OEN_WIDTH 1 +#define FRF_AB_GPIO9_OEN_LBN 57 +#define FRF_AB_GPIO9_OEN_WIDTH 1 +#define FRF_AB_GPIO8_OEN_LBN 56 +#define FRF_AB_GPIO8_OEN_WIDTH 1 +#define FRF_AB_GPIO15_OUT_LBN 55 +#define FRF_AB_GPIO15_OUT_WIDTH 1 +#define FRF_AB_GPIO14_OUT_LBN 54 +#define FRF_AB_GPIO14_OUT_WIDTH 1 +#define FRF_AB_GPIO13_OUT_LBN 53 +#define FRF_AB_GPIO13_OUT_WIDTH 1 +#define FRF_AB_GPIO12_OUT_LBN 52 +#define FRF_AB_GPIO12_OUT_WIDTH 1 +#define FRF_AB_GPIO11_OUT_LBN 51 +#define FRF_AB_GPIO11_OUT_WIDTH 1 +#define FRF_AB_GPIO10_OUT_LBN 50 +#define FRF_AB_GPIO10_OUT_WIDTH 1 +#define 
FRF_AB_GPIO9_OUT_LBN 49 +#define FRF_AB_GPIO9_OUT_WIDTH 1 +#define FRF_AB_GPIO8_OUT_LBN 48 +#define FRF_AB_GPIO8_OUT_WIDTH 1 +#define FRF_AB_GPIO15_IN_LBN 47 +#define FRF_AB_GPIO15_IN_WIDTH 1 +#define FRF_AB_GPIO14_IN_LBN 46 +#define FRF_AB_GPIO14_IN_WIDTH 1 +#define FRF_AB_GPIO13_IN_LBN 45 +#define FRF_AB_GPIO13_IN_WIDTH 1 +#define FRF_AB_GPIO12_IN_LBN 44 +#define FRF_AB_GPIO12_IN_WIDTH 1 +#define FRF_AB_GPIO11_IN_LBN 43 +#define FRF_AB_GPIO11_IN_WIDTH 1 +#define FRF_AB_GPIO10_IN_LBN 42 +#define FRF_AB_GPIO10_IN_WIDTH 1 +#define FRF_AB_GPIO9_IN_LBN 41 +#define FRF_AB_GPIO9_IN_WIDTH 1 +#define FRF_AB_GPIO8_IN_LBN 40 +#define FRF_AB_GPIO8_IN_WIDTH 1 +#define FRF_AB_GPIO15_PWRUP_VALUE_LBN 39 +#define FRF_AB_GPIO15_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO14_PWRUP_VALUE_LBN 38 +#define FRF_AB_GPIO14_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO13_PWRUP_VALUE_LBN 37 +#define FRF_AB_GPIO13_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO12_PWRUP_VALUE_LBN 36 +#define FRF_AB_GPIO12_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO11_PWRUP_VALUE_LBN 35 +#define FRF_AB_GPIO11_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO10_PWRUP_VALUE_LBN 34 +#define FRF_AB_GPIO10_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO9_PWRUP_VALUE_LBN 33 +#define FRF_AB_GPIO9_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO8_PWRUP_VALUE_LBN 32 +#define FRF_AB_GPIO8_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_CLK156_OUT_EN_LBN 31 +#define FRF_AB_CLK156_OUT_EN_WIDTH 1 +#define FRF_AB_USE_NIC_CLK_LBN 30 +#define FRF_AB_USE_NIC_CLK_WIDTH 1 +#define FRF_AB_GPIO5_OEN_LBN 29 +#define FRF_AB_GPIO5_OEN_WIDTH 1 +#define FRF_AB_GPIO4_OEN_LBN 28 +#define FRF_AB_GPIO4_OEN_WIDTH 1 +#define FRF_AB_GPIO3_OEN_LBN 27 +#define FRF_AB_GPIO3_OEN_WIDTH 1 +#define FRF_AB_GPIO2_OEN_LBN 26 +#define FRF_AB_GPIO2_OEN_WIDTH 1 +#define FRF_AB_GPIO1_OEN_LBN 25 +#define FRF_AB_GPIO1_OEN_WIDTH 1 +#define FRF_AB_GPIO0_OEN_LBN 24 +#define FRF_AB_GPIO0_OEN_WIDTH 1 +#define FRF_AB_GPIO7_OUT_LBN 23 +#define FRF_AB_GPIO7_OUT_WIDTH 1 +#define FRF_AB_GPIO6_OUT_LBN 22 +#define FRF_AB_GPIO6_OUT_WIDTH 1 +#define FRF_AB_GPIO5_OUT_LBN 21 +#define FRF_AB_GPIO5_OUT_WIDTH 1 +#define FRF_AB_GPIO4_OUT_LBN 20 +#define FRF_AB_GPIO4_OUT_WIDTH 1 +#define FRF_AB_GPIO3_OUT_LBN 19 +#define FRF_AB_GPIO3_OUT_WIDTH 1 +#define FRF_AB_GPIO2_OUT_LBN 18 +#define FRF_AB_GPIO2_OUT_WIDTH 1 +#define FRF_AB_GPIO1_OUT_LBN 17 +#define FRF_AB_GPIO1_OUT_WIDTH 1 +#define FRF_AB_GPIO0_OUT_LBN 16 +#define FRF_AB_GPIO0_OUT_WIDTH 1 +#define FRF_AB_GPIO7_IN_LBN 15 +#define FRF_AB_GPIO7_IN_WIDTH 1 +#define FRF_AB_GPIO6_IN_LBN 14 +#define FRF_AB_GPIO6_IN_WIDTH 1 +#define FRF_AB_GPIO5_IN_LBN 13 +#define FRF_AB_GPIO5_IN_WIDTH 1 +#define FRF_AB_GPIO4_IN_LBN 12 +#define FRF_AB_GPIO4_IN_WIDTH 1 +#define FRF_AB_GPIO3_IN_LBN 11 +#define FRF_AB_GPIO3_IN_WIDTH 1 +#define FRF_AB_GPIO2_IN_LBN 10 +#define FRF_AB_GPIO2_IN_WIDTH 1 +#define FRF_AB_GPIO1_IN_LBN 9 +#define FRF_AB_GPIO1_IN_WIDTH 1 +#define FRF_AB_GPIO0_IN_LBN 8 +#define FRF_AB_GPIO0_IN_WIDTH 1 +#define FRF_AB_GPIO7_PWRUP_VALUE_LBN 7 +#define FRF_AB_GPIO7_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO6_PWRUP_VALUE_LBN 6 +#define FRF_AB_GPIO6_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO5_PWRUP_VALUE_LBN 5 +#define FRF_AB_GPIO5_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO4_PWRUP_VALUE_LBN 4 +#define FRF_AB_GPIO4_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO3_PWRUP_VALUE_LBN 3 +#define FRF_AB_GPIO3_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO2_PWRUP_VALUE_LBN 2 +#define FRF_AB_GPIO2_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO1_PWRUP_VALUE_LBN 1 +#define FRF_AB_GPIO1_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO0_PWRUP_VALUE_LBN 0 
+#define FRF_AB_GPIO0_PWRUP_VALUE_WIDTH 1 + +/* GLB_CTL_REG: Global control register */ +#define FR_AB_GLB_CTL 0x00000220 +#define FRF_AB_EXT_PHY_RST_CTL_LBN 63 +#define FRF_AB_EXT_PHY_RST_CTL_WIDTH 1 +#define FRF_AB_XAUI_SD_RST_CTL_LBN 62 +#define FRF_AB_XAUI_SD_RST_CTL_WIDTH 1 +#define FRF_AB_PCIE_SD_RST_CTL_LBN 61 +#define FRF_AB_PCIE_SD_RST_CTL_WIDTH 1 +#define FRF_AA_PCIX_RST_CTL_LBN 60 +#define FRF_AA_PCIX_RST_CTL_WIDTH 1 +#define FRF_BB_BIU_RST_CTL_LBN 60 +#define FRF_BB_BIU_RST_CTL_WIDTH 1 +#define FRF_AB_PCIE_STKY_RST_CTL_LBN 59 +#define FRF_AB_PCIE_STKY_RST_CTL_WIDTH 1 +#define FRF_AB_PCIE_NSTKY_RST_CTL_LBN 58 +#define FRF_AB_PCIE_NSTKY_RST_CTL_WIDTH 1 +#define FRF_AB_PCIE_CORE_RST_CTL_LBN 57 +#define FRF_AB_PCIE_CORE_RST_CTL_WIDTH 1 +#define FRF_AB_XGRX_RST_CTL_LBN 56 +#define FRF_AB_XGRX_RST_CTL_WIDTH 1 +#define FRF_AB_XGTX_RST_CTL_LBN 55 +#define FRF_AB_XGTX_RST_CTL_WIDTH 1 +#define FRF_AB_EM_RST_CTL_LBN 54 +#define FRF_AB_EM_RST_CTL_WIDTH 1 +#define FRF_AB_EV_RST_CTL_LBN 53 +#define FRF_AB_EV_RST_CTL_WIDTH 1 +#define FRF_AB_SR_RST_CTL_LBN 52 +#define FRF_AB_SR_RST_CTL_WIDTH 1 +#define FRF_AB_RX_RST_CTL_LBN 51 +#define FRF_AB_RX_RST_CTL_WIDTH 1 +#define FRF_AB_TX_RST_CTL_LBN 50 +#define FRF_AB_TX_RST_CTL_WIDTH 1 +#define FRF_AB_EE_RST_CTL_LBN 49 +#define FRF_AB_EE_RST_CTL_WIDTH 1 +#define FRF_AB_CS_RST_CTL_LBN 48 +#define FRF_AB_CS_RST_CTL_WIDTH 1 +#define FRF_AB_HOT_RST_CTL_LBN 40 +#define FRF_AB_HOT_RST_CTL_WIDTH 2 +#define FRF_AB_RST_EXT_PHY_LBN 31 +#define FRF_AB_RST_EXT_PHY_WIDTH 1 +#define FRF_AB_RST_XAUI_SD_LBN 30 +#define FRF_AB_RST_XAUI_SD_WIDTH 1 +#define FRF_AB_RST_PCIE_SD_LBN 29 +#define FRF_AB_RST_PCIE_SD_WIDTH 1 +#define FRF_AA_RST_PCIX_LBN 28 +#define FRF_AA_RST_PCIX_WIDTH 1 +#define FRF_BB_RST_BIU_LBN 28 +#define FRF_BB_RST_BIU_WIDTH 1 +#define FRF_AB_RST_PCIE_STKY_LBN 27 +#define FRF_AB_RST_PCIE_STKY_WIDTH 1 +#define FRF_AB_RST_PCIE_NSTKY_LBN 26 +#define FRF_AB_RST_PCIE_NSTKY_WIDTH 1 +#define FRF_AB_RST_PCIE_CORE_LBN 25 +#define FRF_AB_RST_PCIE_CORE_WIDTH 1 +#define FRF_AB_RST_XGRX_LBN 24 +#define FRF_AB_RST_XGRX_WIDTH 1 +#define FRF_AB_RST_XGTX_LBN 23 +#define FRF_AB_RST_XGTX_WIDTH 1 +#define FRF_AB_RST_EM_LBN 22 +#define FRF_AB_RST_EM_WIDTH 1 +#define FRF_AB_RST_EV_LBN 21 +#define FRF_AB_RST_EV_WIDTH 1 +#define FRF_AB_RST_SR_LBN 20 +#define FRF_AB_RST_SR_WIDTH 1 +#define FRF_AB_RST_RX_LBN 19 +#define FRF_AB_RST_RX_WIDTH 1 +#define FRF_AB_RST_TX_LBN 18 +#define FRF_AB_RST_TX_WIDTH 1 +#define FRF_AB_RST_SF_LBN 17 +#define FRF_AB_RST_SF_WIDTH 1 +#define FRF_AB_RST_CS_LBN 16 +#define FRF_AB_RST_CS_WIDTH 1 +#define FRF_AB_INT_RST_DUR_LBN 4 +#define FRF_AB_INT_RST_DUR_WIDTH 3 +#define FRF_AB_EXT_PHY_RST_DUR_LBN 1 +#define FRF_AB_EXT_PHY_RST_DUR_WIDTH 3 +#define FFE_AB_EXT_PHY_RST_DUR_10240US 7 +#define FFE_AB_EXT_PHY_RST_DUR_5120US 6 +#define FFE_AB_EXT_PHY_RST_DUR_2560US 5 +#define FFE_AB_EXT_PHY_RST_DUR_1280US 4 +#define FFE_AB_EXT_PHY_RST_DUR_640US 3 +#define FFE_AB_EXT_PHY_RST_DUR_320US 2 +#define FFE_AB_EXT_PHY_RST_DUR_160US 1 +#define FFE_AB_EXT_PHY_RST_DUR_80US 0 +#define FRF_AB_SWRST_LBN 0 +#define FRF_AB_SWRST_WIDTH 1 + +/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */ +#define FR_AZ_FATAL_INTR_KER 0x00000230 +#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_LBN 44 +#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_WIDTH 1 +#define FRF_AB_PCI_BUSERR_INT_KER_EN_LBN 43 +#define FRF_AB_PCI_BUSERR_INT_KER_EN_WIDTH 1 +#define FRF_CZ_MBU_PERR_INT_KER_EN_LBN 43 +#define FRF_CZ_MBU_PERR_INT_KER_EN_WIDTH 1 +#define FRF_AZ_SRAM_OOB_INT_KER_EN_LBN 42 +#define 
FRF_AZ_SRAM_OOB_INT_KER_EN_WIDTH 1 +#define FRF_AZ_BUFID_OOB_INT_KER_EN_LBN 41 +#define FRF_AZ_BUFID_OOB_INT_KER_EN_WIDTH 1 +#define FRF_AZ_MEM_PERR_INT_KER_EN_LBN 40 +#define FRF_AZ_MEM_PERR_INT_KER_EN_WIDTH 1 +#define FRF_AZ_RBUF_OWN_INT_KER_EN_LBN 39 +#define FRF_AZ_RBUF_OWN_INT_KER_EN_WIDTH 1 +#define FRF_AZ_TBUF_OWN_INT_KER_EN_LBN 38 +#define FRF_AZ_TBUF_OWN_INT_KER_EN_WIDTH 1 +#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_LBN 37 +#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_WIDTH 1 +#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_LBN 36 +#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_WIDTH 1 +#define FRF_AZ_EVQ_OWN_INT_KER_EN_LBN 35 +#define FRF_AZ_EVQ_OWN_INT_KER_EN_WIDTH 1 +#define FRF_AZ_EVF_OFLO_INT_KER_EN_LBN 34 +#define FRF_AZ_EVF_OFLO_INT_KER_EN_WIDTH 1 +#define FRF_AZ_ILL_ADR_INT_KER_EN_LBN 33 +#define FRF_AZ_ILL_ADR_INT_KER_EN_WIDTH 1 +#define FRF_AZ_SRM_PERR_INT_KER_EN_LBN 32 +#define FRF_AZ_SRM_PERR_INT_KER_EN_WIDTH 1 +#define FRF_CZ_SRAM_PERR_INT_P_KER_LBN 12 +#define FRF_CZ_SRAM_PERR_INT_P_KER_WIDTH 1 +#define FRF_AB_PCI_BUSERR_INT_KER_LBN 11 +#define FRF_AB_PCI_BUSERR_INT_KER_WIDTH 1 +#define FRF_CZ_MBU_PERR_INT_KER_LBN 11 +#define FRF_CZ_MBU_PERR_INT_KER_WIDTH 1 +#define FRF_AZ_SRAM_OOB_INT_KER_LBN 10 +#define FRF_AZ_SRAM_OOB_INT_KER_WIDTH 1 +#define FRF_AZ_BUFID_DC_OOB_INT_KER_LBN 9 +#define FRF_AZ_BUFID_DC_OOB_INT_KER_WIDTH 1 +#define FRF_AZ_MEM_PERR_INT_KER_LBN 8 +#define FRF_AZ_MEM_PERR_INT_KER_WIDTH 1 +#define FRF_AZ_RBUF_OWN_INT_KER_LBN 7 +#define FRF_AZ_RBUF_OWN_INT_KER_WIDTH 1 +#define FRF_AZ_TBUF_OWN_INT_KER_LBN 6 +#define FRF_AZ_TBUF_OWN_INT_KER_WIDTH 1 +#define FRF_AZ_RDESCQ_OWN_INT_KER_LBN 5 +#define FRF_AZ_RDESCQ_OWN_INT_KER_WIDTH 1 +#define FRF_AZ_TDESCQ_OWN_INT_KER_LBN 4 +#define FRF_AZ_TDESCQ_OWN_INT_KER_WIDTH 1 +#define FRF_AZ_EVQ_OWN_INT_KER_LBN 3 +#define FRF_AZ_EVQ_OWN_INT_KER_WIDTH 1 +#define FRF_AZ_EVF_OFLO_INT_KER_LBN 2 +#define FRF_AZ_EVF_OFLO_INT_KER_WIDTH 1 +#define FRF_AZ_ILL_ADR_INT_KER_LBN 1 +#define FRF_AZ_ILL_ADR_INT_KER_WIDTH 1 +#define FRF_AZ_SRM_PERR_INT_KER_LBN 0 +#define FRF_AZ_SRM_PERR_INT_KER_WIDTH 1 + +/* FATAL_INTR_REG_CHAR: Fatal interrupt register for Char */ +#define FR_BZ_FATAL_INTR_CHAR 0x00000240 +#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_LBN 44 +#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_WIDTH 1 +#define FRF_BB_PCI_BUSERR_INT_CHAR_EN_LBN 43 +#define FRF_BB_PCI_BUSERR_INT_CHAR_EN_WIDTH 1 +#define FRF_CZ_MBU_PERR_INT_CHAR_EN_LBN 43 +#define FRF_CZ_MBU_PERR_INT_CHAR_EN_WIDTH 1 +#define FRF_BZ_SRAM_OOB_INT_CHAR_EN_LBN 42 +#define FRF_BZ_SRAM_OOB_INT_CHAR_EN_WIDTH 1 +#define FRF_BZ_BUFID_OOB_INT_CHAR_EN_LBN 41 +#define FRF_BZ_BUFID_OOB_INT_CHAR_EN_WIDTH 1 +#define FRF_BZ_MEM_PERR_INT_CHAR_EN_LBN 40 +#define FRF_BZ_MEM_PERR_INT_CHAR_EN_WIDTH 1 +#define FRF_BZ_RBUF_OWN_INT_CHAR_EN_LBN 39 +#define FRF_BZ_RBUF_OWN_INT_CHAR_EN_WIDTH 1 +#define FRF_BZ_TBUF_OWN_INT_CHAR_EN_LBN 38 +#define FRF_BZ_TBUF_OWN_INT_CHAR_EN_WIDTH 1 +#define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_LBN 37 +#define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_WIDTH 1 +#define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_LBN 36 +#define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_WIDTH 1 +#define FRF_BZ_EVQ_OWN_INT_CHAR_EN_LBN 35 +#define FRF_BZ_EVQ_OWN_INT_CHAR_EN_WIDTH 1 +#define FRF_BZ_EVF_OFLO_INT_CHAR_EN_LBN 34 +#define FRF_BZ_EVF_OFLO_INT_CHAR_EN_WIDTH 1 +#define FRF_BZ_ILL_ADR_INT_CHAR_EN_LBN 33 +#define FRF_BZ_ILL_ADR_INT_CHAR_EN_WIDTH 1 +#define FRF_BZ_SRM_PERR_INT_CHAR_EN_LBN 32 +#define FRF_BZ_SRM_PERR_INT_CHAR_EN_WIDTH 1 +#define FRF_CZ_SRAM_PERR_INT_P_CHAR_LBN 12 +#define FRF_CZ_SRAM_PERR_INT_P_CHAR_WIDTH 1 +#define FRF_BB_PCI_BUSERR_INT_CHAR_LBN 
11 +#define FRF_BB_PCI_BUSERR_INT_CHAR_WIDTH 1 +#define FRF_CZ_MBU_PERR_INT_CHAR_LBN 11 +#define FRF_CZ_MBU_PERR_INT_CHAR_WIDTH 1 +#define FRF_BZ_SRAM_OOB_INT_CHAR_LBN 10 +#define FRF_BZ_SRAM_OOB_INT_CHAR_WIDTH 1 +#define FRF_BZ_BUFID_DC_OOB_INT_CHAR_LBN 9 +#define FRF_BZ_BUFID_DC_OOB_INT_CHAR_WIDTH 1 +#define FRF_BZ_MEM_PERR_INT_CHAR_LBN 8 +#define FRF_BZ_MEM_PERR_INT_CHAR_WIDTH 1 +#define FRF_BZ_RBUF_OWN_INT_CHAR_LBN 7 +#define FRF_BZ_RBUF_OWN_INT_CHAR_WIDTH 1 +#define FRF_BZ_TBUF_OWN_INT_CHAR_LBN 6 +#define FRF_BZ_TBUF_OWN_INT_CHAR_WIDTH 1 +#define FRF_BZ_RDESCQ_OWN_INT_CHAR_LBN 5 +#define FRF_BZ_RDESCQ_OWN_INT_CHAR_WIDTH 1 +#define FRF_BZ_TDESCQ_OWN_INT_CHAR_LBN 4 +#define FRF_BZ_TDESCQ_OWN_INT_CHAR_WIDTH 1 +#define FRF_BZ_EVQ_OWN_INT_CHAR_LBN 3 +#define FRF_BZ_EVQ_OWN_INT_CHAR_WIDTH 1 +#define FRF_BZ_EVF_OFLO_INT_CHAR_LBN 2 +#define FRF_BZ_EVF_OFLO_INT_CHAR_WIDTH 1 +#define FRF_BZ_ILL_ADR_INT_CHAR_LBN 1 +#define FRF_BZ_ILL_ADR_INT_CHAR_WIDTH 1 +#define FRF_BZ_SRM_PERR_INT_CHAR_LBN 0 +#define FRF_BZ_SRM_PERR_INT_CHAR_WIDTH 1 + +/* DP_CTRL_REG: Datapath control register */ +#define FR_BZ_DP_CTRL 0x00000250 +#define FRF_BZ_FLS_EVQ_ID_LBN 0 +#define FRF_BZ_FLS_EVQ_ID_WIDTH 12 + +/* MEM_STAT_REG: Memory status register */ +#define FR_AZ_MEM_STAT 0x00000260 +#define FRF_AB_MEM_PERR_VEC_LBN 53 +#define FRF_AB_MEM_PERR_VEC_WIDTH 38 +#define FRF_AB_MBIST_CORR_LBN 38 +#define FRF_AB_MBIST_CORR_WIDTH 15 +#define FRF_AB_MBIST_ERR_LBN 0 +#define FRF_AB_MBIST_ERR_WIDTH 40 +#define FRF_CZ_MEM_PERR_VEC_LBN 0 +#define FRF_CZ_MEM_PERR_VEC_WIDTH 35 + +/* CS_DEBUG_REG: Debug register */ +#define FR_AZ_CS_DEBUG 0x00000270 +#define FRF_AB_GLB_DEBUG2_SEL_LBN 50 +#define FRF_AB_GLB_DEBUG2_SEL_WIDTH 3 +#define FRF_AB_DEBUG_BLK_SEL2_LBN 47 +#define FRF_AB_DEBUG_BLK_SEL2_WIDTH 3 +#define FRF_AB_DEBUG_BLK_SEL1_LBN 44 +#define FRF_AB_DEBUG_BLK_SEL1_WIDTH 3 +#define FRF_AB_DEBUG_BLK_SEL0_LBN 41 +#define FRF_AB_DEBUG_BLK_SEL0_WIDTH 3 +#define FRF_CZ_CS_PORT_NUM_LBN 40 +#define FRF_CZ_CS_PORT_NUM_WIDTH 2 +#define FRF_AB_MISC_DEBUG_ADDR_LBN 36 +#define FRF_AB_MISC_DEBUG_ADDR_WIDTH 5 +#define FRF_AB_SERDES_DEBUG_ADDR_LBN 31 +#define FRF_AB_SERDES_DEBUG_ADDR_WIDTH 5 +#define FRF_CZ_CS_PORT_FPE_LBN 1 +#define FRF_CZ_CS_PORT_FPE_WIDTH 35 +#define FRF_AB_EM_DEBUG_ADDR_LBN 26 +#define FRF_AB_EM_DEBUG_ADDR_WIDTH 5 +#define FRF_AB_SR_DEBUG_ADDR_LBN 21 +#define FRF_AB_SR_DEBUG_ADDR_WIDTH 5 +#define FRF_AB_EV_DEBUG_ADDR_LBN 16 +#define FRF_AB_EV_DEBUG_ADDR_WIDTH 5 +#define FRF_AB_RX_DEBUG_ADDR_LBN 11 +#define FRF_AB_RX_DEBUG_ADDR_WIDTH 5 +#define FRF_AB_TX_DEBUG_ADDR_LBN 6 +#define FRF_AB_TX_DEBUG_ADDR_WIDTH 5 +#define FRF_AB_CS_BIU_DEBUG_ADDR_LBN 1 +#define FRF_AB_CS_BIU_DEBUG_ADDR_WIDTH 5 +#define FRF_AZ_CS_DEBUG_EN_LBN 0 +#define FRF_AZ_CS_DEBUG_EN_WIDTH 1 + +/* DRIVER_REG: Driver scratch register [0-7] */ +#define FR_AZ_DRIVER 0x00000280 +#define FR_AZ_DRIVER_STEP 16 +#define FR_AZ_DRIVER_ROWS 8 +#define FRF_AZ_DRIVER_DW0_LBN 0 +#define FRF_AZ_DRIVER_DW0_WIDTH 32 + +/* ALTERA_BUILD_REG: Altera build register */ +#define FR_AZ_ALTERA_BUILD 0x00000300 +#define FRF_AZ_ALTERA_BUILD_VER_LBN 0 +#define FRF_AZ_ALTERA_BUILD_VER_WIDTH 32 + +/* CSR_SPARE_REG: Spare register */ +#define FR_AZ_CSR_SPARE 0x00000310 +#define FRF_AB_MEM_PERR_EN_LBN 64 +#define FRF_AB_MEM_PERR_EN_WIDTH 38 +#define FRF_CZ_MEM_PERR_EN_LBN 64 +#define FRF_CZ_MEM_PERR_EN_WIDTH 35 +#define FRF_AB_MEM_PERR_EN_TX_DATA_LBN 72 +#define FRF_AB_MEM_PERR_EN_TX_DATA_WIDTH 2 +#define FRF_AZ_CSR_SPARE_BITS_LBN 0 +#define FRF_AZ_CSR_SPARE_BITS_WIDTH 32 + +/* 
PCIE_SD_CTL0123_REG: PCIE SerDes control register 0 to 3 */ +#define FR_AB_PCIE_SD_CTL0123 0x00000320 +#define FRF_AB_PCIE_TESTSIG_H_LBN 96 +#define FRF_AB_PCIE_TESTSIG_H_WIDTH 19 +#define FRF_AB_PCIE_TESTSIG_L_LBN 64 +#define FRF_AB_PCIE_TESTSIG_L_WIDTH 19 +#define FRF_AB_PCIE_OFFSET_LBN 56 +#define FRF_AB_PCIE_OFFSET_WIDTH 8 +#define FRF_AB_PCIE_OFFSETEN_H_LBN 55 +#define FRF_AB_PCIE_OFFSETEN_H_WIDTH 1 +#define FRF_AB_PCIE_OFFSETEN_L_LBN 54 +#define FRF_AB_PCIE_OFFSETEN_L_WIDTH 1 +#define FRF_AB_PCIE_HIVMODE_H_LBN 53 +#define FRF_AB_PCIE_HIVMODE_H_WIDTH 1 +#define FRF_AB_PCIE_HIVMODE_L_LBN 52 +#define FRF_AB_PCIE_HIVMODE_L_WIDTH 1 +#define FRF_AB_PCIE_PARRESET_H_LBN 51 +#define FRF_AB_PCIE_PARRESET_H_WIDTH 1 +#define FRF_AB_PCIE_PARRESET_L_LBN 50 +#define FRF_AB_PCIE_PARRESET_L_WIDTH 1 +#define FRF_AB_PCIE_LPBKWDRV_H_LBN 49 +#define FRF_AB_PCIE_LPBKWDRV_H_WIDTH 1 +#define FRF_AB_PCIE_LPBKWDRV_L_LBN 48 +#define FRF_AB_PCIE_LPBKWDRV_L_WIDTH 1 +#define FRF_AB_PCIE_LPBK_LBN 40 +#define FRF_AB_PCIE_LPBK_WIDTH 8 +#define FRF_AB_PCIE_PARLPBK_LBN 32 +#define FRF_AB_PCIE_PARLPBK_WIDTH 8 +#define FRF_AB_PCIE_RXTERMADJ_H_LBN 30 +#define FRF_AB_PCIE_RXTERMADJ_H_WIDTH 2 +#define FRF_AB_PCIE_RXTERMADJ_L_LBN 28 +#define FRF_AB_PCIE_RXTERMADJ_L_WIDTH 2 +#define FFE_AB_PCIE_RXTERMADJ_MIN15PCNT 3 +#define FFE_AB_PCIE_RXTERMADJ_PL10PCNT 2 +#define FFE_AB_PCIE_RXTERMADJ_MIN17PCNT 1 +#define FFE_AB_PCIE_RXTERMADJ_NOMNL 0 +#define FRF_AB_PCIE_TXTERMADJ_H_LBN 26 +#define FRF_AB_PCIE_TXTERMADJ_H_WIDTH 2 +#define FRF_AB_PCIE_TXTERMADJ_L_LBN 24 +#define FRF_AB_PCIE_TXTERMADJ_L_WIDTH 2 +#define FFE_AB_PCIE_TXTERMADJ_MIN15PCNT 3 +#define FFE_AB_PCIE_TXTERMADJ_PL10PCNT 2 +#define FFE_AB_PCIE_TXTERMADJ_MIN17PCNT 1 +#define FFE_AB_PCIE_TXTERMADJ_NOMNL 0 +#define FRF_AB_PCIE_RXEQCTL_H_LBN 18 +#define FRF_AB_PCIE_RXEQCTL_H_WIDTH 2 +#define FRF_AB_PCIE_RXEQCTL_L_LBN 16 +#define FRF_AB_PCIE_RXEQCTL_L_WIDTH 2 +#define FFE_AB_PCIE_RXEQCTL_OFF_ALT 3 +#define FFE_AB_PCIE_RXEQCTL_OFF 2 +#define FFE_AB_PCIE_RXEQCTL_MIN 1 +#define FFE_AB_PCIE_RXEQCTL_MAX 0 +#define FRF_AB_PCIE_HIDRV_LBN 8 +#define FRF_AB_PCIE_HIDRV_WIDTH 8 +#define FRF_AB_PCIE_LODRV_LBN 0 +#define FRF_AB_PCIE_LODRV_WIDTH 8 + +/* PCIE_SD_CTL45_REG: PCIE SerDes control register 4 and 5 */ +#define FR_AB_PCIE_SD_CTL45 0x00000330 +#define FRF_AB_PCIE_DTX7_LBN 60 +#define FRF_AB_PCIE_DTX7_WIDTH 4 +#define FRF_AB_PCIE_DTX6_LBN 56 +#define FRF_AB_PCIE_DTX6_WIDTH 4 +#define FRF_AB_PCIE_DTX5_LBN 52 +#define FRF_AB_PCIE_DTX5_WIDTH 4 +#define FRF_AB_PCIE_DTX4_LBN 48 +#define FRF_AB_PCIE_DTX4_WIDTH 4 +#define FRF_AB_PCIE_DTX3_LBN 44 +#define FRF_AB_PCIE_DTX3_WIDTH 4 +#define FRF_AB_PCIE_DTX2_LBN 40 +#define FRF_AB_PCIE_DTX2_WIDTH 4 +#define FRF_AB_PCIE_DTX1_LBN 36 +#define FRF_AB_PCIE_DTX1_WIDTH 4 +#define FRF_AB_PCIE_DTX0_LBN 32 +#define FRF_AB_PCIE_DTX0_WIDTH 4 +#define FRF_AB_PCIE_DEQ7_LBN 28 +#define FRF_AB_PCIE_DEQ7_WIDTH 4 +#define FRF_AB_PCIE_DEQ6_LBN 24 +#define FRF_AB_PCIE_DEQ6_WIDTH 4 +#define FRF_AB_PCIE_DEQ5_LBN 20 +#define FRF_AB_PCIE_DEQ5_WIDTH 4 +#define FRF_AB_PCIE_DEQ4_LBN 16 +#define FRF_AB_PCIE_DEQ4_WIDTH 4 +#define FRF_AB_PCIE_DEQ3_LBN 12 +#define FRF_AB_PCIE_DEQ3_WIDTH 4 +#define FRF_AB_PCIE_DEQ2_LBN 8 +#define FRF_AB_PCIE_DEQ2_WIDTH 4 +#define FRF_AB_PCIE_DEQ1_LBN 4 +#define FRF_AB_PCIE_DEQ1_WIDTH 4 +#define FRF_AB_PCIE_DEQ0_LBN 0 +#define FRF_AB_PCIE_DEQ0_WIDTH 4 + +/* PCIE_PCS_CTL_STAT_REG: PCIE PCS control and status register */ +#define FR_AB_PCIE_PCS_CTL_STAT 0x00000340 +#define FRF_AB_PCIE_PRBSERRCOUNT0_H_LBN 52 +#define 
FRF_AB_PCIE_PRBSERRCOUNT0_H_WIDTH 4 +#define FRF_AB_PCIE_PRBSERRCOUNT0_L_LBN 48 +#define FRF_AB_PCIE_PRBSERRCOUNT0_L_WIDTH 4 +#define FRF_AB_PCIE_PRBSERR_LBN 40 +#define FRF_AB_PCIE_PRBSERR_WIDTH 8 +#define FRF_AB_PCIE_PRBSERRH0_LBN 32 +#define FRF_AB_PCIE_PRBSERRH0_WIDTH 8 +#define FRF_AB_PCIE_FASTINIT_H_LBN 15 +#define FRF_AB_PCIE_FASTINIT_H_WIDTH 1 +#define FRF_AB_PCIE_FASTINIT_L_LBN 14 +#define FRF_AB_PCIE_FASTINIT_L_WIDTH 1 +#define FRF_AB_PCIE_CTCDISABLE_H_LBN 13 +#define FRF_AB_PCIE_CTCDISABLE_H_WIDTH 1 +#define FRF_AB_PCIE_CTCDISABLE_L_LBN 12 +#define FRF_AB_PCIE_CTCDISABLE_L_WIDTH 1 +#define FRF_AB_PCIE_PRBSSYNC_H_LBN 11 +#define FRF_AB_PCIE_PRBSSYNC_H_WIDTH 1 +#define FRF_AB_PCIE_PRBSSYNC_L_LBN 10 +#define FRF_AB_PCIE_PRBSSYNC_L_WIDTH 1 +#define FRF_AB_PCIE_PRBSERRACK_H_LBN 9 +#define FRF_AB_PCIE_PRBSERRACK_H_WIDTH 1 +#define FRF_AB_PCIE_PRBSERRACK_L_LBN 8 +#define FRF_AB_PCIE_PRBSERRACK_L_WIDTH 1 +#define FRF_AB_PCIE_PRBSSEL_LBN 0 +#define FRF_AB_PCIE_PRBSSEL_WIDTH 8 + +/* DEBUG_DATA_OUT_REG: Live Debug and Debug 2 out ports */ +#define FR_BB_DEBUG_DATA_OUT 0x00000350 +#define FRF_BB_DEBUG2_PORT_LBN 25 +#define FRF_BB_DEBUG2_PORT_WIDTH 15 +#define FRF_BB_DEBUG1_PORT_LBN 0 +#define FRF_BB_DEBUG1_PORT_WIDTH 25 + +/* EVQ_RPTR_REGP0: Event queue read pointer register */ +#define FR_BZ_EVQ_RPTR_P0 0x00000400 +#define FR_BZ_EVQ_RPTR_P0_STEP 8192 +#define FR_BZ_EVQ_RPTR_P0_ROWS 1024 +/* EVQ_RPTR_REG_KER: Event queue read pointer register */ +#define FR_AA_EVQ_RPTR_KER 0x00011b00 +#define FR_AA_EVQ_RPTR_KER_STEP 4 +#define FR_AA_EVQ_RPTR_KER_ROWS 4 +/* EVQ_RPTR_REG: Event queue read pointer register */ +#define FR_BZ_EVQ_RPTR 0x00fa0000 +#define FR_BZ_EVQ_RPTR_STEP 16 +#define FR_BB_EVQ_RPTR_ROWS 4096 +#define FR_CZ_EVQ_RPTR_ROWS 1024 +/* EVQ_RPTR_REGP123: Event queue read pointer register */ +#define FR_BB_EVQ_RPTR_P123 0x01000400 +#define FR_BB_EVQ_RPTR_P123_STEP 8192 +#define FR_BB_EVQ_RPTR_P123_ROWS 3072 +#define FRF_AZ_EVQ_RPTR_VLD_LBN 15 +#define FRF_AZ_EVQ_RPTR_VLD_WIDTH 1 +#define FRF_AZ_EVQ_RPTR_LBN 0 +#define FRF_AZ_EVQ_RPTR_WIDTH 15 + +/* TIMER_COMMAND_REGP0: Timer Command Registers */ +#define FR_BZ_TIMER_COMMAND_P0 0x00000420 +#define FR_BZ_TIMER_COMMAND_P0_STEP 8192 +#define FR_BZ_TIMER_COMMAND_P0_ROWS 1024 +/* TIMER_COMMAND_REG_KER: Timer Command Registers */ +#define FR_AA_TIMER_COMMAND_KER 0x00000420 +#define FR_AA_TIMER_COMMAND_KER_STEP 8192 +#define FR_AA_TIMER_COMMAND_KER_ROWS 4 +/* TIMER_COMMAND_REGP123: Timer Command Registers */ +#define FR_BB_TIMER_COMMAND_P123 0x01000420 +#define FR_BB_TIMER_COMMAND_P123_STEP 8192 +#define FR_BB_TIMER_COMMAND_P123_ROWS 3072 +#define FRF_CZ_TC_TIMER_MODE_LBN 14 +#define FRF_CZ_TC_TIMER_MODE_WIDTH 2 +#define FRF_AB_TC_TIMER_MODE_LBN 12 +#define FRF_AB_TC_TIMER_MODE_WIDTH 2 +#define FRF_CZ_TC_TIMER_VAL_LBN 0 +#define FRF_CZ_TC_TIMER_VAL_WIDTH 14 +#define FRF_AB_TC_TIMER_VAL_LBN 0 +#define FRF_AB_TC_TIMER_VAL_WIDTH 12 + +/* DRV_EV_REG: Driver generated event register */ +#define FR_AZ_DRV_EV 0x00000440 +#define FRF_AZ_DRV_EV_QID_LBN 64 +#define FRF_AZ_DRV_EV_QID_WIDTH 12 +#define FRF_AZ_DRV_EV_DATA_LBN 0 +#define FRF_AZ_DRV_EV_DATA_WIDTH 64 + +/* EVQ_CTL_REG: Event queue control register */ +#define FR_AZ_EVQ_CTL 0x00000450 +#define FRF_CZ_RX_EVQ_WAKEUP_MASK_LBN 15 +#define FRF_CZ_RX_EVQ_WAKEUP_MASK_WIDTH 10 +#define FRF_BB_RX_EVQ_WAKEUP_MASK_LBN 15 +#define FRF_BB_RX_EVQ_WAKEUP_MASK_WIDTH 6 +#define FRF_AZ_EVQ_OWNERR_CTL_LBN 14 +#define FRF_AZ_EVQ_OWNERR_CTL_WIDTH 1 +#define FRF_AZ_EVQ_FIFO_AF_TH_LBN 7 +#define 
FRF_AZ_EVQ_FIFO_AF_TH_WIDTH 7 +#define FRF_AZ_EVQ_FIFO_NOTAF_TH_LBN 0 +#define FRF_AZ_EVQ_FIFO_NOTAF_TH_WIDTH 7 + +/* EVQ_CNT1_REG: Event counter 1 register */ +#define FR_AZ_EVQ_CNT1 0x00000460 +#define FRF_AZ_EVQ_CNT_PRE_FIFO_LBN 120 +#define FRF_AZ_EVQ_CNT_PRE_FIFO_WIDTH 7 +#define FRF_AZ_EVQ_CNT_TOBIU_LBN 100 +#define FRF_AZ_EVQ_CNT_TOBIU_WIDTH 20 +#define FRF_AZ_EVQ_TX_REQ_CNT_LBN 80 +#define FRF_AZ_EVQ_TX_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_RX_REQ_CNT_LBN 60 +#define FRF_AZ_EVQ_RX_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_EM_REQ_CNT_LBN 40 +#define FRF_AZ_EVQ_EM_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_CSR_REQ_CNT_LBN 20 +#define FRF_AZ_EVQ_CSR_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_ERR_REQ_CNT_LBN 0 +#define FRF_AZ_EVQ_ERR_REQ_CNT_WIDTH 20 + +/* EVQ_CNT2_REG: Event counter 2 register */ +#define FR_AZ_EVQ_CNT2 0x00000470 +#define FRF_AZ_EVQ_UPD_REQ_CNT_LBN 104 +#define FRF_AZ_EVQ_UPD_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_CLR_REQ_CNT_LBN 84 +#define FRF_AZ_EVQ_CLR_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_RDY_CNT_LBN 80 +#define FRF_AZ_EVQ_RDY_CNT_WIDTH 4 +#define FRF_AZ_EVQ_WU_REQ_CNT_LBN 60 +#define FRF_AZ_EVQ_WU_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_WET_REQ_CNT_LBN 40 +#define FRF_AZ_EVQ_WET_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_INIT_REQ_CNT_LBN 20 +#define FRF_AZ_EVQ_INIT_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_TM_REQ_CNT_LBN 0 +#define FRF_AZ_EVQ_TM_REQ_CNT_WIDTH 20 + +/* USR_EV_REG: Event mailbox register */ +#define FR_CZ_USR_EV 0x00000540 +#define FR_CZ_USR_EV_STEP 8192 +#define FR_CZ_USR_EV_ROWS 1024 +#define FRF_CZ_USR_EV_DATA_LBN 0 +#define FRF_CZ_USR_EV_DATA_WIDTH 32 + +/* BUF_TBL_CFG_REG: Buffer table configuration register */ +#define FR_AZ_BUF_TBL_CFG 0x00000600 +#define FRF_AZ_BUF_TBL_MODE_LBN 3 +#define FRF_AZ_BUF_TBL_MODE_WIDTH 1 + +/* SRM_RX_DC_CFG_REG: SRAM receive descriptor cache configuration register */ +#define FR_AZ_SRM_RX_DC_CFG 0x00000610 +#define FRF_AZ_SRM_CLK_TMP_EN_LBN 21 +#define FRF_AZ_SRM_CLK_TMP_EN_WIDTH 1 +#define FRF_AZ_SRM_RX_DC_BASE_ADR_LBN 0 +#define FRF_AZ_SRM_RX_DC_BASE_ADR_WIDTH 21 + +/* SRM_TX_DC_CFG_REG: SRAM transmit descriptor cache configuration register */ +#define FR_AZ_SRM_TX_DC_CFG 0x00000620 +#define FRF_AZ_SRM_TX_DC_BASE_ADR_LBN 0 +#define FRF_AZ_SRM_TX_DC_BASE_ADR_WIDTH 21 + +/* SRM_CFG_REG: SRAM configuration register */ +#define FR_AZ_SRM_CFG 0x00000630 +#define FRF_AZ_SRM_OOB_ADR_INTEN_LBN 5 +#define FRF_AZ_SRM_OOB_ADR_INTEN_WIDTH 1 +#define FRF_AZ_SRM_OOB_BUF_INTEN_LBN 4 +#define FRF_AZ_SRM_OOB_BUF_INTEN_WIDTH 1 +#define FRF_AZ_SRM_INIT_EN_LBN 3 +#define FRF_AZ_SRM_INIT_EN_WIDTH 1 +#define FRF_AZ_SRM_NUM_BANK_LBN 2 +#define FRF_AZ_SRM_NUM_BANK_WIDTH 1 +#define FRF_AZ_SRM_BANK_SIZE_LBN 0 +#define FRF_AZ_SRM_BANK_SIZE_WIDTH 2 + +/* BUF_TBL_UPD_REG: Buffer table update register */ +#define FR_AZ_BUF_TBL_UPD 0x00000650 +#define FRF_AZ_BUF_UPD_CMD_LBN 63 +#define FRF_AZ_BUF_UPD_CMD_WIDTH 1 +#define FRF_AZ_BUF_CLR_CMD_LBN 62 +#define FRF_AZ_BUF_CLR_CMD_WIDTH 1 +#define FRF_AZ_BUF_CLR_END_ID_LBN 32 +#define FRF_AZ_BUF_CLR_END_ID_WIDTH 20 +#define FRF_AZ_BUF_CLR_START_ID_LBN 0 +#define FRF_AZ_BUF_CLR_START_ID_WIDTH 20 + +/* SRM_UPD_EVQ_REG: Buffer table update register */ +#define FR_AZ_SRM_UPD_EVQ 0x00000660 +#define FRF_AZ_SRM_UPD_EVQ_ID_LBN 0 +#define FRF_AZ_SRM_UPD_EVQ_ID_WIDTH 12 + +/* SRAM_PARITY_REG: SRAM parity register. 
*/ +#define FR_AZ_SRAM_PARITY 0x00000670 +#define FRF_CZ_BYPASS_ECC_LBN 3 +#define FRF_CZ_BYPASS_ECC_WIDTH 1 +#define FRF_CZ_SEC_INT_LBN 2 +#define FRF_CZ_SEC_INT_WIDTH 1 +#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_LBN 1 +#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_WIDTH 1 +#define FRF_AB_FORCE_SRAM_PERR_LBN 0 +#define FRF_AB_FORCE_SRAM_PERR_WIDTH 1 +#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_LBN 0 +#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_WIDTH 1 + +/* RX_CFG_REG: Receive configuration register */ +#define FR_AZ_RX_CFG 0x00000800 +#define FRF_CZ_RX_MIN_KBUF_SIZE_LBN 72 +#define FRF_CZ_RX_MIN_KBUF_SIZE_WIDTH 14 +#define FRF_CZ_RX_HDR_SPLIT_EN_LBN 71 +#define FRF_CZ_RX_HDR_SPLIT_EN_WIDTH 1 +#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_LBN 62 +#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_WIDTH 9 +#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_LBN 53 +#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_WIDTH 9 +#define FRF_CZ_RX_PRE_RFF_IPG_LBN 49 +#define FRF_CZ_RX_PRE_RFF_IPG_WIDTH 4 +#define FRF_BZ_RX_TCP_SUP_LBN 48 +#define FRF_BZ_RX_TCP_SUP_WIDTH 1 +#define FRF_BZ_RX_INGR_EN_LBN 47 +#define FRF_BZ_RX_INGR_EN_WIDTH 1 +#define FRF_BZ_RX_IP_HASH_LBN 46 +#define FRF_BZ_RX_IP_HASH_WIDTH 1 +#define FRF_BZ_RX_HASH_ALG_LBN 45 +#define FRF_BZ_RX_HASH_ALG_WIDTH 1 +#define FRF_BZ_RX_HASH_INSRT_HDR_LBN 44 +#define FRF_BZ_RX_HASH_INSRT_HDR_WIDTH 1 +#define FRF_BZ_RX_DESC_PUSH_EN_LBN 43 +#define FRF_BZ_RX_DESC_PUSH_EN_WIDTH 1 +#define FRF_BZ_RX_RDW_PATCH_EN_LBN 42 +#define FRF_BZ_RX_RDW_PATCH_EN_WIDTH 1 +#define FRF_BB_RX_PCI_BURST_SIZE_LBN 39 +#define FRF_BB_RX_PCI_BURST_SIZE_WIDTH 3 +#define FRF_BZ_RX_OWNERR_CTL_LBN 38 +#define FRF_BZ_RX_OWNERR_CTL_WIDTH 1 +#define FRF_BZ_RX_XON_TX_TH_LBN 33 +#define FRF_BZ_RX_XON_TX_TH_WIDTH 5 +#define FRF_AA_RX_DESC_PUSH_EN_LBN 35 +#define FRF_AA_RX_DESC_PUSH_EN_WIDTH 1 +#define FRF_AA_RX_RDW_PATCH_EN_LBN 34 +#define FRF_AA_RX_RDW_PATCH_EN_WIDTH 1 +#define FRF_AA_RX_PCI_BURST_SIZE_LBN 31 +#define FRF_AA_RX_PCI_BURST_SIZE_WIDTH 3 +#define FRF_BZ_RX_XOFF_TX_TH_LBN 28 +#define FRF_BZ_RX_XOFF_TX_TH_WIDTH 5 +#define FRF_AA_RX_OWNERR_CTL_LBN 30 +#define FRF_AA_RX_OWNERR_CTL_WIDTH 1 +#define FRF_AA_RX_XON_TX_TH_LBN 25 +#define FRF_AA_RX_XON_TX_TH_WIDTH 5 +#define FRF_BZ_RX_USR_BUF_SIZE_LBN 19 +#define FRF_BZ_RX_USR_BUF_SIZE_WIDTH 9 +#define FRF_AA_RX_XOFF_TX_TH_LBN 20 +#define FRF_AA_RX_XOFF_TX_TH_WIDTH 5 +#define FRF_AA_RX_USR_BUF_SIZE_LBN 11 +#define FRF_AA_RX_USR_BUF_SIZE_WIDTH 9 +#define FRF_BZ_RX_XON_MAC_TH_LBN 10 +#define FRF_BZ_RX_XON_MAC_TH_WIDTH 9 +#define FRF_AA_RX_XON_MAC_TH_LBN 6 +#define FRF_AA_RX_XON_MAC_TH_WIDTH 5 +#define FRF_BZ_RX_XOFF_MAC_TH_LBN 1 +#define FRF_BZ_RX_XOFF_MAC_TH_WIDTH 9 +#define FRF_AA_RX_XOFF_MAC_TH_LBN 1 +#define FRF_AA_RX_XOFF_MAC_TH_WIDTH 5 +#define FRF_AZ_RX_XOFF_MAC_EN_LBN 0 +#define FRF_AZ_RX_XOFF_MAC_EN_WIDTH 1 + +/* RX_FILTER_CTL_REG: Receive filter control registers */ +#define FR_BZ_RX_FILTER_CTL 0x00000810 +#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_LBN 94 +#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_WIDTH 8 +#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_LBN 86 +#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_WIDTH 8 +#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_LBN 85 +#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_WIDTH 1 +#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_LBN 69 +#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_WIDTH 16 +#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_LBN 57 +#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_WIDTH 12 +#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_LBN 56 +#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_WIDTH 1 +#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_LBN 55 +#define 
FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_WIDTH 1 +#define FRF_CZ_UNICAST_NOMATCH_Q_ID_LBN 43 +#define FRF_CZ_UNICAST_NOMATCH_Q_ID_WIDTH 12 +#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_LBN 42 +#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_WIDTH 1 +#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_LBN 41 +#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_WIDTH 1 +#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_LBN 40 +#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_WIDTH 1 +#define FRF_BZ_UDP_FULL_SRCH_LIMIT_LBN 32 +#define FRF_BZ_UDP_FULL_SRCH_LIMIT_WIDTH 8 +#define FRF_BZ_NUM_KER_LBN 24 +#define FRF_BZ_NUM_KER_WIDTH 2 +#define FRF_BZ_UDP_WILD_SRCH_LIMIT_LBN 16 +#define FRF_BZ_UDP_WILD_SRCH_LIMIT_WIDTH 8 +#define FRF_BZ_TCP_WILD_SRCH_LIMIT_LBN 8 +#define FRF_BZ_TCP_WILD_SRCH_LIMIT_WIDTH 8 +#define FRF_BZ_TCP_FULL_SRCH_LIMIT_LBN 0 +#define FRF_BZ_TCP_FULL_SRCH_LIMIT_WIDTH 8 + +/* RX_FLUSH_DESCQ_REG: Receive flush descriptor queue register */ +#define FR_AZ_RX_FLUSH_DESCQ 0x00000820 +#define FRF_AZ_RX_FLUSH_DESCQ_CMD_LBN 24 +#define FRF_AZ_RX_FLUSH_DESCQ_CMD_WIDTH 1 +#define FRF_AZ_RX_FLUSH_DESCQ_LBN 0 +#define FRF_AZ_RX_FLUSH_DESCQ_WIDTH 12 + +/* RX_DESC_UPD_REGP0: Receive descriptor update register. */ +#define FR_BZ_RX_DESC_UPD_P0 0x00000830 +#define FR_BZ_RX_DESC_UPD_P0_STEP 8192 +#define FR_BZ_RX_DESC_UPD_P0_ROWS 1024 +/* RX_DESC_UPD_REG_KER: Receive descriptor update register. */ +#define FR_AA_RX_DESC_UPD_KER 0x00000830 +#define FR_AA_RX_DESC_UPD_KER_STEP 8192 +#define FR_AA_RX_DESC_UPD_KER_ROWS 4 +/* RX_DESC_UPD_REGP123: Receive descriptor update register. */ +#define FR_BB_RX_DESC_UPD_P123 0x01000830 +#define FR_BB_RX_DESC_UPD_P123_STEP 8192 +#define FR_BB_RX_DESC_UPD_P123_ROWS 3072 +#define FRF_AZ_RX_DESC_WPTR_LBN 96 +#define FRF_AZ_RX_DESC_WPTR_WIDTH 12 +#define FRF_AZ_RX_DESC_PUSH_CMD_LBN 95 +#define FRF_AZ_RX_DESC_PUSH_CMD_WIDTH 1 +#define FRF_AZ_RX_DESC_LBN 0 +#define FRF_AZ_RX_DESC_WIDTH 64 + +/* RX_DC_CFG_REG: Receive descriptor cache configuration register */ +#define FR_AZ_RX_DC_CFG 0x00000840 +#define FRF_AB_RX_MAX_PF_LBN 2 +#define FRF_AB_RX_MAX_PF_WIDTH 2 +#define FRF_AZ_RX_DC_SIZE_LBN 0 +#define FRF_AZ_RX_DC_SIZE_WIDTH 2 +#define FFE_AZ_RX_DC_SIZE_64 3 +#define FFE_AZ_RX_DC_SIZE_32 2 +#define FFE_AZ_RX_DC_SIZE_16 1 +#define FFE_AZ_RX_DC_SIZE_8 0 + +/* RX_DC_PF_WM_REG: Receive descriptor cache pre-fetch watermark register */ +#define FR_AZ_RX_DC_PF_WM 0x00000850 +#define FRF_AZ_RX_DC_PF_HWM_LBN 6 +#define FRF_AZ_RX_DC_PF_HWM_WIDTH 6 +#define FRF_AZ_RX_DC_PF_LWM_LBN 0 +#define FRF_AZ_RX_DC_PF_LWM_WIDTH 6 + +/* RX_RSS_TKEY_REG: RSS Toeplitz hash key */ +#define FR_BZ_RX_RSS_TKEY 0x00000860 +#define FRF_BZ_RX_RSS_TKEY_HI_LBN 64 +#define FRF_BZ_RX_RSS_TKEY_HI_WIDTH 64 +#define FRF_BZ_RX_RSS_TKEY_LO_LBN 0 +#define FRF_BZ_RX_RSS_TKEY_LO_WIDTH 64 + +/* RX_NODESC_DROP_REG: Receive dropped packet counter register */ +#define FR_AZ_RX_NODESC_DROP 0x00000880 +#define FRF_CZ_RX_NODESC_DROP_CNT_LBN 0 +#define FRF_CZ_RX_NODESC_DROP_CNT_WIDTH 32 +#define FRF_AB_RX_NODESC_DROP_CNT_LBN 0 +#define FRF_AB_RX_NODESC_DROP_CNT_WIDTH 16 + +/* RX_SELF_RST_REG: Receive self reset register */ +#define FR_AA_RX_SELF_RST 0x00000890 +#define FRF_AA_RX_ISCSI_DIS_LBN 17 +#define FRF_AA_RX_ISCSI_DIS_WIDTH 1 +#define FRF_AA_RX_SW_RST_REG_LBN 16 +#define FRF_AA_RX_SW_RST_REG_WIDTH 1 +#define FRF_AA_RX_NODESC_WAIT_DIS_LBN 9 +#define FRF_AA_RX_NODESC_WAIT_DIS_WIDTH 1 +#define FRF_AA_RX_SELF_RST_EN_LBN 8 +#define FRF_AA_RX_SELF_RST_EN_WIDTH 1 +#define FRF_AA_RX_MAX_PF_LAT_LBN 4 +#define FRF_AA_RX_MAX_PF_LAT_WIDTH 4 +#define 
FRF_AA_RX_MAX_LU_LAT_LBN 0 +#define FRF_AA_RX_MAX_LU_LAT_WIDTH 4 + +/* RX_DEBUG_REG: undocumented register */ +#define FR_AZ_RX_DEBUG 0x000008a0 +#define FRF_AZ_RX_DEBUG_LBN 0 +#define FRF_AZ_RX_DEBUG_WIDTH 64 + +/* RX_PUSH_DROP_REG: Receive descriptor push dropped counter register */ +#define FR_AZ_RX_PUSH_DROP 0x000008b0 +#define FRF_AZ_RX_PUSH_DROP_CNT_LBN 0 +#define FRF_AZ_RX_PUSH_DROP_CNT_WIDTH 32 + +/* RX_RSS_IPV6_REG1: IPv6 RSS Toeplitz hash key low bytes */ +#define FR_CZ_RX_RSS_IPV6_REG1 0x000008d0 +#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN 0 +#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH 128 + +/* RX_RSS_IPV6_REG2: IPv6 RSS Toeplitz hash key middle bytes */ +#define FR_CZ_RX_RSS_IPV6_REG2 0x000008e0 +#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN 0 +#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH 128 + +/* RX_RSS_IPV6_REG3: IPv6 RSS Toeplitz hash key upper bytes and IPv6 RSS settings */ +#define FR_CZ_RX_RSS_IPV6_REG3 0x000008f0 +#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_LBN 66 +#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_WIDTH 1 +#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_LBN 65 +#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_WIDTH 1 +#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_LBN 64 +#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_WIDTH 1 +#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN 0 +#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH 64 + +/* TX_FLUSH_DESCQ_REG: Transmit flush descriptor queue register */ +#define FR_AZ_TX_FLUSH_DESCQ 0x00000a00 +#define FRF_AZ_TX_FLUSH_DESCQ_CMD_LBN 12 +#define FRF_AZ_TX_FLUSH_DESCQ_CMD_WIDTH 1 +#define FRF_AZ_TX_FLUSH_DESCQ_LBN 0 +#define FRF_AZ_TX_FLUSH_DESCQ_WIDTH 12 + +/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */ +#define FR_BZ_TX_DESC_UPD_P0 0x00000a10 +#define FR_BZ_TX_DESC_UPD_P0_STEP 8192 +#define FR_BZ_TX_DESC_UPD_P0_ROWS 1024 +/* TX_DESC_UPD_REG_KER: Transmit descriptor update register. */ +#define FR_AA_TX_DESC_UPD_KER 0x00000a10 +#define FR_AA_TX_DESC_UPD_KER_STEP 8192 +#define FR_AA_TX_DESC_UPD_KER_ROWS 8 +/* TX_DESC_UPD_REGP123: Transmit descriptor update register. 
*/ +#define FR_BB_TX_DESC_UPD_P123 0x01000a10 +#define FR_BB_TX_DESC_UPD_P123_STEP 8192 +#define FR_BB_TX_DESC_UPD_P123_ROWS 3072 +#define FRF_AZ_TX_DESC_WPTR_LBN 96 +#define FRF_AZ_TX_DESC_WPTR_WIDTH 12 +#define FRF_AZ_TX_DESC_PUSH_CMD_LBN 95 +#define FRF_AZ_TX_DESC_PUSH_CMD_WIDTH 1 +#define FRF_AZ_TX_DESC_LBN 0 +#define FRF_AZ_TX_DESC_WIDTH 95 + +/* TX_DC_CFG_REG: Transmit descriptor cache configuration register */ +#define FR_AZ_TX_DC_CFG 0x00000a20 +#define FRF_AZ_TX_DC_SIZE_LBN 0 +#define FRF_AZ_TX_DC_SIZE_WIDTH 2 +#define FFE_AZ_TX_DC_SIZE_32 2 +#define FFE_AZ_TX_DC_SIZE_16 1 +#define FFE_AZ_TX_DC_SIZE_8 0 + +/* TX_CHKSM_CFG_REG: Transmit checksum configuration register */ +#define FR_AA_TX_CHKSM_CFG 0x00000a30 +#define FRF_AA_TX_Q_CHKSM_DIS_96_127_LBN 96 +#define FRF_AA_TX_Q_CHKSM_DIS_96_127_WIDTH 32 +#define FRF_AA_TX_Q_CHKSM_DIS_64_95_LBN 64 +#define FRF_AA_TX_Q_CHKSM_DIS_64_95_WIDTH 32 +#define FRF_AA_TX_Q_CHKSM_DIS_32_63_LBN 32 +#define FRF_AA_TX_Q_CHKSM_DIS_32_63_WIDTH 32 +#define FRF_AA_TX_Q_CHKSM_DIS_0_31_LBN 0 +#define FRF_AA_TX_Q_CHKSM_DIS_0_31_WIDTH 32 + +/* TX_CFG_REG: Transmit configuration register */ +#define FR_AZ_TX_CFG 0x00000a50 +#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_LBN 114 +#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_WIDTH 8 +#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_LBN 113 +#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_WIDTH 1 +#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_LBN 105 +#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_WIDTH 8 +#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_LBN 97 +#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_WIDTH 8 +#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_LBN 89 +#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8 +#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_LBN 81 +#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8 +#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_LBN 73 +#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8 +#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_LBN 65 +#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8 +#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_LBN 64 +#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_WIDTH 1 +#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_LBN 48 +#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_WIDTH 16 +#define FRF_CZ_TX_FILTER_EN_BIT_LBN 47 +#define FRF_CZ_TX_FILTER_EN_BIT_WIDTH 1 +#define FRF_AZ_TX_IP_ID_P0_OFS_LBN 16 +#define FRF_AZ_TX_IP_ID_P0_OFS_WIDTH 15 +#define FRF_AZ_TX_NO_EOP_DISC_EN_LBN 5 +#define FRF_AZ_TX_NO_EOP_DISC_EN_WIDTH 1 +#define FRF_AZ_TX_P1_PRI_EN_LBN 4 +#define FRF_AZ_TX_P1_PRI_EN_WIDTH 1 +#define FRF_AZ_TX_OWNERR_CTL_LBN 2 +#define FRF_AZ_TX_OWNERR_CTL_WIDTH 1 +#define FRF_AA_TX_NON_IP_DROP_DIS_LBN 1 +#define FRF_AA_TX_NON_IP_DROP_DIS_WIDTH 1 +#define FRF_AZ_TX_IP_ID_REP_EN_LBN 0 +#define FRF_AZ_TX_IP_ID_REP_EN_WIDTH 1 + +/* TX_PUSH_DROP_REG: Transmit push dropped register */ +#define FR_AZ_TX_PUSH_DROP 0x00000a60 +#define FRF_AZ_TX_PUSH_DROP_CNT_LBN 0 +#define FRF_AZ_TX_PUSH_DROP_CNT_WIDTH 32 + +/* TX_RESERVED_REG: Transmit configuration register */ +#define FR_AZ_TX_RESERVED 0x00000a80 +#define FRF_AZ_TX_EVT_CNT_LBN 121 +#define FRF_AZ_TX_EVT_CNT_WIDTH 7 +#define FRF_AZ_TX_PREF_AGE_CNT_LBN 119 +#define FRF_AZ_TX_PREF_AGE_CNT_WIDTH 2 +#define FRF_AZ_TX_RD_COMP_TMR_LBN 96 +#define FRF_AZ_TX_RD_COMP_TMR_WIDTH 23 +#define FRF_AZ_TX_PUSH_EN_LBN 89 +#define FRF_AZ_TX_PUSH_EN_WIDTH 1 +#define FRF_AZ_TX_PUSH_CHK_DIS_LBN 88 +#define FRF_AZ_TX_PUSH_CHK_DIS_WIDTH 1 +#define FRF_AZ_TX_D_FF_FULL_P0_LBN 85 +#define 
FRF_AZ_TX_D_FF_FULL_P0_WIDTH 1 +#define FRF_AZ_TX_DMAR_ST_P0_LBN 81 +#define FRF_AZ_TX_DMAR_ST_P0_WIDTH 1 +#define FRF_AZ_TX_DMAQ_ST_LBN 78 +#define FRF_AZ_TX_DMAQ_ST_WIDTH 1 +#define FRF_AZ_TX_RX_SPACER_LBN 64 +#define FRF_AZ_TX_RX_SPACER_WIDTH 8 +#define FRF_AZ_TX_DROP_ABORT_EN_LBN 60 +#define FRF_AZ_TX_DROP_ABORT_EN_WIDTH 1 +#define FRF_AZ_TX_SOFT_EVT_EN_LBN 59 +#define FRF_AZ_TX_SOFT_EVT_EN_WIDTH 1 +#define FRF_AZ_TX_PS_EVT_DIS_LBN 58 +#define FRF_AZ_TX_PS_EVT_DIS_WIDTH 1 +#define FRF_AZ_TX_RX_SPACER_EN_LBN 57 +#define FRF_AZ_TX_RX_SPACER_EN_WIDTH 1 +#define FRF_AZ_TX_XP_TIMER_LBN 52 +#define FRF_AZ_TX_XP_TIMER_WIDTH 5 +#define FRF_AZ_TX_PREF_SPACER_LBN 44 +#define FRF_AZ_TX_PREF_SPACER_WIDTH 8 +#define FRF_AZ_TX_PREF_WD_TMR_LBN 22 +#define FRF_AZ_TX_PREF_WD_TMR_WIDTH 22 +#define FRF_AZ_TX_ONLY1TAG_LBN 21 +#define FRF_AZ_TX_ONLY1TAG_WIDTH 1 +#define FRF_AZ_TX_PREF_THRESHOLD_LBN 19 +#define FRF_AZ_TX_PREF_THRESHOLD_WIDTH 2 +#define FRF_AZ_TX_ONE_PKT_PER_Q_LBN 18 +#define FRF_AZ_TX_ONE_PKT_PER_Q_WIDTH 1 +#define FRF_AZ_TX_DIS_NON_IP_EV_LBN 17 +#define FRF_AZ_TX_DIS_NON_IP_EV_WIDTH 1 +#define FRF_AA_TX_DMA_FF_THR_LBN 16 +#define FRF_AA_TX_DMA_FF_THR_WIDTH 1 +#define FRF_AZ_TX_DMA_SPACER_LBN 8 +#define FRF_AZ_TX_DMA_SPACER_WIDTH 8 +#define FRF_AA_TX_TCP_DIS_LBN 7 +#define FRF_AA_TX_TCP_DIS_WIDTH 1 +#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_LBN 7 +#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_WIDTH 1 +#define FRF_AA_TX_IP_DIS_LBN 6 +#define FRF_AA_TX_IP_DIS_WIDTH 1 +#define FRF_AZ_TX_MAX_CPL_LBN 2 +#define FRF_AZ_TX_MAX_CPL_WIDTH 2 +#define FFE_AZ_TX_MAX_CPL_16 3 +#define FFE_AZ_TX_MAX_CPL_8 2 +#define FFE_AZ_TX_MAX_CPL_4 1 +#define FFE_AZ_TX_MAX_CPL_NOLIMIT 0 +#define FRF_AZ_TX_MAX_PREF_LBN 0 +#define FRF_AZ_TX_MAX_PREF_WIDTH 2 +#define FFE_AZ_TX_MAX_PREF_32 3 +#define FFE_AZ_TX_MAX_PREF_16 2 +#define FFE_AZ_TX_MAX_PREF_8 1 +#define FFE_AZ_TX_MAX_PREF_OFF 0 + +/* TX_PACE_REG: Transmit pace control register */ +#define FR_BZ_TX_PACE 0x00000a90 +#define FRF_BZ_TX_PACE_SB_NOT_AF_LBN 19 +#define FRF_BZ_TX_PACE_SB_NOT_AF_WIDTH 10 +#define FRF_BZ_TX_PACE_SB_AF_LBN 9 +#define FRF_BZ_TX_PACE_SB_AF_WIDTH 10 +#define FRF_BZ_TX_PACE_FB_BASE_LBN 5 +#define FRF_BZ_TX_PACE_FB_BASE_WIDTH 4 +#define FRF_BZ_TX_PACE_BIN_TH_LBN 0 +#define FRF_BZ_TX_PACE_BIN_TH_WIDTH 5 + +/* TX_PACE_DROP_QID_REG: PACE Drop QID Counter */ +#define FR_BZ_TX_PACE_DROP_QID 0x00000aa0 +#define FRF_BZ_TX_PACE_QID_DRP_CNT_LBN 0 +#define FRF_BZ_TX_PACE_QID_DRP_CNT_WIDTH 16 + +/* TX_VLAN_REG: Transmit VLAN tag register */ +#define FR_BB_TX_VLAN 0x00000ae0 +#define FRF_BB_TX_VLAN_EN_LBN 127 +#define FRF_BB_TX_VLAN_EN_WIDTH 1 +#define FRF_BB_TX_VLAN7_PORT1_EN_LBN 125 +#define FRF_BB_TX_VLAN7_PORT1_EN_WIDTH 1 +#define FRF_BB_TX_VLAN7_PORT0_EN_LBN 124 +#define FRF_BB_TX_VLAN7_PORT0_EN_WIDTH 1 +#define FRF_BB_TX_VLAN7_LBN 112 +#define FRF_BB_TX_VLAN7_WIDTH 12 +#define FRF_BB_TX_VLAN6_PORT1_EN_LBN 109 +#define FRF_BB_TX_VLAN6_PORT1_EN_WIDTH 1 +#define FRF_BB_TX_VLAN6_PORT0_EN_LBN 108 +#define FRF_BB_TX_VLAN6_PORT0_EN_WIDTH 1 +#define FRF_BB_TX_VLAN6_LBN 96 +#define FRF_BB_TX_VLAN6_WIDTH 12 +#define FRF_BB_TX_VLAN5_PORT1_EN_LBN 93 +#define FRF_BB_TX_VLAN5_PORT1_EN_WIDTH 1 +#define FRF_BB_TX_VLAN5_PORT0_EN_LBN 92 +#define FRF_BB_TX_VLAN5_PORT0_EN_WIDTH 1 +#define FRF_BB_TX_VLAN5_LBN 80 +#define FRF_BB_TX_VLAN5_WIDTH 12 +#define FRF_BB_TX_VLAN4_PORT1_EN_LBN 77 +#define FRF_BB_TX_VLAN4_PORT1_EN_WIDTH 1 +#define FRF_BB_TX_VLAN4_PORT0_EN_LBN 76 +#define FRF_BB_TX_VLAN4_PORT0_EN_WIDTH 1 +#define FRF_BB_TX_VLAN4_LBN 64 +#define FRF_BB_TX_VLAN4_WIDTH 12 +#define 
FRF_BB_TX_VLAN3_PORT1_EN_LBN 61 +#define FRF_BB_TX_VLAN3_PORT1_EN_WIDTH 1 +#define FRF_BB_TX_VLAN3_PORT0_EN_LBN 60 +#define FRF_BB_TX_VLAN3_PORT0_EN_WIDTH 1 +#define FRF_BB_TX_VLAN3_LBN 48 +#define FRF_BB_TX_VLAN3_WIDTH 12 +#define FRF_BB_TX_VLAN2_PORT1_EN_LBN 45 +#define FRF_BB_TX_VLAN2_PORT1_EN_WIDTH 1 +#define FRF_BB_TX_VLAN2_PORT0_EN_LBN 44 +#define FRF_BB_TX_VLAN2_PORT0_EN_WIDTH 1 +#define FRF_BB_TX_VLAN2_LBN 32 +#define FRF_BB_TX_VLAN2_WIDTH 12 +#define FRF_BB_TX_VLAN1_PORT1_EN_LBN 29 +#define FRF_BB_TX_VLAN1_PORT1_EN_WIDTH 1 +#define FRF_BB_TX_VLAN1_PORT0_EN_LBN 28 +#define FRF_BB_TX_VLAN1_PORT0_EN_WIDTH 1 +#define FRF_BB_TX_VLAN1_LBN 16 +#define FRF_BB_TX_VLAN1_WIDTH 12 +#define FRF_BB_TX_VLAN0_PORT1_EN_LBN 13 +#define FRF_BB_TX_VLAN0_PORT1_EN_WIDTH 1 +#define FRF_BB_TX_VLAN0_PORT0_EN_LBN 12 +#define FRF_BB_TX_VLAN0_PORT0_EN_WIDTH 1 +#define FRF_BB_TX_VLAN0_LBN 0 +#define FRF_BB_TX_VLAN0_WIDTH 12 + +/* TX_IPFIL_PORTEN_REG: Transmit filter control register */ +#define FR_BZ_TX_IPFIL_PORTEN 0x00000af0 +#define FRF_BZ_TX_MADR0_FIL_EN_LBN 64 +#define FRF_BZ_TX_MADR0_FIL_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL31_PORT_EN_LBN 62 +#define FRF_BB_TX_IPFIL31_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL30_PORT_EN_LBN 60 +#define FRF_BB_TX_IPFIL30_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL29_PORT_EN_LBN 58 +#define FRF_BB_TX_IPFIL29_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL28_PORT_EN_LBN 56 +#define FRF_BB_TX_IPFIL28_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL27_PORT_EN_LBN 54 +#define FRF_BB_TX_IPFIL27_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL26_PORT_EN_LBN 52 +#define FRF_BB_TX_IPFIL26_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL25_PORT_EN_LBN 50 +#define FRF_BB_TX_IPFIL25_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL24_PORT_EN_LBN 48 +#define FRF_BB_TX_IPFIL24_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL23_PORT_EN_LBN 46 +#define FRF_BB_TX_IPFIL23_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL22_PORT_EN_LBN 44 +#define FRF_BB_TX_IPFIL22_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL21_PORT_EN_LBN 42 +#define FRF_BB_TX_IPFIL21_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL20_PORT_EN_LBN 40 +#define FRF_BB_TX_IPFIL20_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL19_PORT_EN_LBN 38 +#define FRF_BB_TX_IPFIL19_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL18_PORT_EN_LBN 36 +#define FRF_BB_TX_IPFIL18_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL17_PORT_EN_LBN 34 +#define FRF_BB_TX_IPFIL17_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL16_PORT_EN_LBN 32 +#define FRF_BB_TX_IPFIL16_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL15_PORT_EN_LBN 30 +#define FRF_BB_TX_IPFIL15_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL14_PORT_EN_LBN 28 +#define FRF_BB_TX_IPFIL14_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL13_PORT_EN_LBN 26 +#define FRF_BB_TX_IPFIL13_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL12_PORT_EN_LBN 24 +#define FRF_BB_TX_IPFIL12_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL11_PORT_EN_LBN 22 +#define FRF_BB_TX_IPFIL11_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL10_PORT_EN_LBN 20 +#define FRF_BB_TX_IPFIL10_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL9_PORT_EN_LBN 18 +#define FRF_BB_TX_IPFIL9_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL8_PORT_EN_LBN 16 +#define FRF_BB_TX_IPFIL8_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL7_PORT_EN_LBN 14 +#define FRF_BB_TX_IPFIL7_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL6_PORT_EN_LBN 12 +#define FRF_BB_TX_IPFIL6_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL5_PORT_EN_LBN 10 +#define FRF_BB_TX_IPFIL5_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL4_PORT_EN_LBN 8 +#define FRF_BB_TX_IPFIL4_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL3_PORT_EN_LBN 6 +#define FRF_BB_TX_IPFIL3_PORT_EN_WIDTH 1 
+#define FRF_BB_TX_IPFIL2_PORT_EN_LBN 4 +#define FRF_BB_TX_IPFIL2_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL1_PORT_EN_LBN 2 +#define FRF_BB_TX_IPFIL1_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL0_PORT_EN_LBN 0 +#define FRF_BB_TX_IPFIL0_PORT_EN_WIDTH 1 + +/* TX_IPFIL_TBL: Transmit IP source address filter table */ +#define FR_BB_TX_IPFIL_TBL 0x00000b00 +#define FR_BB_TX_IPFIL_TBL_STEP 16 +#define FR_BB_TX_IPFIL_TBL_ROWS 16 +#define FRF_BB_TX_IPFIL_MASK_1_LBN 96 +#define FRF_BB_TX_IPFIL_MASK_1_WIDTH 32 +#define FRF_BB_TX_IP_SRC_ADR_1_LBN 64 +#define FRF_BB_TX_IP_SRC_ADR_1_WIDTH 32 +#define FRF_BB_TX_IPFIL_MASK_0_LBN 32 +#define FRF_BB_TX_IPFIL_MASK_0_WIDTH 32 +#define FRF_BB_TX_IP_SRC_ADR_0_LBN 0 +#define FRF_BB_TX_IP_SRC_ADR_0_WIDTH 32 + +/* MD_TXD_REG: PHY management transmit data register */ +#define FR_AB_MD_TXD 0x00000c00 +#define FRF_AB_MD_TXD_LBN 0 +#define FRF_AB_MD_TXD_WIDTH 16 + +/* MD_RXD_REG: PHY management receive data register */ +#define FR_AB_MD_RXD 0x00000c10 +#define FRF_AB_MD_RXD_LBN 0 +#define FRF_AB_MD_RXD_WIDTH 16 + +/* MD_CS_REG: PHY management configuration & status register */ +#define FR_AB_MD_CS 0x00000c20 +#define FRF_AB_MD_RD_EN_CMD_LBN 15 +#define FRF_AB_MD_RD_EN_CMD_WIDTH 1 +#define FRF_AB_MD_WR_EN_CMD_LBN 14 +#define FRF_AB_MD_WR_EN_CMD_WIDTH 1 +#define FRF_AB_MD_ADDR_CMD_LBN 13 +#define FRF_AB_MD_ADDR_CMD_WIDTH 1 +#define FRF_AB_MD_PT_LBN 7 +#define FRF_AB_MD_PT_WIDTH 3 +#define FRF_AB_MD_PL_LBN 6 +#define FRF_AB_MD_PL_WIDTH 1 +#define FRF_AB_MD_INT_CLR_LBN 5 +#define FRF_AB_MD_INT_CLR_WIDTH 1 +#define FRF_AB_MD_GC_LBN 4 +#define FRF_AB_MD_GC_WIDTH 1 +#define FRF_AB_MD_PRSP_LBN 3 +#define FRF_AB_MD_PRSP_WIDTH 1 +#define FRF_AB_MD_RIC_LBN 2 +#define FRF_AB_MD_RIC_WIDTH 1 +#define FRF_AB_MD_RDC_LBN 1 +#define FRF_AB_MD_RDC_WIDTH 1 +#define FRF_AB_MD_WRC_LBN 0 +#define FRF_AB_MD_WRC_WIDTH 1 + +/* MD_PHY_ADR_REG: PHY management PHY address register */ +#define FR_AB_MD_PHY_ADR 0x00000c30 +#define FRF_AB_MD_PHY_ADR_LBN 0 +#define FRF_AB_MD_PHY_ADR_WIDTH 16 + +/* MD_ID_REG: PHY management ID register */ +#define FR_AB_MD_ID 0x00000c40 +#define FRF_AB_MD_PRT_ADR_LBN 11 +#define FRF_AB_MD_PRT_ADR_WIDTH 5 +#define FRF_AB_MD_DEV_ADR_LBN 6 +#define FRF_AB_MD_DEV_ADR_WIDTH 5 + +/* MD_STAT_REG: PHY management status & mask register */ +#define FR_AB_MD_STAT 0x00000c50 +#define FRF_AB_MD_PINT_LBN 4 +#define FRF_AB_MD_PINT_WIDTH 1 +#define FRF_AB_MD_DONE_LBN 3 +#define FRF_AB_MD_DONE_WIDTH 1 +#define FRF_AB_MD_BSERR_LBN 2 +#define FRF_AB_MD_BSERR_WIDTH 1 +#define FRF_AB_MD_LNFL_LBN 1 +#define FRF_AB_MD_LNFL_WIDTH 1 +#define FRF_AB_MD_BSY_LBN 0 +#define FRF_AB_MD_BSY_WIDTH 1 + +/* MAC_STAT_DMA_REG: Port MAC statistical counter DMA register */ +#define FR_AB_MAC_STAT_DMA 0x00000c60 +#define FRF_AB_MAC_STAT_DMA_CMD_LBN 48 +#define FRF_AB_MAC_STAT_DMA_CMD_WIDTH 1 +#define FRF_AB_MAC_STAT_DMA_ADR_LBN 0 +#define FRF_AB_MAC_STAT_DMA_ADR_WIDTH 48 + +/* MAC_CTRL_REG: Port MAC control register */ +#define FR_AB_MAC_CTRL 0x00000c80 +#define FRF_AB_MAC_XOFF_VAL_LBN 16 +#define FRF_AB_MAC_XOFF_VAL_WIDTH 16 +#define FRF_BB_TXFIFO_DRAIN_EN_LBN 7 +#define FRF_BB_TXFIFO_DRAIN_EN_WIDTH 1 +#define FRF_AB_MAC_XG_DISTXCRC_LBN 5 +#define FRF_AB_MAC_XG_DISTXCRC_WIDTH 1 +#define FRF_AB_MAC_BCAD_ACPT_LBN 4 +#define FRF_AB_MAC_BCAD_ACPT_WIDTH 1 +#define FRF_AB_MAC_UC_PROM_LBN 3 +#define FRF_AB_MAC_UC_PROM_WIDTH 1 +#define FRF_AB_MAC_LINK_STATUS_LBN 2 +#define FRF_AB_MAC_LINK_STATUS_WIDTH 1 +#define FRF_AB_MAC_SPEED_LBN 0 +#define FRF_AB_MAC_SPEED_WIDTH 2 +#define FFE_AB_MAC_SPEED_10G 3 +#define 
FFE_AB_MAC_SPEED_1G 2 +#define FFE_AB_MAC_SPEED_100M 1 +#define FFE_AB_MAC_SPEED_10M 0 + +/* GEN_MODE_REG: General Purpose mode register (external interrupt mask) */ +#define FR_BB_GEN_MODE 0x00000c90 +#define FRF_BB_XFP_PHY_INT_POL_SEL_LBN 3 +#define FRF_BB_XFP_PHY_INT_POL_SEL_WIDTH 1 +#define FRF_BB_XG_PHY_INT_POL_SEL_LBN 2 +#define FRF_BB_XG_PHY_INT_POL_SEL_WIDTH 1 +#define FRF_BB_XFP_PHY_INT_MASK_LBN 1 +#define FRF_BB_XFP_PHY_INT_MASK_WIDTH 1 +#define FRF_BB_XG_PHY_INT_MASK_LBN 0 +#define FRF_BB_XG_PHY_INT_MASK_WIDTH 1 + +/* MAC_MC_HASH_REG0: Multicast address hash table */ +#define FR_AB_MAC_MC_HASH_REG0 0x00000ca0 +#define FRF_AB_MAC_MCAST_HASH0_LBN 0 +#define FRF_AB_MAC_MCAST_HASH0_WIDTH 128 + +/* MAC_MC_HASH_REG1: Multicast address hash table */ +#define FR_AB_MAC_MC_HASH_REG1 0x00000cb0 +#define FRF_AB_MAC_MCAST_HASH1_LBN 0 +#define FRF_AB_MAC_MCAST_HASH1_WIDTH 128 + +/* GM_CFG1_REG: GMAC configuration register 1 */ +#define FR_AB_GM_CFG1 0x00000e00 +#define FRF_AB_GM_SW_RST_LBN 31 +#define FRF_AB_GM_SW_RST_WIDTH 1 +#define FRF_AB_GM_SIM_RST_LBN 30 +#define FRF_AB_GM_SIM_RST_WIDTH 1 +#define FRF_AB_GM_RST_RX_MAC_CTL_LBN 19 +#define FRF_AB_GM_RST_RX_MAC_CTL_WIDTH 1 +#define FRF_AB_GM_RST_TX_MAC_CTL_LBN 18 +#define FRF_AB_GM_RST_TX_MAC_CTL_WIDTH 1 +#define FRF_AB_GM_RST_RX_FUNC_LBN 17 +#define FRF_AB_GM_RST_RX_FUNC_WIDTH 1 +#define FRF_AB_GM_RST_TX_FUNC_LBN 16 +#define FRF_AB_GM_RST_TX_FUNC_WIDTH 1 +#define FRF_AB_GM_LOOP_LBN 8 +#define FRF_AB_GM_LOOP_WIDTH 1 +#define FRF_AB_GM_RX_FC_EN_LBN 5 +#define FRF_AB_GM_RX_FC_EN_WIDTH 1 +#define FRF_AB_GM_TX_FC_EN_LBN 4 +#define FRF_AB_GM_TX_FC_EN_WIDTH 1 +#define FRF_AB_GM_SYNC_RXEN_LBN 3 +#define FRF_AB_GM_SYNC_RXEN_WIDTH 1 +#define FRF_AB_GM_RX_EN_LBN 2 +#define FRF_AB_GM_RX_EN_WIDTH 1 +#define FRF_AB_GM_SYNC_TXEN_LBN 1 +#define FRF_AB_GM_SYNC_TXEN_WIDTH 1 +#define FRF_AB_GM_TX_EN_LBN 0 +#define FRF_AB_GM_TX_EN_WIDTH 1 + +/* GM_CFG2_REG: GMAC configuration register 2 */ +#define FR_AB_GM_CFG2 0x00000e10 +#define FRF_AB_GM_PAMBL_LEN_LBN 12 +#define FRF_AB_GM_PAMBL_LEN_WIDTH 4 +#define FRF_AB_GM_IF_MODE_LBN 8 +#define FRF_AB_GM_IF_MODE_WIDTH 2 +#define FFE_AB_IF_MODE_BYTE_MODE 2 +#define FFE_AB_IF_MODE_NIBBLE_MODE 1 +#define FRF_AB_GM_HUGE_FRM_EN_LBN 5 +#define FRF_AB_GM_HUGE_FRM_EN_WIDTH 1 +#define FRF_AB_GM_LEN_CHK_LBN 4 +#define FRF_AB_GM_LEN_CHK_WIDTH 1 +#define FRF_AB_GM_PAD_CRC_EN_LBN 2 +#define FRF_AB_GM_PAD_CRC_EN_WIDTH 1 +#define FRF_AB_GM_CRC_EN_LBN 1 +#define FRF_AB_GM_CRC_EN_WIDTH 1 +#define FRF_AB_GM_FD_LBN 0 +#define FRF_AB_GM_FD_WIDTH 1 + +/* GM_IPG_REG: GMAC IPG register */ +#define FR_AB_GM_IPG 0x00000e20 +#define FRF_AB_GM_NONB2B_IPG1_LBN 24 +#define FRF_AB_GM_NONB2B_IPG1_WIDTH 7 +#define FRF_AB_GM_NONB2B_IPG2_LBN 16 +#define FRF_AB_GM_NONB2B_IPG2_WIDTH 7 +#define FRF_AB_GM_MIN_IPG_ENF_LBN 8 +#define FRF_AB_GM_MIN_IPG_ENF_WIDTH 8 +#define FRF_AB_GM_B2B_IPG_LBN 0 +#define FRF_AB_GM_B2B_IPG_WIDTH 7 + +/* GM_HD_REG: GMAC half duplex register */ +#define FR_AB_GM_HD 0x00000e30 +#define FRF_AB_GM_ALT_BOFF_VAL_LBN 20 +#define FRF_AB_GM_ALT_BOFF_VAL_WIDTH 4 +#define FRF_AB_GM_ALT_BOFF_EN_LBN 19 +#define FRF_AB_GM_ALT_BOFF_EN_WIDTH 1 +#define FRF_AB_GM_BP_NO_BOFF_LBN 18 +#define FRF_AB_GM_BP_NO_BOFF_WIDTH 1 +#define FRF_AB_GM_DIS_BOFF_LBN 17 +#define FRF_AB_GM_DIS_BOFF_WIDTH 1 +#define FRF_AB_GM_EXDEF_TX_EN_LBN 16 +#define FRF_AB_GM_EXDEF_TX_EN_WIDTH 1 +#define FRF_AB_GM_RTRY_LIMIT_LBN 12 +#define FRF_AB_GM_RTRY_LIMIT_WIDTH 4 +#define FRF_AB_GM_COL_WIN_LBN 0 +#define FRF_AB_GM_COL_WIN_WIDTH 10 + +/* GM_MAX_FLEN_REG: GMAC maximum 
frame length register */ +#define FR_AB_GM_MAX_FLEN 0x00000e40 +#define FRF_AB_GM_MAX_FLEN_LBN 0 +#define FRF_AB_GM_MAX_FLEN_WIDTH 16 + +/* GM_TEST_REG: GMAC test register */ +#define FR_AB_GM_TEST 0x00000e70 +#define FRF_AB_GM_MAX_BOFF_LBN 3 +#define FRF_AB_GM_MAX_BOFF_WIDTH 1 +#define FRF_AB_GM_REG_TX_FLOW_EN_LBN 2 +#define FRF_AB_GM_REG_TX_FLOW_EN_WIDTH 1 +#define FRF_AB_GM_TEST_PAUSE_LBN 1 +#define FRF_AB_GM_TEST_PAUSE_WIDTH 1 +#define FRF_AB_GM_SHORT_SLOT_LBN 0 +#define FRF_AB_GM_SHORT_SLOT_WIDTH 1 + +/* GM_ADR1_REG: GMAC station address register 1 */ +#define FR_AB_GM_ADR1 0x00000f00 +#define FRF_AB_GM_ADR_B0_LBN 24 +#define FRF_AB_GM_ADR_B0_WIDTH 8 +#define FRF_AB_GM_ADR_B1_LBN 16 +#define FRF_AB_GM_ADR_B1_WIDTH 8 +#define FRF_AB_GM_ADR_B2_LBN 8 +#define FRF_AB_GM_ADR_B2_WIDTH 8 +#define FRF_AB_GM_ADR_B3_LBN 0 +#define FRF_AB_GM_ADR_B3_WIDTH 8 + +/* GM_ADR2_REG: GMAC station address register 2 */ +#define FR_AB_GM_ADR2 0x00000f10 +#define FRF_AB_GM_ADR_B4_LBN 24 +#define FRF_AB_GM_ADR_B4_WIDTH 8 +#define FRF_AB_GM_ADR_B5_LBN 16 +#define FRF_AB_GM_ADR_B5_WIDTH 8 + +/* GMF_CFG0_REG: GMAC FIFO configuration register 0 */ +#define FR_AB_GMF_CFG0 0x00000f20 +#define FRF_AB_GMF_FTFENRPLY_LBN 20 +#define FRF_AB_GMF_FTFENRPLY_WIDTH 1 +#define FRF_AB_GMF_STFENRPLY_LBN 19 +#define FRF_AB_GMF_STFENRPLY_WIDTH 1 +#define FRF_AB_GMF_FRFENRPLY_LBN 18 +#define FRF_AB_GMF_FRFENRPLY_WIDTH 1 +#define FRF_AB_GMF_SRFENRPLY_LBN 17 +#define FRF_AB_GMF_SRFENRPLY_WIDTH 1 +#define FRF_AB_GMF_WTMENRPLY_LBN 16 +#define FRF_AB_GMF_WTMENRPLY_WIDTH 1 +#define FRF_AB_GMF_FTFENREQ_LBN 12 +#define FRF_AB_GMF_FTFENREQ_WIDTH 1 +#define FRF_AB_GMF_STFENREQ_LBN 11 +#define FRF_AB_GMF_STFENREQ_WIDTH 1 +#define FRF_AB_GMF_FRFENREQ_LBN 10 +#define FRF_AB_GMF_FRFENREQ_WIDTH 1 +#define FRF_AB_GMF_SRFENREQ_LBN 9 +#define FRF_AB_GMF_SRFENREQ_WIDTH 1 +#define FRF_AB_GMF_WTMENREQ_LBN 8 +#define FRF_AB_GMF_WTMENREQ_WIDTH 1 +#define FRF_AB_GMF_HSTRSTFT_LBN 4 +#define FRF_AB_GMF_HSTRSTFT_WIDTH 1 +#define FRF_AB_GMF_HSTRSTST_LBN 3 +#define FRF_AB_GMF_HSTRSTST_WIDTH 1 +#define FRF_AB_GMF_HSTRSTFR_LBN 2 +#define FRF_AB_GMF_HSTRSTFR_WIDTH 1 +#define FRF_AB_GMF_HSTRSTSR_LBN 1 +#define FRF_AB_GMF_HSTRSTSR_WIDTH 1 +#define FRF_AB_GMF_HSTRSTWT_LBN 0 +#define FRF_AB_GMF_HSTRSTWT_WIDTH 1 + +/* GMF_CFG1_REG: GMAC FIFO configuration register 1 */ +#define FR_AB_GMF_CFG1 0x00000f30 +#define FRF_AB_GMF_CFGFRTH_LBN 16 +#define FRF_AB_GMF_CFGFRTH_WIDTH 5 +#define FRF_AB_GMF_CFGXOFFRTX_LBN 0 +#define FRF_AB_GMF_CFGXOFFRTX_WIDTH 16 + +/* GMF_CFG2_REG: GMAC FIFO configuration register 2 */ +#define FR_AB_GMF_CFG2 0x00000f40 +#define FRF_AB_GMF_CFGHWM_LBN 16 +#define FRF_AB_GMF_CFGHWM_WIDTH 6 +#define FRF_AB_GMF_CFGLWM_LBN 0 +#define FRF_AB_GMF_CFGLWM_WIDTH 6 + +/* GMF_CFG3_REG: GMAC FIFO configuration register 3 */ +#define FR_AB_GMF_CFG3 0x00000f50 +#define FRF_AB_GMF_CFGHWMFT_LBN 16 +#define FRF_AB_GMF_CFGHWMFT_WIDTH 6 +#define FRF_AB_GMF_CFGFTTH_LBN 0 +#define FRF_AB_GMF_CFGFTTH_WIDTH 6 + +/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */ +#define FR_AB_GMF_CFG4 0x00000f60 +#define FRF_AB_GMF_HSTFLTRFRM_LBN 0 +#define FRF_AB_GMF_HSTFLTRFRM_WIDTH 18 + +/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */ +#define FR_AB_GMF_CFG5 0x00000f70 +#define FRF_AB_GMF_CFGHDPLX_LBN 22 +#define FRF_AB_GMF_CFGHDPLX_WIDTH 1 +#define FRF_AB_GMF_SRFULL_LBN 21 +#define FRF_AB_GMF_SRFULL_WIDTH 1 +#define FRF_AB_GMF_HSTSRFULLCLR_LBN 20 +#define FRF_AB_GMF_HSTSRFULLCLR_WIDTH 1 +#define FRF_AB_GMF_CFGBYTMODE_LBN 19 +#define FRF_AB_GMF_CFGBYTMODE_WIDTH 1 +#define 
FRF_AB_GMF_HSTDRPLT64_LBN 18 +#define FRF_AB_GMF_HSTDRPLT64_WIDTH 1 +#define FRF_AB_GMF_HSTFLTRFRMDC_LBN 0 +#define FRF_AB_GMF_HSTFLTRFRMDC_WIDTH 18 + +/* TX_SRC_MAC_TBL: Transmit IP source address filter table */ +#define FR_BB_TX_SRC_MAC_TBL 0x00001000 +#define FR_BB_TX_SRC_MAC_TBL_STEP 16 +#define FR_BB_TX_SRC_MAC_TBL_ROWS 16 +#define FRF_BB_TX_SRC_MAC_ADR_1_LBN 64 +#define FRF_BB_TX_SRC_MAC_ADR_1_WIDTH 48 +#define FRF_BB_TX_SRC_MAC_ADR_0_LBN 0 +#define FRF_BB_TX_SRC_MAC_ADR_0_WIDTH 48 + +/* TX_SRC_MAC_CTL_REG: Transmit MAC source address filter control */ +#define FR_BB_TX_SRC_MAC_CTL 0x00001100 +#define FRF_BB_TX_SRC_DROP_CTR_LBN 16 +#define FRF_BB_TX_SRC_DROP_CTR_WIDTH 16 +#define FRF_BB_TX_SRC_FLTR_EN_LBN 15 +#define FRF_BB_TX_SRC_FLTR_EN_WIDTH 1 +#define FRF_BB_TX_DROP_CTR_CLR_LBN 12 +#define FRF_BB_TX_DROP_CTR_CLR_WIDTH 1 +#define FRF_BB_TX_MAC_QID_SEL_LBN 0 +#define FRF_BB_TX_MAC_QID_SEL_WIDTH 3 + +/* XM_ADR_LO_REG: XGMAC address register low */ +#define FR_AB_XM_ADR_LO 0x00001200 +#define FRF_AB_XM_ADR_LO_LBN 0 +#define FRF_AB_XM_ADR_LO_WIDTH 32 + +/* XM_ADR_HI_REG: XGMAC address register high */ +#define FR_AB_XM_ADR_HI 0x00001210 +#define FRF_AB_XM_ADR_HI_LBN 0 +#define FRF_AB_XM_ADR_HI_WIDTH 16 + +/* XM_GLB_CFG_REG: XGMAC global configuration */ +#define FR_AB_XM_GLB_CFG 0x00001220 +#define FRF_AB_XM_RMTFLT_GEN_LBN 17 +#define FRF_AB_XM_RMTFLT_GEN_WIDTH 1 +#define FRF_AB_XM_DEBUG_MODE_LBN 16 +#define FRF_AB_XM_DEBUG_MODE_WIDTH 1 +#define FRF_AB_XM_RX_STAT_EN_LBN 11 +#define FRF_AB_XM_RX_STAT_EN_WIDTH 1 +#define FRF_AB_XM_TX_STAT_EN_LBN 10 +#define FRF_AB_XM_TX_STAT_EN_WIDTH 1 +#define FRF_AB_XM_RX_JUMBO_MODE_LBN 6 +#define FRF_AB_XM_RX_JUMBO_MODE_WIDTH 1 +#define FRF_AB_XM_WAN_MODE_LBN 5 +#define FRF_AB_XM_WAN_MODE_WIDTH 1 +#define FRF_AB_XM_INTCLR_MODE_LBN 3 +#define FRF_AB_XM_INTCLR_MODE_WIDTH 1 +#define FRF_AB_XM_CORE_RST_LBN 0 +#define FRF_AB_XM_CORE_RST_WIDTH 1 + +/* XM_TX_CFG_REG: XGMAC transmit configuration */ +#define FR_AB_XM_TX_CFG 0x00001230 +#define FRF_AB_XM_TX_PROG_LBN 24 +#define FRF_AB_XM_TX_PROG_WIDTH 1 +#define FRF_AB_XM_IPG_LBN 16 +#define FRF_AB_XM_IPG_WIDTH 4 +#define FRF_AB_XM_FCNTL_LBN 10 +#define FRF_AB_XM_FCNTL_WIDTH 1 +#define FRF_AB_XM_TXCRC_LBN 8 +#define FRF_AB_XM_TXCRC_WIDTH 1 +#define FRF_AB_XM_EDRC_LBN 6 +#define FRF_AB_XM_EDRC_WIDTH 1 +#define FRF_AB_XM_AUTO_PAD_LBN 5 +#define FRF_AB_XM_AUTO_PAD_WIDTH 1 +#define FRF_AB_XM_TX_PRMBL_LBN 2 +#define FRF_AB_XM_TX_PRMBL_WIDTH 1 +#define FRF_AB_XM_TXEN_LBN 1 +#define FRF_AB_XM_TXEN_WIDTH 1 +#define FRF_AB_XM_TX_RST_LBN 0 +#define FRF_AB_XM_TX_RST_WIDTH 1 + +/* XM_RX_CFG_REG: XGMAC receive configuration */ +#define FR_AB_XM_RX_CFG 0x00001240 +#define FRF_AB_XM_PASS_LENERR_LBN 26 +#define FRF_AB_XM_PASS_LENERR_WIDTH 1 +#define FRF_AB_XM_PASS_CRC_ERR_LBN 25 +#define FRF_AB_XM_PASS_CRC_ERR_WIDTH 1 +#define FRF_AB_XM_PASS_PRMBLE_ERR_LBN 24 +#define FRF_AB_XM_PASS_PRMBLE_ERR_WIDTH 1 +#define FRF_AB_XM_REJ_BCAST_LBN 20 +#define FRF_AB_XM_REJ_BCAST_WIDTH 1 +#define FRF_AB_XM_ACPT_ALL_MCAST_LBN 11 +#define FRF_AB_XM_ACPT_ALL_MCAST_WIDTH 1 +#define FRF_AB_XM_ACPT_ALL_UCAST_LBN 9 +#define FRF_AB_XM_ACPT_ALL_UCAST_WIDTH 1 +#define FRF_AB_XM_AUTO_DEPAD_LBN 8 +#define FRF_AB_XM_AUTO_DEPAD_WIDTH 1 +#define FRF_AB_XM_RXCRC_LBN 3 +#define FRF_AB_XM_RXCRC_WIDTH 1 +#define FRF_AB_XM_RX_PRMBL_LBN 2 +#define FRF_AB_XM_RX_PRMBL_WIDTH 1 +#define FRF_AB_XM_RXEN_LBN 1 +#define FRF_AB_XM_RXEN_WIDTH 1 +#define FRF_AB_XM_RX_RST_LBN 0 +#define FRF_AB_XM_RX_RST_WIDTH 1 + +/* XM_MGT_INT_MASK: documentation to be written for 
sum_XM_MGT_INT_MASK */ +#define FR_AB_XM_MGT_INT_MASK 0x00001250 +#define FRF_AB_XM_MSK_STA_INTR_LBN 16 +#define FRF_AB_XM_MSK_STA_INTR_WIDTH 1 +#define FRF_AB_XM_MSK_STAT_CNTR_HF_LBN 9 +#define FRF_AB_XM_MSK_STAT_CNTR_HF_WIDTH 1 +#define FRF_AB_XM_MSK_STAT_CNTR_OF_LBN 8 +#define FRF_AB_XM_MSK_STAT_CNTR_OF_WIDTH 1 +#define FRF_AB_XM_MSK_PRMBLE_ERR_LBN 2 +#define FRF_AB_XM_MSK_PRMBLE_ERR_WIDTH 1 +#define FRF_AB_XM_MSK_RMTFLT_LBN 1 +#define FRF_AB_XM_MSK_RMTFLT_WIDTH 1 +#define FRF_AB_XM_MSK_LCLFLT_LBN 0 +#define FRF_AB_XM_MSK_LCLFLT_WIDTH 1 + +/* XM_FC_REG: XGMAC flow control register */ +#define FR_AB_XM_FC 0x00001270 +#define FRF_AB_XM_PAUSE_TIME_LBN 16 +#define FRF_AB_XM_PAUSE_TIME_WIDTH 16 +#define FRF_AB_XM_RX_MAC_STAT_LBN 11 +#define FRF_AB_XM_RX_MAC_STAT_WIDTH 1 +#define FRF_AB_XM_TX_MAC_STAT_LBN 10 +#define FRF_AB_XM_TX_MAC_STAT_WIDTH 1 +#define FRF_AB_XM_MCNTL_PASS_LBN 8 +#define FRF_AB_XM_MCNTL_PASS_WIDTH 2 +#define FRF_AB_XM_REJ_CNTL_UCAST_LBN 6 +#define FRF_AB_XM_REJ_CNTL_UCAST_WIDTH 1 +#define FRF_AB_XM_REJ_CNTL_MCAST_LBN 5 +#define FRF_AB_XM_REJ_CNTL_MCAST_WIDTH 1 +#define FRF_AB_XM_ZPAUSE_LBN 2 +#define FRF_AB_XM_ZPAUSE_WIDTH 1 +#define FRF_AB_XM_XMIT_PAUSE_LBN 1 +#define FRF_AB_XM_XMIT_PAUSE_WIDTH 1 +#define FRF_AB_XM_DIS_FCNTL_LBN 0 +#define FRF_AB_XM_DIS_FCNTL_WIDTH 1 + +/* XM_PAUSE_TIME_REG: XGMAC pause time register */ +#define FR_AB_XM_PAUSE_TIME 0x00001290 +#define FRF_AB_XM_TX_PAUSE_CNT_LBN 16 +#define FRF_AB_XM_TX_PAUSE_CNT_WIDTH 16 +#define FRF_AB_XM_RX_PAUSE_CNT_LBN 0 +#define FRF_AB_XM_RX_PAUSE_CNT_WIDTH 16 + +/* XM_TX_PARAM_REG: XGMAC transmit parameter register */ +#define FR_AB_XM_TX_PARAM 0x000012d0 +#define FRF_AB_XM_TX_JUMBO_MODE_LBN 31 +#define FRF_AB_XM_TX_JUMBO_MODE_WIDTH 1 +#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_LBN 19 +#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH 11 +#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN 16 +#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH 3 +#define FRF_AB_XM_PAD_CHAR_LBN 0 +#define FRF_AB_XM_PAD_CHAR_WIDTH 8 + +/* XM_RX_PARAM_REG: XGMAC receive parameter register */ +#define FR_AB_XM_RX_PARAM 0x000012e0 +#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_LBN 3 +#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH 11 +#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN 0 +#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH 3 + +/* XM_MGT_INT_MSK_REG: XGMAC management interrupt mask register */ +#define FR_AB_XM_MGT_INT_MSK 0x000012f0 +#define FRF_AB_XM_STAT_CNTR_OF_LBN 9 +#define FRF_AB_XM_STAT_CNTR_OF_WIDTH 1 +#define FRF_AB_XM_STAT_CNTR_HF_LBN 8 +#define FRF_AB_XM_STAT_CNTR_HF_WIDTH 1 +#define FRF_AB_XM_PRMBLE_ERR_LBN 2 +#define FRF_AB_XM_PRMBLE_ERR_WIDTH 1 +#define FRF_AB_XM_RMTFLT_LBN 1 +#define FRF_AB_XM_RMTFLT_WIDTH 1 +#define FRF_AB_XM_LCLFLT_LBN 0 +#define FRF_AB_XM_LCLFLT_WIDTH 1 + +/* XX_PWR_RST_REG: XGXS/XAUI powerdown/reset register */ +#define FR_AB_XX_PWR_RST 0x00001300 +#define FRF_AB_XX_PWRDND_SIG_LBN 31 +#define FRF_AB_XX_PWRDND_SIG_WIDTH 1 +#define FRF_AB_XX_PWRDNC_SIG_LBN 30 +#define FRF_AB_XX_PWRDNC_SIG_WIDTH 1 +#define FRF_AB_XX_PWRDNB_SIG_LBN 29 +#define FRF_AB_XX_PWRDNB_SIG_WIDTH 1 +#define FRF_AB_XX_PWRDNA_SIG_LBN 28 +#define FRF_AB_XX_PWRDNA_SIG_WIDTH 1 +#define FRF_AB_XX_SIM_MODE_LBN 27 +#define FRF_AB_XX_SIM_MODE_WIDTH 1 +#define FRF_AB_XX_RSTPLLCD_SIG_LBN 25 +#define FRF_AB_XX_RSTPLLCD_SIG_WIDTH 1 +#define FRF_AB_XX_RSTPLLAB_SIG_LBN 24 +#define FRF_AB_XX_RSTPLLAB_SIG_WIDTH 1 +#define FRF_AB_XX_RESETD_SIG_LBN 23 +#define FRF_AB_XX_RESETD_SIG_WIDTH 1 +#define FRF_AB_XX_RESETC_SIG_LBN 22 +#define FRF_AB_XX_RESETC_SIG_WIDTH 1 +#define FRF_AB_XX_RESETB_SIG_LBN 21 
+#define FRF_AB_XX_RESETB_SIG_WIDTH 1 +#define FRF_AB_XX_RESETA_SIG_LBN 20 +#define FRF_AB_XX_RESETA_SIG_WIDTH 1 +#define FRF_AB_XX_RSTXGXSRX_SIG_LBN 18 +#define FRF_AB_XX_RSTXGXSRX_SIG_WIDTH 1 +#define FRF_AB_XX_RSTXGXSTX_SIG_LBN 17 +#define FRF_AB_XX_RSTXGXSTX_SIG_WIDTH 1 +#define FRF_AB_XX_SD_RST_ACT_LBN 16 +#define FRF_AB_XX_SD_RST_ACT_WIDTH 1 +#define FRF_AB_XX_PWRDND_EN_LBN 15 +#define FRF_AB_XX_PWRDND_EN_WIDTH 1 +#define FRF_AB_XX_PWRDNC_EN_LBN 14 +#define FRF_AB_XX_PWRDNC_EN_WIDTH 1 +#define FRF_AB_XX_PWRDNB_EN_LBN 13 +#define FRF_AB_XX_PWRDNB_EN_WIDTH 1 +#define FRF_AB_XX_PWRDNA_EN_LBN 12 +#define FRF_AB_XX_PWRDNA_EN_WIDTH 1 +#define FRF_AB_XX_RSTPLLCD_EN_LBN 9 +#define FRF_AB_XX_RSTPLLCD_EN_WIDTH 1 +#define FRF_AB_XX_RSTPLLAB_EN_LBN 8 +#define FRF_AB_XX_RSTPLLAB_EN_WIDTH 1 +#define FRF_AB_XX_RESETD_EN_LBN 7 +#define FRF_AB_XX_RESETD_EN_WIDTH 1 +#define FRF_AB_XX_RESETC_EN_LBN 6 +#define FRF_AB_XX_RESETC_EN_WIDTH 1 +#define FRF_AB_XX_RESETB_EN_LBN 5 +#define FRF_AB_XX_RESETB_EN_WIDTH 1 +#define FRF_AB_XX_RESETA_EN_LBN 4 +#define FRF_AB_XX_RESETA_EN_WIDTH 1 +#define FRF_AB_XX_RSTXGXSRX_EN_LBN 2 +#define FRF_AB_XX_RSTXGXSRX_EN_WIDTH 1 +#define FRF_AB_XX_RSTXGXSTX_EN_LBN 1 +#define FRF_AB_XX_RSTXGXSTX_EN_WIDTH 1 +#define FRF_AB_XX_RST_XX_EN_LBN 0 +#define FRF_AB_XX_RST_XX_EN_WIDTH 1 + +/* XX_SD_CTL_REG: XGXS/XAUI powerdown/reset control register */ +#define FR_AB_XX_SD_CTL 0x00001310 +#define FRF_AB_XX_TERMADJ1_LBN 17 +#define FRF_AB_XX_TERMADJ1_WIDTH 1 +#define FRF_AB_XX_TERMADJ0_LBN 16 +#define FRF_AB_XX_TERMADJ0_WIDTH 1 +#define FRF_AB_XX_HIDRVD_LBN 15 +#define FRF_AB_XX_HIDRVD_WIDTH 1 +#define FRF_AB_XX_LODRVD_LBN 14 +#define FRF_AB_XX_LODRVD_WIDTH 1 +#define FRF_AB_XX_HIDRVC_LBN 13 +#define FRF_AB_XX_HIDRVC_WIDTH 1 +#define FRF_AB_XX_LODRVC_LBN 12 +#define FRF_AB_XX_LODRVC_WIDTH 1 +#define FRF_AB_XX_HIDRVB_LBN 11 +#define FRF_AB_XX_HIDRVB_WIDTH 1 +#define FRF_AB_XX_LODRVB_LBN 10 +#define FRF_AB_XX_LODRVB_WIDTH 1 +#define FRF_AB_XX_HIDRVA_LBN 9 +#define FRF_AB_XX_HIDRVA_WIDTH 1 +#define FRF_AB_XX_LODRVA_LBN 8 +#define FRF_AB_XX_LODRVA_WIDTH 1 +#define FRF_AB_XX_LPBKD_LBN 3 +#define FRF_AB_XX_LPBKD_WIDTH 1 +#define FRF_AB_XX_LPBKC_LBN 2 +#define FRF_AB_XX_LPBKC_WIDTH 1 +#define FRF_AB_XX_LPBKB_LBN 1 +#define FRF_AB_XX_LPBKB_WIDTH 1 +#define FRF_AB_XX_LPBKA_LBN 0 +#define FRF_AB_XX_LPBKA_WIDTH 1 + +/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */ +#define FR_AB_XX_TXDRV_CTL 0x00001320 +#define FRF_AB_XX_DEQD_LBN 28 +#define FRF_AB_XX_DEQD_WIDTH 4 +#define FRF_AB_XX_DEQC_LBN 24 +#define FRF_AB_XX_DEQC_WIDTH 4 +#define FRF_AB_XX_DEQB_LBN 20 +#define FRF_AB_XX_DEQB_WIDTH 4 +#define FRF_AB_XX_DEQA_LBN 16 +#define FRF_AB_XX_DEQA_WIDTH 4 +#define FRF_AB_XX_DTXD_LBN 12 +#define FRF_AB_XX_DTXD_WIDTH 4 +#define FRF_AB_XX_DTXC_LBN 8 +#define FRF_AB_XX_DTXC_WIDTH 4 +#define FRF_AB_XX_DTXB_LBN 4 +#define FRF_AB_XX_DTXB_WIDTH 4 +#define FRF_AB_XX_DTXA_LBN 0 +#define FRF_AB_XX_DTXA_WIDTH 4 + +/* XX_PRBS_CTL_REG: documentation to be written for sum_XX_PRBS_CTL_REG */ +#define FR_AB_XX_PRBS_CTL 0x00001330 +#define FRF_AB_XX_CH3_RX_PRBS_SEL_LBN 30 +#define FRF_AB_XX_CH3_RX_PRBS_SEL_WIDTH 2 +#define FRF_AB_XX_CH3_RX_PRBS_INV_LBN 29 +#define FRF_AB_XX_CH3_RX_PRBS_INV_WIDTH 1 +#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_LBN 28 +#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_WIDTH 1 +#define FRF_AB_XX_CH2_RX_PRBS_SEL_LBN 26 +#define FRF_AB_XX_CH2_RX_PRBS_SEL_WIDTH 2 +#define FRF_AB_XX_CH2_RX_PRBS_INV_LBN 25 +#define FRF_AB_XX_CH2_RX_PRBS_INV_WIDTH 1 +#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_LBN 24 +#define 
FRF_AB_XX_CH2_RX_PRBS_CHKEN_WIDTH 1 +#define FRF_AB_XX_CH1_RX_PRBS_SEL_LBN 22 +#define FRF_AB_XX_CH1_RX_PRBS_SEL_WIDTH 2 +#define FRF_AB_XX_CH1_RX_PRBS_INV_LBN 21 +#define FRF_AB_XX_CH1_RX_PRBS_INV_WIDTH 1 +#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_LBN 20 +#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_WIDTH 1 +#define FRF_AB_XX_CH0_RX_PRBS_SEL_LBN 18 +#define FRF_AB_XX_CH0_RX_PRBS_SEL_WIDTH 2 +#define FRF_AB_XX_CH0_RX_PRBS_INV_LBN 17 +#define FRF_AB_XX_CH0_RX_PRBS_INV_WIDTH 1 +#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_LBN 16 +#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_WIDTH 1 +#define FRF_AB_XX_CH3_TX_PRBS_SEL_LBN 14 +#define FRF_AB_XX_CH3_TX_PRBS_SEL_WIDTH 2 +#define FRF_AB_XX_CH3_TX_PRBS_INV_LBN 13 +#define FRF_AB_XX_CH3_TX_PRBS_INV_WIDTH 1 +#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_LBN 12 +#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_WIDTH 1 +#define FRF_AB_XX_CH2_TX_PRBS_SEL_LBN 10 +#define FRF_AB_XX_CH2_TX_PRBS_SEL_WIDTH 2 +#define FRF_AB_XX_CH2_TX_PRBS_INV_LBN 9 +#define FRF_AB_XX_CH2_TX_PRBS_INV_WIDTH 1 +#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_LBN 8 +#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_WIDTH 1 +#define FRF_AB_XX_CH1_TX_PRBS_SEL_LBN 6 +#define FRF_AB_XX_CH1_TX_PRBS_SEL_WIDTH 2 +#define FRF_AB_XX_CH1_TX_PRBS_INV_LBN 5 +#define FRF_AB_XX_CH1_TX_PRBS_INV_WIDTH 1 +#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_LBN 4 +#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_WIDTH 1 +#define FRF_AB_XX_CH0_TX_PRBS_SEL_LBN 2 +#define FRF_AB_XX_CH0_TX_PRBS_SEL_WIDTH 2 +#define FRF_AB_XX_CH0_TX_PRBS_INV_LBN 1 +#define FRF_AB_XX_CH0_TX_PRBS_INV_WIDTH 1 +#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_LBN 0 +#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_WIDTH 1 + +/* XX_PRBS_CHK_REG: documentation to be written for sum_XX_PRBS_CHK_REG */ +#define FR_AB_XX_PRBS_CHK 0x00001340 +#define FRF_AB_XX_REV_LB_EN_LBN 16 +#define FRF_AB_XX_REV_LB_EN_WIDTH 1 +#define FRF_AB_XX_CH3_DEG_DET_LBN 15 +#define FRF_AB_XX_CH3_DEG_DET_WIDTH 1 +#define FRF_AB_XX_CH3_LFSR_LOCK_IND_LBN 14 +#define FRF_AB_XX_CH3_LFSR_LOCK_IND_WIDTH 1 +#define FRF_AB_XX_CH3_PRBS_FRUN_LBN 13 +#define FRF_AB_XX_CH3_PRBS_FRUN_WIDTH 1 +#define FRF_AB_XX_CH3_ERR_CHK_LBN 12 +#define FRF_AB_XX_CH3_ERR_CHK_WIDTH 1 +#define FRF_AB_XX_CH2_DEG_DET_LBN 11 +#define FRF_AB_XX_CH2_DEG_DET_WIDTH 1 +#define FRF_AB_XX_CH2_LFSR_LOCK_IND_LBN 10 +#define FRF_AB_XX_CH2_LFSR_LOCK_IND_WIDTH 1 +#define FRF_AB_XX_CH2_PRBS_FRUN_LBN 9 +#define FRF_AB_XX_CH2_PRBS_FRUN_WIDTH 1 +#define FRF_AB_XX_CH2_ERR_CHK_LBN 8 +#define FRF_AB_XX_CH2_ERR_CHK_WIDTH 1 +#define FRF_AB_XX_CH1_DEG_DET_LBN 7 +#define FRF_AB_XX_CH1_DEG_DET_WIDTH 1 +#define FRF_AB_XX_CH1_LFSR_LOCK_IND_LBN 6 +#define FRF_AB_XX_CH1_LFSR_LOCK_IND_WIDTH 1 +#define FRF_AB_XX_CH1_PRBS_FRUN_LBN 5 +#define FRF_AB_XX_CH1_PRBS_FRUN_WIDTH 1 +#define FRF_AB_XX_CH1_ERR_CHK_LBN 4 +#define FRF_AB_XX_CH1_ERR_CHK_WIDTH 1 +#define FRF_AB_XX_CH0_DEG_DET_LBN 3 +#define FRF_AB_XX_CH0_DEG_DET_WIDTH 1 +#define FRF_AB_XX_CH0_LFSR_LOCK_IND_LBN 2 +#define FRF_AB_XX_CH0_LFSR_LOCK_IND_WIDTH 1 +#define FRF_AB_XX_CH0_PRBS_FRUN_LBN 1 +#define FRF_AB_XX_CH0_PRBS_FRUN_WIDTH 1 +#define FRF_AB_XX_CH0_ERR_CHK_LBN 0 +#define FRF_AB_XX_CH0_ERR_CHK_WIDTH 1 + +/* XX_PRBS_ERR_REG: documentation to be written for sum_XX_PRBS_ERR_REG */ +#define FR_AB_XX_PRBS_ERR 0x00001350 +#define FRF_AB_XX_CH3_PRBS_ERR_CNT_LBN 24 +#define FRF_AB_XX_CH3_PRBS_ERR_CNT_WIDTH 8 +#define FRF_AB_XX_CH2_PRBS_ERR_CNT_LBN 16 +#define FRF_AB_XX_CH2_PRBS_ERR_CNT_WIDTH 8 +#define FRF_AB_XX_CH1_PRBS_ERR_CNT_LBN 8 +#define FRF_AB_XX_CH1_PRBS_ERR_CNT_WIDTH 8 +#define FRF_AB_XX_CH0_PRBS_ERR_CNT_LBN 0 +#define FRF_AB_XX_CH0_PRBS_ERR_CNT_WIDTH 8 + +/* XX_CORE_STAT_REG: XAUI XGXS 
core status register */ +#define FR_AB_XX_CORE_STAT 0x00001360 +#define FRF_AB_XX_FORCE_SIG3_LBN 31 +#define FRF_AB_XX_FORCE_SIG3_WIDTH 1 +#define FRF_AB_XX_FORCE_SIG3_VAL_LBN 30 +#define FRF_AB_XX_FORCE_SIG3_VAL_WIDTH 1 +#define FRF_AB_XX_FORCE_SIG2_LBN 29 +#define FRF_AB_XX_FORCE_SIG2_WIDTH 1 +#define FRF_AB_XX_FORCE_SIG2_VAL_LBN 28 +#define FRF_AB_XX_FORCE_SIG2_VAL_WIDTH 1 +#define FRF_AB_XX_FORCE_SIG1_LBN 27 +#define FRF_AB_XX_FORCE_SIG1_WIDTH 1 +#define FRF_AB_XX_FORCE_SIG1_VAL_LBN 26 +#define FRF_AB_XX_FORCE_SIG1_VAL_WIDTH 1 +#define FRF_AB_XX_FORCE_SIG0_LBN 25 +#define FRF_AB_XX_FORCE_SIG0_WIDTH 1 +#define FRF_AB_XX_FORCE_SIG0_VAL_LBN 24 +#define FRF_AB_XX_FORCE_SIG0_VAL_WIDTH 1 +#define FRF_AB_XX_XGXS_LB_EN_LBN 23 +#define FRF_AB_XX_XGXS_LB_EN_WIDTH 1 +#define FRF_AB_XX_XGMII_LB_EN_LBN 22 +#define FRF_AB_XX_XGMII_LB_EN_WIDTH 1 +#define FRF_AB_XX_MATCH_FAULT_LBN 21 +#define FRF_AB_XX_MATCH_FAULT_WIDTH 1 +#define FRF_AB_XX_ALIGN_DONE_LBN 20 +#define FRF_AB_XX_ALIGN_DONE_WIDTH 1 +#define FRF_AB_XX_SYNC_STAT3_LBN 19 +#define FRF_AB_XX_SYNC_STAT3_WIDTH 1 +#define FRF_AB_XX_SYNC_STAT2_LBN 18 +#define FRF_AB_XX_SYNC_STAT2_WIDTH 1 +#define FRF_AB_XX_SYNC_STAT1_LBN 17 +#define FRF_AB_XX_SYNC_STAT1_WIDTH 1 +#define FRF_AB_XX_SYNC_STAT0_LBN 16 +#define FRF_AB_XX_SYNC_STAT0_WIDTH 1 +#define FRF_AB_XX_COMMA_DET_CH3_LBN 15 +#define FRF_AB_XX_COMMA_DET_CH3_WIDTH 1 +#define FRF_AB_XX_COMMA_DET_CH2_LBN 14 +#define FRF_AB_XX_COMMA_DET_CH2_WIDTH 1 +#define FRF_AB_XX_COMMA_DET_CH1_LBN 13 +#define FRF_AB_XX_COMMA_DET_CH1_WIDTH 1 +#define FRF_AB_XX_COMMA_DET_CH0_LBN 12 +#define FRF_AB_XX_COMMA_DET_CH0_WIDTH 1 +#define FRF_AB_XX_CGRP_ALIGN_CH3_LBN 11 +#define FRF_AB_XX_CGRP_ALIGN_CH3_WIDTH 1 +#define FRF_AB_XX_CGRP_ALIGN_CH2_LBN 10 +#define FRF_AB_XX_CGRP_ALIGN_CH2_WIDTH 1 +#define FRF_AB_XX_CGRP_ALIGN_CH1_LBN 9 +#define FRF_AB_XX_CGRP_ALIGN_CH1_WIDTH 1 +#define FRF_AB_XX_CGRP_ALIGN_CH0_LBN 8 +#define FRF_AB_XX_CGRP_ALIGN_CH0_WIDTH 1 +#define FRF_AB_XX_CHAR_ERR_CH3_LBN 7 +#define FRF_AB_XX_CHAR_ERR_CH3_WIDTH 1 +#define FRF_AB_XX_CHAR_ERR_CH2_LBN 6 +#define FRF_AB_XX_CHAR_ERR_CH2_WIDTH 1 +#define FRF_AB_XX_CHAR_ERR_CH1_LBN 5 +#define FRF_AB_XX_CHAR_ERR_CH1_WIDTH 1 +#define FRF_AB_XX_CHAR_ERR_CH0_LBN 4 +#define FRF_AB_XX_CHAR_ERR_CH0_WIDTH 1 +#define FRF_AB_XX_DISPERR_CH3_LBN 3 +#define FRF_AB_XX_DISPERR_CH3_WIDTH 1 +#define FRF_AB_XX_DISPERR_CH2_LBN 2 +#define FRF_AB_XX_DISPERR_CH2_WIDTH 1 +#define FRF_AB_XX_DISPERR_CH1_LBN 1 +#define FRF_AB_XX_DISPERR_CH1_WIDTH 1 +#define FRF_AB_XX_DISPERR_CH0_LBN 0 +#define FRF_AB_XX_DISPERR_CH0_WIDTH 1 + +/* RX_DESC_PTR_TBL_KER: Receive descriptor pointer table */ +#define FR_AA_RX_DESC_PTR_TBL_KER 0x00011800 +#define FR_AA_RX_DESC_PTR_TBL_KER_STEP 16 +#define FR_AA_RX_DESC_PTR_TBL_KER_ROWS 4 +/* RX_DESC_PTR_TBL: Receive descriptor pointer table */ +#define FR_BZ_RX_DESC_PTR_TBL 0x00f40000 +#define FR_BZ_RX_DESC_PTR_TBL_STEP 16 +#define FR_BB_RX_DESC_PTR_TBL_ROWS 4096 +#define FR_CZ_RX_DESC_PTR_TBL_ROWS 1024 +#define FRF_CZ_RX_HDR_SPLIT_LBN 90 +#define FRF_CZ_RX_HDR_SPLIT_WIDTH 1 +#define FRF_AA_RX_RESET_LBN 89 +#define FRF_AA_RX_RESET_WIDTH 1 +#define FRF_AZ_RX_ISCSI_DDIG_EN_LBN 88 +#define FRF_AZ_RX_ISCSI_DDIG_EN_WIDTH 1 +#define FRF_AZ_RX_ISCSI_HDIG_EN_LBN 87 +#define FRF_AZ_RX_ISCSI_HDIG_EN_WIDTH 1 +#define FRF_AZ_RX_DESC_PREF_ACT_LBN 86 +#define FRF_AZ_RX_DESC_PREF_ACT_WIDTH 1 +#define FRF_AZ_RX_DC_HW_RPTR_LBN 80 +#define FRF_AZ_RX_DC_HW_RPTR_WIDTH 6 +#define FRF_AZ_RX_DESCQ_HW_RPTR_LBN 68 +#define FRF_AZ_RX_DESCQ_HW_RPTR_WIDTH 12 +#define 
FRF_AZ_RX_DESCQ_SW_WPTR_LBN 56 +#define FRF_AZ_RX_DESCQ_SW_WPTR_WIDTH 12 +#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_LBN 36 +#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_WIDTH 20 +#define FRF_AZ_RX_DESCQ_EVQ_ID_LBN 24 +#define FRF_AZ_RX_DESCQ_EVQ_ID_WIDTH 12 +#define FRF_AZ_RX_DESCQ_OWNER_ID_LBN 10 +#define FRF_AZ_RX_DESCQ_OWNER_ID_WIDTH 14 +#define FRF_AZ_RX_DESCQ_LABEL_LBN 5 +#define FRF_AZ_RX_DESCQ_LABEL_WIDTH 5 +#define FRF_AZ_RX_DESCQ_SIZE_LBN 3 +#define FRF_AZ_RX_DESCQ_SIZE_WIDTH 2 +#define FFE_AZ_RX_DESCQ_SIZE_4K 3 +#define FFE_AZ_RX_DESCQ_SIZE_2K 2 +#define FFE_AZ_RX_DESCQ_SIZE_1K 1 +#define FFE_AZ_RX_DESCQ_SIZE_512 0 +#define FRF_AZ_RX_DESCQ_TYPE_LBN 2 +#define FRF_AZ_RX_DESCQ_TYPE_WIDTH 1 +#define FRF_AZ_RX_DESCQ_JUMBO_LBN 1 +#define FRF_AZ_RX_DESCQ_JUMBO_WIDTH 1 +#define FRF_AZ_RX_DESCQ_EN_LBN 0 +#define FRF_AZ_RX_DESCQ_EN_WIDTH 1 + +/* TX_DESC_PTR_TBL_KER: Transmit descriptor pointer */ +#define FR_AA_TX_DESC_PTR_TBL_KER 0x00011900 +#define FR_AA_TX_DESC_PTR_TBL_KER_STEP 16 +#define FR_AA_TX_DESC_PTR_TBL_KER_ROWS 8 +/* TX_DESC_PTR_TBL: Transmit descriptor pointer */ +#define FR_BZ_TX_DESC_PTR_TBL 0x00f50000 +#define FR_BZ_TX_DESC_PTR_TBL_STEP 16 +#define FR_BB_TX_DESC_PTR_TBL_ROWS 4096 +#define FR_CZ_TX_DESC_PTR_TBL_ROWS 1024 +#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_LBN 94 +#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_WIDTH 2 +#define FRF_CZ_TX_DPT_ETH_FILT_EN_LBN 93 +#define FRF_CZ_TX_DPT_ETH_FILT_EN_WIDTH 1 +#define FRF_CZ_TX_DPT_IP_FILT_EN_LBN 92 +#define FRF_CZ_TX_DPT_IP_FILT_EN_WIDTH 1 +#define FRF_BZ_TX_NON_IP_DROP_DIS_LBN 91 +#define FRF_BZ_TX_NON_IP_DROP_DIS_WIDTH 1 +#define FRF_BZ_TX_IP_CHKSM_DIS_LBN 90 +#define FRF_BZ_TX_IP_CHKSM_DIS_WIDTH 1 +#define FRF_BZ_TX_TCP_CHKSM_DIS_LBN 89 +#define FRF_BZ_TX_TCP_CHKSM_DIS_WIDTH 1 +#define FRF_AZ_TX_DESCQ_EN_LBN 88 +#define FRF_AZ_TX_DESCQ_EN_WIDTH 1 +#define FRF_AZ_TX_ISCSI_DDIG_EN_LBN 87 +#define FRF_AZ_TX_ISCSI_DDIG_EN_WIDTH 1 +#define FRF_AZ_TX_ISCSI_HDIG_EN_LBN 86 +#define FRF_AZ_TX_ISCSI_HDIG_EN_WIDTH 1 +#define FRF_AZ_TX_DC_HW_RPTR_LBN 80 +#define FRF_AZ_TX_DC_HW_RPTR_WIDTH 6 +#define FRF_AZ_TX_DESCQ_HW_RPTR_LBN 68 +#define FRF_AZ_TX_DESCQ_HW_RPTR_WIDTH 12 +#define FRF_AZ_TX_DESCQ_SW_WPTR_LBN 56 +#define FRF_AZ_TX_DESCQ_SW_WPTR_WIDTH 12 +#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_LBN 36 +#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_WIDTH 20 +#define FRF_AZ_TX_DESCQ_EVQ_ID_LBN 24 +#define FRF_AZ_TX_DESCQ_EVQ_ID_WIDTH 12 +#define FRF_AZ_TX_DESCQ_OWNER_ID_LBN 10 +#define FRF_AZ_TX_DESCQ_OWNER_ID_WIDTH 14 +#define FRF_AZ_TX_DESCQ_LABEL_LBN 5 +#define FRF_AZ_TX_DESCQ_LABEL_WIDTH 5 +#define FRF_AZ_TX_DESCQ_SIZE_LBN 3 +#define FRF_AZ_TX_DESCQ_SIZE_WIDTH 2 +#define FFE_AZ_TX_DESCQ_SIZE_4K 3 +#define FFE_AZ_TX_DESCQ_SIZE_2K 2 +#define FFE_AZ_TX_DESCQ_SIZE_1K 1 +#define FFE_AZ_TX_DESCQ_SIZE_512 0 +#define FRF_AZ_TX_DESCQ_TYPE_LBN 1 +#define FRF_AZ_TX_DESCQ_TYPE_WIDTH 2 +#define FRF_AZ_TX_DESCQ_FLUSH_LBN 0 +#define FRF_AZ_TX_DESCQ_FLUSH_WIDTH 1 + +/* EVQ_PTR_TBL_KER: Event queue pointer table */ +#define FR_AA_EVQ_PTR_TBL_KER 0x00011a00 +#define FR_AA_EVQ_PTR_TBL_KER_STEP 16 +#define FR_AA_EVQ_PTR_TBL_KER_ROWS 4 +/* EVQ_PTR_TBL: Event queue pointer table */ +#define FR_BZ_EVQ_PTR_TBL 0x00f60000 +#define FR_BZ_EVQ_PTR_TBL_STEP 16 +#define FR_CZ_EVQ_PTR_TBL_ROWS 1024 +#define FR_BB_EVQ_PTR_TBL_ROWS 4096 +#define FRF_BZ_EVQ_RPTR_IGN_LBN 40 +#define FRF_BZ_EVQ_RPTR_IGN_WIDTH 1 +#define FRF_AB_EVQ_WKUP_OR_INT_EN_LBN 39 +#define FRF_AB_EVQ_WKUP_OR_INT_EN_WIDTH 1 +#define FRF_CZ_EVQ_DOS_PROTECT_EN_LBN 39 +#define FRF_CZ_EVQ_DOS_PROTECT_EN_WIDTH 1 +#define FRF_AZ_EVQ_NXT_WPTR_LBN 24 
+#define FRF_AZ_EVQ_NXT_WPTR_WIDTH 15 +#define FRF_AZ_EVQ_EN_LBN 23 +#define FRF_AZ_EVQ_EN_WIDTH 1 +#define FRF_AZ_EVQ_SIZE_LBN 20 +#define FRF_AZ_EVQ_SIZE_WIDTH 3 +#define FFE_AZ_EVQ_SIZE_32K 6 +#define FFE_AZ_EVQ_SIZE_16K 5 +#define FFE_AZ_EVQ_SIZE_8K 4 +#define FFE_AZ_EVQ_SIZE_4K 3 +#define FFE_AZ_EVQ_SIZE_2K 2 +#define FFE_AZ_EVQ_SIZE_1K 1 +#define FFE_AZ_EVQ_SIZE_512 0 +#define FRF_AZ_EVQ_BUF_BASE_ID_LBN 0 +#define FRF_AZ_EVQ_BUF_BASE_ID_WIDTH 20 + +/* BUF_HALF_TBL_KER: Buffer table in half buffer table mode direct access by driver */ +#define FR_AA_BUF_HALF_TBL_KER 0x00018000 +#define FR_AA_BUF_HALF_TBL_KER_STEP 8 +#define FR_AA_BUF_HALF_TBL_KER_ROWS 4096 +/* BUF_HALF_TBL: Buffer table in half buffer table mode direct access by driver */ +#define FR_BZ_BUF_HALF_TBL 0x00800000 +#define FR_BZ_BUF_HALF_TBL_STEP 8 +#define FR_CZ_BUF_HALF_TBL_ROWS 147456 +#define FR_BB_BUF_HALF_TBL_ROWS 524288 +#define FRF_AZ_BUF_ADR_HBUF_ODD_LBN 44 +#define FRF_AZ_BUF_ADR_HBUF_ODD_WIDTH 20 +#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_LBN 32 +#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_WIDTH 12 +#define FRF_AZ_BUF_ADR_HBUF_EVEN_LBN 12 +#define FRF_AZ_BUF_ADR_HBUF_EVEN_WIDTH 20 +#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_LBN 0 +#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_WIDTH 12 + +/* BUF_FULL_TBL_KER: Buffer table in full buffer table mode direct access by driver */ +#define FR_AA_BUF_FULL_TBL_KER 0x00018000 +#define FR_AA_BUF_FULL_TBL_KER_STEP 8 +#define FR_AA_BUF_FULL_TBL_KER_ROWS 4096 +/* BUF_FULL_TBL: Buffer table in full buffer table mode direct access by driver */ +#define FR_BZ_BUF_FULL_TBL 0x00800000 +#define FR_BZ_BUF_FULL_TBL_STEP 8 +#define FR_CZ_BUF_FULL_TBL_ROWS 147456 +#define FR_BB_BUF_FULL_TBL_ROWS 917504 +#define FRF_AZ_BUF_FULL_UNUSED_LBN 51 +#define FRF_AZ_BUF_FULL_UNUSED_WIDTH 13 +#define FRF_AZ_IP_DAT_BUF_SIZE_LBN 50 +#define FRF_AZ_IP_DAT_BUF_SIZE_WIDTH 1 +#define FRF_AZ_BUF_ADR_REGION_LBN 48 +#define FRF_AZ_BUF_ADR_REGION_WIDTH 2 +#define FFE_AZ_BUF_ADR_REGN3 3 +#define FFE_AZ_BUF_ADR_REGN2 2 +#define FFE_AZ_BUF_ADR_REGN1 1 +#define FFE_AZ_BUF_ADR_REGN0 0 +#define FRF_AZ_BUF_ADR_FBUF_LBN 14 +#define FRF_AZ_BUF_ADR_FBUF_WIDTH 34 +#define FRF_AZ_BUF_OWNER_ID_FBUF_LBN 0 +#define FRF_AZ_BUF_OWNER_ID_FBUF_WIDTH 14 + +/* RX_FILTER_TBL0: TCP/IPv4 Receive filter table */ +#define FR_BZ_RX_FILTER_TBL0 0x00f00000 +#define FR_BZ_RX_FILTER_TBL0_STEP 32 +#define FR_BZ_RX_FILTER_TBL0_ROWS 8192 +/* RX_FILTER_TBL1: TCP/IPv4 Receive filter table */ +#define FR_BB_RX_FILTER_TBL1 0x00f00010 +#define FR_BB_RX_FILTER_TBL1_STEP 32 +#define FR_BB_RX_FILTER_TBL1_ROWS 8192 +#define FRF_BZ_RSS_EN_LBN 110 +#define FRF_BZ_RSS_EN_WIDTH 1 +#define FRF_BZ_SCATTER_EN_LBN 109 +#define FRF_BZ_SCATTER_EN_WIDTH 1 +#define FRF_BZ_TCP_UDP_LBN 108 +#define FRF_BZ_TCP_UDP_WIDTH 1 +#define FRF_BZ_RXQ_ID_LBN 96 +#define FRF_BZ_RXQ_ID_WIDTH 12 +#define FRF_BZ_DEST_IP_LBN 64 +#define FRF_BZ_DEST_IP_WIDTH 32 +#define FRF_BZ_DEST_PORT_TCP_LBN 48 +#define FRF_BZ_DEST_PORT_TCP_WIDTH 16 +#define FRF_BZ_SRC_IP_LBN 16 +#define FRF_BZ_SRC_IP_WIDTH 32 +#define FRF_BZ_SRC_TCP_DEST_UDP_LBN 0 +#define FRF_BZ_SRC_TCP_DEST_UDP_WIDTH 16 + +/* RX_MAC_FILTER_TBL0: Receive Ethernet filter table */ +#define FR_CZ_RX_MAC_FILTER_TBL0 0x00f00010 +#define FR_CZ_RX_MAC_FILTER_TBL0_STEP 32 +#define FR_CZ_RX_MAC_FILTER_TBL0_ROWS 512 +#define FRF_CZ_RMFT_RSS_EN_LBN 75 +#define FRF_CZ_RMFT_RSS_EN_WIDTH 1 +#define FRF_CZ_RMFT_SCATTER_EN_LBN 74 +#define FRF_CZ_RMFT_SCATTER_EN_WIDTH 1 +#define FRF_CZ_RMFT_IP_OVERRIDE_LBN 73 +#define FRF_CZ_RMFT_IP_OVERRIDE_WIDTH 1 +#define 
FRF_CZ_RMFT_RXQ_ID_LBN 61 +#define FRF_CZ_RMFT_RXQ_ID_WIDTH 12 +#define FRF_CZ_RMFT_WILDCARD_MATCH_LBN 60 +#define FRF_CZ_RMFT_WILDCARD_MATCH_WIDTH 1 +#define FRF_CZ_RMFT_DEST_MAC_LBN 12 +#define FRF_CZ_RMFT_DEST_MAC_WIDTH 48 +#define FRF_CZ_RMFT_VLAN_ID_LBN 0 +#define FRF_CZ_RMFT_VLAN_ID_WIDTH 12 + +/* TIMER_TBL: Timer table */ +#define FR_BZ_TIMER_TBL 0x00f70000 +#define FR_BZ_TIMER_TBL_STEP 16 +#define FR_CZ_TIMER_TBL_ROWS 1024 +#define FR_BB_TIMER_TBL_ROWS 4096 +#define FRF_CZ_TIMER_Q_EN_LBN 33 +#define FRF_CZ_TIMER_Q_EN_WIDTH 1 +#define FRF_CZ_INT_ARMD_LBN 32 +#define FRF_CZ_INT_ARMD_WIDTH 1 +#define FRF_CZ_INT_PEND_LBN 31 +#define FRF_CZ_INT_PEND_WIDTH 1 +#define FRF_CZ_HOST_NOTIFY_MODE_LBN 30 +#define FRF_CZ_HOST_NOTIFY_MODE_WIDTH 1 +#define FRF_CZ_RELOAD_TIMER_VAL_LBN 16 +#define FRF_CZ_RELOAD_TIMER_VAL_WIDTH 14 +#define FRF_CZ_TIMER_MODE_LBN 14 +#define FRF_CZ_TIMER_MODE_WIDTH 2 +#define FFE_CZ_TIMER_MODE_INT_HLDOFF 3 +#define FFE_CZ_TIMER_MODE_TRIG_START 2 +#define FFE_CZ_TIMER_MODE_IMMED_START 1 +#define FFE_CZ_TIMER_MODE_DIS 0 +#define FRF_BB_TIMER_MODE_LBN 12 +#define FRF_BB_TIMER_MODE_WIDTH 2 +#define FFE_BB_TIMER_MODE_INT_HLDOFF 2 +#define FFE_BB_TIMER_MODE_TRIG_START 2 +#define FFE_BB_TIMER_MODE_IMMED_START 1 +#define FFE_BB_TIMER_MODE_DIS 0 +#define FRF_CZ_TIMER_VAL_LBN 0 +#define FRF_CZ_TIMER_VAL_WIDTH 14 +#define FRF_BB_TIMER_VAL_LBN 0 +#define FRF_BB_TIMER_VAL_WIDTH 12 + +/* TX_PACE_TBL: Transmit pacing table */ +#define FR_BZ_TX_PACE_TBL 0x00f80000 +#define FR_BZ_TX_PACE_TBL_STEP 16 +#define FR_CZ_TX_PACE_TBL_ROWS 1024 +#define FR_BB_TX_PACE_TBL_ROWS 4096 +#define FRF_BZ_TX_PACE_LBN 0 +#define FRF_BZ_TX_PACE_WIDTH 5 + +/* RX_INDIRECTION_TBL: RX Indirection Table */ +#define FR_BZ_RX_INDIRECTION_TBL 0x00fb0000 +#define FR_BZ_RX_INDIRECTION_TBL_STEP 16 +#define FR_BZ_RX_INDIRECTION_TBL_ROWS 128 +#define FRF_BZ_IT_QUEUE_LBN 0 +#define FRF_BZ_IT_QUEUE_WIDTH 6 + +/* TX_FILTER_TBL0: TCP/IPv4 Transmit filter table */ +#define FR_CZ_TX_FILTER_TBL0 0x00fc0000 +#define FR_CZ_TX_FILTER_TBL0_STEP 16 +#define FR_CZ_TX_FILTER_TBL0_ROWS 8192 +#define FRF_CZ_TIFT_TCP_UDP_LBN 108 +#define FRF_CZ_TIFT_TCP_UDP_WIDTH 1 +#define FRF_CZ_TIFT_TXQ_ID_LBN 96 +#define FRF_CZ_TIFT_TXQ_ID_WIDTH 12 +#define FRF_CZ_TIFT_DEST_IP_LBN 64 +#define FRF_CZ_TIFT_DEST_IP_WIDTH 32 +#define FRF_CZ_TIFT_DEST_PORT_TCP_LBN 48 +#define FRF_CZ_TIFT_DEST_PORT_TCP_WIDTH 16 +#define FRF_CZ_TIFT_SRC_IP_LBN 16 +#define FRF_CZ_TIFT_SRC_IP_WIDTH 32 +#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_LBN 0 +#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_WIDTH 16 + +/* TX_MAC_FILTER_TBL0: Transmit Ethernet filter table */ +#define FR_CZ_TX_MAC_FILTER_TBL0 0x00fe0000 +#define FR_CZ_TX_MAC_FILTER_TBL0_STEP 16 +#define FR_CZ_TX_MAC_FILTER_TBL0_ROWS 512 +#define FRF_CZ_TMFT_TXQ_ID_LBN 61 +#define FRF_CZ_TMFT_TXQ_ID_WIDTH 12 +#define FRF_CZ_TMFT_WILDCARD_MATCH_LBN 60 +#define FRF_CZ_TMFT_WILDCARD_MATCH_WIDTH 1 +#define FRF_CZ_TMFT_SRC_MAC_LBN 12 +#define FRF_CZ_TMFT_SRC_MAC_WIDTH 48 +#define FRF_CZ_TMFT_VLAN_ID_LBN 0 +#define FRF_CZ_TMFT_VLAN_ID_WIDTH 12 + +/* MC_TREG_SMEM: MC Shared Memory */ +#define FR_CZ_MC_TREG_SMEM 0x00ff0000 +#define FR_CZ_MC_TREG_SMEM_STEP 4 +#define FR_CZ_MC_TREG_SMEM_ROWS 512 +#define FRF_CZ_MC_TREG_SMEM_ROW_LBN 0 +#define FRF_CZ_MC_TREG_SMEM_ROW_WIDTH 32 + +/* MSIX_VECTOR_TABLE: MSIX Vector Table */ +#define FR_BB_MSIX_VECTOR_TABLE 0x00ff0000 +#define FR_BZ_MSIX_VECTOR_TABLE_STEP 16 +#define FR_BB_MSIX_VECTOR_TABLE_ROWS 64 +/* MSIX_VECTOR_TABLE: MSIX Vector Table */ +#define FR_CZ_MSIX_VECTOR_TABLE 0x00000000 +/* 
FR_BZ_MSIX_VECTOR_TABLE_STEP 16 */ +#define FR_CZ_MSIX_VECTOR_TABLE_ROWS 1024 +#define FRF_BZ_MSIX_VECTOR_RESERVED_LBN 97 +#define FRF_BZ_MSIX_VECTOR_RESERVED_WIDTH 31 +#define FRF_BZ_MSIX_VECTOR_MASK_LBN 96 +#define FRF_BZ_MSIX_VECTOR_MASK_WIDTH 1 +#define FRF_BZ_MSIX_MESSAGE_DATA_LBN 64 +#define FRF_BZ_MSIX_MESSAGE_DATA_WIDTH 32 +#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_LBN 32 +#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_WIDTH 32 +#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_LBN 0 +#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_WIDTH 32 + +/* MSIX_PBA_TABLE: MSIX Pending Bit Array */ +#define FR_BB_MSIX_PBA_TABLE 0x00ff2000 +#define FR_BZ_MSIX_PBA_TABLE_STEP 4 +#define FR_BB_MSIX_PBA_TABLE_ROWS 2 +/* MSIX_PBA_TABLE: MSIX Pending Bit Array */ +#define FR_CZ_MSIX_PBA_TABLE 0x00008000 +/* FR_BZ_MSIX_PBA_TABLE_STEP 4 */ +#define FR_CZ_MSIX_PBA_TABLE_ROWS 32 +#define FRF_BZ_MSIX_PBA_PEND_DWORD_LBN 0 +#define FRF_BZ_MSIX_PBA_PEND_DWORD_WIDTH 32 + +/* SRM_DBG_REG: SRAM debug access */ +#define FR_BZ_SRM_DBG 0x03000000 +#define FR_BZ_SRM_DBG_STEP 8 +#define FR_CZ_SRM_DBG_ROWS 262144 +#define FR_BB_SRM_DBG_ROWS 2097152 +#define FRF_BZ_SRM_DBG_LBN 0 +#define FRF_BZ_SRM_DBG_WIDTH 64 + +/* TB_MSIX_PBA_TABLE: MSIX Pending Bit Array */ +#define FR_CZ_TB_MSIX_PBA_TABLE 0x00008000 +#define FR_CZ_TB_MSIX_PBA_TABLE_STEP 4 +#define FR_CZ_TB_MSIX_PBA_TABLE_ROWS 1024 +#define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_LBN 0 +#define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_WIDTH 32 + +/* DRIVER_EV */ +#define FSF_AZ_DRIVER_EV_SUBCODE_LBN 56 +#define FSF_AZ_DRIVER_EV_SUBCODE_WIDTH 4 +#define FSE_BZ_TX_DSC_ERROR_EV 15 +#define FSE_BZ_RX_DSC_ERROR_EV 14 +#define FSE_AA_RX_RECOVER_EV 11 +#define FSE_AZ_TIMER_EV 10 +#define FSE_AZ_TX_PKT_NON_TCP_UDP 9 +#define FSE_AZ_WAKE_UP_EV 6 +#define FSE_AZ_SRM_UPD_DONE_EV 5 +#define FSE_AB_EVQ_NOT_EN_EV 3 +#define FSE_AZ_EVQ_INIT_DONE_EV 2 +#define FSE_AZ_RX_DESCQ_FLS_DONE_EV 1 +#define FSE_AZ_TX_DESCQ_FLS_DONE_EV 0 +#define FSF_AZ_DRIVER_EV_SUBDATA_LBN 0 +#define FSF_AZ_DRIVER_EV_SUBDATA_WIDTH 14 + +/* EVENT_ENTRY */ +#define FSF_AZ_EV_CODE_LBN 60 +#define FSF_AZ_EV_CODE_WIDTH 4 +#define FSE_CZ_EV_CODE_MCDI_EV 12 +#define FSE_CZ_EV_CODE_USER_EV 8 +#define FSE_AZ_EV_CODE_DRV_GEN_EV 7 +#define FSE_AZ_EV_CODE_GLOBAL_EV 6 +#define FSE_AZ_EV_CODE_DRIVER_EV 5 +#define FSE_AZ_EV_CODE_TX_EV 2 +#define FSE_AZ_EV_CODE_RX_EV 0 +#define FSF_AZ_EV_DATA_LBN 0 +#define FSF_AZ_EV_DATA_WIDTH 60 + +/* GLOBAL_EV */ +#define FSF_BB_GLB_EV_RX_RECOVERY_LBN 12 +#define FSF_BB_GLB_EV_RX_RECOVERY_WIDTH 1 +#define FSF_AA_GLB_EV_RX_RECOVERY_LBN 11 +#define FSF_AA_GLB_EV_RX_RECOVERY_WIDTH 1 +#define FSF_BB_GLB_EV_XG_MGT_INTR_LBN 11 +#define FSF_BB_GLB_EV_XG_MGT_INTR_WIDTH 1 +#define FSF_AB_GLB_EV_XFP_PHY0_INTR_LBN 10 +#define FSF_AB_GLB_EV_XFP_PHY0_INTR_WIDTH 1 +#define FSF_AB_GLB_EV_XG_PHY0_INTR_LBN 9 +#define FSF_AB_GLB_EV_XG_PHY0_INTR_WIDTH 1 +#define FSF_AB_GLB_EV_G_PHY0_INTR_LBN 7 +#define FSF_AB_GLB_EV_G_PHY0_INTR_WIDTH 1 + +/* LEGACY_INT_VEC */ +#define FSF_AZ_NET_IVEC_FATAL_INT_LBN 64 +#define FSF_AZ_NET_IVEC_FATAL_INT_WIDTH 1 +#define FSF_AZ_NET_IVEC_INT_Q_LBN 40 +#define FSF_AZ_NET_IVEC_INT_Q_WIDTH 4 +#define FSF_AZ_NET_IVEC_INT_FLAG_LBN 32 +#define FSF_AZ_NET_IVEC_INT_FLAG_WIDTH 1 +#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_LBN 1 +#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_WIDTH 1 +#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_LBN 0 +#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_WIDTH 1 + +/* MC_XGMAC_FLTR_RULE_DEF */ +#define FSF_CZ_MC_XFRC_MODE_LBN 416 +#define FSF_CZ_MC_XFRC_MODE_WIDTH 1 +#define FSE_CZ_MC_XFRC_MODE_LAYERED 1 +#define FSE_CZ_MC_XFRC_MODE_SIMPLE 0 
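
(Illustrative aside, not part of the patch.) Every field in this header is described by an _LBN/_WIDTH pair (lowest bit number and width), and the EVENT_ENTRY / DRIVER_EV definitions above show the typical consumer: pull a field out of a 64-bit event word and compare it against the FSE_* enumerators. The driver has its own accessor macros in bitfield.h (not shown here); the standalone sketch below only spells out the underlying shift-and-mask arithmetic. EFX_FIELD64, efx_ev_code() and efx_ev_subcode() are hypothetical names, and the include path for the new header is assumed.

	/* Hedged sketch: decoding an event word with the _LBN/_WIDTH pairs
	 * defined above.  Not the driver's real accessors. */
	#include <stdint.h>
	#include "farch_regs.h"	/* assumed to be on the include path */

	#define EFX_FIELD64(value, field) \
		(((value) >> field##_LBN) & ((1ULL << field##_WIDTH) - 1))

	static unsigned int efx_ev_code(uint64_t event)
	{
		/* FSF_AZ_EV_CODE occupies bits 63:60 of the event */
		return (unsigned int)EFX_FIELD64(event, FSF_AZ_EV_CODE);
	}

	static unsigned int efx_ev_subcode(uint64_t event)
	{
		/* Only meaningful when efx_ev_code() == FSE_AZ_EV_CODE_DRIVER_EV */
		return (unsigned int)EFX_FIELD64(event, FSF_AZ_DRIVER_EV_SUBCODE);
	}
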
+#define FSF_CZ_MC_XFRC_HASH_LBN 384 +#define FSF_CZ_MC_XFRC_HASH_WIDTH 32 +#define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_LBN 256 +#define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_WIDTH 128 +#define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_LBN 128 +#define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_WIDTH 128 +#define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_LBN 0 +#define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_WIDTH 128 + +/* RX_EV */ +#define FSF_CZ_RX_EV_PKT_NOT_PARSED_LBN 58 +#define FSF_CZ_RX_EV_PKT_NOT_PARSED_WIDTH 1 +#define FSF_CZ_RX_EV_IPV6_PKT_LBN 57 +#define FSF_CZ_RX_EV_IPV6_PKT_WIDTH 1 +#define FSF_AZ_RX_EV_PKT_OK_LBN 56 +#define FSF_AZ_RX_EV_PKT_OK_WIDTH 1 +#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_LBN 55 +#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_LBN 54 +#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_IP_FRAG_ERR_LBN 53 +#define FSF_AZ_RX_EV_IP_FRAG_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_LBN 52 +#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51 +#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_ETH_CRC_ERR_LBN 50 +#define FSF_AZ_RX_EV_ETH_CRC_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_FRM_TRUNC_LBN 49 +#define FSF_AZ_RX_EV_FRM_TRUNC_WIDTH 1 +#define FSF_AA_RX_EV_DRIB_NIB_LBN 49 +#define FSF_AA_RX_EV_DRIB_NIB_WIDTH 1 +#define FSF_AZ_RX_EV_TOBE_DISC_LBN 47 +#define FSF_AZ_RX_EV_TOBE_DISC_WIDTH 1 +#define FSF_AZ_RX_EV_PKT_TYPE_LBN 44 +#define FSF_AZ_RX_EV_PKT_TYPE_WIDTH 3 +#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_JUMBO 5 +#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_LLC 4 +#define FSE_AZ_RX_EV_PKT_TYPE_VLAN 3 +#define FSE_AZ_RX_EV_PKT_TYPE_JUMBO 2 +#define FSE_AZ_RX_EV_PKT_TYPE_LLC 1 +#define FSE_AZ_RX_EV_PKT_TYPE_ETH 0 +#define FSF_AZ_RX_EV_HDR_TYPE_LBN 42 +#define FSF_AZ_RX_EV_HDR_TYPE_WIDTH 2 +#define FSE_AZ_RX_EV_HDR_TYPE_OTHER 3 +#define FSE_AB_RX_EV_HDR_TYPE_IPV4_OTHER 2 +#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER 2 +#define FSE_AB_RX_EV_HDR_TYPE_IPV4_UDP 1 +#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP 1 +#define FSE_AB_RX_EV_HDR_TYPE_IPV4_TCP 0 +#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP 0 +#define FSF_AZ_RX_EV_DESC_Q_EMPTY_LBN 41 +#define FSF_AZ_RX_EV_DESC_Q_EMPTY_WIDTH 1 +#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_LBN 40 +#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_WIDTH 1 +#define FSF_AZ_RX_EV_MCAST_PKT_LBN 39 +#define FSF_AZ_RX_EV_MCAST_PKT_WIDTH 1 +#define FSF_AA_RX_EV_RECOVERY_FLAG_LBN 37 +#define FSF_AA_RX_EV_RECOVERY_FLAG_WIDTH 1 +#define FSF_AZ_RX_EV_Q_LABEL_LBN 32 +#define FSF_AZ_RX_EV_Q_LABEL_WIDTH 5 +#define FSF_AZ_RX_EV_JUMBO_CONT_LBN 31 +#define FSF_AZ_RX_EV_JUMBO_CONT_WIDTH 1 +#define FSF_AZ_RX_EV_PORT_LBN 30 +#define FSF_AZ_RX_EV_PORT_WIDTH 1 +#define FSF_AZ_RX_EV_BYTE_CNT_LBN 16 +#define FSF_AZ_RX_EV_BYTE_CNT_WIDTH 14 +#define FSF_AZ_RX_EV_SOP_LBN 15 +#define FSF_AZ_RX_EV_SOP_WIDTH 1 +#define FSF_AZ_RX_EV_ISCSI_PKT_OK_LBN 14 +#define FSF_AZ_RX_EV_ISCSI_PKT_OK_WIDTH 1 +#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_LBN 13 +#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_LBN 12 +#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_DESC_PTR_LBN 0 +#define FSF_AZ_RX_EV_DESC_PTR_WIDTH 12 + +/* RX_KER_DESC */ +#define FSF_AZ_RX_KER_BUF_SIZE_LBN 48 +#define FSF_AZ_RX_KER_BUF_SIZE_WIDTH 14 +#define FSF_AZ_RX_KER_BUF_REGION_LBN 46 +#define FSF_AZ_RX_KER_BUF_REGION_WIDTH 2 +#define FSF_AZ_RX_KER_BUF_ADDR_LBN 0 +#define FSF_AZ_RX_KER_BUF_ADDR_WIDTH 46 + +/* RX_USER_DESC */ +#define FSF_AZ_RX_USER_2BYTE_OFFSET_LBN 20 +#define FSF_AZ_RX_USER_2BYTE_OFFSET_WIDTH 12 +#define 
FSF_AZ_RX_USER_BUF_ID_LBN 0 +#define FSF_AZ_RX_USER_BUF_ID_WIDTH 20 + +/* TX_EV */ +#define FSF_AZ_TX_EV_PKT_ERR_LBN 38 +#define FSF_AZ_TX_EV_PKT_ERR_WIDTH 1 +#define FSF_AZ_TX_EV_PKT_TOO_BIG_LBN 37 +#define FSF_AZ_TX_EV_PKT_TOO_BIG_WIDTH 1 +#define FSF_AZ_TX_EV_Q_LABEL_LBN 32 +#define FSF_AZ_TX_EV_Q_LABEL_WIDTH 5 +#define FSF_AZ_TX_EV_PORT_LBN 16 +#define FSF_AZ_TX_EV_PORT_WIDTH 1 +#define FSF_AZ_TX_EV_WQ_FF_FULL_LBN 15 +#define FSF_AZ_TX_EV_WQ_FF_FULL_WIDTH 1 +#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_LBN 14 +#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_WIDTH 1 +#define FSF_AZ_TX_EV_COMP_LBN 12 +#define FSF_AZ_TX_EV_COMP_WIDTH 1 +#define FSF_AZ_TX_EV_DESC_PTR_LBN 0 +#define FSF_AZ_TX_EV_DESC_PTR_WIDTH 12 + +/* TX_KER_DESC */ +#define FSF_AZ_TX_KER_CONT_LBN 62 +#define FSF_AZ_TX_KER_CONT_WIDTH 1 +#define FSF_AZ_TX_KER_BYTE_COUNT_LBN 48 +#define FSF_AZ_TX_KER_BYTE_COUNT_WIDTH 14 +#define FSF_AZ_TX_KER_BUF_REGION_LBN 46 +#define FSF_AZ_TX_KER_BUF_REGION_WIDTH 2 +#define FSF_AZ_TX_KER_BUF_ADDR_LBN 0 +#define FSF_AZ_TX_KER_BUF_ADDR_WIDTH 46 + +/* TX_USER_DESC */ +#define FSF_AZ_TX_USER_SW_EV_EN_LBN 48 +#define FSF_AZ_TX_USER_SW_EV_EN_WIDTH 1 +#define FSF_AZ_TX_USER_CONT_LBN 46 +#define FSF_AZ_TX_USER_CONT_WIDTH 1 +#define FSF_AZ_TX_USER_BYTE_CNT_LBN 33 +#define FSF_AZ_TX_USER_BYTE_CNT_WIDTH 13 +#define FSF_AZ_TX_USER_BUF_ID_LBN 13 +#define FSF_AZ_TX_USER_BUF_ID_WIDTH 20 +#define FSF_AZ_TX_USER_BYTE_OFS_LBN 0 +#define FSF_AZ_TX_USER_BYTE_OFS_WIDTH 13 + +/* USER_EV */ +#define FSF_CZ_USER_QID_LBN 32 +#define FSF_CZ_USER_QID_WIDTH 10 +#define FSF_CZ_USER_EV_REG_VALUE_LBN 0 +#define FSF_CZ_USER_EV_REG_VALUE_WIDTH 32 + +/************************************************************************** + * + * Falcon B0 PCIe core indirect registers + * + ************************************************************************** + */ + +#define FPCR_BB_PCIE_DEVICE_CTRL_STAT 0x68 + +#define FPCR_BB_PCIE_LINK_CTRL_STAT 0x70 + +#define FPCR_BB_ACK_RPL_TIMER 0x700 +#define FPCRF_BB_ACK_TL_LBN 0 +#define FPCRF_BB_ACK_TL_WIDTH 16 +#define FPCRF_BB_RPL_TL_LBN 16 +#define FPCRF_BB_RPL_TL_WIDTH 16 + +#define FPCR_BB_ACK_FREQ 0x70C +#define FPCRF_BB_ACK_FREQ_LBN 0 +#define FPCRF_BB_ACK_FREQ_WIDTH 7 + +/************************************************************************** + * + * Pseudo-registers and fields + * + ************************************************************************** + */ + +/* Interrupt acknowledge work-around register (A0/A1 only) */ +#define FR_AA_WORK_AROUND_BROKEN_PCI_READS 0x0070 + +/* EE_SPI_HCMD_REG: SPI host command register */ +/* Values for the EE_SPI_HCMD_SF_SEL register field */ +#define FFE_AB_SPI_DEVICE_EEPROM 0 +#define FFE_AB_SPI_DEVICE_FLASH 1 + +/* NIC_STAT_REG: NIC status register */ +#define FRF_AB_STRAP_10G_LBN 2 +#define FRF_AB_STRAP_10G_WIDTH 1 +#define FRF_AA_STRAP_PCIE_LBN 0 +#define FRF_AA_STRAP_PCIE_WIDTH 1 + +/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */ +#define FRF_AZ_FATAL_INTR_LBN 0 +#define FRF_AZ_FATAL_INTR_WIDTH 12 + +/* SRM_CFG_REG: SRAM configuration register */ +/* We treat the number of SRAM banks and bank size as a single field */ +#define FRF_AZ_SRM_NB_SZ_LBN FRF_AZ_SRM_BANK_SIZE_LBN +#define FRF_AZ_SRM_NB_SZ_WIDTH \ + (FRF_AZ_SRM_BANK_SIZE_WIDTH + FRF_AZ_SRM_NUM_BANK_WIDTH) +#define FFE_AB_SRM_NB1_SZ2M 0 +#define FFE_AB_SRM_NB1_SZ4M 1 +#define FFE_AB_SRM_NB1_SZ8M 2 +#define FFE_AB_SRM_NB_SZ_DEF 3 +#define FFE_AB_SRM_NB2_SZ4M 4 +#define FFE_AB_SRM_NB2_SZ8M 5 +#define FFE_AB_SRM_NB2_SZ16M 6 +#define FFE_AB_SRM_NB_SZ_RES 7 + +/* RX_DESC_UPD_REGP0: 
Receive descriptor update register. */ +/* We write just the last dword of these registers */ +#define FR_AZ_RX_DESC_UPD_DWORD_P0 \ + (BUILD_BUG_ON_ZERO(FR_AA_RX_DESC_UPD_KER != FR_BZ_RX_DESC_UPD_P0) + \ + FR_BZ_RX_DESC_UPD_P0 + 3 * 4) +#define FRF_AZ_RX_DESC_WPTR_DWORD_LBN (FRF_AZ_RX_DESC_WPTR_LBN - 3 * 32) +#define FRF_AZ_RX_DESC_WPTR_DWORD_WIDTH FRF_AZ_RX_DESC_WPTR_WIDTH + +/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */ +#define FR_AZ_TX_DESC_UPD_DWORD_P0 \ + (BUILD_BUG_ON_ZERO(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0) + \ + FR_BZ_TX_DESC_UPD_P0 + 3 * 4) +#define FRF_AZ_TX_DESC_WPTR_DWORD_LBN (FRF_AZ_TX_DESC_WPTR_LBN - 3 * 32) +#define FRF_AZ_TX_DESC_WPTR_DWORD_WIDTH FRF_AZ_TX_DESC_WPTR_WIDTH + +/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */ +#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_LBN 12 +#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_WIDTH 1 + +/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */ +#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_LBN 12 +#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_WIDTH 1 + +/* XM_TX_PARAM_REG: XGMAC transmit parameter register */ +#define FRF_AB_XM_MAX_TX_FRM_SIZE_LBN FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN +#define FRF_AB_XM_MAX_TX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH + \ + FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH) + +/* XM_RX_PARAM_REG: XGMAC receive parameter register */ +#define FRF_AB_XM_MAX_RX_FRM_SIZE_LBN FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN +#define FRF_AB_XM_MAX_RX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH + \ + FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH) + +/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */ +/* Default values */ +#define FFE_AB_XX_TXDRV_DEQ_DEF 0xe /* deq=.6 */ +#define FFE_AB_XX_TXDRV_DTX_DEF 0x5 /* 1.25 */ +#define FFE_AB_XX_SD_CTL_DRV_DEF 0 /* 20mA */ + +/* XX_CORE_STAT_REG: XAUI XGXS core status register */ +/* XGXS all-lanes status fields */ +#define FRF_AB_XX_SYNC_STAT_LBN FRF_AB_XX_SYNC_STAT0_LBN +#define FRF_AB_XX_SYNC_STAT_WIDTH 4 +#define FRF_AB_XX_COMMA_DET_LBN FRF_AB_XX_COMMA_DET_CH0_LBN +#define FRF_AB_XX_COMMA_DET_WIDTH 4 +#define FRF_AB_XX_CHAR_ERR_LBN FRF_AB_XX_CHAR_ERR_CH0_LBN +#define FRF_AB_XX_CHAR_ERR_WIDTH 4 +#define FRF_AB_XX_DISPERR_LBN FRF_AB_XX_DISPERR_CH0_LBN +#define FRF_AB_XX_DISPERR_WIDTH 4 +#define FFE_AB_XX_STAT_ALL_LANES 0xf +#define FRF_AB_XX_FORCE_SIG_LBN FRF_AB_XX_FORCE_SIG0_VAL_LBN +#define FRF_AB_XX_FORCE_SIG_WIDTH 8 +#define FFE_AB_XX_FORCE_SIG_ALL_LANES 0xff + +/* RX_MAC_FILTER_TBL0 */ +/* RMFT_DEST_MAC is wider than 32 bits */ +#define FRF_CZ_RMFT_DEST_MAC_LO_LBN FRF_CZ_RMFT_DEST_MAC_LBN +#define FRF_CZ_RMFT_DEST_MAC_LO_WIDTH 32 +#define FRF_CZ_RMFT_DEST_MAC_HI_LBN (FRF_CZ_RMFT_DEST_MAC_LBN + 32) +#define FRF_CZ_RMFT_DEST_MAC_HI_WIDTH (FRF_CZ_RMFT_DEST_MAC_WIDTH - 32) + +/* TX_MAC_FILTER_TBL0 */ +/* TMFT_SRC_MAC is wider than 32 bits */ +#define FRF_CZ_TMFT_SRC_MAC_LO_LBN FRF_CZ_TMFT_SRC_MAC_LBN +#define FRF_CZ_TMFT_SRC_MAC_LO_WIDTH 32 +#define FRF_CZ_TMFT_SRC_MAC_HI_LBN (FRF_CZ_TMFT_SRC_MAC_LBN + 32) +#define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH (FRF_CZ_TMFT_SRC_MAC_WIDTH - 32) + +/* TX_PACE_TBL */ +/* Values >20 are documented as reserved, but will result in a queue going + * into the fast bin with a pace value of zero. 
*/ +#define FFE_BZ_TX_PACE_OFF 0 +#define FFE_BZ_TX_PACE_RESERVED 21 + +/* DRIVER_EV */ +/* Sub-fields of an RX flush completion event */ +#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12 +#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1 +#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_LBN 0 +#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_WIDTH 12 + +/* EVENT_ENTRY */ +/* Magic number field for event test */ +#define FSF_AZ_DRV_GEN_EV_MAGIC_LBN 0 +#define FSF_AZ_DRV_GEN_EV_MAGIC_WIDTH 32 + +#endif /* EFX_FARCH_REGS_H */ diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c index 30d7442..fb01fdb 100644 --- a/drivers/net/ethernet/sfc/filter.c +++ b/drivers/net/ethernet/sfc/filter.c @@ -13,7 +13,7 @@ #include "filter.h" #include "io.h" #include "nic.h" -#include "regs.h" +#include "farch_regs.h" /* "Fudge factors" - difference between programmed value and actual depth. * Due to pipelined implementation we need to program H/W with a value that diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index d7d3a8a..dffadb2 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -11,7 +11,7 @@ #include "net_driver.h" #include "nic.h" #include "io.h" -#include "regs.h" +#include "farch_regs.h" #include "mcdi_pcol.h" #include "phy.h" diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index 372891c..f758333 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -19,7 +19,7 @@ #include "bitfield.h" #include "efx.h" #include "nic.h" -#include "regs.h" +#include "farch_regs.h" #include "io.h" #include "workarounds.h" diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 56a8b88..59e09e1 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -46,7 +46,7 @@ #include "mcdi.h" #include "mcdi_pcol.h" #include "io.h" -#include "regs.h" +#include "farch_regs.h" #include "nic.h" /* Maximum number of events expected to make up a PTP event */ diff --git a/drivers/net/ethernet/sfc/regs.h b/drivers/net/ethernet/sfc/regs.h deleted file mode 100644 index 27ad348d..0000000 --- a/drivers/net/ethernet/sfc/regs.h +++ /dev/null @@ -1,2928 +0,0 @@ -/**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards - * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2006-2010 Solarflare Communications Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation, incorporated herein by reference. - */ - -#ifndef EFX_REGS_H -#define EFX_REGS_H - -/* - * Falcon hardware architecture definitions have a name prefix following - * the format: - * - * F__ - * - * The following strings are used: - * - * MMIO register MC register Host memory structure - * ------------------------------------------------------------- - * Address R MCR - * Bitfield RF MCRF SF - * Enumerator FE MCFE SE - * - * is the first revision to which the definition applies: - * - * A: Falcon A1 (SFC4000AB) - * B: Falcon B0 (SFC4000BA) - * C: Siena A0 (SFL9021AA) - * - * If the definition has been changed or removed in later revisions - * then is the last revision to which the definition applies; - * otherwise it is "Z". 
- */ - -/************************************************************************** - * - * Falcon/Siena registers and descriptors - * - ************************************************************************** - */ - -/* ADR_REGION_REG: Address region register */ -#define FR_AZ_ADR_REGION 0x00000000 -#define FRF_AZ_ADR_REGION3_LBN 96 -#define FRF_AZ_ADR_REGION3_WIDTH 18 -#define FRF_AZ_ADR_REGION2_LBN 64 -#define FRF_AZ_ADR_REGION2_WIDTH 18 -#define FRF_AZ_ADR_REGION1_LBN 32 -#define FRF_AZ_ADR_REGION1_WIDTH 18 -#define FRF_AZ_ADR_REGION0_LBN 0 -#define FRF_AZ_ADR_REGION0_WIDTH 18 - -/* INT_EN_REG_KER: Kernel driver Interrupt enable register */ -#define FR_AZ_INT_EN_KER 0x00000010 -#define FRF_AZ_KER_INT_LEVE_SEL_LBN 8 -#define FRF_AZ_KER_INT_LEVE_SEL_WIDTH 6 -#define FRF_AZ_KER_INT_CHAR_LBN 4 -#define FRF_AZ_KER_INT_CHAR_WIDTH 1 -#define FRF_AZ_KER_INT_KER_LBN 3 -#define FRF_AZ_KER_INT_KER_WIDTH 1 -#define FRF_AZ_DRV_INT_EN_KER_LBN 0 -#define FRF_AZ_DRV_INT_EN_KER_WIDTH 1 - -/* INT_EN_REG_CHAR: Char Driver interrupt enable register */ -#define FR_BZ_INT_EN_CHAR 0x00000020 -#define FRF_BZ_CHAR_INT_LEVE_SEL_LBN 8 -#define FRF_BZ_CHAR_INT_LEVE_SEL_WIDTH 6 -#define FRF_BZ_CHAR_INT_CHAR_LBN 4 -#define FRF_BZ_CHAR_INT_CHAR_WIDTH 1 -#define FRF_BZ_CHAR_INT_KER_LBN 3 -#define FRF_BZ_CHAR_INT_KER_WIDTH 1 -#define FRF_BZ_DRV_INT_EN_CHAR_LBN 0 -#define FRF_BZ_DRV_INT_EN_CHAR_WIDTH 1 - -/* INT_ADR_REG_KER: Interrupt host address for Kernel driver */ -#define FR_AZ_INT_ADR_KER 0x00000030 -#define FRF_AZ_NORM_INT_VEC_DIS_KER_LBN 64 -#define FRF_AZ_NORM_INT_VEC_DIS_KER_WIDTH 1 -#define FRF_AZ_INT_ADR_KER_LBN 0 -#define FRF_AZ_INT_ADR_KER_WIDTH 64 - -/* INT_ADR_REG_CHAR: Interrupt host address for Char driver */ -#define FR_BZ_INT_ADR_CHAR 0x00000040 -#define FRF_BZ_NORM_INT_VEC_DIS_CHAR_LBN 64 -#define FRF_BZ_NORM_INT_VEC_DIS_CHAR_WIDTH 1 -#define FRF_BZ_INT_ADR_CHAR_LBN 0 -#define FRF_BZ_INT_ADR_CHAR_WIDTH 64 - -/* INT_ACK_KER: Kernel interrupt acknowledge register */ -#define FR_AA_INT_ACK_KER 0x00000050 -#define FRF_AA_INT_ACK_KER_FIELD_LBN 0 -#define FRF_AA_INT_ACK_KER_FIELD_WIDTH 32 - -/* INT_ISR0_REG: Function 0 Interrupt Acknowledge Status register */ -#define FR_BZ_INT_ISR0 0x00000090 -#define FRF_BZ_INT_ISR_REG_LBN 0 -#define FRF_BZ_INT_ISR_REG_WIDTH 64 - -/* HW_INIT_REG: Hardware initialization register */ -#define FR_AZ_HW_INIT 0x000000c0 -#define FRF_BB_BDMRD_CPLF_FULL_LBN 124 -#define FRF_BB_BDMRD_CPLF_FULL_WIDTH 1 -#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_LBN 121 -#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_WIDTH 3 -#define FRF_CZ_TX_MRG_TAGS_LBN 120 -#define FRF_CZ_TX_MRG_TAGS_WIDTH 1 -#define FRF_AB_TRGT_MASK_ALL_LBN 100 -#define FRF_AB_TRGT_MASK_ALL_WIDTH 1 -#define FRF_AZ_DOORBELL_DROP_LBN 92 -#define FRF_AZ_DOORBELL_DROP_WIDTH 8 -#define FRF_AB_TX_RREQ_MASK_EN_LBN 76 -#define FRF_AB_TX_RREQ_MASK_EN_WIDTH 1 -#define FRF_AB_PE_EIDLE_DIS_LBN 75 -#define FRF_AB_PE_EIDLE_DIS_WIDTH 1 -#define FRF_AA_FC_BLOCKING_EN_LBN 45 -#define FRF_AA_FC_BLOCKING_EN_WIDTH 1 -#define FRF_BZ_B2B_REQ_EN_LBN 45 -#define FRF_BZ_B2B_REQ_EN_WIDTH 1 -#define FRF_AA_B2B_REQ_EN_LBN 44 -#define FRF_AA_B2B_REQ_EN_WIDTH 1 -#define FRF_BB_FC_BLOCKING_EN_LBN 44 -#define FRF_BB_FC_BLOCKING_EN_WIDTH 1 -#define FRF_AZ_POST_WR_MASK_LBN 40 -#define FRF_AZ_POST_WR_MASK_WIDTH 4 -#define FRF_AZ_TLP_TC_LBN 34 -#define FRF_AZ_TLP_TC_WIDTH 3 -#define FRF_AZ_TLP_ATTR_LBN 32 -#define FRF_AZ_TLP_ATTR_WIDTH 2 -#define FRF_AB_INTB_VEC_LBN 24 -#define FRF_AB_INTB_VEC_WIDTH 5 -#define FRF_AB_INTA_VEC_LBN 16 -#define FRF_AB_INTA_VEC_WIDTH 5 
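
(Illustrative aside, not part of the patch.) The removed regs.h carries the same _LBN/_WIDTH convention that the new farch_regs.h keeps, and the INT_EN_REG_KER fields just above are a simple case of going the other way: composing a register value from individual fields. The sketch below is a plain-C illustration only; EFX_FIELD64_PREP and build_int_en_ker() are hypothetical names, and it assumes the relevant *_LBN/*_WIDTH macros are in scope (via the renamed farch_regs.h), not the driver's own register types.

	/* Hedged sketch: composing INT_EN_REG_KER contents from its fields. */
	#include <stdint.h>
	#include "farch_regs.h"	/* assumed to provide the FRF_AZ_* macros */

	#define EFX_FIELD64_PREP(field, val) \
		(((uint64_t)(val) & ((1ULL << field##_WIDTH) - 1)) << field##_LBN)

	static uint64_t build_int_en_ker(unsigned int level_sel, int enable)
	{
		/* Interrupt level select (bits 13:8) plus the driver enable bit */
		return EFX_FIELD64_PREP(FRF_AZ_KER_INT_LEVE_SEL, level_sel) |
		       EFX_FIELD64_PREP(FRF_AZ_DRV_INT_EN_KER, enable ? 1 : 0);
	}
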
-#define FRF_AZ_WD_TIMER_LBN 8 -#define FRF_AZ_WD_TIMER_WIDTH 8 -#define FRF_AZ_US_DISABLE_LBN 5 -#define FRF_AZ_US_DISABLE_WIDTH 1 -#define FRF_AZ_TLP_EP_LBN 4 -#define FRF_AZ_TLP_EP_WIDTH 1 -#define FRF_AZ_ATTR_SEL_LBN 3 -#define FRF_AZ_ATTR_SEL_WIDTH 1 -#define FRF_AZ_TD_SEL_LBN 1 -#define FRF_AZ_TD_SEL_WIDTH 1 -#define FRF_AZ_TLP_TD_LBN 0 -#define FRF_AZ_TLP_TD_WIDTH 1 - -/* EE_SPI_HCMD_REG: SPI host command register */ -#define FR_AB_EE_SPI_HCMD 0x00000100 -#define FRF_AB_EE_SPI_HCMD_CMD_EN_LBN 31 -#define FRF_AB_EE_SPI_HCMD_CMD_EN_WIDTH 1 -#define FRF_AB_EE_WR_TIMER_ACTIVE_LBN 28 -#define FRF_AB_EE_WR_TIMER_ACTIVE_WIDTH 1 -#define FRF_AB_EE_SPI_HCMD_SF_SEL_LBN 24 -#define FRF_AB_EE_SPI_HCMD_SF_SEL_WIDTH 1 -#define FRF_AB_EE_SPI_HCMD_DABCNT_LBN 16 -#define FRF_AB_EE_SPI_HCMD_DABCNT_WIDTH 5 -#define FRF_AB_EE_SPI_HCMD_READ_LBN 15 -#define FRF_AB_EE_SPI_HCMD_READ_WIDTH 1 -#define FRF_AB_EE_SPI_HCMD_DUBCNT_LBN 12 -#define FRF_AB_EE_SPI_HCMD_DUBCNT_WIDTH 2 -#define FRF_AB_EE_SPI_HCMD_ADBCNT_LBN 8 -#define FRF_AB_EE_SPI_HCMD_ADBCNT_WIDTH 2 -#define FRF_AB_EE_SPI_HCMD_ENC_LBN 0 -#define FRF_AB_EE_SPI_HCMD_ENC_WIDTH 8 - -/* USR_EV_CFG: User Level Event Configuration register */ -#define FR_CZ_USR_EV_CFG 0x00000100 -#define FRF_CZ_USREV_DIS_LBN 16 -#define FRF_CZ_USREV_DIS_WIDTH 1 -#define FRF_CZ_DFLT_EVQ_LBN 0 -#define FRF_CZ_DFLT_EVQ_WIDTH 10 - -/* EE_SPI_HADR_REG: SPI host address register */ -#define FR_AB_EE_SPI_HADR 0x00000110 -#define FRF_AB_EE_SPI_HADR_DUBYTE_LBN 24 -#define FRF_AB_EE_SPI_HADR_DUBYTE_WIDTH 8 -#define FRF_AB_EE_SPI_HADR_ADR_LBN 0 -#define FRF_AB_EE_SPI_HADR_ADR_WIDTH 24 - -/* EE_SPI_HDATA_REG: SPI host data register */ -#define FR_AB_EE_SPI_HDATA 0x00000120 -#define FRF_AB_EE_SPI_HDATA3_LBN 96 -#define FRF_AB_EE_SPI_HDATA3_WIDTH 32 -#define FRF_AB_EE_SPI_HDATA2_LBN 64 -#define FRF_AB_EE_SPI_HDATA2_WIDTH 32 -#define FRF_AB_EE_SPI_HDATA1_LBN 32 -#define FRF_AB_EE_SPI_HDATA1_WIDTH 32 -#define FRF_AB_EE_SPI_HDATA0_LBN 0 -#define FRF_AB_EE_SPI_HDATA0_WIDTH 32 - -/* EE_BASE_PAGE_REG: Expansion ROM base mirror register */ -#define FR_AB_EE_BASE_PAGE 0x00000130 -#define FRF_AB_EE_EXPROM_MASK_LBN 16 -#define FRF_AB_EE_EXPROM_MASK_WIDTH 13 -#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_LBN 0 -#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_WIDTH 13 - -/* EE_VPD_CFG0_REG: SPI/VPD configuration register 0 */ -#define FR_AB_EE_VPD_CFG0 0x00000140 -#define FRF_AB_EE_SF_FASTRD_EN_LBN 127 -#define FRF_AB_EE_SF_FASTRD_EN_WIDTH 1 -#define FRF_AB_EE_SF_CLOCK_DIV_LBN 120 -#define FRF_AB_EE_SF_CLOCK_DIV_WIDTH 7 -#define FRF_AB_EE_VPD_WIP_POLL_LBN 119 -#define FRF_AB_EE_VPD_WIP_POLL_WIDTH 1 -#define FRF_AB_EE_EE_CLOCK_DIV_LBN 112 -#define FRF_AB_EE_EE_CLOCK_DIV_WIDTH 7 -#define FRF_AB_EE_EE_WR_TMR_VALUE_LBN 96 -#define FRF_AB_EE_EE_WR_TMR_VALUE_WIDTH 16 -#define FRF_AB_EE_VPDW_LENGTH_LBN 80 -#define FRF_AB_EE_VPDW_LENGTH_WIDTH 15 -#define FRF_AB_EE_VPDW_BASE_LBN 64 -#define FRF_AB_EE_VPDW_BASE_WIDTH 15 -#define FRF_AB_EE_VPD_WR_CMD_EN_LBN 56 -#define FRF_AB_EE_VPD_WR_CMD_EN_WIDTH 8 -#define FRF_AB_EE_VPD_BASE_LBN 32 -#define FRF_AB_EE_VPD_BASE_WIDTH 24 -#define FRF_AB_EE_VPD_LENGTH_LBN 16 -#define FRF_AB_EE_VPD_LENGTH_WIDTH 15 -#define FRF_AB_EE_VPD_AD_SIZE_LBN 8 -#define FRF_AB_EE_VPD_AD_SIZE_WIDTH 5 -#define FRF_AB_EE_VPD_ACCESS_ON_LBN 5 -#define FRF_AB_EE_VPD_ACCESS_ON_WIDTH 1 -#define FRF_AB_EE_VPD_ACCESS_BLOCK_LBN 4 -#define FRF_AB_EE_VPD_ACCESS_BLOCK_WIDTH 1 -#define FRF_AB_EE_VPD_DEV_SF_SEL_LBN 2 -#define FRF_AB_EE_VPD_DEV_SF_SEL_WIDTH 1 -#define FRF_AB_EE_VPD_EN_AD9_MODE_LBN 1 -#define 
FRF_AB_EE_VPD_EN_AD9_MODE_WIDTH 1 -#define FRF_AB_EE_VPD_EN_LBN 0 -#define FRF_AB_EE_VPD_EN_WIDTH 1 - -/* EE_VPD_SW_CNTL_REG: VPD access SW control register */ -#define FR_AB_EE_VPD_SW_CNTL 0x00000150 -#define FRF_AB_EE_VPD_CYCLE_PENDING_LBN 31 -#define FRF_AB_EE_VPD_CYCLE_PENDING_WIDTH 1 -#define FRF_AB_EE_VPD_CYC_WRITE_LBN 28 -#define FRF_AB_EE_VPD_CYC_WRITE_WIDTH 1 -#define FRF_AB_EE_VPD_CYC_ADR_LBN 0 -#define FRF_AB_EE_VPD_CYC_ADR_WIDTH 15 - -/* EE_VPD_SW_DATA_REG: VPD access SW data register */ -#define FR_AB_EE_VPD_SW_DATA 0x00000160 -#define FRF_AB_EE_VPD_CYC_DAT_LBN 0 -#define FRF_AB_EE_VPD_CYC_DAT_WIDTH 32 - -/* PBMX_DBG_IADDR_REG: Capture Module address register */ -#define FR_CZ_PBMX_DBG_IADDR 0x000001f0 -#define FRF_CZ_PBMX_DBG_IADDR_LBN 0 -#define FRF_CZ_PBMX_DBG_IADDR_WIDTH 32 - -/* PCIE_CORE_INDIRECT_REG: Indirect Access to PCIE Core registers */ -#define FR_BB_PCIE_CORE_INDIRECT 0x000001f0 -#define FRF_BB_PCIE_CORE_TARGET_DATA_LBN 32 -#define FRF_BB_PCIE_CORE_TARGET_DATA_WIDTH 32 -#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_LBN 15 -#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_WIDTH 1 -#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_LBN 0 -#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_WIDTH 12 - -/* PBMX_DBG_IDATA_REG: Capture Module data register */ -#define FR_CZ_PBMX_DBG_IDATA 0x000001f8 -#define FRF_CZ_PBMX_DBG_IDATA_LBN 0 -#define FRF_CZ_PBMX_DBG_IDATA_WIDTH 64 - -/* NIC_STAT_REG: NIC status register */ -#define FR_AB_NIC_STAT 0x00000200 -#define FRF_BB_AER_DIS_LBN 34 -#define FRF_BB_AER_DIS_WIDTH 1 -#define FRF_BB_EE_STRAP_EN_LBN 31 -#define FRF_BB_EE_STRAP_EN_WIDTH 1 -#define FRF_BB_EE_STRAP_LBN 24 -#define FRF_BB_EE_STRAP_WIDTH 4 -#define FRF_BB_REVISION_ID_LBN 17 -#define FRF_BB_REVISION_ID_WIDTH 7 -#define FRF_AB_ONCHIP_SRAM_LBN 16 -#define FRF_AB_ONCHIP_SRAM_WIDTH 1 -#define FRF_AB_SF_PRST_LBN 9 -#define FRF_AB_SF_PRST_WIDTH 1 -#define FRF_AB_EE_PRST_LBN 8 -#define FRF_AB_EE_PRST_WIDTH 1 -#define FRF_AB_ATE_MODE_LBN 3 -#define FRF_AB_ATE_MODE_WIDTH 1 -#define FRF_AB_STRAP_PINS_LBN 0 -#define FRF_AB_STRAP_PINS_WIDTH 3 - -/* GPIO_CTL_REG: GPIO control register */ -#define FR_AB_GPIO_CTL 0x00000210 -#define FRF_AB_GPIO_OUT3_LBN 112 -#define FRF_AB_GPIO_OUT3_WIDTH 16 -#define FRF_AB_GPIO_IN3_LBN 104 -#define FRF_AB_GPIO_IN3_WIDTH 8 -#define FRF_AB_GPIO_PWRUP_VALUE3_LBN 96 -#define FRF_AB_GPIO_PWRUP_VALUE3_WIDTH 8 -#define FRF_AB_GPIO_OUT2_LBN 80 -#define FRF_AB_GPIO_OUT2_WIDTH 16 -#define FRF_AB_GPIO_IN2_LBN 72 -#define FRF_AB_GPIO_IN2_WIDTH 8 -#define FRF_AB_GPIO_PWRUP_VALUE2_LBN 64 -#define FRF_AB_GPIO_PWRUP_VALUE2_WIDTH 8 -#define FRF_AB_GPIO15_OEN_LBN 63 -#define FRF_AB_GPIO15_OEN_WIDTH 1 -#define FRF_AB_GPIO14_OEN_LBN 62 -#define FRF_AB_GPIO14_OEN_WIDTH 1 -#define FRF_AB_GPIO13_OEN_LBN 61 -#define FRF_AB_GPIO13_OEN_WIDTH 1 -#define FRF_AB_GPIO12_OEN_LBN 60 -#define FRF_AB_GPIO12_OEN_WIDTH 1 -#define FRF_AB_GPIO11_OEN_LBN 59 -#define FRF_AB_GPIO11_OEN_WIDTH 1 -#define FRF_AB_GPIO10_OEN_LBN 58 -#define FRF_AB_GPIO10_OEN_WIDTH 1 -#define FRF_AB_GPIO9_OEN_LBN 57 -#define FRF_AB_GPIO9_OEN_WIDTH 1 -#define FRF_AB_GPIO8_OEN_LBN 56 -#define FRF_AB_GPIO8_OEN_WIDTH 1 -#define FRF_AB_GPIO15_OUT_LBN 55 -#define FRF_AB_GPIO15_OUT_WIDTH 1 -#define FRF_AB_GPIO14_OUT_LBN 54 -#define FRF_AB_GPIO14_OUT_WIDTH 1 -#define FRF_AB_GPIO13_OUT_LBN 53 -#define FRF_AB_GPIO13_OUT_WIDTH 1 -#define FRF_AB_GPIO12_OUT_LBN 52 -#define FRF_AB_GPIO12_OUT_WIDTH 1 -#define FRF_AB_GPIO11_OUT_LBN 51 -#define FRF_AB_GPIO11_OUT_WIDTH 1 -#define FRF_AB_GPIO10_OUT_LBN 50 -#define FRF_AB_GPIO10_OUT_WIDTH 1 -#define 
FRF_AB_GPIO9_OUT_LBN 49 -#define FRF_AB_GPIO9_OUT_WIDTH 1 -#define FRF_AB_GPIO8_OUT_LBN 48 -#define FRF_AB_GPIO8_OUT_WIDTH 1 -#define FRF_AB_GPIO15_IN_LBN 47 -#define FRF_AB_GPIO15_IN_WIDTH 1 -#define FRF_AB_GPIO14_IN_LBN 46 -#define FRF_AB_GPIO14_IN_WIDTH 1 -#define FRF_AB_GPIO13_IN_LBN 45 -#define FRF_AB_GPIO13_IN_WIDTH 1 -#define FRF_AB_GPIO12_IN_LBN 44 -#define FRF_AB_GPIO12_IN_WIDTH 1 -#define FRF_AB_GPIO11_IN_LBN 43 -#define FRF_AB_GPIO11_IN_WIDTH 1 -#define FRF_AB_GPIO10_IN_LBN 42 -#define FRF_AB_GPIO10_IN_WIDTH 1 -#define FRF_AB_GPIO9_IN_LBN 41 -#define FRF_AB_GPIO9_IN_WIDTH 1 -#define FRF_AB_GPIO8_IN_LBN 40 -#define FRF_AB_GPIO8_IN_WIDTH 1 -#define FRF_AB_GPIO15_PWRUP_VALUE_LBN 39 -#define FRF_AB_GPIO15_PWRUP_VALUE_WIDTH 1 -#define FRF_AB_GPIO14_PWRUP_VALUE_LBN 38 -#define FRF_AB_GPIO14_PWRUP_VALUE_WIDTH 1 -#define FRF_AB_GPIO13_PWRUP_VALUE_LBN 37 -#define FRF_AB_GPIO13_PWRUP_VALUE_WIDTH 1 -#define FRF_AB_GPIO12_PWRUP_VALUE_LBN 36 -#define FRF_AB_GPIO12_PWRUP_VALUE_WIDTH 1 -#define FRF_AB_GPIO11_PWRUP_VALUE_LBN 35 -#define FRF_AB_GPIO11_PWRUP_VALUE_WIDTH 1 -#define FRF_AB_GPIO10_PWRUP_VALUE_LBN 34 -#define FRF_AB_GPIO10_PWRUP_VALUE_WIDTH 1 -#define FRF_AB_GPIO9_PWRUP_VALUE_LBN 33 -#define FRF_AB_GPIO9_PWRUP_VALUE_WIDTH 1 -#define FRF_AB_GPIO8_PWRUP_VALUE_LBN 32 -#define FRF_AB_GPIO8_PWRUP_VALUE_WIDTH 1 -#define FRF_AB_CLK156_OUT_EN_LBN 31 -#define FRF_AB_CLK156_OUT_EN_WIDTH 1 -#define FRF_AB_USE_NIC_CLK_LBN 30 -#define FRF_AB_USE_NIC_CLK_WIDTH 1 -#define FRF_AB_GPIO5_OEN_LBN 29 -#define FRF_AB_GPIO5_OEN_WIDTH 1 -#define FRF_AB_GPIO4_OEN_LBN 28 -#define FRF_AB_GPIO4_OEN_WIDTH 1 -#define FRF_AB_GPIO3_OEN_LBN 27 -#define FRF_AB_GPIO3_OEN_WIDTH 1 -#define FRF_AB_GPIO2_OEN_LBN 26 -#define FRF_AB_GPIO2_OEN_WIDTH 1 -#define FRF_AB_GPIO1_OEN_LBN 25 -#define FRF_AB_GPIO1_OEN_WIDTH 1 -#define FRF_AB_GPIO0_OEN_LBN 24 -#define FRF_AB_GPIO0_OEN_WIDTH 1 -#define FRF_AB_GPIO7_OUT_LBN 23 -#define FRF_AB_GPIO7_OUT_WIDTH 1 -#define FRF_AB_GPIO6_OUT_LBN 22 -#define FRF_AB_GPIO6_OUT_WIDTH 1 -#define FRF_AB_GPIO5_OUT_LBN 21 -#define FRF_AB_GPIO5_OUT_WIDTH 1 -#define FRF_AB_GPIO4_OUT_LBN 20 -#define FRF_AB_GPIO4_OUT_WIDTH 1 -#define FRF_AB_GPIO3_OUT_LBN 19 -#define FRF_AB_GPIO3_OUT_WIDTH 1 -#define FRF_AB_GPIO2_OUT_LBN 18 -#define FRF_AB_GPIO2_OUT_WIDTH 1 -#define FRF_AB_GPIO1_OUT_LBN 17 -#define FRF_AB_GPIO1_OUT_WIDTH 1 -#define FRF_AB_GPIO0_OUT_LBN 16 -#define FRF_AB_GPIO0_OUT_WIDTH 1 -#define FRF_AB_GPIO7_IN_LBN 15 -#define FRF_AB_GPIO7_IN_WIDTH 1 -#define FRF_AB_GPIO6_IN_LBN 14 -#define FRF_AB_GPIO6_IN_WIDTH 1 -#define FRF_AB_GPIO5_IN_LBN 13 -#define FRF_AB_GPIO5_IN_WIDTH 1 -#define FRF_AB_GPIO4_IN_LBN 12 -#define FRF_AB_GPIO4_IN_WIDTH 1 -#define FRF_AB_GPIO3_IN_LBN 11 -#define FRF_AB_GPIO3_IN_WIDTH 1 -#define FRF_AB_GPIO2_IN_LBN 10 -#define FRF_AB_GPIO2_IN_WIDTH 1 -#define FRF_AB_GPIO1_IN_LBN 9 -#define FRF_AB_GPIO1_IN_WIDTH 1 -#define FRF_AB_GPIO0_IN_LBN 8 -#define FRF_AB_GPIO0_IN_WIDTH 1 -#define FRF_AB_GPIO7_PWRUP_VALUE_LBN 7 -#define FRF_AB_GPIO7_PWRUP_VALUE_WIDTH 1 -#define FRF_AB_GPIO6_PWRUP_VALUE_LBN 6 -#define FRF_AB_GPIO6_PWRUP_VALUE_WIDTH 1 -#define FRF_AB_GPIO5_PWRUP_VALUE_LBN 5 -#define FRF_AB_GPIO5_PWRUP_VALUE_WIDTH 1 -#define FRF_AB_GPIO4_PWRUP_VALUE_LBN 4 -#define FRF_AB_GPIO4_PWRUP_VALUE_WIDTH 1 -#define FRF_AB_GPIO3_PWRUP_VALUE_LBN 3 -#define FRF_AB_GPIO3_PWRUP_VALUE_WIDTH 1 -#define FRF_AB_GPIO2_PWRUP_VALUE_LBN 2 -#define FRF_AB_GPIO2_PWRUP_VALUE_WIDTH 1 -#define FRF_AB_GPIO1_PWRUP_VALUE_LBN 1 -#define FRF_AB_GPIO1_PWRUP_VALUE_WIDTH 1 -#define FRF_AB_GPIO0_PWRUP_VALUE_LBN 0 
-#define FRF_AB_GPIO0_PWRUP_VALUE_WIDTH 1 - -/* GLB_CTL_REG: Global control register */ -#define FR_AB_GLB_CTL 0x00000220 -#define FRF_AB_EXT_PHY_RST_CTL_LBN 63 -#define FRF_AB_EXT_PHY_RST_CTL_WIDTH 1 -#define FRF_AB_XAUI_SD_RST_CTL_LBN 62 -#define FRF_AB_XAUI_SD_RST_CTL_WIDTH 1 -#define FRF_AB_PCIE_SD_RST_CTL_LBN 61 -#define FRF_AB_PCIE_SD_RST_CTL_WIDTH 1 -#define FRF_AA_PCIX_RST_CTL_LBN 60 -#define FRF_AA_PCIX_RST_CTL_WIDTH 1 -#define FRF_BB_BIU_RST_CTL_LBN 60 -#define FRF_BB_BIU_RST_CTL_WIDTH 1 -#define FRF_AB_PCIE_STKY_RST_CTL_LBN 59 -#define FRF_AB_PCIE_STKY_RST_CTL_WIDTH 1 -#define FRF_AB_PCIE_NSTKY_RST_CTL_LBN 58 -#define FRF_AB_PCIE_NSTKY_RST_CTL_WIDTH 1 -#define FRF_AB_PCIE_CORE_RST_CTL_LBN 57 -#define FRF_AB_PCIE_CORE_RST_CTL_WIDTH 1 -#define FRF_AB_XGRX_RST_CTL_LBN 56 -#define FRF_AB_XGRX_RST_CTL_WIDTH 1 -#define FRF_AB_XGTX_RST_CTL_LBN 55 -#define FRF_AB_XGTX_RST_CTL_WIDTH 1 -#define FRF_AB_EM_RST_CTL_LBN 54 -#define FRF_AB_EM_RST_CTL_WIDTH 1 -#define FRF_AB_EV_RST_CTL_LBN 53 -#define FRF_AB_EV_RST_CTL_WIDTH 1 -#define FRF_AB_SR_RST_CTL_LBN 52 -#define FRF_AB_SR_RST_CTL_WIDTH 1 -#define FRF_AB_RX_RST_CTL_LBN 51 -#define FRF_AB_RX_RST_CTL_WIDTH 1 -#define FRF_AB_TX_RST_CTL_LBN 50 -#define FRF_AB_TX_RST_CTL_WIDTH 1 -#define FRF_AB_EE_RST_CTL_LBN 49 -#define FRF_AB_EE_RST_CTL_WIDTH 1 -#define FRF_AB_CS_RST_CTL_LBN 48 -#define FRF_AB_CS_RST_CTL_WIDTH 1 -#define FRF_AB_HOT_RST_CTL_LBN 40 -#define FRF_AB_HOT_RST_CTL_WIDTH 2 -#define FRF_AB_RST_EXT_PHY_LBN 31 -#define FRF_AB_RST_EXT_PHY_WIDTH 1 -#define FRF_AB_RST_XAUI_SD_LBN 30 -#define FRF_AB_RST_XAUI_SD_WIDTH 1 -#define FRF_AB_RST_PCIE_SD_LBN 29 -#define FRF_AB_RST_PCIE_SD_WIDTH 1 -#define FRF_AA_RST_PCIX_LBN 28 -#define FRF_AA_RST_PCIX_WIDTH 1 -#define FRF_BB_RST_BIU_LBN 28 -#define FRF_BB_RST_BIU_WIDTH 1 -#define FRF_AB_RST_PCIE_STKY_LBN 27 -#define FRF_AB_RST_PCIE_STKY_WIDTH 1 -#define FRF_AB_RST_PCIE_NSTKY_LBN 26 -#define FRF_AB_RST_PCIE_NSTKY_WIDTH 1 -#define FRF_AB_RST_PCIE_CORE_LBN 25 -#define FRF_AB_RST_PCIE_CORE_WIDTH 1 -#define FRF_AB_RST_XGRX_LBN 24 -#define FRF_AB_RST_XGRX_WIDTH 1 -#define FRF_AB_RST_XGTX_LBN 23 -#define FRF_AB_RST_XGTX_WIDTH 1 -#define FRF_AB_RST_EM_LBN 22 -#define FRF_AB_RST_EM_WIDTH 1 -#define FRF_AB_RST_EV_LBN 21 -#define FRF_AB_RST_EV_WIDTH 1 -#define FRF_AB_RST_SR_LBN 20 -#define FRF_AB_RST_SR_WIDTH 1 -#define FRF_AB_RST_RX_LBN 19 -#define FRF_AB_RST_RX_WIDTH 1 -#define FRF_AB_RST_TX_LBN 18 -#define FRF_AB_RST_TX_WIDTH 1 -#define FRF_AB_RST_SF_LBN 17 -#define FRF_AB_RST_SF_WIDTH 1 -#define FRF_AB_RST_CS_LBN 16 -#define FRF_AB_RST_CS_WIDTH 1 -#define FRF_AB_INT_RST_DUR_LBN 4 -#define FRF_AB_INT_RST_DUR_WIDTH 3 -#define FRF_AB_EXT_PHY_RST_DUR_LBN 1 -#define FRF_AB_EXT_PHY_RST_DUR_WIDTH 3 -#define FFE_AB_EXT_PHY_RST_DUR_10240US 7 -#define FFE_AB_EXT_PHY_RST_DUR_5120US 6 -#define FFE_AB_EXT_PHY_RST_DUR_2560US 5 -#define FFE_AB_EXT_PHY_RST_DUR_1280US 4 -#define FFE_AB_EXT_PHY_RST_DUR_640US 3 -#define FFE_AB_EXT_PHY_RST_DUR_320US 2 -#define FFE_AB_EXT_PHY_RST_DUR_160US 1 -#define FFE_AB_EXT_PHY_RST_DUR_80US 0 -#define FRF_AB_SWRST_LBN 0 -#define FRF_AB_SWRST_WIDTH 1 - -/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */ -#define FR_AZ_FATAL_INTR_KER 0x00000230 -#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_LBN 44 -#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_WIDTH 1 -#define FRF_AB_PCI_BUSERR_INT_KER_EN_LBN 43 -#define FRF_AB_PCI_BUSERR_INT_KER_EN_WIDTH 1 -#define FRF_CZ_MBU_PERR_INT_KER_EN_LBN 43 -#define FRF_CZ_MBU_PERR_INT_KER_EN_WIDTH 1 -#define FRF_AZ_SRAM_OOB_INT_KER_EN_LBN 42 -#define 
FRF_AZ_SRAM_OOB_INT_KER_EN_WIDTH 1 -#define FRF_AZ_BUFID_OOB_INT_KER_EN_LBN 41 -#define FRF_AZ_BUFID_OOB_INT_KER_EN_WIDTH 1 -#define FRF_AZ_MEM_PERR_INT_KER_EN_LBN 40 -#define FRF_AZ_MEM_PERR_INT_KER_EN_WIDTH 1 -#define FRF_AZ_RBUF_OWN_INT_KER_EN_LBN 39 -#define FRF_AZ_RBUF_OWN_INT_KER_EN_WIDTH 1 -#define FRF_AZ_TBUF_OWN_INT_KER_EN_LBN 38 -#define FRF_AZ_TBUF_OWN_INT_KER_EN_WIDTH 1 -#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_LBN 37 -#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_WIDTH 1 -#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_LBN 36 -#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_WIDTH 1 -#define FRF_AZ_EVQ_OWN_INT_KER_EN_LBN 35 -#define FRF_AZ_EVQ_OWN_INT_KER_EN_WIDTH 1 -#define FRF_AZ_EVF_OFLO_INT_KER_EN_LBN 34 -#define FRF_AZ_EVF_OFLO_INT_KER_EN_WIDTH 1 -#define FRF_AZ_ILL_ADR_INT_KER_EN_LBN 33 -#define FRF_AZ_ILL_ADR_INT_KER_EN_WIDTH 1 -#define FRF_AZ_SRM_PERR_INT_KER_EN_LBN 32 -#define FRF_AZ_SRM_PERR_INT_KER_EN_WIDTH 1 -#define FRF_CZ_SRAM_PERR_INT_P_KER_LBN 12 -#define FRF_CZ_SRAM_PERR_INT_P_KER_WIDTH 1 -#define FRF_AB_PCI_BUSERR_INT_KER_LBN 11 -#define FRF_AB_PCI_BUSERR_INT_KER_WIDTH 1 -#define FRF_CZ_MBU_PERR_INT_KER_LBN 11 -#define FRF_CZ_MBU_PERR_INT_KER_WIDTH 1 -#define FRF_AZ_SRAM_OOB_INT_KER_LBN 10 -#define FRF_AZ_SRAM_OOB_INT_KER_WIDTH 1 -#define FRF_AZ_BUFID_DC_OOB_INT_KER_LBN 9 -#define FRF_AZ_BUFID_DC_OOB_INT_KER_WIDTH 1 -#define FRF_AZ_MEM_PERR_INT_KER_LBN 8 -#define FRF_AZ_MEM_PERR_INT_KER_WIDTH 1 -#define FRF_AZ_RBUF_OWN_INT_KER_LBN 7 -#define FRF_AZ_RBUF_OWN_INT_KER_WIDTH 1 -#define FRF_AZ_TBUF_OWN_INT_KER_LBN 6 -#define FRF_AZ_TBUF_OWN_INT_KER_WIDTH 1 -#define FRF_AZ_RDESCQ_OWN_INT_KER_LBN 5 -#define FRF_AZ_RDESCQ_OWN_INT_KER_WIDTH 1 -#define FRF_AZ_TDESCQ_OWN_INT_KER_LBN 4 -#define FRF_AZ_TDESCQ_OWN_INT_KER_WIDTH 1 -#define FRF_AZ_EVQ_OWN_INT_KER_LBN 3 -#define FRF_AZ_EVQ_OWN_INT_KER_WIDTH 1 -#define FRF_AZ_EVF_OFLO_INT_KER_LBN 2 -#define FRF_AZ_EVF_OFLO_INT_KER_WIDTH 1 -#define FRF_AZ_ILL_ADR_INT_KER_LBN 1 -#define FRF_AZ_ILL_ADR_INT_KER_WIDTH 1 -#define FRF_AZ_SRM_PERR_INT_KER_LBN 0 -#define FRF_AZ_SRM_PERR_INT_KER_WIDTH 1 - -/* FATAL_INTR_REG_CHAR: Fatal interrupt register for Char */ -#define FR_BZ_FATAL_INTR_CHAR 0x00000240 -#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_LBN 44 -#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_WIDTH 1 -#define FRF_BB_PCI_BUSERR_INT_CHAR_EN_LBN 43 -#define FRF_BB_PCI_BUSERR_INT_CHAR_EN_WIDTH 1 -#define FRF_CZ_MBU_PERR_INT_CHAR_EN_LBN 43 -#define FRF_CZ_MBU_PERR_INT_CHAR_EN_WIDTH 1 -#define FRF_BZ_SRAM_OOB_INT_CHAR_EN_LBN 42 -#define FRF_BZ_SRAM_OOB_INT_CHAR_EN_WIDTH 1 -#define FRF_BZ_BUFID_OOB_INT_CHAR_EN_LBN 41 -#define FRF_BZ_BUFID_OOB_INT_CHAR_EN_WIDTH 1 -#define FRF_BZ_MEM_PERR_INT_CHAR_EN_LBN 40 -#define FRF_BZ_MEM_PERR_INT_CHAR_EN_WIDTH 1 -#define FRF_BZ_RBUF_OWN_INT_CHAR_EN_LBN 39 -#define FRF_BZ_RBUF_OWN_INT_CHAR_EN_WIDTH 1 -#define FRF_BZ_TBUF_OWN_INT_CHAR_EN_LBN 38 -#define FRF_BZ_TBUF_OWN_INT_CHAR_EN_WIDTH 1 -#define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_LBN 37 -#define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_WIDTH 1 -#define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_LBN 36 -#define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_WIDTH 1 -#define FRF_BZ_EVQ_OWN_INT_CHAR_EN_LBN 35 -#define FRF_BZ_EVQ_OWN_INT_CHAR_EN_WIDTH 1 -#define FRF_BZ_EVF_OFLO_INT_CHAR_EN_LBN 34 -#define FRF_BZ_EVF_OFLO_INT_CHAR_EN_WIDTH 1 -#define FRF_BZ_ILL_ADR_INT_CHAR_EN_LBN 33 -#define FRF_BZ_ILL_ADR_INT_CHAR_EN_WIDTH 1 -#define FRF_BZ_SRM_PERR_INT_CHAR_EN_LBN 32 -#define FRF_BZ_SRM_PERR_INT_CHAR_EN_WIDTH 1 -#define FRF_CZ_SRAM_PERR_INT_P_CHAR_LBN 12 -#define FRF_CZ_SRAM_PERR_INT_P_CHAR_WIDTH 1 -#define FRF_BB_PCI_BUSERR_INT_CHAR_LBN 
11 -#define FRF_BB_PCI_BUSERR_INT_CHAR_WIDTH 1 -#define FRF_CZ_MBU_PERR_INT_CHAR_LBN 11 -#define FRF_CZ_MBU_PERR_INT_CHAR_WIDTH 1 -#define FRF_BZ_SRAM_OOB_INT_CHAR_LBN 10 -#define FRF_BZ_SRAM_OOB_INT_CHAR_WIDTH 1 -#define FRF_BZ_BUFID_DC_OOB_INT_CHAR_LBN 9 -#define FRF_BZ_BUFID_DC_OOB_INT_CHAR_WIDTH 1 -#define FRF_BZ_MEM_PERR_INT_CHAR_LBN 8 -#define FRF_BZ_MEM_PERR_INT_CHAR_WIDTH 1 -#define FRF_BZ_RBUF_OWN_INT_CHAR_LBN 7 -#define FRF_BZ_RBUF_OWN_INT_CHAR_WIDTH 1 -#define FRF_BZ_TBUF_OWN_INT_CHAR_LBN 6 -#define FRF_BZ_TBUF_OWN_INT_CHAR_WIDTH 1 -#define FRF_BZ_RDESCQ_OWN_INT_CHAR_LBN 5 -#define FRF_BZ_RDESCQ_OWN_INT_CHAR_WIDTH 1 -#define FRF_BZ_TDESCQ_OWN_INT_CHAR_LBN 4 -#define FRF_BZ_TDESCQ_OWN_INT_CHAR_WIDTH 1 -#define FRF_BZ_EVQ_OWN_INT_CHAR_LBN 3 -#define FRF_BZ_EVQ_OWN_INT_CHAR_WIDTH 1 -#define FRF_BZ_EVF_OFLO_INT_CHAR_LBN 2 -#define FRF_BZ_EVF_OFLO_INT_CHAR_WIDTH 1 -#define FRF_BZ_ILL_ADR_INT_CHAR_LBN 1 -#define FRF_BZ_ILL_ADR_INT_CHAR_WIDTH 1 -#define FRF_BZ_SRM_PERR_INT_CHAR_LBN 0 -#define FRF_BZ_SRM_PERR_INT_CHAR_WIDTH 1 - -/* DP_CTRL_REG: Datapath control register */ -#define FR_BZ_DP_CTRL 0x00000250 -#define FRF_BZ_FLS_EVQ_ID_LBN 0 -#define FRF_BZ_FLS_EVQ_ID_WIDTH 12 - -/* MEM_STAT_REG: Memory status register */ -#define FR_AZ_MEM_STAT 0x00000260 -#define FRF_AB_MEM_PERR_VEC_LBN 53 -#define FRF_AB_MEM_PERR_VEC_WIDTH 38 -#define FRF_AB_MBIST_CORR_LBN 38 -#define FRF_AB_MBIST_CORR_WIDTH 15 -#define FRF_AB_MBIST_ERR_LBN 0 -#define FRF_AB_MBIST_ERR_WIDTH 40 -#define FRF_CZ_MEM_PERR_VEC_LBN 0 -#define FRF_CZ_MEM_PERR_VEC_WIDTH 35 - -/* CS_DEBUG_REG: Debug register */ -#define FR_AZ_CS_DEBUG 0x00000270 -#define FRF_AB_GLB_DEBUG2_SEL_LBN 50 -#define FRF_AB_GLB_DEBUG2_SEL_WIDTH 3 -#define FRF_AB_DEBUG_BLK_SEL2_LBN 47 -#define FRF_AB_DEBUG_BLK_SEL2_WIDTH 3 -#define FRF_AB_DEBUG_BLK_SEL1_LBN 44 -#define FRF_AB_DEBUG_BLK_SEL1_WIDTH 3 -#define FRF_AB_DEBUG_BLK_SEL0_LBN 41 -#define FRF_AB_DEBUG_BLK_SEL0_WIDTH 3 -#define FRF_CZ_CS_PORT_NUM_LBN 40 -#define FRF_CZ_CS_PORT_NUM_WIDTH 2 -#define FRF_AB_MISC_DEBUG_ADDR_LBN 36 -#define FRF_AB_MISC_DEBUG_ADDR_WIDTH 5 -#define FRF_AB_SERDES_DEBUG_ADDR_LBN 31 -#define FRF_AB_SERDES_DEBUG_ADDR_WIDTH 5 -#define FRF_CZ_CS_PORT_FPE_LBN 1 -#define FRF_CZ_CS_PORT_FPE_WIDTH 35 -#define FRF_AB_EM_DEBUG_ADDR_LBN 26 -#define FRF_AB_EM_DEBUG_ADDR_WIDTH 5 -#define FRF_AB_SR_DEBUG_ADDR_LBN 21 -#define FRF_AB_SR_DEBUG_ADDR_WIDTH 5 -#define FRF_AB_EV_DEBUG_ADDR_LBN 16 -#define FRF_AB_EV_DEBUG_ADDR_WIDTH 5 -#define FRF_AB_RX_DEBUG_ADDR_LBN 11 -#define FRF_AB_RX_DEBUG_ADDR_WIDTH 5 -#define FRF_AB_TX_DEBUG_ADDR_LBN 6 -#define FRF_AB_TX_DEBUG_ADDR_WIDTH 5 -#define FRF_AB_CS_BIU_DEBUG_ADDR_LBN 1 -#define FRF_AB_CS_BIU_DEBUG_ADDR_WIDTH 5 -#define FRF_AZ_CS_DEBUG_EN_LBN 0 -#define FRF_AZ_CS_DEBUG_EN_WIDTH 1 - -/* DRIVER_REG: Driver scratch register [0-7] */ -#define FR_AZ_DRIVER 0x00000280 -#define FR_AZ_DRIVER_STEP 16 -#define FR_AZ_DRIVER_ROWS 8 -#define FRF_AZ_DRIVER_DW0_LBN 0 -#define FRF_AZ_DRIVER_DW0_WIDTH 32 - -/* ALTERA_BUILD_REG: Altera build register */ -#define FR_AZ_ALTERA_BUILD 0x00000300 -#define FRF_AZ_ALTERA_BUILD_VER_LBN 0 -#define FRF_AZ_ALTERA_BUILD_VER_WIDTH 32 - -/* CSR_SPARE_REG: Spare register */ -#define FR_AZ_CSR_SPARE 0x00000310 -#define FRF_AB_MEM_PERR_EN_LBN 64 -#define FRF_AB_MEM_PERR_EN_WIDTH 38 -#define FRF_CZ_MEM_PERR_EN_LBN 64 -#define FRF_CZ_MEM_PERR_EN_WIDTH 35 -#define FRF_AB_MEM_PERR_EN_TX_DATA_LBN 72 -#define FRF_AB_MEM_PERR_EN_TX_DATA_WIDTH 2 -#define FRF_AZ_CSR_SPARE_BITS_LBN 0 -#define FRF_AZ_CSR_SPARE_BITS_WIDTH 32 - -/* 
PCIE_SD_CTL0123_REG: PCIE SerDes control register 0 to 3 */ -#define FR_AB_PCIE_SD_CTL0123 0x00000320 -#define FRF_AB_PCIE_TESTSIG_H_LBN 96 -#define FRF_AB_PCIE_TESTSIG_H_WIDTH 19 -#define FRF_AB_PCIE_TESTSIG_L_LBN 64 -#define FRF_AB_PCIE_TESTSIG_L_WIDTH 19 -#define FRF_AB_PCIE_OFFSET_LBN 56 -#define FRF_AB_PCIE_OFFSET_WIDTH 8 -#define FRF_AB_PCIE_OFFSETEN_H_LBN 55 -#define FRF_AB_PCIE_OFFSETEN_H_WIDTH 1 -#define FRF_AB_PCIE_OFFSETEN_L_LBN 54 -#define FRF_AB_PCIE_OFFSETEN_L_WIDTH 1 -#define FRF_AB_PCIE_HIVMODE_H_LBN 53 -#define FRF_AB_PCIE_HIVMODE_H_WIDTH 1 -#define FRF_AB_PCIE_HIVMODE_L_LBN 52 -#define FRF_AB_PCIE_HIVMODE_L_WIDTH 1 -#define FRF_AB_PCIE_PARRESET_H_LBN 51 -#define FRF_AB_PCIE_PARRESET_H_WIDTH 1 -#define FRF_AB_PCIE_PARRESET_L_LBN 50 -#define FRF_AB_PCIE_PARRESET_L_WIDTH 1 -#define FRF_AB_PCIE_LPBKWDRV_H_LBN 49 -#define FRF_AB_PCIE_LPBKWDRV_H_WIDTH 1 -#define FRF_AB_PCIE_LPBKWDRV_L_LBN 48 -#define FRF_AB_PCIE_LPBKWDRV_L_WIDTH 1 -#define FRF_AB_PCIE_LPBK_LBN 40 -#define FRF_AB_PCIE_LPBK_WIDTH 8 -#define FRF_AB_PCIE_PARLPBK_LBN 32 -#define FRF_AB_PCIE_PARLPBK_WIDTH 8 -#define FRF_AB_PCIE_RXTERMADJ_H_LBN 30 -#define FRF_AB_PCIE_RXTERMADJ_H_WIDTH 2 -#define FRF_AB_PCIE_RXTERMADJ_L_LBN 28 -#define FRF_AB_PCIE_RXTERMADJ_L_WIDTH 2 -#define FFE_AB_PCIE_RXTERMADJ_MIN15PCNT 3 -#define FFE_AB_PCIE_RXTERMADJ_PL10PCNT 2 -#define FFE_AB_PCIE_RXTERMADJ_MIN17PCNT 1 -#define FFE_AB_PCIE_RXTERMADJ_NOMNL 0 -#define FRF_AB_PCIE_TXTERMADJ_H_LBN 26 -#define FRF_AB_PCIE_TXTERMADJ_H_WIDTH 2 -#define FRF_AB_PCIE_TXTERMADJ_L_LBN 24 -#define FRF_AB_PCIE_TXTERMADJ_L_WIDTH 2 -#define FFE_AB_PCIE_TXTERMADJ_MIN15PCNT 3 -#define FFE_AB_PCIE_TXTERMADJ_PL10PCNT 2 -#define FFE_AB_PCIE_TXTERMADJ_MIN17PCNT 1 -#define FFE_AB_PCIE_TXTERMADJ_NOMNL 0 -#define FRF_AB_PCIE_RXEQCTL_H_LBN 18 -#define FRF_AB_PCIE_RXEQCTL_H_WIDTH 2 -#define FRF_AB_PCIE_RXEQCTL_L_LBN 16 -#define FRF_AB_PCIE_RXEQCTL_L_WIDTH 2 -#define FFE_AB_PCIE_RXEQCTL_OFF_ALT 3 -#define FFE_AB_PCIE_RXEQCTL_OFF 2 -#define FFE_AB_PCIE_RXEQCTL_MIN 1 -#define FFE_AB_PCIE_RXEQCTL_MAX 0 -#define FRF_AB_PCIE_HIDRV_LBN 8 -#define FRF_AB_PCIE_HIDRV_WIDTH 8 -#define FRF_AB_PCIE_LODRV_LBN 0 -#define FRF_AB_PCIE_LODRV_WIDTH 8 - -/* PCIE_SD_CTL45_REG: PCIE SerDes control register 4 and 5 */ -#define FR_AB_PCIE_SD_CTL45 0x00000330 -#define FRF_AB_PCIE_DTX7_LBN 60 -#define FRF_AB_PCIE_DTX7_WIDTH 4 -#define FRF_AB_PCIE_DTX6_LBN 56 -#define FRF_AB_PCIE_DTX6_WIDTH 4 -#define FRF_AB_PCIE_DTX5_LBN 52 -#define FRF_AB_PCIE_DTX5_WIDTH 4 -#define FRF_AB_PCIE_DTX4_LBN 48 -#define FRF_AB_PCIE_DTX4_WIDTH 4 -#define FRF_AB_PCIE_DTX3_LBN 44 -#define FRF_AB_PCIE_DTX3_WIDTH 4 -#define FRF_AB_PCIE_DTX2_LBN 40 -#define FRF_AB_PCIE_DTX2_WIDTH 4 -#define FRF_AB_PCIE_DTX1_LBN 36 -#define FRF_AB_PCIE_DTX1_WIDTH 4 -#define FRF_AB_PCIE_DTX0_LBN 32 -#define FRF_AB_PCIE_DTX0_WIDTH 4 -#define FRF_AB_PCIE_DEQ7_LBN 28 -#define FRF_AB_PCIE_DEQ7_WIDTH 4 -#define FRF_AB_PCIE_DEQ6_LBN 24 -#define FRF_AB_PCIE_DEQ6_WIDTH 4 -#define FRF_AB_PCIE_DEQ5_LBN 20 -#define FRF_AB_PCIE_DEQ5_WIDTH 4 -#define FRF_AB_PCIE_DEQ4_LBN 16 -#define FRF_AB_PCIE_DEQ4_WIDTH 4 -#define FRF_AB_PCIE_DEQ3_LBN 12 -#define FRF_AB_PCIE_DEQ3_WIDTH 4 -#define FRF_AB_PCIE_DEQ2_LBN 8 -#define FRF_AB_PCIE_DEQ2_WIDTH 4 -#define FRF_AB_PCIE_DEQ1_LBN 4 -#define FRF_AB_PCIE_DEQ1_WIDTH 4 -#define FRF_AB_PCIE_DEQ0_LBN 0 -#define FRF_AB_PCIE_DEQ0_WIDTH 4 - -/* PCIE_PCS_CTL_STAT_REG: PCIE PCS control and status register */ -#define FR_AB_PCIE_PCS_CTL_STAT 0x00000340 -#define FRF_AB_PCIE_PRBSERRCOUNT0_H_LBN 52 -#define 
FRF_AB_PCIE_PRBSERRCOUNT0_H_WIDTH 4 -#define FRF_AB_PCIE_PRBSERRCOUNT0_L_LBN 48 -#define FRF_AB_PCIE_PRBSERRCOUNT0_L_WIDTH 4 -#define FRF_AB_PCIE_PRBSERR_LBN 40 -#define FRF_AB_PCIE_PRBSERR_WIDTH 8 -#define FRF_AB_PCIE_PRBSERRH0_LBN 32 -#define FRF_AB_PCIE_PRBSERRH0_WIDTH 8 -#define FRF_AB_PCIE_FASTINIT_H_LBN 15 -#define FRF_AB_PCIE_FASTINIT_H_WIDTH 1 -#define FRF_AB_PCIE_FASTINIT_L_LBN 14 -#define FRF_AB_PCIE_FASTINIT_L_WIDTH 1 -#define FRF_AB_PCIE_CTCDISABLE_H_LBN 13 -#define FRF_AB_PCIE_CTCDISABLE_H_WIDTH 1 -#define FRF_AB_PCIE_CTCDISABLE_L_LBN 12 -#define FRF_AB_PCIE_CTCDISABLE_L_WIDTH 1 -#define FRF_AB_PCIE_PRBSSYNC_H_LBN 11 -#define FRF_AB_PCIE_PRBSSYNC_H_WIDTH 1 -#define FRF_AB_PCIE_PRBSSYNC_L_LBN 10 -#define FRF_AB_PCIE_PRBSSYNC_L_WIDTH 1 -#define FRF_AB_PCIE_PRBSERRACK_H_LBN 9 -#define FRF_AB_PCIE_PRBSERRACK_H_WIDTH 1 -#define FRF_AB_PCIE_PRBSERRACK_L_LBN 8 -#define FRF_AB_PCIE_PRBSERRACK_L_WIDTH 1 -#define FRF_AB_PCIE_PRBSSEL_LBN 0 -#define FRF_AB_PCIE_PRBSSEL_WIDTH 8 - -/* DEBUG_DATA_OUT_REG: Live Debug and Debug 2 out ports */ -#define FR_BB_DEBUG_DATA_OUT 0x00000350 -#define FRF_BB_DEBUG2_PORT_LBN 25 -#define FRF_BB_DEBUG2_PORT_WIDTH 15 -#define FRF_BB_DEBUG1_PORT_LBN 0 -#define FRF_BB_DEBUG1_PORT_WIDTH 25 - -/* EVQ_RPTR_REGP0: Event queue read pointer register */ -#define FR_BZ_EVQ_RPTR_P0 0x00000400 -#define FR_BZ_EVQ_RPTR_P0_STEP 8192 -#define FR_BZ_EVQ_RPTR_P0_ROWS 1024 -/* EVQ_RPTR_REG_KER: Event queue read pointer register */ -#define FR_AA_EVQ_RPTR_KER 0x00011b00 -#define FR_AA_EVQ_RPTR_KER_STEP 4 -#define FR_AA_EVQ_RPTR_KER_ROWS 4 -/* EVQ_RPTR_REG: Event queue read pointer register */ -#define FR_BZ_EVQ_RPTR 0x00fa0000 -#define FR_BZ_EVQ_RPTR_STEP 16 -#define FR_BB_EVQ_RPTR_ROWS 4096 -#define FR_CZ_EVQ_RPTR_ROWS 1024 -/* EVQ_RPTR_REGP123: Event queue read pointer register */ -#define FR_BB_EVQ_RPTR_P123 0x01000400 -#define FR_BB_EVQ_RPTR_P123_STEP 8192 -#define FR_BB_EVQ_RPTR_P123_ROWS 3072 -#define FRF_AZ_EVQ_RPTR_VLD_LBN 15 -#define FRF_AZ_EVQ_RPTR_VLD_WIDTH 1 -#define FRF_AZ_EVQ_RPTR_LBN 0 -#define FRF_AZ_EVQ_RPTR_WIDTH 15 - -/* TIMER_COMMAND_REGP0: Timer Command Registers */ -#define FR_BZ_TIMER_COMMAND_P0 0x00000420 -#define FR_BZ_TIMER_COMMAND_P0_STEP 8192 -#define FR_BZ_TIMER_COMMAND_P0_ROWS 1024 -/* TIMER_COMMAND_REG_KER: Timer Command Registers */ -#define FR_AA_TIMER_COMMAND_KER 0x00000420 -#define FR_AA_TIMER_COMMAND_KER_STEP 8192 -#define FR_AA_TIMER_COMMAND_KER_ROWS 4 -/* TIMER_COMMAND_REGP123: Timer Command Registers */ -#define FR_BB_TIMER_COMMAND_P123 0x01000420 -#define FR_BB_TIMER_COMMAND_P123_STEP 8192 -#define FR_BB_TIMER_COMMAND_P123_ROWS 3072 -#define FRF_CZ_TC_TIMER_MODE_LBN 14 -#define FRF_CZ_TC_TIMER_MODE_WIDTH 2 -#define FRF_AB_TC_TIMER_MODE_LBN 12 -#define FRF_AB_TC_TIMER_MODE_WIDTH 2 -#define FRF_CZ_TC_TIMER_VAL_LBN 0 -#define FRF_CZ_TC_TIMER_VAL_WIDTH 14 -#define FRF_AB_TC_TIMER_VAL_LBN 0 -#define FRF_AB_TC_TIMER_VAL_WIDTH 12 - -/* DRV_EV_REG: Driver generated event register */ -#define FR_AZ_DRV_EV 0x00000440 -#define FRF_AZ_DRV_EV_QID_LBN 64 -#define FRF_AZ_DRV_EV_QID_WIDTH 12 -#define FRF_AZ_DRV_EV_DATA_LBN 0 -#define FRF_AZ_DRV_EV_DATA_WIDTH 64 - -/* EVQ_CTL_REG: Event queue control register */ -#define FR_AZ_EVQ_CTL 0x00000450 -#define FRF_CZ_RX_EVQ_WAKEUP_MASK_LBN 15 -#define FRF_CZ_RX_EVQ_WAKEUP_MASK_WIDTH 10 -#define FRF_BB_RX_EVQ_WAKEUP_MASK_LBN 15 -#define FRF_BB_RX_EVQ_WAKEUP_MASK_WIDTH 6 -#define FRF_AZ_EVQ_OWNERR_CTL_LBN 14 -#define FRF_AZ_EVQ_OWNERR_CTL_WIDTH 1 -#define FRF_AZ_EVQ_FIFO_AF_TH_LBN 7 -#define 
FRF_AZ_EVQ_FIFO_AF_TH_WIDTH 7 -#define FRF_AZ_EVQ_FIFO_NOTAF_TH_LBN 0 -#define FRF_AZ_EVQ_FIFO_NOTAF_TH_WIDTH 7 - -/* EVQ_CNT1_REG: Event counter 1 register */ -#define FR_AZ_EVQ_CNT1 0x00000460 -#define FRF_AZ_EVQ_CNT_PRE_FIFO_LBN 120 -#define FRF_AZ_EVQ_CNT_PRE_FIFO_WIDTH 7 -#define FRF_AZ_EVQ_CNT_TOBIU_LBN 100 -#define FRF_AZ_EVQ_CNT_TOBIU_WIDTH 20 -#define FRF_AZ_EVQ_TX_REQ_CNT_LBN 80 -#define FRF_AZ_EVQ_TX_REQ_CNT_WIDTH 20 -#define FRF_AZ_EVQ_RX_REQ_CNT_LBN 60 -#define FRF_AZ_EVQ_RX_REQ_CNT_WIDTH 20 -#define FRF_AZ_EVQ_EM_REQ_CNT_LBN 40 -#define FRF_AZ_EVQ_EM_REQ_CNT_WIDTH 20 -#define FRF_AZ_EVQ_CSR_REQ_CNT_LBN 20 -#define FRF_AZ_EVQ_CSR_REQ_CNT_WIDTH 20 -#define FRF_AZ_EVQ_ERR_REQ_CNT_LBN 0 -#define FRF_AZ_EVQ_ERR_REQ_CNT_WIDTH 20 - -/* EVQ_CNT2_REG: Event counter 2 register */ -#define FR_AZ_EVQ_CNT2 0x00000470 -#define FRF_AZ_EVQ_UPD_REQ_CNT_LBN 104 -#define FRF_AZ_EVQ_UPD_REQ_CNT_WIDTH 20 -#define FRF_AZ_EVQ_CLR_REQ_CNT_LBN 84 -#define FRF_AZ_EVQ_CLR_REQ_CNT_WIDTH 20 -#define FRF_AZ_EVQ_RDY_CNT_LBN 80 -#define FRF_AZ_EVQ_RDY_CNT_WIDTH 4 -#define FRF_AZ_EVQ_WU_REQ_CNT_LBN 60 -#define FRF_AZ_EVQ_WU_REQ_CNT_WIDTH 20 -#define FRF_AZ_EVQ_WET_REQ_CNT_LBN 40 -#define FRF_AZ_EVQ_WET_REQ_CNT_WIDTH 20 -#define FRF_AZ_EVQ_INIT_REQ_CNT_LBN 20 -#define FRF_AZ_EVQ_INIT_REQ_CNT_WIDTH 20 -#define FRF_AZ_EVQ_TM_REQ_CNT_LBN 0 -#define FRF_AZ_EVQ_TM_REQ_CNT_WIDTH 20 - -/* USR_EV_REG: Event mailbox register */ -#define FR_CZ_USR_EV 0x00000540 -#define FR_CZ_USR_EV_STEP 8192 -#define FR_CZ_USR_EV_ROWS 1024 -#define FRF_CZ_USR_EV_DATA_LBN 0 -#define FRF_CZ_USR_EV_DATA_WIDTH 32 - -/* BUF_TBL_CFG_REG: Buffer table configuration register */ -#define FR_AZ_BUF_TBL_CFG 0x00000600 -#define FRF_AZ_BUF_TBL_MODE_LBN 3 -#define FRF_AZ_BUF_TBL_MODE_WIDTH 1 - -/* SRM_RX_DC_CFG_REG: SRAM receive descriptor cache configuration register */ -#define FR_AZ_SRM_RX_DC_CFG 0x00000610 -#define FRF_AZ_SRM_CLK_TMP_EN_LBN 21 -#define FRF_AZ_SRM_CLK_TMP_EN_WIDTH 1 -#define FRF_AZ_SRM_RX_DC_BASE_ADR_LBN 0 -#define FRF_AZ_SRM_RX_DC_BASE_ADR_WIDTH 21 - -/* SRM_TX_DC_CFG_REG: SRAM transmit descriptor cache configuration register */ -#define FR_AZ_SRM_TX_DC_CFG 0x00000620 -#define FRF_AZ_SRM_TX_DC_BASE_ADR_LBN 0 -#define FRF_AZ_SRM_TX_DC_BASE_ADR_WIDTH 21 - -/* SRM_CFG_REG: SRAM configuration register */ -#define FR_AZ_SRM_CFG 0x00000630 -#define FRF_AZ_SRM_OOB_ADR_INTEN_LBN 5 -#define FRF_AZ_SRM_OOB_ADR_INTEN_WIDTH 1 -#define FRF_AZ_SRM_OOB_BUF_INTEN_LBN 4 -#define FRF_AZ_SRM_OOB_BUF_INTEN_WIDTH 1 -#define FRF_AZ_SRM_INIT_EN_LBN 3 -#define FRF_AZ_SRM_INIT_EN_WIDTH 1 -#define FRF_AZ_SRM_NUM_BANK_LBN 2 -#define FRF_AZ_SRM_NUM_BANK_WIDTH 1 -#define FRF_AZ_SRM_BANK_SIZE_LBN 0 -#define FRF_AZ_SRM_BANK_SIZE_WIDTH 2 - -/* BUF_TBL_UPD_REG: Buffer table update register */ -#define FR_AZ_BUF_TBL_UPD 0x00000650 -#define FRF_AZ_BUF_UPD_CMD_LBN 63 -#define FRF_AZ_BUF_UPD_CMD_WIDTH 1 -#define FRF_AZ_BUF_CLR_CMD_LBN 62 -#define FRF_AZ_BUF_CLR_CMD_WIDTH 1 -#define FRF_AZ_BUF_CLR_END_ID_LBN 32 -#define FRF_AZ_BUF_CLR_END_ID_WIDTH 20 -#define FRF_AZ_BUF_CLR_START_ID_LBN 0 -#define FRF_AZ_BUF_CLR_START_ID_WIDTH 20 - -/* SRM_UPD_EVQ_REG: Buffer table update register */ -#define FR_AZ_SRM_UPD_EVQ 0x00000660 -#define FRF_AZ_SRM_UPD_EVQ_ID_LBN 0 -#define FRF_AZ_SRM_UPD_EVQ_ID_WIDTH 12 - -/* SRAM_PARITY_REG: SRAM parity register. 
*/ -#define FR_AZ_SRAM_PARITY 0x00000670 -#define FRF_CZ_BYPASS_ECC_LBN 3 -#define FRF_CZ_BYPASS_ECC_WIDTH 1 -#define FRF_CZ_SEC_INT_LBN 2 -#define FRF_CZ_SEC_INT_WIDTH 1 -#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_LBN 1 -#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_WIDTH 1 -#define FRF_AB_FORCE_SRAM_PERR_LBN 0 -#define FRF_AB_FORCE_SRAM_PERR_WIDTH 1 -#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_LBN 0 -#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_WIDTH 1 - -/* RX_CFG_REG: Receive configuration register */ -#define FR_AZ_RX_CFG 0x00000800 -#define FRF_CZ_RX_MIN_KBUF_SIZE_LBN 72 -#define FRF_CZ_RX_MIN_KBUF_SIZE_WIDTH 14 -#define FRF_CZ_RX_HDR_SPLIT_EN_LBN 71 -#define FRF_CZ_RX_HDR_SPLIT_EN_WIDTH 1 -#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_LBN 62 -#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_WIDTH 9 -#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_LBN 53 -#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_WIDTH 9 -#define FRF_CZ_RX_PRE_RFF_IPG_LBN 49 -#define FRF_CZ_RX_PRE_RFF_IPG_WIDTH 4 -#define FRF_BZ_RX_TCP_SUP_LBN 48 -#define FRF_BZ_RX_TCP_SUP_WIDTH 1 -#define FRF_BZ_RX_INGR_EN_LBN 47 -#define FRF_BZ_RX_INGR_EN_WIDTH 1 -#define FRF_BZ_RX_IP_HASH_LBN 46 -#define FRF_BZ_RX_IP_HASH_WIDTH 1 -#define FRF_BZ_RX_HASH_ALG_LBN 45 -#define FRF_BZ_RX_HASH_ALG_WIDTH 1 -#define FRF_BZ_RX_HASH_INSRT_HDR_LBN 44 -#define FRF_BZ_RX_HASH_INSRT_HDR_WIDTH 1 -#define FRF_BZ_RX_DESC_PUSH_EN_LBN 43 -#define FRF_BZ_RX_DESC_PUSH_EN_WIDTH 1 -#define FRF_BZ_RX_RDW_PATCH_EN_LBN 42 -#define FRF_BZ_RX_RDW_PATCH_EN_WIDTH 1 -#define FRF_BB_RX_PCI_BURST_SIZE_LBN 39 -#define FRF_BB_RX_PCI_BURST_SIZE_WIDTH 3 -#define FRF_BZ_RX_OWNERR_CTL_LBN 38 -#define FRF_BZ_RX_OWNERR_CTL_WIDTH 1 -#define FRF_BZ_RX_XON_TX_TH_LBN 33 -#define FRF_BZ_RX_XON_TX_TH_WIDTH 5 -#define FRF_AA_RX_DESC_PUSH_EN_LBN 35 -#define FRF_AA_RX_DESC_PUSH_EN_WIDTH 1 -#define FRF_AA_RX_RDW_PATCH_EN_LBN 34 -#define FRF_AA_RX_RDW_PATCH_EN_WIDTH 1 -#define FRF_AA_RX_PCI_BURST_SIZE_LBN 31 -#define FRF_AA_RX_PCI_BURST_SIZE_WIDTH 3 -#define FRF_BZ_RX_XOFF_TX_TH_LBN 28 -#define FRF_BZ_RX_XOFF_TX_TH_WIDTH 5 -#define FRF_AA_RX_OWNERR_CTL_LBN 30 -#define FRF_AA_RX_OWNERR_CTL_WIDTH 1 -#define FRF_AA_RX_XON_TX_TH_LBN 25 -#define FRF_AA_RX_XON_TX_TH_WIDTH 5 -#define FRF_BZ_RX_USR_BUF_SIZE_LBN 19 -#define FRF_BZ_RX_USR_BUF_SIZE_WIDTH 9 -#define FRF_AA_RX_XOFF_TX_TH_LBN 20 -#define FRF_AA_RX_XOFF_TX_TH_WIDTH 5 -#define FRF_AA_RX_USR_BUF_SIZE_LBN 11 -#define FRF_AA_RX_USR_BUF_SIZE_WIDTH 9 -#define FRF_BZ_RX_XON_MAC_TH_LBN 10 -#define FRF_BZ_RX_XON_MAC_TH_WIDTH 9 -#define FRF_AA_RX_XON_MAC_TH_LBN 6 -#define FRF_AA_RX_XON_MAC_TH_WIDTH 5 -#define FRF_BZ_RX_XOFF_MAC_TH_LBN 1 -#define FRF_BZ_RX_XOFF_MAC_TH_WIDTH 9 -#define FRF_AA_RX_XOFF_MAC_TH_LBN 1 -#define FRF_AA_RX_XOFF_MAC_TH_WIDTH 5 -#define FRF_AZ_RX_XOFF_MAC_EN_LBN 0 -#define FRF_AZ_RX_XOFF_MAC_EN_WIDTH 1 - -/* RX_FILTER_CTL_REG: Receive filter control registers */ -#define FR_BZ_RX_FILTER_CTL 0x00000810 -#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_LBN 94 -#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_WIDTH 8 -#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_LBN 86 -#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_WIDTH 8 -#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_LBN 85 -#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_WIDTH 1 -#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_LBN 69 -#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_WIDTH 16 -#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_LBN 57 -#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_WIDTH 12 -#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_LBN 56 -#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_WIDTH 1 -#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_LBN 55 -#define 
FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_WIDTH 1 -#define FRF_CZ_UNICAST_NOMATCH_Q_ID_LBN 43 -#define FRF_CZ_UNICAST_NOMATCH_Q_ID_WIDTH 12 -#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_LBN 42 -#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_WIDTH 1 -#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_LBN 41 -#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_WIDTH 1 -#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_LBN 40 -#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_WIDTH 1 -#define FRF_BZ_UDP_FULL_SRCH_LIMIT_LBN 32 -#define FRF_BZ_UDP_FULL_SRCH_LIMIT_WIDTH 8 -#define FRF_BZ_NUM_KER_LBN 24 -#define FRF_BZ_NUM_KER_WIDTH 2 -#define FRF_BZ_UDP_WILD_SRCH_LIMIT_LBN 16 -#define FRF_BZ_UDP_WILD_SRCH_LIMIT_WIDTH 8 -#define FRF_BZ_TCP_WILD_SRCH_LIMIT_LBN 8 -#define FRF_BZ_TCP_WILD_SRCH_LIMIT_WIDTH 8 -#define FRF_BZ_TCP_FULL_SRCH_LIMIT_LBN 0 -#define FRF_BZ_TCP_FULL_SRCH_LIMIT_WIDTH 8 - -/* RX_FLUSH_DESCQ_REG: Receive flush descriptor queue register */ -#define FR_AZ_RX_FLUSH_DESCQ 0x00000820 -#define FRF_AZ_RX_FLUSH_DESCQ_CMD_LBN 24 -#define FRF_AZ_RX_FLUSH_DESCQ_CMD_WIDTH 1 -#define FRF_AZ_RX_FLUSH_DESCQ_LBN 0 -#define FRF_AZ_RX_FLUSH_DESCQ_WIDTH 12 - -/* RX_DESC_UPD_REGP0: Receive descriptor update register. */ -#define FR_BZ_RX_DESC_UPD_P0 0x00000830 -#define FR_BZ_RX_DESC_UPD_P0_STEP 8192 -#define FR_BZ_RX_DESC_UPD_P0_ROWS 1024 -/* RX_DESC_UPD_REG_KER: Receive descriptor update register. */ -#define FR_AA_RX_DESC_UPD_KER 0x00000830 -#define FR_AA_RX_DESC_UPD_KER_STEP 8192 -#define FR_AA_RX_DESC_UPD_KER_ROWS 4 -/* RX_DESC_UPD_REGP123: Receive descriptor update register. */ -#define FR_BB_RX_DESC_UPD_P123 0x01000830 -#define FR_BB_RX_DESC_UPD_P123_STEP 8192 -#define FR_BB_RX_DESC_UPD_P123_ROWS 3072 -#define FRF_AZ_RX_DESC_WPTR_LBN 96 -#define FRF_AZ_RX_DESC_WPTR_WIDTH 12 -#define FRF_AZ_RX_DESC_PUSH_CMD_LBN 95 -#define FRF_AZ_RX_DESC_PUSH_CMD_WIDTH 1 -#define FRF_AZ_RX_DESC_LBN 0 -#define FRF_AZ_RX_DESC_WIDTH 64 - -/* RX_DC_CFG_REG: Receive descriptor cache configuration register */ -#define FR_AZ_RX_DC_CFG 0x00000840 -#define FRF_AB_RX_MAX_PF_LBN 2 -#define FRF_AB_RX_MAX_PF_WIDTH 2 -#define FRF_AZ_RX_DC_SIZE_LBN 0 -#define FRF_AZ_RX_DC_SIZE_WIDTH 2 -#define FFE_AZ_RX_DC_SIZE_64 3 -#define FFE_AZ_RX_DC_SIZE_32 2 -#define FFE_AZ_RX_DC_SIZE_16 1 -#define FFE_AZ_RX_DC_SIZE_8 0 - -/* RX_DC_PF_WM_REG: Receive descriptor cache pre-fetch watermark register */ -#define FR_AZ_RX_DC_PF_WM 0x00000850 -#define FRF_AZ_RX_DC_PF_HWM_LBN 6 -#define FRF_AZ_RX_DC_PF_HWM_WIDTH 6 -#define FRF_AZ_RX_DC_PF_LWM_LBN 0 -#define FRF_AZ_RX_DC_PF_LWM_WIDTH 6 - -/* RX_RSS_TKEY_REG: RSS Toeplitz hash key */ -#define FR_BZ_RX_RSS_TKEY 0x00000860 -#define FRF_BZ_RX_RSS_TKEY_HI_LBN 64 -#define FRF_BZ_RX_RSS_TKEY_HI_WIDTH 64 -#define FRF_BZ_RX_RSS_TKEY_LO_LBN 0 -#define FRF_BZ_RX_RSS_TKEY_LO_WIDTH 64 - -/* RX_NODESC_DROP_REG: Receive dropped packet counter register */ -#define FR_AZ_RX_NODESC_DROP 0x00000880 -#define FRF_CZ_RX_NODESC_DROP_CNT_LBN 0 -#define FRF_CZ_RX_NODESC_DROP_CNT_WIDTH 32 -#define FRF_AB_RX_NODESC_DROP_CNT_LBN 0 -#define FRF_AB_RX_NODESC_DROP_CNT_WIDTH 16 - -/* RX_SELF_RST_REG: Receive self reset register */ -#define FR_AA_RX_SELF_RST 0x00000890 -#define FRF_AA_RX_ISCSI_DIS_LBN 17 -#define FRF_AA_RX_ISCSI_DIS_WIDTH 1 -#define FRF_AA_RX_SW_RST_REG_LBN 16 -#define FRF_AA_RX_SW_RST_REG_WIDTH 1 -#define FRF_AA_RX_NODESC_WAIT_DIS_LBN 9 -#define FRF_AA_RX_NODESC_WAIT_DIS_WIDTH 1 -#define FRF_AA_RX_SELF_RST_EN_LBN 8 -#define FRF_AA_RX_SELF_RST_EN_WIDTH 1 -#define FRF_AA_RX_MAX_PF_LAT_LBN 4 -#define FRF_AA_RX_MAX_PF_LAT_WIDTH 4 -#define 
FRF_AA_RX_MAX_LU_LAT_LBN 0 -#define FRF_AA_RX_MAX_LU_LAT_WIDTH 4 - -/* RX_DEBUG_REG: undocumented register */ -#define FR_AZ_RX_DEBUG 0x000008a0 -#define FRF_AZ_RX_DEBUG_LBN 0 -#define FRF_AZ_RX_DEBUG_WIDTH 64 - -/* RX_PUSH_DROP_REG: Receive descriptor push dropped counter register */ -#define FR_AZ_RX_PUSH_DROP 0x000008b0 -#define FRF_AZ_RX_PUSH_DROP_CNT_LBN 0 -#define FRF_AZ_RX_PUSH_DROP_CNT_WIDTH 32 - -/* RX_RSS_IPV6_REG1: IPv6 RSS Toeplitz hash key low bytes */ -#define FR_CZ_RX_RSS_IPV6_REG1 0x000008d0 -#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN 0 -#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH 128 - -/* RX_RSS_IPV6_REG2: IPv6 RSS Toeplitz hash key middle bytes */ -#define FR_CZ_RX_RSS_IPV6_REG2 0x000008e0 -#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN 0 -#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH 128 - -/* RX_RSS_IPV6_REG3: IPv6 RSS Toeplitz hash key upper bytes and IPv6 RSS settings */ -#define FR_CZ_RX_RSS_IPV6_REG3 0x000008f0 -#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_LBN 66 -#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_WIDTH 1 -#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_LBN 65 -#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_WIDTH 1 -#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_LBN 64 -#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_WIDTH 1 -#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN 0 -#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH 64 - -/* TX_FLUSH_DESCQ_REG: Transmit flush descriptor queue register */ -#define FR_AZ_TX_FLUSH_DESCQ 0x00000a00 -#define FRF_AZ_TX_FLUSH_DESCQ_CMD_LBN 12 -#define FRF_AZ_TX_FLUSH_DESCQ_CMD_WIDTH 1 -#define FRF_AZ_TX_FLUSH_DESCQ_LBN 0 -#define FRF_AZ_TX_FLUSH_DESCQ_WIDTH 12 - -/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */ -#define FR_BZ_TX_DESC_UPD_P0 0x00000a10 -#define FR_BZ_TX_DESC_UPD_P0_STEP 8192 -#define FR_BZ_TX_DESC_UPD_P0_ROWS 1024 -/* TX_DESC_UPD_REG_KER: Transmit descriptor update register. */ -#define FR_AA_TX_DESC_UPD_KER 0x00000a10 -#define FR_AA_TX_DESC_UPD_KER_STEP 8192 -#define FR_AA_TX_DESC_UPD_KER_ROWS 8 -/* TX_DESC_UPD_REGP123: Transmit descriptor update register. 
*/ -#define FR_BB_TX_DESC_UPD_P123 0x01000a10 -#define FR_BB_TX_DESC_UPD_P123_STEP 8192 -#define FR_BB_TX_DESC_UPD_P123_ROWS 3072 -#define FRF_AZ_TX_DESC_WPTR_LBN 96 -#define FRF_AZ_TX_DESC_WPTR_WIDTH 12 -#define FRF_AZ_TX_DESC_PUSH_CMD_LBN 95 -#define FRF_AZ_TX_DESC_PUSH_CMD_WIDTH 1 -#define FRF_AZ_TX_DESC_LBN 0 -#define FRF_AZ_TX_DESC_WIDTH 95 - -/* TX_DC_CFG_REG: Transmit descriptor cache configuration register */ -#define FR_AZ_TX_DC_CFG 0x00000a20 -#define FRF_AZ_TX_DC_SIZE_LBN 0 -#define FRF_AZ_TX_DC_SIZE_WIDTH 2 -#define FFE_AZ_TX_DC_SIZE_32 2 -#define FFE_AZ_TX_DC_SIZE_16 1 -#define FFE_AZ_TX_DC_SIZE_8 0 - -/* TX_CHKSM_CFG_REG: Transmit checksum configuration register */ -#define FR_AA_TX_CHKSM_CFG 0x00000a30 -#define FRF_AA_TX_Q_CHKSM_DIS_96_127_LBN 96 -#define FRF_AA_TX_Q_CHKSM_DIS_96_127_WIDTH 32 -#define FRF_AA_TX_Q_CHKSM_DIS_64_95_LBN 64 -#define FRF_AA_TX_Q_CHKSM_DIS_64_95_WIDTH 32 -#define FRF_AA_TX_Q_CHKSM_DIS_32_63_LBN 32 -#define FRF_AA_TX_Q_CHKSM_DIS_32_63_WIDTH 32 -#define FRF_AA_TX_Q_CHKSM_DIS_0_31_LBN 0 -#define FRF_AA_TX_Q_CHKSM_DIS_0_31_WIDTH 32 - -/* TX_CFG_REG: Transmit configuration register */ -#define FR_AZ_TX_CFG 0x00000a50 -#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_LBN 114 -#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_WIDTH 8 -#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_LBN 113 -#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_WIDTH 1 -#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_LBN 105 -#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_WIDTH 8 -#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_LBN 97 -#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_WIDTH 8 -#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_LBN 89 -#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8 -#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_LBN 81 -#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8 -#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_LBN 73 -#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8 -#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_LBN 65 -#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8 -#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_LBN 64 -#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_WIDTH 1 -#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_LBN 48 -#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_WIDTH 16 -#define FRF_CZ_TX_FILTER_EN_BIT_LBN 47 -#define FRF_CZ_TX_FILTER_EN_BIT_WIDTH 1 -#define FRF_AZ_TX_IP_ID_P0_OFS_LBN 16 -#define FRF_AZ_TX_IP_ID_P0_OFS_WIDTH 15 -#define FRF_AZ_TX_NO_EOP_DISC_EN_LBN 5 -#define FRF_AZ_TX_NO_EOP_DISC_EN_WIDTH 1 -#define FRF_AZ_TX_P1_PRI_EN_LBN 4 -#define FRF_AZ_TX_P1_PRI_EN_WIDTH 1 -#define FRF_AZ_TX_OWNERR_CTL_LBN 2 -#define FRF_AZ_TX_OWNERR_CTL_WIDTH 1 -#define FRF_AA_TX_NON_IP_DROP_DIS_LBN 1 -#define FRF_AA_TX_NON_IP_DROP_DIS_WIDTH 1 -#define FRF_AZ_TX_IP_ID_REP_EN_LBN 0 -#define FRF_AZ_TX_IP_ID_REP_EN_WIDTH 1 - -/* TX_PUSH_DROP_REG: Transmit push dropped register */ -#define FR_AZ_TX_PUSH_DROP 0x00000a60 -#define FRF_AZ_TX_PUSH_DROP_CNT_LBN 0 -#define FRF_AZ_TX_PUSH_DROP_CNT_WIDTH 32 - -/* TX_RESERVED_REG: Transmit configuration register */ -#define FR_AZ_TX_RESERVED 0x00000a80 -#define FRF_AZ_TX_EVT_CNT_LBN 121 -#define FRF_AZ_TX_EVT_CNT_WIDTH 7 -#define FRF_AZ_TX_PREF_AGE_CNT_LBN 119 -#define FRF_AZ_TX_PREF_AGE_CNT_WIDTH 2 -#define FRF_AZ_TX_RD_COMP_TMR_LBN 96 -#define FRF_AZ_TX_RD_COMP_TMR_WIDTH 23 -#define FRF_AZ_TX_PUSH_EN_LBN 89 -#define FRF_AZ_TX_PUSH_EN_WIDTH 1 -#define FRF_AZ_TX_PUSH_CHK_DIS_LBN 88 -#define FRF_AZ_TX_PUSH_CHK_DIS_WIDTH 1 -#define FRF_AZ_TX_D_FF_FULL_P0_LBN 85 -#define 
FRF_AZ_TX_D_FF_FULL_P0_WIDTH 1 -#define FRF_AZ_TX_DMAR_ST_P0_LBN 81 -#define FRF_AZ_TX_DMAR_ST_P0_WIDTH 1 -#define FRF_AZ_TX_DMAQ_ST_LBN 78 -#define FRF_AZ_TX_DMAQ_ST_WIDTH 1 -#define FRF_AZ_TX_RX_SPACER_LBN 64 -#define FRF_AZ_TX_RX_SPACER_WIDTH 8 -#define FRF_AZ_TX_DROP_ABORT_EN_LBN 60 -#define FRF_AZ_TX_DROP_ABORT_EN_WIDTH 1 -#define FRF_AZ_TX_SOFT_EVT_EN_LBN 59 -#define FRF_AZ_TX_SOFT_EVT_EN_WIDTH 1 -#define FRF_AZ_TX_PS_EVT_DIS_LBN 58 -#define FRF_AZ_TX_PS_EVT_DIS_WIDTH 1 -#define FRF_AZ_TX_RX_SPACER_EN_LBN 57 -#define FRF_AZ_TX_RX_SPACER_EN_WIDTH 1 -#define FRF_AZ_TX_XP_TIMER_LBN 52 -#define FRF_AZ_TX_XP_TIMER_WIDTH 5 -#define FRF_AZ_TX_PREF_SPACER_LBN 44 -#define FRF_AZ_TX_PREF_SPACER_WIDTH 8 -#define FRF_AZ_TX_PREF_WD_TMR_LBN 22 -#define FRF_AZ_TX_PREF_WD_TMR_WIDTH 22 -#define FRF_AZ_TX_ONLY1TAG_LBN 21 -#define FRF_AZ_TX_ONLY1TAG_WIDTH 1 -#define FRF_AZ_TX_PREF_THRESHOLD_LBN 19 -#define FRF_AZ_TX_PREF_THRESHOLD_WIDTH 2 -#define FRF_AZ_TX_ONE_PKT_PER_Q_LBN 18 -#define FRF_AZ_TX_ONE_PKT_PER_Q_WIDTH 1 -#define FRF_AZ_TX_DIS_NON_IP_EV_LBN 17 -#define FRF_AZ_TX_DIS_NON_IP_EV_WIDTH 1 -#define FRF_AA_TX_DMA_FF_THR_LBN 16 -#define FRF_AA_TX_DMA_FF_THR_WIDTH 1 -#define FRF_AZ_TX_DMA_SPACER_LBN 8 -#define FRF_AZ_TX_DMA_SPACER_WIDTH 8 -#define FRF_AA_TX_TCP_DIS_LBN 7 -#define FRF_AA_TX_TCP_DIS_WIDTH 1 -#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_LBN 7 -#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_WIDTH 1 -#define FRF_AA_TX_IP_DIS_LBN 6 -#define FRF_AA_TX_IP_DIS_WIDTH 1 -#define FRF_AZ_TX_MAX_CPL_LBN 2 -#define FRF_AZ_TX_MAX_CPL_WIDTH 2 -#define FFE_AZ_TX_MAX_CPL_16 3 -#define FFE_AZ_TX_MAX_CPL_8 2 -#define FFE_AZ_TX_MAX_CPL_4 1 -#define FFE_AZ_TX_MAX_CPL_NOLIMIT 0 -#define FRF_AZ_TX_MAX_PREF_LBN 0 -#define FRF_AZ_TX_MAX_PREF_WIDTH 2 -#define FFE_AZ_TX_MAX_PREF_32 3 -#define FFE_AZ_TX_MAX_PREF_16 2 -#define FFE_AZ_TX_MAX_PREF_8 1 -#define FFE_AZ_TX_MAX_PREF_OFF 0 - -/* TX_PACE_REG: Transmit pace control register */ -#define FR_BZ_TX_PACE 0x00000a90 -#define FRF_BZ_TX_PACE_SB_NOT_AF_LBN 19 -#define FRF_BZ_TX_PACE_SB_NOT_AF_WIDTH 10 -#define FRF_BZ_TX_PACE_SB_AF_LBN 9 -#define FRF_BZ_TX_PACE_SB_AF_WIDTH 10 -#define FRF_BZ_TX_PACE_FB_BASE_LBN 5 -#define FRF_BZ_TX_PACE_FB_BASE_WIDTH 4 -#define FRF_BZ_TX_PACE_BIN_TH_LBN 0 -#define FRF_BZ_TX_PACE_BIN_TH_WIDTH 5 - -/* TX_PACE_DROP_QID_REG: PACE Drop QID Counter */ -#define FR_BZ_TX_PACE_DROP_QID 0x00000aa0 -#define FRF_BZ_TX_PACE_QID_DRP_CNT_LBN 0 -#define FRF_BZ_TX_PACE_QID_DRP_CNT_WIDTH 16 - -/* TX_VLAN_REG: Transmit VLAN tag register */ -#define FR_BB_TX_VLAN 0x00000ae0 -#define FRF_BB_TX_VLAN_EN_LBN 127 -#define FRF_BB_TX_VLAN_EN_WIDTH 1 -#define FRF_BB_TX_VLAN7_PORT1_EN_LBN 125 -#define FRF_BB_TX_VLAN7_PORT1_EN_WIDTH 1 -#define FRF_BB_TX_VLAN7_PORT0_EN_LBN 124 -#define FRF_BB_TX_VLAN7_PORT0_EN_WIDTH 1 -#define FRF_BB_TX_VLAN7_LBN 112 -#define FRF_BB_TX_VLAN7_WIDTH 12 -#define FRF_BB_TX_VLAN6_PORT1_EN_LBN 109 -#define FRF_BB_TX_VLAN6_PORT1_EN_WIDTH 1 -#define FRF_BB_TX_VLAN6_PORT0_EN_LBN 108 -#define FRF_BB_TX_VLAN6_PORT0_EN_WIDTH 1 -#define FRF_BB_TX_VLAN6_LBN 96 -#define FRF_BB_TX_VLAN6_WIDTH 12 -#define FRF_BB_TX_VLAN5_PORT1_EN_LBN 93 -#define FRF_BB_TX_VLAN5_PORT1_EN_WIDTH 1 -#define FRF_BB_TX_VLAN5_PORT0_EN_LBN 92 -#define FRF_BB_TX_VLAN5_PORT0_EN_WIDTH 1 -#define FRF_BB_TX_VLAN5_LBN 80 -#define FRF_BB_TX_VLAN5_WIDTH 12 -#define FRF_BB_TX_VLAN4_PORT1_EN_LBN 77 -#define FRF_BB_TX_VLAN4_PORT1_EN_WIDTH 1 -#define FRF_BB_TX_VLAN4_PORT0_EN_LBN 76 -#define FRF_BB_TX_VLAN4_PORT0_EN_WIDTH 1 -#define FRF_BB_TX_VLAN4_LBN 64 -#define FRF_BB_TX_VLAN4_WIDTH 12 -#define 
FRF_BB_TX_VLAN3_PORT1_EN_LBN 61 -#define FRF_BB_TX_VLAN3_PORT1_EN_WIDTH 1 -#define FRF_BB_TX_VLAN3_PORT0_EN_LBN 60 -#define FRF_BB_TX_VLAN3_PORT0_EN_WIDTH 1 -#define FRF_BB_TX_VLAN3_LBN 48 -#define FRF_BB_TX_VLAN3_WIDTH 12 -#define FRF_BB_TX_VLAN2_PORT1_EN_LBN 45 -#define FRF_BB_TX_VLAN2_PORT1_EN_WIDTH 1 -#define FRF_BB_TX_VLAN2_PORT0_EN_LBN 44 -#define FRF_BB_TX_VLAN2_PORT0_EN_WIDTH 1 -#define FRF_BB_TX_VLAN2_LBN 32 -#define FRF_BB_TX_VLAN2_WIDTH 12 -#define FRF_BB_TX_VLAN1_PORT1_EN_LBN 29 -#define FRF_BB_TX_VLAN1_PORT1_EN_WIDTH 1 -#define FRF_BB_TX_VLAN1_PORT0_EN_LBN 28 -#define FRF_BB_TX_VLAN1_PORT0_EN_WIDTH 1 -#define FRF_BB_TX_VLAN1_LBN 16 -#define FRF_BB_TX_VLAN1_WIDTH 12 -#define FRF_BB_TX_VLAN0_PORT1_EN_LBN 13 -#define FRF_BB_TX_VLAN0_PORT1_EN_WIDTH 1 -#define FRF_BB_TX_VLAN0_PORT0_EN_LBN 12 -#define FRF_BB_TX_VLAN0_PORT0_EN_WIDTH 1 -#define FRF_BB_TX_VLAN0_LBN 0 -#define FRF_BB_TX_VLAN0_WIDTH 12 - -/* TX_IPFIL_PORTEN_REG: Transmit filter control register */ -#define FR_BZ_TX_IPFIL_PORTEN 0x00000af0 -#define FRF_BZ_TX_MADR0_FIL_EN_LBN 64 -#define FRF_BZ_TX_MADR0_FIL_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL31_PORT_EN_LBN 62 -#define FRF_BB_TX_IPFIL31_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL30_PORT_EN_LBN 60 -#define FRF_BB_TX_IPFIL30_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL29_PORT_EN_LBN 58 -#define FRF_BB_TX_IPFIL29_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL28_PORT_EN_LBN 56 -#define FRF_BB_TX_IPFIL28_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL27_PORT_EN_LBN 54 -#define FRF_BB_TX_IPFIL27_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL26_PORT_EN_LBN 52 -#define FRF_BB_TX_IPFIL26_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL25_PORT_EN_LBN 50 -#define FRF_BB_TX_IPFIL25_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL24_PORT_EN_LBN 48 -#define FRF_BB_TX_IPFIL24_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL23_PORT_EN_LBN 46 -#define FRF_BB_TX_IPFIL23_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL22_PORT_EN_LBN 44 -#define FRF_BB_TX_IPFIL22_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL21_PORT_EN_LBN 42 -#define FRF_BB_TX_IPFIL21_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL20_PORT_EN_LBN 40 -#define FRF_BB_TX_IPFIL20_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL19_PORT_EN_LBN 38 -#define FRF_BB_TX_IPFIL19_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL18_PORT_EN_LBN 36 -#define FRF_BB_TX_IPFIL18_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL17_PORT_EN_LBN 34 -#define FRF_BB_TX_IPFIL17_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL16_PORT_EN_LBN 32 -#define FRF_BB_TX_IPFIL16_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL15_PORT_EN_LBN 30 -#define FRF_BB_TX_IPFIL15_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL14_PORT_EN_LBN 28 -#define FRF_BB_TX_IPFIL14_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL13_PORT_EN_LBN 26 -#define FRF_BB_TX_IPFIL13_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL12_PORT_EN_LBN 24 -#define FRF_BB_TX_IPFIL12_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL11_PORT_EN_LBN 22 -#define FRF_BB_TX_IPFIL11_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL10_PORT_EN_LBN 20 -#define FRF_BB_TX_IPFIL10_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL9_PORT_EN_LBN 18 -#define FRF_BB_TX_IPFIL9_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL8_PORT_EN_LBN 16 -#define FRF_BB_TX_IPFIL8_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL7_PORT_EN_LBN 14 -#define FRF_BB_TX_IPFIL7_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL6_PORT_EN_LBN 12 -#define FRF_BB_TX_IPFIL6_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL5_PORT_EN_LBN 10 -#define FRF_BB_TX_IPFIL5_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL4_PORT_EN_LBN 8 -#define FRF_BB_TX_IPFIL4_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL3_PORT_EN_LBN 6 -#define FRF_BB_TX_IPFIL3_PORT_EN_WIDTH 1 
-#define FRF_BB_TX_IPFIL2_PORT_EN_LBN 4 -#define FRF_BB_TX_IPFIL2_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL1_PORT_EN_LBN 2 -#define FRF_BB_TX_IPFIL1_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL0_PORT_EN_LBN 0 -#define FRF_BB_TX_IPFIL0_PORT_EN_WIDTH 1 - -/* TX_IPFIL_TBL: Transmit IP source address filter table */ -#define FR_BB_TX_IPFIL_TBL 0x00000b00 -#define FR_BB_TX_IPFIL_TBL_STEP 16 -#define FR_BB_TX_IPFIL_TBL_ROWS 16 -#define FRF_BB_TX_IPFIL_MASK_1_LBN 96 -#define FRF_BB_TX_IPFIL_MASK_1_WIDTH 32 -#define FRF_BB_TX_IP_SRC_ADR_1_LBN 64 -#define FRF_BB_TX_IP_SRC_ADR_1_WIDTH 32 -#define FRF_BB_TX_IPFIL_MASK_0_LBN 32 -#define FRF_BB_TX_IPFIL_MASK_0_WIDTH 32 -#define FRF_BB_TX_IP_SRC_ADR_0_LBN 0 -#define FRF_BB_TX_IP_SRC_ADR_0_WIDTH 32 - -/* MD_TXD_REG: PHY management transmit data register */ -#define FR_AB_MD_TXD 0x00000c00 -#define FRF_AB_MD_TXD_LBN 0 -#define FRF_AB_MD_TXD_WIDTH 16 - -/* MD_RXD_REG: PHY management receive data register */ -#define FR_AB_MD_RXD 0x00000c10 -#define FRF_AB_MD_RXD_LBN 0 -#define FRF_AB_MD_RXD_WIDTH 16 - -/* MD_CS_REG: PHY management configuration & status register */ -#define FR_AB_MD_CS 0x00000c20 -#define FRF_AB_MD_RD_EN_CMD_LBN 15 -#define FRF_AB_MD_RD_EN_CMD_WIDTH 1 -#define FRF_AB_MD_WR_EN_CMD_LBN 14 -#define FRF_AB_MD_WR_EN_CMD_WIDTH 1 -#define FRF_AB_MD_ADDR_CMD_LBN 13 -#define FRF_AB_MD_ADDR_CMD_WIDTH 1 -#define FRF_AB_MD_PT_LBN 7 -#define FRF_AB_MD_PT_WIDTH 3 -#define FRF_AB_MD_PL_LBN 6 -#define FRF_AB_MD_PL_WIDTH 1 -#define FRF_AB_MD_INT_CLR_LBN 5 -#define FRF_AB_MD_INT_CLR_WIDTH 1 -#define FRF_AB_MD_GC_LBN 4 -#define FRF_AB_MD_GC_WIDTH 1 -#define FRF_AB_MD_PRSP_LBN 3 -#define FRF_AB_MD_PRSP_WIDTH 1 -#define FRF_AB_MD_RIC_LBN 2 -#define FRF_AB_MD_RIC_WIDTH 1 -#define FRF_AB_MD_RDC_LBN 1 -#define FRF_AB_MD_RDC_WIDTH 1 -#define FRF_AB_MD_WRC_LBN 0 -#define FRF_AB_MD_WRC_WIDTH 1 - -/* MD_PHY_ADR_REG: PHY management PHY address register */ -#define FR_AB_MD_PHY_ADR 0x00000c30 -#define FRF_AB_MD_PHY_ADR_LBN 0 -#define FRF_AB_MD_PHY_ADR_WIDTH 16 - -/* MD_ID_REG: PHY management ID register */ -#define FR_AB_MD_ID 0x00000c40 -#define FRF_AB_MD_PRT_ADR_LBN 11 -#define FRF_AB_MD_PRT_ADR_WIDTH 5 -#define FRF_AB_MD_DEV_ADR_LBN 6 -#define FRF_AB_MD_DEV_ADR_WIDTH 5 - -/* MD_STAT_REG: PHY management status & mask register */ -#define FR_AB_MD_STAT 0x00000c50 -#define FRF_AB_MD_PINT_LBN 4 -#define FRF_AB_MD_PINT_WIDTH 1 -#define FRF_AB_MD_DONE_LBN 3 -#define FRF_AB_MD_DONE_WIDTH 1 -#define FRF_AB_MD_BSERR_LBN 2 -#define FRF_AB_MD_BSERR_WIDTH 1 -#define FRF_AB_MD_LNFL_LBN 1 -#define FRF_AB_MD_LNFL_WIDTH 1 -#define FRF_AB_MD_BSY_LBN 0 -#define FRF_AB_MD_BSY_WIDTH 1 - -/* MAC_STAT_DMA_REG: Port MAC statistical counter DMA register */ -#define FR_AB_MAC_STAT_DMA 0x00000c60 -#define FRF_AB_MAC_STAT_DMA_CMD_LBN 48 -#define FRF_AB_MAC_STAT_DMA_CMD_WIDTH 1 -#define FRF_AB_MAC_STAT_DMA_ADR_LBN 0 -#define FRF_AB_MAC_STAT_DMA_ADR_WIDTH 48 - -/* MAC_CTRL_REG: Port MAC control register */ -#define FR_AB_MAC_CTRL 0x00000c80 -#define FRF_AB_MAC_XOFF_VAL_LBN 16 -#define FRF_AB_MAC_XOFF_VAL_WIDTH 16 -#define FRF_BB_TXFIFO_DRAIN_EN_LBN 7 -#define FRF_BB_TXFIFO_DRAIN_EN_WIDTH 1 -#define FRF_AB_MAC_XG_DISTXCRC_LBN 5 -#define FRF_AB_MAC_XG_DISTXCRC_WIDTH 1 -#define FRF_AB_MAC_BCAD_ACPT_LBN 4 -#define FRF_AB_MAC_BCAD_ACPT_WIDTH 1 -#define FRF_AB_MAC_UC_PROM_LBN 3 -#define FRF_AB_MAC_UC_PROM_WIDTH 1 -#define FRF_AB_MAC_LINK_STATUS_LBN 2 -#define FRF_AB_MAC_LINK_STATUS_WIDTH 1 -#define FRF_AB_MAC_SPEED_LBN 0 -#define FRF_AB_MAC_SPEED_WIDTH 2 -#define FFE_AB_MAC_SPEED_10G 3 -#define 
FFE_AB_MAC_SPEED_1G 2 -#define FFE_AB_MAC_SPEED_100M 1 -#define FFE_AB_MAC_SPEED_10M 0 - -/* GEN_MODE_REG: General Purpose mode register (external interrupt mask) */ -#define FR_BB_GEN_MODE 0x00000c90 -#define FRF_BB_XFP_PHY_INT_POL_SEL_LBN 3 -#define FRF_BB_XFP_PHY_INT_POL_SEL_WIDTH 1 -#define FRF_BB_XG_PHY_INT_POL_SEL_LBN 2 -#define FRF_BB_XG_PHY_INT_POL_SEL_WIDTH 1 -#define FRF_BB_XFP_PHY_INT_MASK_LBN 1 -#define FRF_BB_XFP_PHY_INT_MASK_WIDTH 1 -#define FRF_BB_XG_PHY_INT_MASK_LBN 0 -#define FRF_BB_XG_PHY_INT_MASK_WIDTH 1 - -/* MAC_MC_HASH_REG0: Multicast address hash table */ -#define FR_AB_MAC_MC_HASH_REG0 0x00000ca0 -#define FRF_AB_MAC_MCAST_HASH0_LBN 0 -#define FRF_AB_MAC_MCAST_HASH0_WIDTH 128 - -/* MAC_MC_HASH_REG1: Multicast address hash table */ -#define FR_AB_MAC_MC_HASH_REG1 0x00000cb0 -#define FRF_AB_MAC_MCAST_HASH1_LBN 0 -#define FRF_AB_MAC_MCAST_HASH1_WIDTH 128 - -/* GM_CFG1_REG: GMAC configuration register 1 */ -#define FR_AB_GM_CFG1 0x00000e00 -#define FRF_AB_GM_SW_RST_LBN 31 -#define FRF_AB_GM_SW_RST_WIDTH 1 -#define FRF_AB_GM_SIM_RST_LBN 30 -#define FRF_AB_GM_SIM_RST_WIDTH 1 -#define FRF_AB_GM_RST_RX_MAC_CTL_LBN 19 -#define FRF_AB_GM_RST_RX_MAC_CTL_WIDTH 1 -#define FRF_AB_GM_RST_TX_MAC_CTL_LBN 18 -#define FRF_AB_GM_RST_TX_MAC_CTL_WIDTH 1 -#define FRF_AB_GM_RST_RX_FUNC_LBN 17 -#define FRF_AB_GM_RST_RX_FUNC_WIDTH 1 -#define FRF_AB_GM_RST_TX_FUNC_LBN 16 -#define FRF_AB_GM_RST_TX_FUNC_WIDTH 1 -#define FRF_AB_GM_LOOP_LBN 8 -#define FRF_AB_GM_LOOP_WIDTH 1 -#define FRF_AB_GM_RX_FC_EN_LBN 5 -#define FRF_AB_GM_RX_FC_EN_WIDTH 1 -#define FRF_AB_GM_TX_FC_EN_LBN 4 -#define FRF_AB_GM_TX_FC_EN_WIDTH 1 -#define FRF_AB_GM_SYNC_RXEN_LBN 3 -#define FRF_AB_GM_SYNC_RXEN_WIDTH 1 -#define FRF_AB_GM_RX_EN_LBN 2 -#define FRF_AB_GM_RX_EN_WIDTH 1 -#define FRF_AB_GM_SYNC_TXEN_LBN 1 -#define FRF_AB_GM_SYNC_TXEN_WIDTH 1 -#define FRF_AB_GM_TX_EN_LBN 0 -#define FRF_AB_GM_TX_EN_WIDTH 1 - -/* GM_CFG2_REG: GMAC configuration register 2 */ -#define FR_AB_GM_CFG2 0x00000e10 -#define FRF_AB_GM_PAMBL_LEN_LBN 12 -#define FRF_AB_GM_PAMBL_LEN_WIDTH 4 -#define FRF_AB_GM_IF_MODE_LBN 8 -#define FRF_AB_GM_IF_MODE_WIDTH 2 -#define FFE_AB_IF_MODE_BYTE_MODE 2 -#define FFE_AB_IF_MODE_NIBBLE_MODE 1 -#define FRF_AB_GM_HUGE_FRM_EN_LBN 5 -#define FRF_AB_GM_HUGE_FRM_EN_WIDTH 1 -#define FRF_AB_GM_LEN_CHK_LBN 4 -#define FRF_AB_GM_LEN_CHK_WIDTH 1 -#define FRF_AB_GM_PAD_CRC_EN_LBN 2 -#define FRF_AB_GM_PAD_CRC_EN_WIDTH 1 -#define FRF_AB_GM_CRC_EN_LBN 1 -#define FRF_AB_GM_CRC_EN_WIDTH 1 -#define FRF_AB_GM_FD_LBN 0 -#define FRF_AB_GM_FD_WIDTH 1 - -/* GM_IPG_REG: GMAC IPG register */ -#define FR_AB_GM_IPG 0x00000e20 -#define FRF_AB_GM_NONB2B_IPG1_LBN 24 -#define FRF_AB_GM_NONB2B_IPG1_WIDTH 7 -#define FRF_AB_GM_NONB2B_IPG2_LBN 16 -#define FRF_AB_GM_NONB2B_IPG2_WIDTH 7 -#define FRF_AB_GM_MIN_IPG_ENF_LBN 8 -#define FRF_AB_GM_MIN_IPG_ENF_WIDTH 8 -#define FRF_AB_GM_B2B_IPG_LBN 0 -#define FRF_AB_GM_B2B_IPG_WIDTH 7 - -/* GM_HD_REG: GMAC half duplex register */ -#define FR_AB_GM_HD 0x00000e30 -#define FRF_AB_GM_ALT_BOFF_VAL_LBN 20 -#define FRF_AB_GM_ALT_BOFF_VAL_WIDTH 4 -#define FRF_AB_GM_ALT_BOFF_EN_LBN 19 -#define FRF_AB_GM_ALT_BOFF_EN_WIDTH 1 -#define FRF_AB_GM_BP_NO_BOFF_LBN 18 -#define FRF_AB_GM_BP_NO_BOFF_WIDTH 1 -#define FRF_AB_GM_DIS_BOFF_LBN 17 -#define FRF_AB_GM_DIS_BOFF_WIDTH 1 -#define FRF_AB_GM_EXDEF_TX_EN_LBN 16 -#define FRF_AB_GM_EXDEF_TX_EN_WIDTH 1 -#define FRF_AB_GM_RTRY_LIMIT_LBN 12 -#define FRF_AB_GM_RTRY_LIMIT_WIDTH 4 -#define FRF_AB_GM_COL_WIN_LBN 0 -#define FRF_AB_GM_COL_WIN_WIDTH 10 - -/* GM_MAX_FLEN_REG: GMAC maximum 
frame length register */ -#define FR_AB_GM_MAX_FLEN 0x00000e40 -#define FRF_AB_GM_MAX_FLEN_LBN 0 -#define FRF_AB_GM_MAX_FLEN_WIDTH 16 - -/* GM_TEST_REG: GMAC test register */ -#define FR_AB_GM_TEST 0x00000e70 -#define FRF_AB_GM_MAX_BOFF_LBN 3 -#define FRF_AB_GM_MAX_BOFF_WIDTH 1 -#define FRF_AB_GM_REG_TX_FLOW_EN_LBN 2 -#define FRF_AB_GM_REG_TX_FLOW_EN_WIDTH 1 -#define FRF_AB_GM_TEST_PAUSE_LBN 1 -#define FRF_AB_GM_TEST_PAUSE_WIDTH 1 -#define FRF_AB_GM_SHORT_SLOT_LBN 0 -#define FRF_AB_GM_SHORT_SLOT_WIDTH 1 - -/* GM_ADR1_REG: GMAC station address register 1 */ -#define FR_AB_GM_ADR1 0x00000f00 -#define FRF_AB_GM_ADR_B0_LBN 24 -#define FRF_AB_GM_ADR_B0_WIDTH 8 -#define FRF_AB_GM_ADR_B1_LBN 16 -#define FRF_AB_GM_ADR_B1_WIDTH 8 -#define FRF_AB_GM_ADR_B2_LBN 8 -#define FRF_AB_GM_ADR_B2_WIDTH 8 -#define FRF_AB_GM_ADR_B3_LBN 0 -#define FRF_AB_GM_ADR_B3_WIDTH 8 - -/* GM_ADR2_REG: GMAC station address register 2 */ -#define FR_AB_GM_ADR2 0x00000f10 -#define FRF_AB_GM_ADR_B4_LBN 24 -#define FRF_AB_GM_ADR_B4_WIDTH 8 -#define FRF_AB_GM_ADR_B5_LBN 16 -#define FRF_AB_GM_ADR_B5_WIDTH 8 - -/* GMF_CFG0_REG: GMAC FIFO configuration register 0 */ -#define FR_AB_GMF_CFG0 0x00000f20 -#define FRF_AB_GMF_FTFENRPLY_LBN 20 -#define FRF_AB_GMF_FTFENRPLY_WIDTH 1 -#define FRF_AB_GMF_STFENRPLY_LBN 19 -#define FRF_AB_GMF_STFENRPLY_WIDTH 1 -#define FRF_AB_GMF_FRFENRPLY_LBN 18 -#define FRF_AB_GMF_FRFENRPLY_WIDTH 1 -#define FRF_AB_GMF_SRFENRPLY_LBN 17 -#define FRF_AB_GMF_SRFENRPLY_WIDTH 1 -#define FRF_AB_GMF_WTMENRPLY_LBN 16 -#define FRF_AB_GMF_WTMENRPLY_WIDTH 1 -#define FRF_AB_GMF_FTFENREQ_LBN 12 -#define FRF_AB_GMF_FTFENREQ_WIDTH 1 -#define FRF_AB_GMF_STFENREQ_LBN 11 -#define FRF_AB_GMF_STFENREQ_WIDTH 1 -#define FRF_AB_GMF_FRFENREQ_LBN 10 -#define FRF_AB_GMF_FRFENREQ_WIDTH 1 -#define FRF_AB_GMF_SRFENREQ_LBN 9 -#define FRF_AB_GMF_SRFENREQ_WIDTH 1 -#define FRF_AB_GMF_WTMENREQ_LBN 8 -#define FRF_AB_GMF_WTMENREQ_WIDTH 1 -#define FRF_AB_GMF_HSTRSTFT_LBN 4 -#define FRF_AB_GMF_HSTRSTFT_WIDTH 1 -#define FRF_AB_GMF_HSTRSTST_LBN 3 -#define FRF_AB_GMF_HSTRSTST_WIDTH 1 -#define FRF_AB_GMF_HSTRSTFR_LBN 2 -#define FRF_AB_GMF_HSTRSTFR_WIDTH 1 -#define FRF_AB_GMF_HSTRSTSR_LBN 1 -#define FRF_AB_GMF_HSTRSTSR_WIDTH 1 -#define FRF_AB_GMF_HSTRSTWT_LBN 0 -#define FRF_AB_GMF_HSTRSTWT_WIDTH 1 - -/* GMF_CFG1_REG: GMAC FIFO configuration register 1 */ -#define FR_AB_GMF_CFG1 0x00000f30 -#define FRF_AB_GMF_CFGFRTH_LBN 16 -#define FRF_AB_GMF_CFGFRTH_WIDTH 5 -#define FRF_AB_GMF_CFGXOFFRTX_LBN 0 -#define FRF_AB_GMF_CFGXOFFRTX_WIDTH 16 - -/* GMF_CFG2_REG: GMAC FIFO configuration register 2 */ -#define FR_AB_GMF_CFG2 0x00000f40 -#define FRF_AB_GMF_CFGHWM_LBN 16 -#define FRF_AB_GMF_CFGHWM_WIDTH 6 -#define FRF_AB_GMF_CFGLWM_LBN 0 -#define FRF_AB_GMF_CFGLWM_WIDTH 6 - -/* GMF_CFG3_REG: GMAC FIFO configuration register 3 */ -#define FR_AB_GMF_CFG3 0x00000f50 -#define FRF_AB_GMF_CFGHWMFT_LBN 16 -#define FRF_AB_GMF_CFGHWMFT_WIDTH 6 -#define FRF_AB_GMF_CFGFTTH_LBN 0 -#define FRF_AB_GMF_CFGFTTH_WIDTH 6 - -/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */ -#define FR_AB_GMF_CFG4 0x00000f60 -#define FRF_AB_GMF_HSTFLTRFRM_LBN 0 -#define FRF_AB_GMF_HSTFLTRFRM_WIDTH 18 - -/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */ -#define FR_AB_GMF_CFG5 0x00000f70 -#define FRF_AB_GMF_CFGHDPLX_LBN 22 -#define FRF_AB_GMF_CFGHDPLX_WIDTH 1 -#define FRF_AB_GMF_SRFULL_LBN 21 -#define FRF_AB_GMF_SRFULL_WIDTH 1 -#define FRF_AB_GMF_HSTSRFULLCLR_LBN 20 -#define FRF_AB_GMF_HSTSRFULLCLR_WIDTH 1 -#define FRF_AB_GMF_CFGBYTMODE_LBN 19 -#define FRF_AB_GMF_CFGBYTMODE_WIDTH 1 -#define 
FRF_AB_GMF_HSTDRPLT64_LBN 18 -#define FRF_AB_GMF_HSTDRPLT64_WIDTH 1 -#define FRF_AB_GMF_HSTFLTRFRMDC_LBN 0 -#define FRF_AB_GMF_HSTFLTRFRMDC_WIDTH 18 - -/* TX_SRC_MAC_TBL: Transmit IP source address filter table */ -#define FR_BB_TX_SRC_MAC_TBL 0x00001000 -#define FR_BB_TX_SRC_MAC_TBL_STEP 16 -#define FR_BB_TX_SRC_MAC_TBL_ROWS 16 -#define FRF_BB_TX_SRC_MAC_ADR_1_LBN 64 -#define FRF_BB_TX_SRC_MAC_ADR_1_WIDTH 48 -#define FRF_BB_TX_SRC_MAC_ADR_0_LBN 0 -#define FRF_BB_TX_SRC_MAC_ADR_0_WIDTH 48 - -/* TX_SRC_MAC_CTL_REG: Transmit MAC source address filter control */ -#define FR_BB_TX_SRC_MAC_CTL 0x00001100 -#define FRF_BB_TX_SRC_DROP_CTR_LBN 16 -#define FRF_BB_TX_SRC_DROP_CTR_WIDTH 16 -#define FRF_BB_TX_SRC_FLTR_EN_LBN 15 -#define FRF_BB_TX_SRC_FLTR_EN_WIDTH 1 -#define FRF_BB_TX_DROP_CTR_CLR_LBN 12 -#define FRF_BB_TX_DROP_CTR_CLR_WIDTH 1 -#define FRF_BB_TX_MAC_QID_SEL_LBN 0 -#define FRF_BB_TX_MAC_QID_SEL_WIDTH 3 - -/* XM_ADR_LO_REG: XGMAC address register low */ -#define FR_AB_XM_ADR_LO 0x00001200 -#define FRF_AB_XM_ADR_LO_LBN 0 -#define FRF_AB_XM_ADR_LO_WIDTH 32 - -/* XM_ADR_HI_REG: XGMAC address register high */ -#define FR_AB_XM_ADR_HI 0x00001210 -#define FRF_AB_XM_ADR_HI_LBN 0 -#define FRF_AB_XM_ADR_HI_WIDTH 16 - -/* XM_GLB_CFG_REG: XGMAC global configuration */ -#define FR_AB_XM_GLB_CFG 0x00001220 -#define FRF_AB_XM_RMTFLT_GEN_LBN 17 -#define FRF_AB_XM_RMTFLT_GEN_WIDTH 1 -#define FRF_AB_XM_DEBUG_MODE_LBN 16 -#define FRF_AB_XM_DEBUG_MODE_WIDTH 1 -#define FRF_AB_XM_RX_STAT_EN_LBN 11 -#define FRF_AB_XM_RX_STAT_EN_WIDTH 1 -#define FRF_AB_XM_TX_STAT_EN_LBN 10 -#define FRF_AB_XM_TX_STAT_EN_WIDTH 1 -#define FRF_AB_XM_RX_JUMBO_MODE_LBN 6 -#define FRF_AB_XM_RX_JUMBO_MODE_WIDTH 1 -#define FRF_AB_XM_WAN_MODE_LBN 5 -#define FRF_AB_XM_WAN_MODE_WIDTH 1 -#define FRF_AB_XM_INTCLR_MODE_LBN 3 -#define FRF_AB_XM_INTCLR_MODE_WIDTH 1 -#define FRF_AB_XM_CORE_RST_LBN 0 -#define FRF_AB_XM_CORE_RST_WIDTH 1 - -/* XM_TX_CFG_REG: XGMAC transmit configuration */ -#define FR_AB_XM_TX_CFG 0x00001230 -#define FRF_AB_XM_TX_PROG_LBN 24 -#define FRF_AB_XM_TX_PROG_WIDTH 1 -#define FRF_AB_XM_IPG_LBN 16 -#define FRF_AB_XM_IPG_WIDTH 4 -#define FRF_AB_XM_FCNTL_LBN 10 -#define FRF_AB_XM_FCNTL_WIDTH 1 -#define FRF_AB_XM_TXCRC_LBN 8 -#define FRF_AB_XM_TXCRC_WIDTH 1 -#define FRF_AB_XM_EDRC_LBN 6 -#define FRF_AB_XM_EDRC_WIDTH 1 -#define FRF_AB_XM_AUTO_PAD_LBN 5 -#define FRF_AB_XM_AUTO_PAD_WIDTH 1 -#define FRF_AB_XM_TX_PRMBL_LBN 2 -#define FRF_AB_XM_TX_PRMBL_WIDTH 1 -#define FRF_AB_XM_TXEN_LBN 1 -#define FRF_AB_XM_TXEN_WIDTH 1 -#define FRF_AB_XM_TX_RST_LBN 0 -#define FRF_AB_XM_TX_RST_WIDTH 1 - -/* XM_RX_CFG_REG: XGMAC receive configuration */ -#define FR_AB_XM_RX_CFG 0x00001240 -#define FRF_AB_XM_PASS_LENERR_LBN 26 -#define FRF_AB_XM_PASS_LENERR_WIDTH 1 -#define FRF_AB_XM_PASS_CRC_ERR_LBN 25 -#define FRF_AB_XM_PASS_CRC_ERR_WIDTH 1 -#define FRF_AB_XM_PASS_PRMBLE_ERR_LBN 24 -#define FRF_AB_XM_PASS_PRMBLE_ERR_WIDTH 1 -#define FRF_AB_XM_REJ_BCAST_LBN 20 -#define FRF_AB_XM_REJ_BCAST_WIDTH 1 -#define FRF_AB_XM_ACPT_ALL_MCAST_LBN 11 -#define FRF_AB_XM_ACPT_ALL_MCAST_WIDTH 1 -#define FRF_AB_XM_ACPT_ALL_UCAST_LBN 9 -#define FRF_AB_XM_ACPT_ALL_UCAST_WIDTH 1 -#define FRF_AB_XM_AUTO_DEPAD_LBN 8 -#define FRF_AB_XM_AUTO_DEPAD_WIDTH 1 -#define FRF_AB_XM_RXCRC_LBN 3 -#define FRF_AB_XM_RXCRC_WIDTH 1 -#define FRF_AB_XM_RX_PRMBL_LBN 2 -#define FRF_AB_XM_RX_PRMBL_WIDTH 1 -#define FRF_AB_XM_RXEN_LBN 1 -#define FRF_AB_XM_RXEN_WIDTH 1 -#define FRF_AB_XM_RX_RST_LBN 0 -#define FRF_AB_XM_RX_RST_WIDTH 1 - -/* XM_MGT_INT_MASK: documentation to be written for 
sum_XM_MGT_INT_MASK */ -#define FR_AB_XM_MGT_INT_MASK 0x00001250 -#define FRF_AB_XM_MSK_STA_INTR_LBN 16 -#define FRF_AB_XM_MSK_STA_INTR_WIDTH 1 -#define FRF_AB_XM_MSK_STAT_CNTR_HF_LBN 9 -#define FRF_AB_XM_MSK_STAT_CNTR_HF_WIDTH 1 -#define FRF_AB_XM_MSK_STAT_CNTR_OF_LBN 8 -#define FRF_AB_XM_MSK_STAT_CNTR_OF_WIDTH 1 -#define FRF_AB_XM_MSK_PRMBLE_ERR_LBN 2 -#define FRF_AB_XM_MSK_PRMBLE_ERR_WIDTH 1 -#define FRF_AB_XM_MSK_RMTFLT_LBN 1 -#define FRF_AB_XM_MSK_RMTFLT_WIDTH 1 -#define FRF_AB_XM_MSK_LCLFLT_LBN 0 -#define FRF_AB_XM_MSK_LCLFLT_WIDTH 1 - -/* XM_FC_REG: XGMAC flow control register */ -#define FR_AB_XM_FC 0x00001270 -#define FRF_AB_XM_PAUSE_TIME_LBN 16 -#define FRF_AB_XM_PAUSE_TIME_WIDTH 16 -#define FRF_AB_XM_RX_MAC_STAT_LBN 11 -#define FRF_AB_XM_RX_MAC_STAT_WIDTH 1 -#define FRF_AB_XM_TX_MAC_STAT_LBN 10 -#define FRF_AB_XM_TX_MAC_STAT_WIDTH 1 -#define FRF_AB_XM_MCNTL_PASS_LBN 8 -#define FRF_AB_XM_MCNTL_PASS_WIDTH 2 -#define FRF_AB_XM_REJ_CNTL_UCAST_LBN 6 -#define FRF_AB_XM_REJ_CNTL_UCAST_WIDTH 1 -#define FRF_AB_XM_REJ_CNTL_MCAST_LBN 5 -#define FRF_AB_XM_REJ_CNTL_MCAST_WIDTH 1 -#define FRF_AB_XM_ZPAUSE_LBN 2 -#define FRF_AB_XM_ZPAUSE_WIDTH 1 -#define FRF_AB_XM_XMIT_PAUSE_LBN 1 -#define FRF_AB_XM_XMIT_PAUSE_WIDTH 1 -#define FRF_AB_XM_DIS_FCNTL_LBN 0 -#define FRF_AB_XM_DIS_FCNTL_WIDTH 1 - -/* XM_PAUSE_TIME_REG: XGMAC pause time register */ -#define FR_AB_XM_PAUSE_TIME 0x00001290 -#define FRF_AB_XM_TX_PAUSE_CNT_LBN 16 -#define FRF_AB_XM_TX_PAUSE_CNT_WIDTH 16 -#define FRF_AB_XM_RX_PAUSE_CNT_LBN 0 -#define FRF_AB_XM_RX_PAUSE_CNT_WIDTH 16 - -/* XM_TX_PARAM_REG: XGMAC transmit parameter register */ -#define FR_AB_XM_TX_PARAM 0x000012d0 -#define FRF_AB_XM_TX_JUMBO_MODE_LBN 31 -#define FRF_AB_XM_TX_JUMBO_MODE_WIDTH 1 -#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_LBN 19 -#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH 11 -#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN 16 -#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH 3 -#define FRF_AB_XM_PAD_CHAR_LBN 0 -#define FRF_AB_XM_PAD_CHAR_WIDTH 8 - -/* XM_RX_PARAM_REG: XGMAC receive parameter register */ -#define FR_AB_XM_RX_PARAM 0x000012e0 -#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_LBN 3 -#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH 11 -#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN 0 -#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH 3 - -/* XM_MGT_INT_MSK_REG: XGMAC management interrupt mask register */ -#define FR_AB_XM_MGT_INT_MSK 0x000012f0 -#define FRF_AB_XM_STAT_CNTR_OF_LBN 9 -#define FRF_AB_XM_STAT_CNTR_OF_WIDTH 1 -#define FRF_AB_XM_STAT_CNTR_HF_LBN 8 -#define FRF_AB_XM_STAT_CNTR_HF_WIDTH 1 -#define FRF_AB_XM_PRMBLE_ERR_LBN 2 -#define FRF_AB_XM_PRMBLE_ERR_WIDTH 1 -#define FRF_AB_XM_RMTFLT_LBN 1 -#define FRF_AB_XM_RMTFLT_WIDTH 1 -#define FRF_AB_XM_LCLFLT_LBN 0 -#define FRF_AB_XM_LCLFLT_WIDTH 1 - -/* XX_PWR_RST_REG: XGXS/XAUI powerdown/reset register */ -#define FR_AB_XX_PWR_RST 0x00001300 -#define FRF_AB_XX_PWRDND_SIG_LBN 31 -#define FRF_AB_XX_PWRDND_SIG_WIDTH 1 -#define FRF_AB_XX_PWRDNC_SIG_LBN 30 -#define FRF_AB_XX_PWRDNC_SIG_WIDTH 1 -#define FRF_AB_XX_PWRDNB_SIG_LBN 29 -#define FRF_AB_XX_PWRDNB_SIG_WIDTH 1 -#define FRF_AB_XX_PWRDNA_SIG_LBN 28 -#define FRF_AB_XX_PWRDNA_SIG_WIDTH 1 -#define FRF_AB_XX_SIM_MODE_LBN 27 -#define FRF_AB_XX_SIM_MODE_WIDTH 1 -#define FRF_AB_XX_RSTPLLCD_SIG_LBN 25 -#define FRF_AB_XX_RSTPLLCD_SIG_WIDTH 1 -#define FRF_AB_XX_RSTPLLAB_SIG_LBN 24 -#define FRF_AB_XX_RSTPLLAB_SIG_WIDTH 1 -#define FRF_AB_XX_RESETD_SIG_LBN 23 -#define FRF_AB_XX_RESETD_SIG_WIDTH 1 -#define FRF_AB_XX_RESETC_SIG_LBN 22 -#define FRF_AB_XX_RESETC_SIG_WIDTH 1 -#define FRF_AB_XX_RESETB_SIG_LBN 21 
-#define FRF_AB_XX_RESETB_SIG_WIDTH 1 -#define FRF_AB_XX_RESETA_SIG_LBN 20 -#define FRF_AB_XX_RESETA_SIG_WIDTH 1 -#define FRF_AB_XX_RSTXGXSRX_SIG_LBN 18 -#define FRF_AB_XX_RSTXGXSRX_SIG_WIDTH 1 -#define FRF_AB_XX_RSTXGXSTX_SIG_LBN 17 -#define FRF_AB_XX_RSTXGXSTX_SIG_WIDTH 1 -#define FRF_AB_XX_SD_RST_ACT_LBN 16 -#define FRF_AB_XX_SD_RST_ACT_WIDTH 1 -#define FRF_AB_XX_PWRDND_EN_LBN 15 -#define FRF_AB_XX_PWRDND_EN_WIDTH 1 -#define FRF_AB_XX_PWRDNC_EN_LBN 14 -#define FRF_AB_XX_PWRDNC_EN_WIDTH 1 -#define FRF_AB_XX_PWRDNB_EN_LBN 13 -#define FRF_AB_XX_PWRDNB_EN_WIDTH 1 -#define FRF_AB_XX_PWRDNA_EN_LBN 12 -#define FRF_AB_XX_PWRDNA_EN_WIDTH 1 -#define FRF_AB_XX_RSTPLLCD_EN_LBN 9 -#define FRF_AB_XX_RSTPLLCD_EN_WIDTH 1 -#define FRF_AB_XX_RSTPLLAB_EN_LBN 8 -#define FRF_AB_XX_RSTPLLAB_EN_WIDTH 1 -#define FRF_AB_XX_RESETD_EN_LBN 7 -#define FRF_AB_XX_RESETD_EN_WIDTH 1 -#define FRF_AB_XX_RESETC_EN_LBN 6 -#define FRF_AB_XX_RESETC_EN_WIDTH 1 -#define FRF_AB_XX_RESETB_EN_LBN 5 -#define FRF_AB_XX_RESETB_EN_WIDTH 1 -#define FRF_AB_XX_RESETA_EN_LBN 4 -#define FRF_AB_XX_RESETA_EN_WIDTH 1 -#define FRF_AB_XX_RSTXGXSRX_EN_LBN 2 -#define FRF_AB_XX_RSTXGXSRX_EN_WIDTH 1 -#define FRF_AB_XX_RSTXGXSTX_EN_LBN 1 -#define FRF_AB_XX_RSTXGXSTX_EN_WIDTH 1 -#define FRF_AB_XX_RST_XX_EN_LBN 0 -#define FRF_AB_XX_RST_XX_EN_WIDTH 1 - -/* XX_SD_CTL_REG: XGXS/XAUI powerdown/reset control register */ -#define FR_AB_XX_SD_CTL 0x00001310 -#define FRF_AB_XX_TERMADJ1_LBN 17 -#define FRF_AB_XX_TERMADJ1_WIDTH 1 -#define FRF_AB_XX_TERMADJ0_LBN 16 -#define FRF_AB_XX_TERMADJ0_WIDTH 1 -#define FRF_AB_XX_HIDRVD_LBN 15 -#define FRF_AB_XX_HIDRVD_WIDTH 1 -#define FRF_AB_XX_LODRVD_LBN 14 -#define FRF_AB_XX_LODRVD_WIDTH 1 -#define FRF_AB_XX_HIDRVC_LBN 13 -#define FRF_AB_XX_HIDRVC_WIDTH 1 -#define FRF_AB_XX_LODRVC_LBN 12 -#define FRF_AB_XX_LODRVC_WIDTH 1 -#define FRF_AB_XX_HIDRVB_LBN 11 -#define FRF_AB_XX_HIDRVB_WIDTH 1 -#define FRF_AB_XX_LODRVB_LBN 10 -#define FRF_AB_XX_LODRVB_WIDTH 1 -#define FRF_AB_XX_HIDRVA_LBN 9 -#define FRF_AB_XX_HIDRVA_WIDTH 1 -#define FRF_AB_XX_LODRVA_LBN 8 -#define FRF_AB_XX_LODRVA_WIDTH 1 -#define FRF_AB_XX_LPBKD_LBN 3 -#define FRF_AB_XX_LPBKD_WIDTH 1 -#define FRF_AB_XX_LPBKC_LBN 2 -#define FRF_AB_XX_LPBKC_WIDTH 1 -#define FRF_AB_XX_LPBKB_LBN 1 -#define FRF_AB_XX_LPBKB_WIDTH 1 -#define FRF_AB_XX_LPBKA_LBN 0 -#define FRF_AB_XX_LPBKA_WIDTH 1 - -/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */ -#define FR_AB_XX_TXDRV_CTL 0x00001320 -#define FRF_AB_XX_DEQD_LBN 28 -#define FRF_AB_XX_DEQD_WIDTH 4 -#define FRF_AB_XX_DEQC_LBN 24 -#define FRF_AB_XX_DEQC_WIDTH 4 -#define FRF_AB_XX_DEQB_LBN 20 -#define FRF_AB_XX_DEQB_WIDTH 4 -#define FRF_AB_XX_DEQA_LBN 16 -#define FRF_AB_XX_DEQA_WIDTH 4 -#define FRF_AB_XX_DTXD_LBN 12 -#define FRF_AB_XX_DTXD_WIDTH 4 -#define FRF_AB_XX_DTXC_LBN 8 -#define FRF_AB_XX_DTXC_WIDTH 4 -#define FRF_AB_XX_DTXB_LBN 4 -#define FRF_AB_XX_DTXB_WIDTH 4 -#define FRF_AB_XX_DTXA_LBN 0 -#define FRF_AB_XX_DTXA_WIDTH 4 - -/* XX_PRBS_CTL_REG: documentation to be written for sum_XX_PRBS_CTL_REG */ -#define FR_AB_XX_PRBS_CTL 0x00001330 -#define FRF_AB_XX_CH3_RX_PRBS_SEL_LBN 30 -#define FRF_AB_XX_CH3_RX_PRBS_SEL_WIDTH 2 -#define FRF_AB_XX_CH3_RX_PRBS_INV_LBN 29 -#define FRF_AB_XX_CH3_RX_PRBS_INV_WIDTH 1 -#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_LBN 28 -#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_WIDTH 1 -#define FRF_AB_XX_CH2_RX_PRBS_SEL_LBN 26 -#define FRF_AB_XX_CH2_RX_PRBS_SEL_WIDTH 2 -#define FRF_AB_XX_CH2_RX_PRBS_INV_LBN 25 -#define FRF_AB_XX_CH2_RX_PRBS_INV_WIDTH 1 -#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_LBN 24 -#define 
FRF_AB_XX_CH2_RX_PRBS_CHKEN_WIDTH 1 -#define FRF_AB_XX_CH1_RX_PRBS_SEL_LBN 22 -#define FRF_AB_XX_CH1_RX_PRBS_SEL_WIDTH 2 -#define FRF_AB_XX_CH1_RX_PRBS_INV_LBN 21 -#define FRF_AB_XX_CH1_RX_PRBS_INV_WIDTH 1 -#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_LBN 20 -#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_WIDTH 1 -#define FRF_AB_XX_CH0_RX_PRBS_SEL_LBN 18 -#define FRF_AB_XX_CH0_RX_PRBS_SEL_WIDTH 2 -#define FRF_AB_XX_CH0_RX_PRBS_INV_LBN 17 -#define FRF_AB_XX_CH0_RX_PRBS_INV_WIDTH 1 -#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_LBN 16 -#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_WIDTH 1 -#define FRF_AB_XX_CH3_TX_PRBS_SEL_LBN 14 -#define FRF_AB_XX_CH3_TX_PRBS_SEL_WIDTH 2 -#define FRF_AB_XX_CH3_TX_PRBS_INV_LBN 13 -#define FRF_AB_XX_CH3_TX_PRBS_INV_WIDTH 1 -#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_LBN 12 -#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_WIDTH 1 -#define FRF_AB_XX_CH2_TX_PRBS_SEL_LBN 10 -#define FRF_AB_XX_CH2_TX_PRBS_SEL_WIDTH 2 -#define FRF_AB_XX_CH2_TX_PRBS_INV_LBN 9 -#define FRF_AB_XX_CH2_TX_PRBS_INV_WIDTH 1 -#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_LBN 8 -#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_WIDTH 1 -#define FRF_AB_XX_CH1_TX_PRBS_SEL_LBN 6 -#define FRF_AB_XX_CH1_TX_PRBS_SEL_WIDTH 2 -#define FRF_AB_XX_CH1_TX_PRBS_INV_LBN 5 -#define FRF_AB_XX_CH1_TX_PRBS_INV_WIDTH 1 -#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_LBN 4 -#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_WIDTH 1 -#define FRF_AB_XX_CH0_TX_PRBS_SEL_LBN 2 -#define FRF_AB_XX_CH0_TX_PRBS_SEL_WIDTH 2 -#define FRF_AB_XX_CH0_TX_PRBS_INV_LBN 1 -#define FRF_AB_XX_CH0_TX_PRBS_INV_WIDTH 1 -#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_LBN 0 -#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_WIDTH 1 - -/* XX_PRBS_CHK_REG: documentation to be written for sum_XX_PRBS_CHK_REG */ -#define FR_AB_XX_PRBS_CHK 0x00001340 -#define FRF_AB_XX_REV_LB_EN_LBN 16 -#define FRF_AB_XX_REV_LB_EN_WIDTH 1 -#define FRF_AB_XX_CH3_DEG_DET_LBN 15 -#define FRF_AB_XX_CH3_DEG_DET_WIDTH 1 -#define FRF_AB_XX_CH3_LFSR_LOCK_IND_LBN 14 -#define FRF_AB_XX_CH3_LFSR_LOCK_IND_WIDTH 1 -#define FRF_AB_XX_CH3_PRBS_FRUN_LBN 13 -#define FRF_AB_XX_CH3_PRBS_FRUN_WIDTH 1 -#define FRF_AB_XX_CH3_ERR_CHK_LBN 12 -#define FRF_AB_XX_CH3_ERR_CHK_WIDTH 1 -#define FRF_AB_XX_CH2_DEG_DET_LBN 11 -#define FRF_AB_XX_CH2_DEG_DET_WIDTH 1 -#define FRF_AB_XX_CH2_LFSR_LOCK_IND_LBN 10 -#define FRF_AB_XX_CH2_LFSR_LOCK_IND_WIDTH 1 -#define FRF_AB_XX_CH2_PRBS_FRUN_LBN 9 -#define FRF_AB_XX_CH2_PRBS_FRUN_WIDTH 1 -#define FRF_AB_XX_CH2_ERR_CHK_LBN 8 -#define FRF_AB_XX_CH2_ERR_CHK_WIDTH 1 -#define FRF_AB_XX_CH1_DEG_DET_LBN 7 -#define FRF_AB_XX_CH1_DEG_DET_WIDTH 1 -#define FRF_AB_XX_CH1_LFSR_LOCK_IND_LBN 6 -#define FRF_AB_XX_CH1_LFSR_LOCK_IND_WIDTH 1 -#define FRF_AB_XX_CH1_PRBS_FRUN_LBN 5 -#define FRF_AB_XX_CH1_PRBS_FRUN_WIDTH 1 -#define FRF_AB_XX_CH1_ERR_CHK_LBN 4 -#define FRF_AB_XX_CH1_ERR_CHK_WIDTH 1 -#define FRF_AB_XX_CH0_DEG_DET_LBN 3 -#define FRF_AB_XX_CH0_DEG_DET_WIDTH 1 -#define FRF_AB_XX_CH0_LFSR_LOCK_IND_LBN 2 -#define FRF_AB_XX_CH0_LFSR_LOCK_IND_WIDTH 1 -#define FRF_AB_XX_CH0_PRBS_FRUN_LBN 1 -#define FRF_AB_XX_CH0_PRBS_FRUN_WIDTH 1 -#define FRF_AB_XX_CH0_ERR_CHK_LBN 0 -#define FRF_AB_XX_CH0_ERR_CHK_WIDTH 1 - -/* XX_PRBS_ERR_REG: documentation to be written for sum_XX_PRBS_ERR_REG */ -#define FR_AB_XX_PRBS_ERR 0x00001350 -#define FRF_AB_XX_CH3_PRBS_ERR_CNT_LBN 24 -#define FRF_AB_XX_CH3_PRBS_ERR_CNT_WIDTH 8 -#define FRF_AB_XX_CH2_PRBS_ERR_CNT_LBN 16 -#define FRF_AB_XX_CH2_PRBS_ERR_CNT_WIDTH 8 -#define FRF_AB_XX_CH1_PRBS_ERR_CNT_LBN 8 -#define FRF_AB_XX_CH1_PRBS_ERR_CNT_WIDTH 8 -#define FRF_AB_XX_CH0_PRBS_ERR_CNT_LBN 0 -#define FRF_AB_XX_CH0_PRBS_ERR_CNT_WIDTH 8 - -/* XX_CORE_STAT_REG: XAUI XGXS 
core status register */ -#define FR_AB_XX_CORE_STAT 0x00001360 -#define FRF_AB_XX_FORCE_SIG3_LBN 31 -#define FRF_AB_XX_FORCE_SIG3_WIDTH 1 -#define FRF_AB_XX_FORCE_SIG3_VAL_LBN 30 -#define FRF_AB_XX_FORCE_SIG3_VAL_WIDTH 1 -#define FRF_AB_XX_FORCE_SIG2_LBN 29 -#define FRF_AB_XX_FORCE_SIG2_WIDTH 1 -#define FRF_AB_XX_FORCE_SIG2_VAL_LBN 28 -#define FRF_AB_XX_FORCE_SIG2_VAL_WIDTH 1 -#define FRF_AB_XX_FORCE_SIG1_LBN 27 -#define FRF_AB_XX_FORCE_SIG1_WIDTH 1 -#define FRF_AB_XX_FORCE_SIG1_VAL_LBN 26 -#define FRF_AB_XX_FORCE_SIG1_VAL_WIDTH 1 -#define FRF_AB_XX_FORCE_SIG0_LBN 25 -#define FRF_AB_XX_FORCE_SIG0_WIDTH 1 -#define FRF_AB_XX_FORCE_SIG0_VAL_LBN 24 -#define FRF_AB_XX_FORCE_SIG0_VAL_WIDTH 1 -#define FRF_AB_XX_XGXS_LB_EN_LBN 23 -#define FRF_AB_XX_XGXS_LB_EN_WIDTH 1 -#define FRF_AB_XX_XGMII_LB_EN_LBN 22 -#define FRF_AB_XX_XGMII_LB_EN_WIDTH 1 -#define FRF_AB_XX_MATCH_FAULT_LBN 21 -#define FRF_AB_XX_MATCH_FAULT_WIDTH 1 -#define FRF_AB_XX_ALIGN_DONE_LBN 20 -#define FRF_AB_XX_ALIGN_DONE_WIDTH 1 -#define FRF_AB_XX_SYNC_STAT3_LBN 19 -#define FRF_AB_XX_SYNC_STAT3_WIDTH 1 -#define FRF_AB_XX_SYNC_STAT2_LBN 18 -#define FRF_AB_XX_SYNC_STAT2_WIDTH 1 -#define FRF_AB_XX_SYNC_STAT1_LBN 17 -#define FRF_AB_XX_SYNC_STAT1_WIDTH 1 -#define FRF_AB_XX_SYNC_STAT0_LBN 16 -#define FRF_AB_XX_SYNC_STAT0_WIDTH 1 -#define FRF_AB_XX_COMMA_DET_CH3_LBN 15 -#define FRF_AB_XX_COMMA_DET_CH3_WIDTH 1 -#define FRF_AB_XX_COMMA_DET_CH2_LBN 14 -#define FRF_AB_XX_COMMA_DET_CH2_WIDTH 1 -#define FRF_AB_XX_COMMA_DET_CH1_LBN 13 -#define FRF_AB_XX_COMMA_DET_CH1_WIDTH 1 -#define FRF_AB_XX_COMMA_DET_CH0_LBN 12 -#define FRF_AB_XX_COMMA_DET_CH0_WIDTH 1 -#define FRF_AB_XX_CGRP_ALIGN_CH3_LBN 11 -#define FRF_AB_XX_CGRP_ALIGN_CH3_WIDTH 1 -#define FRF_AB_XX_CGRP_ALIGN_CH2_LBN 10 -#define FRF_AB_XX_CGRP_ALIGN_CH2_WIDTH 1 -#define FRF_AB_XX_CGRP_ALIGN_CH1_LBN 9 -#define FRF_AB_XX_CGRP_ALIGN_CH1_WIDTH 1 -#define FRF_AB_XX_CGRP_ALIGN_CH0_LBN 8 -#define FRF_AB_XX_CGRP_ALIGN_CH0_WIDTH 1 -#define FRF_AB_XX_CHAR_ERR_CH3_LBN 7 -#define FRF_AB_XX_CHAR_ERR_CH3_WIDTH 1 -#define FRF_AB_XX_CHAR_ERR_CH2_LBN 6 -#define FRF_AB_XX_CHAR_ERR_CH2_WIDTH 1 -#define FRF_AB_XX_CHAR_ERR_CH1_LBN 5 -#define FRF_AB_XX_CHAR_ERR_CH1_WIDTH 1 -#define FRF_AB_XX_CHAR_ERR_CH0_LBN 4 -#define FRF_AB_XX_CHAR_ERR_CH0_WIDTH 1 -#define FRF_AB_XX_DISPERR_CH3_LBN 3 -#define FRF_AB_XX_DISPERR_CH3_WIDTH 1 -#define FRF_AB_XX_DISPERR_CH2_LBN 2 -#define FRF_AB_XX_DISPERR_CH2_WIDTH 1 -#define FRF_AB_XX_DISPERR_CH1_LBN 1 -#define FRF_AB_XX_DISPERR_CH1_WIDTH 1 -#define FRF_AB_XX_DISPERR_CH0_LBN 0 -#define FRF_AB_XX_DISPERR_CH0_WIDTH 1 - -/* RX_DESC_PTR_TBL_KER: Receive descriptor pointer table */ -#define FR_AA_RX_DESC_PTR_TBL_KER 0x00011800 -#define FR_AA_RX_DESC_PTR_TBL_KER_STEP 16 -#define FR_AA_RX_DESC_PTR_TBL_KER_ROWS 4 -/* RX_DESC_PTR_TBL: Receive descriptor pointer table */ -#define FR_BZ_RX_DESC_PTR_TBL 0x00f40000 -#define FR_BZ_RX_DESC_PTR_TBL_STEP 16 -#define FR_BB_RX_DESC_PTR_TBL_ROWS 4096 -#define FR_CZ_RX_DESC_PTR_TBL_ROWS 1024 -#define FRF_CZ_RX_HDR_SPLIT_LBN 90 -#define FRF_CZ_RX_HDR_SPLIT_WIDTH 1 -#define FRF_AA_RX_RESET_LBN 89 -#define FRF_AA_RX_RESET_WIDTH 1 -#define FRF_AZ_RX_ISCSI_DDIG_EN_LBN 88 -#define FRF_AZ_RX_ISCSI_DDIG_EN_WIDTH 1 -#define FRF_AZ_RX_ISCSI_HDIG_EN_LBN 87 -#define FRF_AZ_RX_ISCSI_HDIG_EN_WIDTH 1 -#define FRF_AZ_RX_DESC_PREF_ACT_LBN 86 -#define FRF_AZ_RX_DESC_PREF_ACT_WIDTH 1 -#define FRF_AZ_RX_DC_HW_RPTR_LBN 80 -#define FRF_AZ_RX_DC_HW_RPTR_WIDTH 6 -#define FRF_AZ_RX_DESCQ_HW_RPTR_LBN 68 -#define FRF_AZ_RX_DESCQ_HW_RPTR_WIDTH 12 -#define 
FRF_AZ_RX_DESCQ_SW_WPTR_LBN 56 -#define FRF_AZ_RX_DESCQ_SW_WPTR_WIDTH 12 -#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_LBN 36 -#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_WIDTH 20 -#define FRF_AZ_RX_DESCQ_EVQ_ID_LBN 24 -#define FRF_AZ_RX_DESCQ_EVQ_ID_WIDTH 12 -#define FRF_AZ_RX_DESCQ_OWNER_ID_LBN 10 -#define FRF_AZ_RX_DESCQ_OWNER_ID_WIDTH 14 -#define FRF_AZ_RX_DESCQ_LABEL_LBN 5 -#define FRF_AZ_RX_DESCQ_LABEL_WIDTH 5 -#define FRF_AZ_RX_DESCQ_SIZE_LBN 3 -#define FRF_AZ_RX_DESCQ_SIZE_WIDTH 2 -#define FFE_AZ_RX_DESCQ_SIZE_4K 3 -#define FFE_AZ_RX_DESCQ_SIZE_2K 2 -#define FFE_AZ_RX_DESCQ_SIZE_1K 1 -#define FFE_AZ_RX_DESCQ_SIZE_512 0 -#define FRF_AZ_RX_DESCQ_TYPE_LBN 2 -#define FRF_AZ_RX_DESCQ_TYPE_WIDTH 1 -#define FRF_AZ_RX_DESCQ_JUMBO_LBN 1 -#define FRF_AZ_RX_DESCQ_JUMBO_WIDTH 1 -#define FRF_AZ_RX_DESCQ_EN_LBN 0 -#define FRF_AZ_RX_DESCQ_EN_WIDTH 1 - -/* TX_DESC_PTR_TBL_KER: Transmit descriptor pointer */ -#define FR_AA_TX_DESC_PTR_TBL_KER 0x00011900 -#define FR_AA_TX_DESC_PTR_TBL_KER_STEP 16 -#define FR_AA_TX_DESC_PTR_TBL_KER_ROWS 8 -/* TX_DESC_PTR_TBL: Transmit descriptor pointer */ -#define FR_BZ_TX_DESC_PTR_TBL 0x00f50000 -#define FR_BZ_TX_DESC_PTR_TBL_STEP 16 -#define FR_BB_TX_DESC_PTR_TBL_ROWS 4096 -#define FR_CZ_TX_DESC_PTR_TBL_ROWS 1024 -#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_LBN 94 -#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_WIDTH 2 -#define FRF_CZ_TX_DPT_ETH_FILT_EN_LBN 93 -#define FRF_CZ_TX_DPT_ETH_FILT_EN_WIDTH 1 -#define FRF_CZ_TX_DPT_IP_FILT_EN_LBN 92 -#define FRF_CZ_TX_DPT_IP_FILT_EN_WIDTH 1 -#define FRF_BZ_TX_NON_IP_DROP_DIS_LBN 91 -#define FRF_BZ_TX_NON_IP_DROP_DIS_WIDTH 1 -#define FRF_BZ_TX_IP_CHKSM_DIS_LBN 90 -#define FRF_BZ_TX_IP_CHKSM_DIS_WIDTH 1 -#define FRF_BZ_TX_TCP_CHKSM_DIS_LBN 89 -#define FRF_BZ_TX_TCP_CHKSM_DIS_WIDTH 1 -#define FRF_AZ_TX_DESCQ_EN_LBN 88 -#define FRF_AZ_TX_DESCQ_EN_WIDTH 1 -#define FRF_AZ_TX_ISCSI_DDIG_EN_LBN 87 -#define FRF_AZ_TX_ISCSI_DDIG_EN_WIDTH 1 -#define FRF_AZ_TX_ISCSI_HDIG_EN_LBN 86 -#define FRF_AZ_TX_ISCSI_HDIG_EN_WIDTH 1 -#define FRF_AZ_TX_DC_HW_RPTR_LBN 80 -#define FRF_AZ_TX_DC_HW_RPTR_WIDTH 6 -#define FRF_AZ_TX_DESCQ_HW_RPTR_LBN 68 -#define FRF_AZ_TX_DESCQ_HW_RPTR_WIDTH 12 -#define FRF_AZ_TX_DESCQ_SW_WPTR_LBN 56 -#define FRF_AZ_TX_DESCQ_SW_WPTR_WIDTH 12 -#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_LBN 36 -#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_WIDTH 20 -#define FRF_AZ_TX_DESCQ_EVQ_ID_LBN 24 -#define FRF_AZ_TX_DESCQ_EVQ_ID_WIDTH 12 -#define FRF_AZ_TX_DESCQ_OWNER_ID_LBN 10 -#define FRF_AZ_TX_DESCQ_OWNER_ID_WIDTH 14 -#define FRF_AZ_TX_DESCQ_LABEL_LBN 5 -#define FRF_AZ_TX_DESCQ_LABEL_WIDTH 5 -#define FRF_AZ_TX_DESCQ_SIZE_LBN 3 -#define FRF_AZ_TX_DESCQ_SIZE_WIDTH 2 -#define FFE_AZ_TX_DESCQ_SIZE_4K 3 -#define FFE_AZ_TX_DESCQ_SIZE_2K 2 -#define FFE_AZ_TX_DESCQ_SIZE_1K 1 -#define FFE_AZ_TX_DESCQ_SIZE_512 0 -#define FRF_AZ_TX_DESCQ_TYPE_LBN 1 -#define FRF_AZ_TX_DESCQ_TYPE_WIDTH 2 -#define FRF_AZ_TX_DESCQ_FLUSH_LBN 0 -#define FRF_AZ_TX_DESCQ_FLUSH_WIDTH 1 - -/* EVQ_PTR_TBL_KER: Event queue pointer table */ -#define FR_AA_EVQ_PTR_TBL_KER 0x00011a00 -#define FR_AA_EVQ_PTR_TBL_KER_STEP 16 -#define FR_AA_EVQ_PTR_TBL_KER_ROWS 4 -/* EVQ_PTR_TBL: Event queue pointer table */ -#define FR_BZ_EVQ_PTR_TBL 0x00f60000 -#define FR_BZ_EVQ_PTR_TBL_STEP 16 -#define FR_CZ_EVQ_PTR_TBL_ROWS 1024 -#define FR_BB_EVQ_PTR_TBL_ROWS 4096 -#define FRF_BZ_EVQ_RPTR_IGN_LBN 40 -#define FRF_BZ_EVQ_RPTR_IGN_WIDTH 1 -#define FRF_AB_EVQ_WKUP_OR_INT_EN_LBN 39 -#define FRF_AB_EVQ_WKUP_OR_INT_EN_WIDTH 1 -#define FRF_CZ_EVQ_DOS_PROTECT_EN_LBN 39 -#define FRF_CZ_EVQ_DOS_PROTECT_EN_WIDTH 1 -#define FRF_AZ_EVQ_NXT_WPTR_LBN 24 
-#define FRF_AZ_EVQ_NXT_WPTR_WIDTH 15 -#define FRF_AZ_EVQ_EN_LBN 23 -#define FRF_AZ_EVQ_EN_WIDTH 1 -#define FRF_AZ_EVQ_SIZE_LBN 20 -#define FRF_AZ_EVQ_SIZE_WIDTH 3 -#define FFE_AZ_EVQ_SIZE_32K 6 -#define FFE_AZ_EVQ_SIZE_16K 5 -#define FFE_AZ_EVQ_SIZE_8K 4 -#define FFE_AZ_EVQ_SIZE_4K 3 -#define FFE_AZ_EVQ_SIZE_2K 2 -#define FFE_AZ_EVQ_SIZE_1K 1 -#define FFE_AZ_EVQ_SIZE_512 0 -#define FRF_AZ_EVQ_BUF_BASE_ID_LBN 0 -#define FRF_AZ_EVQ_BUF_BASE_ID_WIDTH 20 - -/* BUF_HALF_TBL_KER: Buffer table in half buffer table mode direct access by driver */ -#define FR_AA_BUF_HALF_TBL_KER 0x00018000 -#define FR_AA_BUF_HALF_TBL_KER_STEP 8 -#define FR_AA_BUF_HALF_TBL_KER_ROWS 4096 -/* BUF_HALF_TBL: Buffer table in half buffer table mode direct access by driver */ -#define FR_BZ_BUF_HALF_TBL 0x00800000 -#define FR_BZ_BUF_HALF_TBL_STEP 8 -#define FR_CZ_BUF_HALF_TBL_ROWS 147456 -#define FR_BB_BUF_HALF_TBL_ROWS 524288 -#define FRF_AZ_BUF_ADR_HBUF_ODD_LBN 44 -#define FRF_AZ_BUF_ADR_HBUF_ODD_WIDTH 20 -#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_LBN 32 -#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_WIDTH 12 -#define FRF_AZ_BUF_ADR_HBUF_EVEN_LBN 12 -#define FRF_AZ_BUF_ADR_HBUF_EVEN_WIDTH 20 -#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_LBN 0 -#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_WIDTH 12 - -/* BUF_FULL_TBL_KER: Buffer table in full buffer table mode direct access by driver */ -#define FR_AA_BUF_FULL_TBL_KER 0x00018000 -#define FR_AA_BUF_FULL_TBL_KER_STEP 8 -#define FR_AA_BUF_FULL_TBL_KER_ROWS 4096 -/* BUF_FULL_TBL: Buffer table in full buffer table mode direct access by driver */ -#define FR_BZ_BUF_FULL_TBL 0x00800000 -#define FR_BZ_BUF_FULL_TBL_STEP 8 -#define FR_CZ_BUF_FULL_TBL_ROWS 147456 -#define FR_BB_BUF_FULL_TBL_ROWS 917504 -#define FRF_AZ_BUF_FULL_UNUSED_LBN 51 -#define FRF_AZ_BUF_FULL_UNUSED_WIDTH 13 -#define FRF_AZ_IP_DAT_BUF_SIZE_LBN 50 -#define FRF_AZ_IP_DAT_BUF_SIZE_WIDTH 1 -#define FRF_AZ_BUF_ADR_REGION_LBN 48 -#define FRF_AZ_BUF_ADR_REGION_WIDTH 2 -#define FFE_AZ_BUF_ADR_REGN3 3 -#define FFE_AZ_BUF_ADR_REGN2 2 -#define FFE_AZ_BUF_ADR_REGN1 1 -#define FFE_AZ_BUF_ADR_REGN0 0 -#define FRF_AZ_BUF_ADR_FBUF_LBN 14 -#define FRF_AZ_BUF_ADR_FBUF_WIDTH 34 -#define FRF_AZ_BUF_OWNER_ID_FBUF_LBN 0 -#define FRF_AZ_BUF_OWNER_ID_FBUF_WIDTH 14 - -/* RX_FILTER_TBL0: TCP/IPv4 Receive filter table */ -#define FR_BZ_RX_FILTER_TBL0 0x00f00000 -#define FR_BZ_RX_FILTER_TBL0_STEP 32 -#define FR_BZ_RX_FILTER_TBL0_ROWS 8192 -/* RX_FILTER_TBL1: TCP/IPv4 Receive filter table */ -#define FR_BB_RX_FILTER_TBL1 0x00f00010 -#define FR_BB_RX_FILTER_TBL1_STEP 32 -#define FR_BB_RX_FILTER_TBL1_ROWS 8192 -#define FRF_BZ_RSS_EN_LBN 110 -#define FRF_BZ_RSS_EN_WIDTH 1 -#define FRF_BZ_SCATTER_EN_LBN 109 -#define FRF_BZ_SCATTER_EN_WIDTH 1 -#define FRF_BZ_TCP_UDP_LBN 108 -#define FRF_BZ_TCP_UDP_WIDTH 1 -#define FRF_BZ_RXQ_ID_LBN 96 -#define FRF_BZ_RXQ_ID_WIDTH 12 -#define FRF_BZ_DEST_IP_LBN 64 -#define FRF_BZ_DEST_IP_WIDTH 32 -#define FRF_BZ_DEST_PORT_TCP_LBN 48 -#define FRF_BZ_DEST_PORT_TCP_WIDTH 16 -#define FRF_BZ_SRC_IP_LBN 16 -#define FRF_BZ_SRC_IP_WIDTH 32 -#define FRF_BZ_SRC_TCP_DEST_UDP_LBN 0 -#define FRF_BZ_SRC_TCP_DEST_UDP_WIDTH 16 - -/* RX_MAC_FILTER_TBL0: Receive Ethernet filter table */ -#define FR_CZ_RX_MAC_FILTER_TBL0 0x00f00010 -#define FR_CZ_RX_MAC_FILTER_TBL0_STEP 32 -#define FR_CZ_RX_MAC_FILTER_TBL0_ROWS 512 -#define FRF_CZ_RMFT_RSS_EN_LBN 75 -#define FRF_CZ_RMFT_RSS_EN_WIDTH 1 -#define FRF_CZ_RMFT_SCATTER_EN_LBN 74 -#define FRF_CZ_RMFT_SCATTER_EN_WIDTH 1 -#define FRF_CZ_RMFT_IP_OVERRIDE_LBN 73 -#define FRF_CZ_RMFT_IP_OVERRIDE_WIDTH 1 -#define 
FRF_CZ_RMFT_RXQ_ID_LBN 61 -#define FRF_CZ_RMFT_RXQ_ID_WIDTH 12 -#define FRF_CZ_RMFT_WILDCARD_MATCH_LBN 60 -#define FRF_CZ_RMFT_WILDCARD_MATCH_WIDTH 1 -#define FRF_CZ_RMFT_DEST_MAC_LBN 12 -#define FRF_CZ_RMFT_DEST_MAC_WIDTH 48 -#define FRF_CZ_RMFT_VLAN_ID_LBN 0 -#define FRF_CZ_RMFT_VLAN_ID_WIDTH 12 - -/* TIMER_TBL: Timer table */ -#define FR_BZ_TIMER_TBL 0x00f70000 -#define FR_BZ_TIMER_TBL_STEP 16 -#define FR_CZ_TIMER_TBL_ROWS 1024 -#define FR_BB_TIMER_TBL_ROWS 4096 -#define FRF_CZ_TIMER_Q_EN_LBN 33 -#define FRF_CZ_TIMER_Q_EN_WIDTH 1 -#define FRF_CZ_INT_ARMD_LBN 32 -#define FRF_CZ_INT_ARMD_WIDTH 1 -#define FRF_CZ_INT_PEND_LBN 31 -#define FRF_CZ_INT_PEND_WIDTH 1 -#define FRF_CZ_HOST_NOTIFY_MODE_LBN 30 -#define FRF_CZ_HOST_NOTIFY_MODE_WIDTH 1 -#define FRF_CZ_RELOAD_TIMER_VAL_LBN 16 -#define FRF_CZ_RELOAD_TIMER_VAL_WIDTH 14 -#define FRF_CZ_TIMER_MODE_LBN 14 -#define FRF_CZ_TIMER_MODE_WIDTH 2 -#define FFE_CZ_TIMER_MODE_INT_HLDOFF 3 -#define FFE_CZ_TIMER_MODE_TRIG_START 2 -#define FFE_CZ_TIMER_MODE_IMMED_START 1 -#define FFE_CZ_TIMER_MODE_DIS 0 -#define FRF_BB_TIMER_MODE_LBN 12 -#define FRF_BB_TIMER_MODE_WIDTH 2 -#define FFE_BB_TIMER_MODE_INT_HLDOFF 2 -#define FFE_BB_TIMER_MODE_TRIG_START 2 -#define FFE_BB_TIMER_MODE_IMMED_START 1 -#define FFE_BB_TIMER_MODE_DIS 0 -#define FRF_CZ_TIMER_VAL_LBN 0 -#define FRF_CZ_TIMER_VAL_WIDTH 14 -#define FRF_BB_TIMER_VAL_LBN 0 -#define FRF_BB_TIMER_VAL_WIDTH 12 - -/* TX_PACE_TBL: Transmit pacing table */ -#define FR_BZ_TX_PACE_TBL 0x00f80000 -#define FR_BZ_TX_PACE_TBL_STEP 16 -#define FR_CZ_TX_PACE_TBL_ROWS 1024 -#define FR_BB_TX_PACE_TBL_ROWS 4096 -#define FRF_BZ_TX_PACE_LBN 0 -#define FRF_BZ_TX_PACE_WIDTH 5 - -/* RX_INDIRECTION_TBL: RX Indirection Table */ -#define FR_BZ_RX_INDIRECTION_TBL 0x00fb0000 -#define FR_BZ_RX_INDIRECTION_TBL_STEP 16 -#define FR_BZ_RX_INDIRECTION_TBL_ROWS 128 -#define FRF_BZ_IT_QUEUE_LBN 0 -#define FRF_BZ_IT_QUEUE_WIDTH 6 - -/* TX_FILTER_TBL0: TCP/IPv4 Transmit filter table */ -#define FR_CZ_TX_FILTER_TBL0 0x00fc0000 -#define FR_CZ_TX_FILTER_TBL0_STEP 16 -#define FR_CZ_TX_FILTER_TBL0_ROWS 8192 -#define FRF_CZ_TIFT_TCP_UDP_LBN 108 -#define FRF_CZ_TIFT_TCP_UDP_WIDTH 1 -#define FRF_CZ_TIFT_TXQ_ID_LBN 96 -#define FRF_CZ_TIFT_TXQ_ID_WIDTH 12 -#define FRF_CZ_TIFT_DEST_IP_LBN 64 -#define FRF_CZ_TIFT_DEST_IP_WIDTH 32 -#define FRF_CZ_TIFT_DEST_PORT_TCP_LBN 48 -#define FRF_CZ_TIFT_DEST_PORT_TCP_WIDTH 16 -#define FRF_CZ_TIFT_SRC_IP_LBN 16 -#define FRF_CZ_TIFT_SRC_IP_WIDTH 32 -#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_LBN 0 -#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_WIDTH 16 - -/* TX_MAC_FILTER_TBL0: Transmit Ethernet filter table */ -#define FR_CZ_TX_MAC_FILTER_TBL0 0x00fe0000 -#define FR_CZ_TX_MAC_FILTER_TBL0_STEP 16 -#define FR_CZ_TX_MAC_FILTER_TBL0_ROWS 512 -#define FRF_CZ_TMFT_TXQ_ID_LBN 61 -#define FRF_CZ_TMFT_TXQ_ID_WIDTH 12 -#define FRF_CZ_TMFT_WILDCARD_MATCH_LBN 60 -#define FRF_CZ_TMFT_WILDCARD_MATCH_WIDTH 1 -#define FRF_CZ_TMFT_SRC_MAC_LBN 12 -#define FRF_CZ_TMFT_SRC_MAC_WIDTH 48 -#define FRF_CZ_TMFT_VLAN_ID_LBN 0 -#define FRF_CZ_TMFT_VLAN_ID_WIDTH 12 - -/* MC_TREG_SMEM: MC Shared Memory */ -#define FR_CZ_MC_TREG_SMEM 0x00ff0000 -#define FR_CZ_MC_TREG_SMEM_STEP 4 -#define FR_CZ_MC_TREG_SMEM_ROWS 512 -#define FRF_CZ_MC_TREG_SMEM_ROW_LBN 0 -#define FRF_CZ_MC_TREG_SMEM_ROW_WIDTH 32 - -/* MSIX_VECTOR_TABLE: MSIX Vector Table */ -#define FR_BB_MSIX_VECTOR_TABLE 0x00ff0000 -#define FR_BZ_MSIX_VECTOR_TABLE_STEP 16 -#define FR_BB_MSIX_VECTOR_TABLE_ROWS 64 -/* MSIX_VECTOR_TABLE: MSIX Vector Table */ -#define FR_CZ_MSIX_VECTOR_TABLE 0x00000000 -/* 
FR_BZ_MSIX_VECTOR_TABLE_STEP 16 */ -#define FR_CZ_MSIX_VECTOR_TABLE_ROWS 1024 -#define FRF_BZ_MSIX_VECTOR_RESERVED_LBN 97 -#define FRF_BZ_MSIX_VECTOR_RESERVED_WIDTH 31 -#define FRF_BZ_MSIX_VECTOR_MASK_LBN 96 -#define FRF_BZ_MSIX_VECTOR_MASK_WIDTH 1 -#define FRF_BZ_MSIX_MESSAGE_DATA_LBN 64 -#define FRF_BZ_MSIX_MESSAGE_DATA_WIDTH 32 -#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_LBN 32 -#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_WIDTH 32 -#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_LBN 0 -#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_WIDTH 32 - -/* MSIX_PBA_TABLE: MSIX Pending Bit Array */ -#define FR_BB_MSIX_PBA_TABLE 0x00ff2000 -#define FR_BZ_MSIX_PBA_TABLE_STEP 4 -#define FR_BB_MSIX_PBA_TABLE_ROWS 2 -/* MSIX_PBA_TABLE: MSIX Pending Bit Array */ -#define FR_CZ_MSIX_PBA_TABLE 0x00008000 -/* FR_BZ_MSIX_PBA_TABLE_STEP 4 */ -#define FR_CZ_MSIX_PBA_TABLE_ROWS 32 -#define FRF_BZ_MSIX_PBA_PEND_DWORD_LBN 0 -#define FRF_BZ_MSIX_PBA_PEND_DWORD_WIDTH 32 - -/* SRM_DBG_REG: SRAM debug access */ -#define FR_BZ_SRM_DBG 0x03000000 -#define FR_BZ_SRM_DBG_STEP 8 -#define FR_CZ_SRM_DBG_ROWS 262144 -#define FR_BB_SRM_DBG_ROWS 2097152 -#define FRF_BZ_SRM_DBG_LBN 0 -#define FRF_BZ_SRM_DBG_WIDTH 64 - -/* TB_MSIX_PBA_TABLE: MSIX Pending Bit Array */ -#define FR_CZ_TB_MSIX_PBA_TABLE 0x00008000 -#define FR_CZ_TB_MSIX_PBA_TABLE_STEP 4 -#define FR_CZ_TB_MSIX_PBA_TABLE_ROWS 1024 -#define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_LBN 0 -#define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_WIDTH 32 - -/* DRIVER_EV */ -#define FSF_AZ_DRIVER_EV_SUBCODE_LBN 56 -#define FSF_AZ_DRIVER_EV_SUBCODE_WIDTH 4 -#define FSE_BZ_TX_DSC_ERROR_EV 15 -#define FSE_BZ_RX_DSC_ERROR_EV 14 -#define FSE_AA_RX_RECOVER_EV 11 -#define FSE_AZ_TIMER_EV 10 -#define FSE_AZ_TX_PKT_NON_TCP_UDP 9 -#define FSE_AZ_WAKE_UP_EV 6 -#define FSE_AZ_SRM_UPD_DONE_EV 5 -#define FSE_AB_EVQ_NOT_EN_EV 3 -#define FSE_AZ_EVQ_INIT_DONE_EV 2 -#define FSE_AZ_RX_DESCQ_FLS_DONE_EV 1 -#define FSE_AZ_TX_DESCQ_FLS_DONE_EV 0 -#define FSF_AZ_DRIVER_EV_SUBDATA_LBN 0 -#define FSF_AZ_DRIVER_EV_SUBDATA_WIDTH 14 - -/* EVENT_ENTRY */ -#define FSF_AZ_EV_CODE_LBN 60 -#define FSF_AZ_EV_CODE_WIDTH 4 -#define FSE_CZ_EV_CODE_MCDI_EV 12 -#define FSE_CZ_EV_CODE_USER_EV 8 -#define FSE_AZ_EV_CODE_DRV_GEN_EV 7 -#define FSE_AZ_EV_CODE_GLOBAL_EV 6 -#define FSE_AZ_EV_CODE_DRIVER_EV 5 -#define FSE_AZ_EV_CODE_TX_EV 2 -#define FSE_AZ_EV_CODE_RX_EV 0 -#define FSF_AZ_EV_DATA_LBN 0 -#define FSF_AZ_EV_DATA_WIDTH 60 - -/* GLOBAL_EV */ -#define FSF_BB_GLB_EV_RX_RECOVERY_LBN 12 -#define FSF_BB_GLB_EV_RX_RECOVERY_WIDTH 1 -#define FSF_AA_GLB_EV_RX_RECOVERY_LBN 11 -#define FSF_AA_GLB_EV_RX_RECOVERY_WIDTH 1 -#define FSF_BB_GLB_EV_XG_MGT_INTR_LBN 11 -#define FSF_BB_GLB_EV_XG_MGT_INTR_WIDTH 1 -#define FSF_AB_GLB_EV_XFP_PHY0_INTR_LBN 10 -#define FSF_AB_GLB_EV_XFP_PHY0_INTR_WIDTH 1 -#define FSF_AB_GLB_EV_XG_PHY0_INTR_LBN 9 -#define FSF_AB_GLB_EV_XG_PHY0_INTR_WIDTH 1 -#define FSF_AB_GLB_EV_G_PHY0_INTR_LBN 7 -#define FSF_AB_GLB_EV_G_PHY0_INTR_WIDTH 1 - -/* LEGACY_INT_VEC */ -#define FSF_AZ_NET_IVEC_FATAL_INT_LBN 64 -#define FSF_AZ_NET_IVEC_FATAL_INT_WIDTH 1 -#define FSF_AZ_NET_IVEC_INT_Q_LBN 40 -#define FSF_AZ_NET_IVEC_INT_Q_WIDTH 4 -#define FSF_AZ_NET_IVEC_INT_FLAG_LBN 32 -#define FSF_AZ_NET_IVEC_INT_FLAG_WIDTH 1 -#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_LBN 1 -#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_WIDTH 1 -#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_LBN 0 -#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_WIDTH 1 - -/* MC_XGMAC_FLTR_RULE_DEF */ -#define FSF_CZ_MC_XFRC_MODE_LBN 416 -#define FSF_CZ_MC_XFRC_MODE_WIDTH 1 -#define FSE_CZ_MC_XFRC_MODE_LAYERED 1 -#define FSE_CZ_MC_XFRC_MODE_SIMPLE 0 
-#define FSF_CZ_MC_XFRC_HASH_LBN 384 -#define FSF_CZ_MC_XFRC_HASH_WIDTH 32 -#define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_LBN 256 -#define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_WIDTH 128 -#define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_LBN 128 -#define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_WIDTH 128 -#define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_LBN 0 -#define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_WIDTH 128 - -/* RX_EV */ -#define FSF_CZ_RX_EV_PKT_NOT_PARSED_LBN 58 -#define FSF_CZ_RX_EV_PKT_NOT_PARSED_WIDTH 1 -#define FSF_CZ_RX_EV_IPV6_PKT_LBN 57 -#define FSF_CZ_RX_EV_IPV6_PKT_WIDTH 1 -#define FSF_AZ_RX_EV_PKT_OK_LBN 56 -#define FSF_AZ_RX_EV_PKT_OK_WIDTH 1 -#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_LBN 55 -#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_WIDTH 1 -#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_LBN 54 -#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_WIDTH 1 -#define FSF_AZ_RX_EV_IP_FRAG_ERR_LBN 53 -#define FSF_AZ_RX_EV_IP_FRAG_ERR_WIDTH 1 -#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_LBN 52 -#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1 -#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51 -#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1 -#define FSF_AZ_RX_EV_ETH_CRC_ERR_LBN 50 -#define FSF_AZ_RX_EV_ETH_CRC_ERR_WIDTH 1 -#define FSF_AZ_RX_EV_FRM_TRUNC_LBN 49 -#define FSF_AZ_RX_EV_FRM_TRUNC_WIDTH 1 -#define FSF_AA_RX_EV_DRIB_NIB_LBN 49 -#define FSF_AA_RX_EV_DRIB_NIB_WIDTH 1 -#define FSF_AZ_RX_EV_TOBE_DISC_LBN 47 -#define FSF_AZ_RX_EV_TOBE_DISC_WIDTH 1 -#define FSF_AZ_RX_EV_PKT_TYPE_LBN 44 -#define FSF_AZ_RX_EV_PKT_TYPE_WIDTH 3 -#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_JUMBO 5 -#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_LLC 4 -#define FSE_AZ_RX_EV_PKT_TYPE_VLAN 3 -#define FSE_AZ_RX_EV_PKT_TYPE_JUMBO 2 -#define FSE_AZ_RX_EV_PKT_TYPE_LLC 1 -#define FSE_AZ_RX_EV_PKT_TYPE_ETH 0 -#define FSF_AZ_RX_EV_HDR_TYPE_LBN 42 -#define FSF_AZ_RX_EV_HDR_TYPE_WIDTH 2 -#define FSE_AZ_RX_EV_HDR_TYPE_OTHER 3 -#define FSE_AB_RX_EV_HDR_TYPE_IPV4_OTHER 2 -#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER 2 -#define FSE_AB_RX_EV_HDR_TYPE_IPV4_UDP 1 -#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP 1 -#define FSE_AB_RX_EV_HDR_TYPE_IPV4_TCP 0 -#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP 0 -#define FSF_AZ_RX_EV_DESC_Q_EMPTY_LBN 41 -#define FSF_AZ_RX_EV_DESC_Q_EMPTY_WIDTH 1 -#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_LBN 40 -#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_WIDTH 1 -#define FSF_AZ_RX_EV_MCAST_PKT_LBN 39 -#define FSF_AZ_RX_EV_MCAST_PKT_WIDTH 1 -#define FSF_AA_RX_EV_RECOVERY_FLAG_LBN 37 -#define FSF_AA_RX_EV_RECOVERY_FLAG_WIDTH 1 -#define FSF_AZ_RX_EV_Q_LABEL_LBN 32 -#define FSF_AZ_RX_EV_Q_LABEL_WIDTH 5 -#define FSF_AZ_RX_EV_JUMBO_CONT_LBN 31 -#define FSF_AZ_RX_EV_JUMBO_CONT_WIDTH 1 -#define FSF_AZ_RX_EV_PORT_LBN 30 -#define FSF_AZ_RX_EV_PORT_WIDTH 1 -#define FSF_AZ_RX_EV_BYTE_CNT_LBN 16 -#define FSF_AZ_RX_EV_BYTE_CNT_WIDTH 14 -#define FSF_AZ_RX_EV_SOP_LBN 15 -#define FSF_AZ_RX_EV_SOP_WIDTH 1 -#define FSF_AZ_RX_EV_ISCSI_PKT_OK_LBN 14 -#define FSF_AZ_RX_EV_ISCSI_PKT_OK_WIDTH 1 -#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_LBN 13 -#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_WIDTH 1 -#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_LBN 12 -#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_WIDTH 1 -#define FSF_AZ_RX_EV_DESC_PTR_LBN 0 -#define FSF_AZ_RX_EV_DESC_PTR_WIDTH 12 - -/* RX_KER_DESC */ -#define FSF_AZ_RX_KER_BUF_SIZE_LBN 48 -#define FSF_AZ_RX_KER_BUF_SIZE_WIDTH 14 -#define FSF_AZ_RX_KER_BUF_REGION_LBN 46 -#define FSF_AZ_RX_KER_BUF_REGION_WIDTH 2 -#define FSF_AZ_RX_KER_BUF_ADDR_LBN 0 -#define FSF_AZ_RX_KER_BUF_ADDR_WIDTH 46 - -/* RX_USER_DESC */ -#define FSF_AZ_RX_USER_2BYTE_OFFSET_LBN 20 -#define FSF_AZ_RX_USER_2BYTE_OFFSET_WIDTH 12 -#define 
FSF_AZ_RX_USER_BUF_ID_LBN 0 -#define FSF_AZ_RX_USER_BUF_ID_WIDTH 20 - -/* TX_EV */ -#define FSF_AZ_TX_EV_PKT_ERR_LBN 38 -#define FSF_AZ_TX_EV_PKT_ERR_WIDTH 1 -#define FSF_AZ_TX_EV_PKT_TOO_BIG_LBN 37 -#define FSF_AZ_TX_EV_PKT_TOO_BIG_WIDTH 1 -#define FSF_AZ_TX_EV_Q_LABEL_LBN 32 -#define FSF_AZ_TX_EV_Q_LABEL_WIDTH 5 -#define FSF_AZ_TX_EV_PORT_LBN 16 -#define FSF_AZ_TX_EV_PORT_WIDTH 1 -#define FSF_AZ_TX_EV_WQ_FF_FULL_LBN 15 -#define FSF_AZ_TX_EV_WQ_FF_FULL_WIDTH 1 -#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_LBN 14 -#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_WIDTH 1 -#define FSF_AZ_TX_EV_COMP_LBN 12 -#define FSF_AZ_TX_EV_COMP_WIDTH 1 -#define FSF_AZ_TX_EV_DESC_PTR_LBN 0 -#define FSF_AZ_TX_EV_DESC_PTR_WIDTH 12 - -/* TX_KER_DESC */ -#define FSF_AZ_TX_KER_CONT_LBN 62 -#define FSF_AZ_TX_KER_CONT_WIDTH 1 -#define FSF_AZ_TX_KER_BYTE_COUNT_LBN 48 -#define FSF_AZ_TX_KER_BYTE_COUNT_WIDTH 14 -#define FSF_AZ_TX_KER_BUF_REGION_LBN 46 -#define FSF_AZ_TX_KER_BUF_REGION_WIDTH 2 -#define FSF_AZ_TX_KER_BUF_ADDR_LBN 0 -#define FSF_AZ_TX_KER_BUF_ADDR_WIDTH 46 - -/* TX_USER_DESC */ -#define FSF_AZ_TX_USER_SW_EV_EN_LBN 48 -#define FSF_AZ_TX_USER_SW_EV_EN_WIDTH 1 -#define FSF_AZ_TX_USER_CONT_LBN 46 -#define FSF_AZ_TX_USER_CONT_WIDTH 1 -#define FSF_AZ_TX_USER_BYTE_CNT_LBN 33 -#define FSF_AZ_TX_USER_BYTE_CNT_WIDTH 13 -#define FSF_AZ_TX_USER_BUF_ID_LBN 13 -#define FSF_AZ_TX_USER_BUF_ID_WIDTH 20 -#define FSF_AZ_TX_USER_BYTE_OFS_LBN 0 -#define FSF_AZ_TX_USER_BYTE_OFS_WIDTH 13 - -/* USER_EV */ -#define FSF_CZ_USER_QID_LBN 32 -#define FSF_CZ_USER_QID_WIDTH 10 -#define FSF_CZ_USER_EV_REG_VALUE_LBN 0 -#define FSF_CZ_USER_EV_REG_VALUE_WIDTH 32 - -/************************************************************************** - * - * Falcon B0 PCIe core indirect registers - * - ************************************************************************** - */ - -#define FPCR_BB_PCIE_DEVICE_CTRL_STAT 0x68 - -#define FPCR_BB_PCIE_LINK_CTRL_STAT 0x70 - -#define FPCR_BB_ACK_RPL_TIMER 0x700 -#define FPCRF_BB_ACK_TL_LBN 0 -#define FPCRF_BB_ACK_TL_WIDTH 16 -#define FPCRF_BB_RPL_TL_LBN 16 -#define FPCRF_BB_RPL_TL_WIDTH 16 - -#define FPCR_BB_ACK_FREQ 0x70C -#define FPCRF_BB_ACK_FREQ_LBN 0 -#define FPCRF_BB_ACK_FREQ_WIDTH 7 - -/************************************************************************** - * - * Pseudo-registers and fields - * - ************************************************************************** - */ - -/* Interrupt acknowledge work-around register (A0/A1 only) */ -#define FR_AA_WORK_AROUND_BROKEN_PCI_READS 0x0070 - -/* EE_SPI_HCMD_REG: SPI host command register */ -/* Values for the EE_SPI_HCMD_SF_SEL register field */ -#define FFE_AB_SPI_DEVICE_EEPROM 0 -#define FFE_AB_SPI_DEVICE_FLASH 1 - -/* NIC_STAT_REG: NIC status register */ -#define FRF_AB_STRAP_10G_LBN 2 -#define FRF_AB_STRAP_10G_WIDTH 1 -#define FRF_AA_STRAP_PCIE_LBN 0 -#define FRF_AA_STRAP_PCIE_WIDTH 1 - -/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */ -#define FRF_AZ_FATAL_INTR_LBN 0 -#define FRF_AZ_FATAL_INTR_WIDTH 12 - -/* SRM_CFG_REG: SRAM configuration register */ -/* We treat the number of SRAM banks and bank size as a single field */ -#define FRF_AZ_SRM_NB_SZ_LBN FRF_AZ_SRM_BANK_SIZE_LBN -#define FRF_AZ_SRM_NB_SZ_WIDTH \ - (FRF_AZ_SRM_BANK_SIZE_WIDTH + FRF_AZ_SRM_NUM_BANK_WIDTH) -#define FFE_AB_SRM_NB1_SZ2M 0 -#define FFE_AB_SRM_NB1_SZ4M 1 -#define FFE_AB_SRM_NB1_SZ8M 2 -#define FFE_AB_SRM_NB_SZ_DEF 3 -#define FFE_AB_SRM_NB2_SZ4M 4 -#define FFE_AB_SRM_NB2_SZ8M 5 -#define FFE_AB_SRM_NB2_SZ16M 6 -#define FFE_AB_SRM_NB_SZ_RES 7 - -/* RX_DESC_UPD_REGP0: 
Receive descriptor update register. */ -/* We write just the last dword of these registers */ -#define FR_AZ_RX_DESC_UPD_DWORD_P0 \ - (BUILD_BUG_ON_ZERO(FR_AA_RX_DESC_UPD_KER != FR_BZ_RX_DESC_UPD_P0) + \ - FR_BZ_RX_DESC_UPD_P0 + 3 * 4) -#define FRF_AZ_RX_DESC_WPTR_DWORD_LBN (FRF_AZ_RX_DESC_WPTR_LBN - 3 * 32) -#define FRF_AZ_RX_DESC_WPTR_DWORD_WIDTH FRF_AZ_RX_DESC_WPTR_WIDTH - -/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */ -#define FR_AZ_TX_DESC_UPD_DWORD_P0 \ - (BUILD_BUG_ON_ZERO(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0) + \ - FR_BZ_TX_DESC_UPD_P0 + 3 * 4) -#define FRF_AZ_TX_DESC_WPTR_DWORD_LBN (FRF_AZ_TX_DESC_WPTR_LBN - 3 * 32) -#define FRF_AZ_TX_DESC_WPTR_DWORD_WIDTH FRF_AZ_TX_DESC_WPTR_WIDTH - -/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */ -#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_LBN 12 -#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_WIDTH 1 - -/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */ -#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_LBN 12 -#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_WIDTH 1 - -/* XM_TX_PARAM_REG: XGMAC transmit parameter register */ -#define FRF_AB_XM_MAX_TX_FRM_SIZE_LBN FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN -#define FRF_AB_XM_MAX_TX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH + \ - FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH) - -/* XM_RX_PARAM_REG: XGMAC receive parameter register */ -#define FRF_AB_XM_MAX_RX_FRM_SIZE_LBN FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN -#define FRF_AB_XM_MAX_RX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH + \ - FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH) - -/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */ -/* Default values */ -#define FFE_AB_XX_TXDRV_DEQ_DEF 0xe /* deq=.6 */ -#define FFE_AB_XX_TXDRV_DTX_DEF 0x5 /* 1.25 */ -#define FFE_AB_XX_SD_CTL_DRV_DEF 0 /* 20mA */ - -/* XX_CORE_STAT_REG: XAUI XGXS core status register */ -/* XGXS all-lanes status fields */ -#define FRF_AB_XX_SYNC_STAT_LBN FRF_AB_XX_SYNC_STAT0_LBN -#define FRF_AB_XX_SYNC_STAT_WIDTH 4 -#define FRF_AB_XX_COMMA_DET_LBN FRF_AB_XX_COMMA_DET_CH0_LBN -#define FRF_AB_XX_COMMA_DET_WIDTH 4 -#define FRF_AB_XX_CHAR_ERR_LBN FRF_AB_XX_CHAR_ERR_CH0_LBN -#define FRF_AB_XX_CHAR_ERR_WIDTH 4 -#define FRF_AB_XX_DISPERR_LBN FRF_AB_XX_DISPERR_CH0_LBN -#define FRF_AB_XX_DISPERR_WIDTH 4 -#define FFE_AB_XX_STAT_ALL_LANES 0xf -#define FRF_AB_XX_FORCE_SIG_LBN FRF_AB_XX_FORCE_SIG0_VAL_LBN -#define FRF_AB_XX_FORCE_SIG_WIDTH 8 -#define FFE_AB_XX_FORCE_SIG_ALL_LANES 0xff - -/* RX_MAC_FILTER_TBL0 */ -/* RMFT_DEST_MAC is wider than 32 bits */ -#define FRF_CZ_RMFT_DEST_MAC_LO_LBN FRF_CZ_RMFT_DEST_MAC_LBN -#define FRF_CZ_RMFT_DEST_MAC_LO_WIDTH 32 -#define FRF_CZ_RMFT_DEST_MAC_HI_LBN (FRF_CZ_RMFT_DEST_MAC_LBN + 32) -#define FRF_CZ_RMFT_DEST_MAC_HI_WIDTH (FRF_CZ_RMFT_DEST_MAC_WIDTH - 32) - -/* TX_MAC_FILTER_TBL0 */ -/* TMFT_SRC_MAC is wider than 32 bits */ -#define FRF_CZ_TMFT_SRC_MAC_LO_LBN FRF_CZ_TMFT_SRC_MAC_LBN -#define FRF_CZ_TMFT_SRC_MAC_LO_WIDTH 32 -#define FRF_CZ_TMFT_SRC_MAC_HI_LBN (FRF_CZ_TMFT_SRC_MAC_LBN + 32) -#define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH (FRF_CZ_TMFT_SRC_MAC_WIDTH - 32) - -/* TX_PACE_TBL */ -/* Values >20 are documented as reserved, but will result in a queue going - * into the fast bin with a pace value of zero. 
*/ -#define FFE_BZ_TX_PACE_OFF 0 -#define FFE_BZ_TX_PACE_RESERVED 21 - -/* DRIVER_EV */ -/* Sub-fields of an RX flush completion event */ -#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12 -#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1 -#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_LBN 0 -#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_WIDTH 12 - -/* EVENT_ENTRY */ -/* Magic number field for event test */ -#define FSF_AZ_DRV_GEN_EV_MAGIC_LBN 0 -#define FSF_AZ_DRV_GEN_EV_MAGIC_WIDTH 32 - -#endif /* EFX_REGS_H */ diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index f0ae262..d0eeb03 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -19,7 +19,7 @@ #include "efx.h" #include "nic.h" #include "spi.h" -#include "regs.h" +#include "farch_regs.h" #include "io.h" #include "phy.h" #include "workarounds.h" diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c index 6258e7f..4d214bc 100644 --- a/drivers/net/ethernet/sfc/siena_sriov.c +++ b/drivers/net/ethernet/sfc/siena_sriov.c @@ -15,7 +15,7 @@ #include "mcdi.h" #include "filter.h" #include "mcdi_pcol.h" -#include "regs.h" +#include "farch_regs.h" #include "vfdi.h" /* Number of longs required to track all the VIs in a VF */ -- cgit v0.10.2 From 514bedbc3a07e466b040f76319b8f2a4c7b0c7a4 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Fri, 5 Oct 2012 19:30:16 +0100 Subject: sfc: Remove efx_process_channel_now() efx_process_channel_now() is unneeded since self-tests can rely on normal NAPI polling. Remove it and all calls to it. efx_channel::work_pending and efx_channel_processed() are also unneeded (the latter being the same as efx_nic_eventq_read_ack()). Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index c729688..2b2dba1 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -255,23 +255,6 @@ static int efx_process_channel(struct efx_channel *channel, int budget) return spent; } -/* Mark channel as finished processing - * - * Note that since we will not receive further interrupts for this - * channel before we finish processing and call the eventq_read_ack() - * method, there is no need to use the interrupt hold-off timers. - */ -static inline void efx_channel_processed(struct efx_channel *channel) -{ - /* The interrupt handler for this channel may set work_pending - * as soon as we acknowledge the events we've seen. Make sure - * it's cleared before then. */ - channel->work_pending = false; - smp_wmb(); - - efx_nic_eventq_read_ack(channel); -} - /* NAPI poll handler * * NAPI guarantees serialisation of polls of the same device, which @@ -316,58 +299,16 @@ static int efx_poll(struct napi_struct *napi, int budget) /* There is no race here; although napi_disable() will * only wait for napi_complete(), this isn't a problem - * since efx_channel_processed() will have no effect if + * since efx_nic_eventq_read_ack() will have no effect if * interrupts have already been disabled. */ napi_complete(napi); - efx_channel_processed(channel); + efx_nic_eventq_read_ack(channel); } return spent; } -/* Process the eventq of the specified channel immediately on this CPU - * - * Disable hardware generated interrupts, wait for any existing - * processing to finish, then directly poll (and ack ) the eventq. - * Finally reenable NAPI and interrupts. - * - * This is for use only during a loopback self-test. It must not - * deliver any packets up the stack as this can result in deadlock. 
- */ -void efx_process_channel_now(struct efx_channel *channel) -{ - struct efx_nic *efx = channel->efx; - - BUG_ON(channel->channel >= efx->n_channels); - BUG_ON(!channel->enabled); - BUG_ON(!efx->loopback_selftest); - - /* Disable interrupts and wait for ISRs to complete */ - efx_nic_disable_interrupts(efx); - if (efx->legacy_irq) { - synchronize_irq(efx->legacy_irq); - efx->legacy_irq_enabled = false; - } - if (channel->irq) - synchronize_irq(channel->irq); - - /* Wait for any NAPI processing to complete */ - napi_disable(&channel->napi_str); - - /* Poll the channel */ - efx_process_channel(channel, channel->eventq_mask + 1); - - /* Ack the eventq. This may cause an interrupt to be generated - * when they are reenabled */ - efx_channel_processed(channel); - - napi_enable(&channel->napi_str); - if (efx->legacy_irq) - efx->legacy_irq_enabled = true; - efx_nic_enable_interrupts(efx); -} - /* Create event queue * Event queue memory allocations are done only once. If the channel * is reset, the memory buffer will be reused; this guards against @@ -407,11 +348,7 @@ static void efx_start_eventq(struct efx_channel *channel) netif_dbg(channel->efx, ifup, channel->efx->net_dev, "chan %d start event queue\n", channel->channel); - /* The interrupt handler for this channel may set work_pending - * as soon as we enable it. Make sure it's cleared before - * then. Similarly, make sure it sees the enabled flag set. - */ - channel->work_pending = false; + /* Make sure the NAPI handler sees the enabled flag set */ channel->enabled = true; smp_wmb(); diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index bdb30bb..09e633a 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -109,7 +109,6 @@ static inline void efx_filter_rfs_expire(struct efx_channel *channel) {} /* Channels */ extern int efx_channel_dummy_op_int(struct efx_channel *channel); extern void efx_channel_dummy_op_void(struct efx_channel *channel); -extern void efx_process_channel_now(struct efx_channel *channel); extern int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries); @@ -155,7 +154,6 @@ static inline void efx_schedule_channel(struct efx_channel *channel) netif_vdbg(channel->efx, intr, channel->efx->net_dev, "channel %d scheduling NAPI poll on CPU%d\n", channel->channel, raw_smp_processor_id()); - channel->work_pending = true; napi_schedule(&channel->napi_str); } diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index ea64cd8..ad89d7d 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -361,7 +361,6 @@ enum efx_rx_alloc_method { * @irq_moderation: IRQ moderation value (in hardware ticks) * @napi_dev: Net device used with NAPI * @napi_str: NAPI control structure - * @work_pending: Is work pending via NAPI? 
* @eventq: Event queue buffer * @eventq_mask: Event queue pointer mask * @eventq_read_ptr: Event queue read pointer @@ -393,7 +392,6 @@ struct efx_channel { unsigned int irq_moderation; struct net_device *napi_dev; struct napi_struct napi_str; - bool work_pending; struct efx_special_buffer eventq; unsigned int eventq_mask; unsigned int eventq_read_ptr; diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c index 2069f51..716cff9 100644 --- a/drivers/net/ethernet/sfc/selftest.c +++ b/drivers/net/ethernet/sfc/selftest.c @@ -447,14 +447,7 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue) static int efx_poll_loopback(struct efx_nic *efx) { struct efx_loopback_state *state = efx->loopback_selftest; - struct efx_channel *channel; - /* NAPI polling is not enabled, so process channels - * synchronously */ - efx_for_each_channel(channel, efx) { - if (channel->work_pending) - efx_process_channel_now(channel); - } return atomic_read(&state->rx_good) == state->packet_count; } @@ -586,10 +579,6 @@ static int efx_wait_for_link(struct efx_nic *efx) mutex_lock(&efx->mac_lock); efx->type->monitor(efx); mutex_unlock(&efx->mac_lock); - } else { - struct efx_channel *channel = efx_get_channel(efx, 0); - if (channel->work_pending) - efx_process_channel_now(channel); } mutex_lock(&efx->mac_lock); -- cgit v0.10.2 From d829118705f8213ffeffa4fefa8931dea6b7f016 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Fri, 5 Oct 2012 23:35:41 +0100 Subject: sfc: Rework IRQ enable/disable There are many problems with the current efx_stop_interrupts() and efx_start_interrupts(): 1. On Siena, it is unsafe to disable the master IRQ enable bit (DRV_INT_EN_KER) while any IRQ sources are enabled. 2. On EF10 there is no master IRQ enable bit, so we cannot expect to defer IRQs without tearing down event queues. (Though I don't think we will need to keep any event queues around while the device is down, as we do for VFDI on Siena.) 3. synchronize_irq() only waits for a running IRQ handler to finish, not for any propagation through IRQ controllers. Therefore an IRQ may still be received and handled after efx_stop_interrupts() returns. IRQ handlers can then race with channel reallocation. To fix this: a. Introduce a software IRQ enable flag. So long as this is clear, IRQ handlers will only acknowledge IRQs and not touch the channel structures. b. Define a new struct efx_msi_context as the context for MSIs. This is never reallocated and is sufficient to find the software enable flag and the channel structure. It also includes the channel/IRQ name, which was previously separated out as it must also not be reallocated. c. Split efx_{start,stop}_interrupts() into efx_{,soft_}_{enable,disable}_interrupts(). The 'soft' functions don't touch the hardware master enable flag (if it exists) and don't reinitialise or tear down channels with the keep_eventq flag set. 
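In outline, the new scheme comes down to the IRQ handlers checking a software enable flag before touching any channel state, with the per-MSI context providing a stable way back to the NIC structure. A simplified sketch of that idea, condensed from the changes below (it assumes the driver's existing types and helpers such as struct efx_nic, ACCESS_ONCE() and efx_schedule_channel_irq(), and omits the fatal-interrupt handling):

	struct efx_msi_context {
		struct efx_nic *efx;	/* back-pointer; never reallocated */
		unsigned int index;	/* channel/IRQ index */
		char name[IFNAMSIZ + 6];
	};

	static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
	{
		struct efx_msi_context *context = dev_id;
		struct efx_nic *efx = context->efx;

		/* While soft-disabled, acknowledge the IRQ but do nothing
		 * else; the channel structures may be mid-reallocation. */
		if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
			return IRQ_HANDLED;

		efx_schedule_channel_irq(efx->channel[context->index]);
		return IRQ_HANDLED;
	}

Because the MSI context is never reallocated, a handler that fires after efx_soft_disable_interrupts() can still dereference it safely; it simply returns without scheduling NAPI.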
Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 2b2dba1..a7818d1 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -191,8 +191,8 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value"); * *************************************************************************/ -static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq); -static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq); +static void efx_soft_enable_interrupts(struct efx_nic *efx); +static void efx_soft_disable_interrupts(struct efx_nic *efx); static void efx_remove_channel(struct efx_channel *channel); static void efx_remove_channels(struct efx_nic *efx); static const struct efx_channel_type efx_default_channel_type; @@ -520,8 +520,8 @@ static void efx_set_channel_names(struct efx_nic *efx) efx_for_each_channel(channel, efx) channel->type->get_name(channel, - efx->channel_name[channel->channel], - sizeof(efx->channel_name[0])); + efx->msi_context[channel->channel].name, + sizeof(efx->msi_context[0].name)); } static int efx_probe_channels(struct efx_nic *efx) @@ -746,7 +746,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) efx_device_detach_sync(efx); efx_stop_all(efx); - efx_stop_interrupts(efx, true); + efx_soft_disable_interrupts(efx); /* Clone channels (where possible) */ memset(other_channel, 0, sizeof(other_channel)); @@ -796,7 +796,7 @@ out: } } - efx_start_interrupts(efx, true); + efx_soft_enable_interrupts(efx); efx_start_all(efx); netif_device_attach(efx->net_dev); return rc; @@ -1329,23 +1329,17 @@ static int efx_probe_interrupts(struct efx_nic *efx) return 0; } -/* Enable interrupts, then probe and start the event queues */ -static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq) +static void efx_soft_enable_interrupts(struct efx_nic *efx) { struct efx_channel *channel; BUG_ON(efx->state == STATE_DISABLED); - if (efx->eeh_disabled_legacy_irq) { - enable_irq(efx->legacy_irq); - efx->eeh_disabled_legacy_irq = false; - } - if (efx->legacy_irq) - efx->legacy_irq_enabled = true; - efx_nic_enable_interrupts(efx); + efx->irq_soft_enabled = true; + smp_wmb(); efx_for_each_channel(channel, efx) { - if (!channel->type->keep_eventq || !may_keep_eventq) + if (!channel->type->keep_eventq) efx_init_eventq(channel); efx_start_eventq(channel); } @@ -1353,7 +1347,7 @@ static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq) efx_mcdi_mode_event(efx); } -static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq) +static void efx_soft_disable_interrupts(struct efx_nic *efx) { struct efx_channel *channel; @@ -1362,22 +1356,57 @@ static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq) efx_mcdi_mode_poll(efx); - efx_nic_disable_interrupts(efx); - if (efx->legacy_irq) { + efx->irq_soft_enabled = false; + smp_wmb(); + + if (efx->legacy_irq) synchronize_irq(efx->legacy_irq); - efx->legacy_irq_enabled = false; - } efx_for_each_channel(channel, efx) { if (channel->irq) synchronize_irq(channel->irq); efx_stop_eventq(channel); - if (!channel->type->keep_eventq || !may_keep_eventq) + if (!channel->type->keep_eventq) efx_fini_eventq(channel); } } +static void efx_enable_interrupts(struct efx_nic *efx) +{ + struct efx_channel *channel; + + BUG_ON(efx->state == STATE_DISABLED); + + if (efx->eeh_disabled_legacy_irq) { + enable_irq(efx->legacy_irq); + efx->eeh_disabled_legacy_irq = false; + } + + 
efx_nic_enable_interrupts(efx); + + efx_for_each_channel(channel, efx) { + if (channel->type->keep_eventq) + efx_init_eventq(channel); + } + + efx_soft_enable_interrupts(efx); +} + +static void efx_disable_interrupts(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_soft_disable_interrupts(efx); + + efx_for_each_channel(channel, efx) { + if (channel->type->keep_eventq) + efx_fini_eventq(channel); + } + + efx_nic_disable_interrupts(efx); +} + static void efx_remove_interrupts(struct efx_nic *efx) { struct efx_channel *channel; @@ -2160,7 +2189,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method) EFX_ASSERT_RESET_SERIALISED(efx); efx_stop_all(efx); - efx_stop_interrupts(efx, false); + efx_disable_interrupts(efx); mutex_lock(&efx->mac_lock); if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) @@ -2199,7 +2228,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) efx->type->reconfigure_mac(efx); - efx_start_interrupts(efx, false); + efx_enable_interrupts(efx); efx_restore_filters(efx); efx_sriov_reset(efx); @@ -2464,6 +2493,8 @@ static int efx_init_struct(struct efx_nic *efx, efx->channel[i] = efx_alloc_channel(efx, i, NULL); if (!efx->channel[i]) goto fail; + efx->msi_context[i].efx = efx; + efx->msi_context[i].index = i; } EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS); @@ -2516,7 +2547,7 @@ static void efx_pci_remove_main(struct efx_nic *efx) BUG_ON(efx->state == STATE_READY); cancel_work_sync(&efx->reset_work); - efx_stop_interrupts(efx, false); + efx_disable_interrupts(efx); efx_nic_fini_interrupt(efx); efx_fini_port(efx); efx->type->fini(efx); @@ -2538,7 +2569,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev) /* Mark the NIC as fini, then stop the interface */ rtnl_lock(); dev_close(efx->net_dev); - efx_stop_interrupts(efx, false); + efx_disable_interrupts(efx); rtnl_unlock(); efx_sriov_fini(efx); @@ -2640,7 +2671,7 @@ static int efx_pci_probe_main(struct efx_nic *efx) rc = efx_nic_init_interrupt(efx); if (rc) goto fail5; - efx_start_interrupts(efx, false); + efx_enable_interrupts(efx); return 0; @@ -2761,7 +2792,7 @@ static int efx_pm_freeze(struct device *dev) efx_device_detach_sync(efx); efx_stop_all(efx); - efx_stop_interrupts(efx, false); + efx_disable_interrupts(efx); } rtnl_unlock(); @@ -2776,7 +2807,7 @@ static int efx_pm_thaw(struct device *dev) rtnl_lock(); if (efx->state != STATE_DISABLED) { - efx_start_interrupts(efx, false); + efx_enable_interrupts(efx); mutex_lock(&efx->mac_lock); efx->phy_op->reconfigure(efx); @@ -2879,7 +2910,7 @@ static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev, efx_device_detach_sync(efx); efx_stop_all(efx); - efx_stop_interrupts(efx, false); + efx_disable_interrupts(efx); status = PCI_ERS_RESULT_NEED_RESET; } else { diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index f87710e..f8de382 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -367,6 +367,9 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); + if (!likely(ACCESS_ONCE(efx->irq_soft_enabled))) + return IRQ_HANDLED; + /* Check to see if we have a serious error condition */ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); if (unlikely(syserr)) diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index ad89d7d..7c96372 100644 --- 
a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -420,6 +420,21 @@ struct efx_channel { }; /** + * struct efx_msi_context - Context for each MSI + * @efx: The associated NIC + * @index: Index of the channel/IRQ + * @name: Name of the channel/IRQ + * + * Unlike &struct efx_channel, this is never reallocated and is always + * safe for the IRQ handler to access. + */ +struct efx_msi_context { + struct efx_nic *efx; + unsigned int index; + char name[IFNAMSIZ + 6]; +}; + +/** * struct efx_channel_type - distinguishes traffic and extra channels * @handle_no_channel: Handle failure to allocate an extra channel * @pre_probe: Set up extra state prior to initialisation @@ -669,7 +684,6 @@ struct vfdi_status; * @pci_dev: The PCI device * @type: Controller type attributes * @legacy_irq: IRQ number - * @legacy_irq_enabled: Are IRQs enabled on NIC (INT_EN_KER register)? * @workqueue: Workqueue for port reconfigures and the HW monitor. * Work items do not hold and must not acquire RTNL. * @workqueue_name: Name of workqueue @@ -686,7 +700,7 @@ struct vfdi_status; * @tx_queue: TX DMA queues * @rx_queue: RX DMA queues * @channel: Channels - * @channel_name: Names for channels and their IRQs + * @msi_context: Context for each MSI * @extra_channel_types: Types of extra (non-traffic) channels that * should be allocated for this NIC * @rxq_entries: Size of receive queues requested by user. @@ -709,6 +723,8 @@ struct vfdi_status; * @rx_scatter: Scatter mode enabled for receives * @int_error_count: Number of internal errors seen recently * @int_error_expire: Time at which error count will be expired + * @irq_soft_enabled: Are IRQs soft-enabled? If not, IRQ handler will + * acknowledge but do nothing else. * @irq_status: Interrupt status buffer * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0 * @irq_level: IRQ level/index for IRQs not triggered by an event queue @@ -786,7 +802,6 @@ struct efx_nic { unsigned int port_num; const struct efx_nic_type *type; int legacy_irq; - bool legacy_irq_enabled; bool eeh_disabled_legacy_irq; struct workqueue_struct *workqueue; char workqueue_name[16]; @@ -804,7 +819,7 @@ struct efx_nic { unsigned long reset_pending; struct efx_channel *channel[EFX_MAX_CHANNELS]; - char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6]; + struct efx_msi_context msi_context[EFX_MAX_CHANNELS]; const struct efx_channel_type * extra_channel_type[EFX_MAX_EXTRA_CHANNELS]; @@ -835,6 +850,7 @@ struct efx_nic { unsigned int_error_count; unsigned long int_error_expire; + bool irq_soft_enabled; struct efx_buffer irq_status; unsigned irq_zero_count; unsigned irq_level; diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index f758333..c17d659 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -1567,6 +1567,7 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx) static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id) { struct efx_nic *efx = dev_id; + bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); efx_oword_t *int_ker = efx->irq_status.addr; irqreturn_t result = IRQ_NONE; struct efx_channel *channel; @@ -1574,12 +1575,6 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id) u32 queues; int syserr; - /* Could this be ours? If interrupts are disabled then the - * channel state may not be valid. 
- */ - if (!efx->legacy_irq_enabled) - return result; - /* Read the ISR which also ACKs the interrupts */ efx_readd(efx, ®, FR_BZ_INT_ISR0); queues = EFX_EXTRACT_DWORD(reg, 0, 31); @@ -1595,7 +1590,7 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id) } /* Handle non-event-queue sources */ - if (queues & (1U << efx->irq_level)) { + if (queues & (1U << efx->irq_level) && soft_enabled) { syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); if (unlikely(syserr)) return efx_nic_fatal_interrupt(efx); @@ -1607,10 +1602,12 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id) efx->irq_zero_count = 0; /* Schedule processing of any interrupting queues */ - efx_for_each_channel(channel, efx) { - if (queues & 1) - efx_schedule_channel_irq(channel); - queues >>= 1; + if (likely(soft_enabled)) { + efx_for_each_channel(channel, efx) { + if (queues & 1) + efx_schedule_channel_irq(channel); + queues >>= 1; + } } result = IRQ_HANDLED; @@ -1623,12 +1620,15 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id) result = IRQ_HANDLED; /* Ensure we schedule or rearm all event queues */ - efx_for_each_channel(channel, efx) { - event = efx_event(channel, channel->eventq_read_ptr); - if (efx_event_present(event)) - efx_schedule_channel_irq(channel); - else - efx_nic_eventq_read_ack(channel); + if (likely(soft_enabled)) { + efx_for_each_channel(channel, efx) { + event = efx_event(channel, + channel->eventq_read_ptr); + if (efx_event_present(event)) + efx_schedule_channel_irq(channel); + else + efx_nic_eventq_read_ack(channel); + } } } @@ -1649,8 +1649,8 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id) */ static irqreturn_t efx_msi_interrupt(int irq, void *dev_id) { - struct efx_channel *channel = *(struct efx_channel **)dev_id; - struct efx_nic *efx = channel->efx; + struct efx_msi_context *context = dev_id; + struct efx_nic *efx = context->efx; efx_oword_t *int_ker = efx->irq_status.addr; int syserr; @@ -1658,8 +1658,11 @@ static irqreturn_t efx_msi_interrupt(int irq, void *dev_id) "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); + if (!likely(ACCESS_ONCE(efx->irq_soft_enabled))) + return IRQ_HANDLED; + /* Handle non-event-queue sources */ - if (channel->channel == efx->irq_level) { + if (context->index == efx->irq_level) { syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); if (unlikely(syserr)) return efx_nic_fatal_interrupt(efx); @@ -1667,7 +1670,7 @@ static irqreturn_t efx_msi_interrupt(int irq, void *dev_id) } /* Schedule processing of the channel */ - efx_schedule_channel_irq(channel); + efx_schedule_channel_irq(efx->channel[context->index]); return IRQ_HANDLED; } @@ -1739,8 +1742,8 @@ int efx_nic_init_interrupt(struct efx_nic *efx) efx_for_each_channel(channel, efx) { rc = request_irq(channel->irq, efx_msi_interrupt, IRQF_PROBE_SHARED, /* Not shared */ - efx->channel_name[channel->channel], - &efx->channel[channel->channel]); + efx->msi_context[channel->channel].name, + &efx->msi_context[channel->channel]); if (rc) { netif_err(efx, drv, efx->net_dev, "failed to hook IRQ %d\n", channel->irq); @@ -1769,7 +1772,7 @@ int efx_nic_init_interrupt(struct efx_nic *efx) efx_for_each_channel(channel, efx) { if (n_irqs-- == 0) break; - free_irq(channel->irq, &efx->channel[channel->channel]); + free_irq(channel->irq, &efx->msi_context[channel->channel]); } fail1: return rc; @@ -1787,7 +1790,7 @@ void efx_nic_fini_interrupt(struct efx_nic *efx) /* Disable MSI/MSI-X interrupts */ 
efx_for_each_channel(channel, efx) - free_irq(channel->irq, &efx->channel[channel->channel]); + free_irq(channel->irq, &efx->msi_context[channel->channel]); /* ACK legacy interrupt */ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) -- cgit v0.10.2 From 1840667a851efb5f719d2c76b235c172104722e8 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Thu, 3 Jan 2013 23:36:57 +0000 Subject: sfc: Limit scope of a Falcon A1 IRQ workaround We unconditionally acknowledge legacy interrupts just before disabling them. This workaround is needed on Falcon A1 but probably not on later chips where the legacy interrupt mechanism is different. It was also originally done after the IRQ handler was removed, not before. Restore the original behaviour for Falcon A1 only by doing this acknowledgement in the efx_nic_type::fini operation. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index f8de382..4492129 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -336,7 +336,7 @@ static void falcon_prepare_flush(struct efx_nic *efx) * * NB most hardware supports MSI interrupts */ -inline void falcon_irq_ack_a1(struct efx_nic *efx) +static inline void falcon_irq_ack_a1(struct efx_nic *efx) { efx_dword_t reg; @@ -2343,7 +2343,7 @@ const struct efx_nic_type falcon_a1_nic_type = { .remove = falcon_remove_nic, .init = falcon_init_nic, .dimension_resources = falcon_dimension_resources, - .fini = efx_port_dummy_op_void, + .fini = falcon_irq_ack_a1, .monitor = falcon_monitor, .map_reset_reason = falcon_map_reset_reason, .map_reset_flags = falcon_map_reset_flags, diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index c17d659..2d0a584 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -1781,7 +1781,6 @@ int efx_nic_init_interrupt(struct efx_nic *efx) void efx_nic_fini_interrupt(struct efx_nic *efx) { struct efx_channel *channel; - efx_oword_t reg; #ifdef CONFIG_RFS_ACCEL free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); @@ -1792,12 +1791,6 @@ void efx_nic_fini_interrupt(struct efx_nic *efx) efx_for_each_channel(channel, efx) free_irq(channel->irq, &efx->msi_context[channel->channel]); - /* ACK legacy interrupt */ - if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) - efx_reado(efx, ®, FR_BZ_INT_ISR0); - else - falcon_irq_ack_a1(efx); - /* Disable legacy interrupt */ if (efx->legacy_irq) free_irq(efx->legacy_irq, efx); diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 9120e8b..33aa120 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -308,7 +308,6 @@ extern void efx_nic_disable_interrupts(struct efx_nic *efx); extern void efx_nic_fini_interrupt(struct efx_nic *efx); extern irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx); extern irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id); -extern void falcon_irq_ack_a1(struct efx_nic *efx); static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel) { -- cgit v0.10.2 From d8aec745ddaf278ba187d7712c1becc5ffd0f7da Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Mon, 27 May 2013 16:52:54 +0100 Subject: sfc: Stop RX refill before flushing RX queues rx_queue::enabled guards refill, so rename it to reflect that. Clear it at the start of the queue teardown process rather than waiting for the RX queue to be flushed. 
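Concretely, the renamed flag gates the fast-fill path, so once teardown has begun a stray fill event can no longer repopulate the ring. A minimal sketch of the gating added below (not the full function; the rest of the fill logic is unchanged):

	void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
	{
		/* Teardown clears this before flushing; do not refill */
		if (!rx_queue->refill_enabled)
			return;

		/* ... existing fill-level calculation and descriptor push ... */
	}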
Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index a7818d1..ee9242c 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -248,8 +248,7 @@ static int efx_process_channel(struct efx_channel *channel, int budget) efx_channel_get_rx_queue(channel); efx_rx_flush_packet(channel); - if (rx_queue->enabled) - efx_fast_push_rx_descriptors(rx_queue); + efx_fast_push_rx_descriptors(rx_queue); } return spent; @@ -647,6 +646,12 @@ static void efx_stop_datapath(struct efx_nic *efx) EFX_ASSERT_RESET_SERIALISED(efx); BUG_ON(efx->port_enabled); + /* Stop RX refill */ + efx_for_each_channel(channel, efx) { + efx_for_each_channel_rx_queue(rx_queue, channel) + rx_queue->refill_enabled = false; + } + /* Only perform flush if dma is enabled */ if (dev->is_busmaster && efx->state != STATE_RECOVERY) { rc = efx_nic_flush_queues(efx); diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 7c96372..c9f7989 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -286,7 +286,7 @@ struct efx_rx_page_state { * @buffer: The software buffer ring * @rxd: The hardware descriptor ring * @ptr_mask: The size of the ring minus 1. - * @enabled: Receive queue enabled indicator. + * @refill_enabled: Enable refill whenever fill level is low * @flush_pending: Set when a RX flush is pending. Has the same lifetime as * @rxq_flush_pending. * @added_count: Number of buffers added to the receive queue. @@ -317,7 +317,7 @@ struct efx_rx_queue { struct efx_rx_buffer *buffer; struct efx_special_buffer rxd; unsigned int ptr_mask; - bool enabled; + bool refill_enabled; bool flush_pending; unsigned int added_count; diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index 2d0a584..deb0ee04 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -1204,7 +1204,6 @@ efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event) * queue. 
Refill it here */ efx_fast_push_rx_descriptors(rx_queue); } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) { - rx_queue->enabled = false; efx_handle_drain_event(channel); } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) { efx_handle_drain_event(channel); diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 6af9cfd..8b482de 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -326,6 +326,9 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue) unsigned int fill_level, batch_size; int space, rc = 0; + if (!rx_queue->refill_enabled) + return; + /* Calculate current fill level, and exit if we don't need to fill */ fill_level = (rx_queue->added_count - rx_queue->removed_count); EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries); @@ -738,9 +741,9 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue) rx_queue->max_fill = max_fill; rx_queue->fast_fill_trigger = trigger; + rx_queue->refill_enabled = true; /* Set up RX descriptor ring */ - rx_queue->enabled = true; efx_nic_init_rx(rx_queue); } @@ -753,9 +756,6 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue)); - /* A flush failure might have left rx_queue->enabled */ - rx_queue->enabled = false; - del_timer_sync(&rx_queue->slow_fill); efx_nic_fini_rx(rx_queue); -- cgit v0.10.2 From 501a248cf6995286a4da0c2aa93c0fa9c5941453 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Mon, 27 May 2013 16:52:54 +0100 Subject: sfc: Remove bogus call to efx_release_tx_buffers() efx_unregister_netdev() should not call efx_release_tx_buffers() directly, as it is already done when closing the device: efx_net_stop() -> efx_stop_all() -> efx_stop_datapath() -> efx_fini_tx_queue() -> efx_release_tx_buffers(). (This was presumably a workaround for a race between efx_stop_all() and the data path that has since been properly fixed.) Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index ee9242c..0c3c0c1 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -2156,22 +2156,11 @@ fail_locked: static void efx_unregister_netdev(struct efx_nic *efx) { - struct efx_channel *channel; - struct efx_tx_queue *tx_queue; - if (!efx->net_dev) return; BUG_ON(netdev_priv(efx->net_dev) != efx); - /* Free up any skbs still remaining. This has to happen before - * we try to unregister the netdev as running their destructors - * may be needed to get the device ref. count to 0. */ - efx_for_each_channel(channel, efx) { - efx_for_each_channel_tx_queue(tx_queue, channel) - efx_release_tx_buffers(tx_queue); - } - strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); -- cgit v0.10.2 From e42c3d85af629697699c89aecba481527a1da898 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Mon, 27 May 2013 16:52:54 +0100 Subject: sfc: Refactor queue teardown sequence to allow for EF10 flush behaviour Currently efx_stop_datapath() will try to flush our DMA queues (if DMA is enabled), then finalise software and hardware state for each queue. However, for EF10 we must ask the MC to finalise each queue, which implicitly starts flushing it, and then wait for the flush events. We therefore need to delegate more of this to the NIC type. 
Combine all the hardware operations into a new NIC-type operation efx_nic_type::fini_dmaq, and call this before tearing down the software state and buffers for all the DMA queues. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 0c3c0c1..9c6555c 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -640,7 +640,6 @@ static void efx_stop_datapath(struct efx_nic *efx) struct efx_channel *channel; struct efx_tx_queue *tx_queue; struct efx_rx_queue *rx_queue; - struct pci_dev *dev = efx->pci_dev; int rc; EFX_ASSERT_RESET_SERIALISED(efx); @@ -652,26 +651,6 @@ static void efx_stop_datapath(struct efx_nic *efx) rx_queue->refill_enabled = false; } - /* Only perform flush if dma is enabled */ - if (dev->is_busmaster && efx->state != STATE_RECOVERY) { - rc = efx_nic_flush_queues(efx); - - if (rc && EFX_WORKAROUND_7803(efx)) { - /* Schedule a reset to recover from the flush failure. The - * descriptor caches reference memory we're about to free, - * but falcon_reconfigure_mac_wrapper() won't reconnect - * the MACs because of the pending reset. */ - netif_err(efx, drv, efx->net_dev, - "Resetting to recover from flush failure\n"); - efx_schedule_reset(efx, RESET_TYPE_ALL); - } else if (rc) { - netif_err(efx, drv, efx->net_dev, "failed to flush queues\n"); - } else { - netif_dbg(efx, drv, efx->net_dev, - "successfully flushed all queues\n"); - } - } - efx_for_each_channel(channel, efx) { /* RX packet processing is pipelined, so wait for the * NAPI handler to complete. At least event queue 0 @@ -683,7 +662,26 @@ static void efx_stop_datapath(struct efx_nic *efx) efx_stop_eventq(channel); efx_start_eventq(channel); } + } + rc = efx->type->fini_dmaq(efx); + if (rc && EFX_WORKAROUND_7803(efx)) { + /* Schedule a reset to recover from the flush failure. The + * descriptor caches reference memory we're about to free, + * but falcon_reconfigure_mac_wrapper() won't reconnect + * the MACs because of the pending reset. 
+ */ + netif_err(efx, drv, efx->net_dev, + "Resetting to recover from flush failure\n"); + efx_schedule_reset(efx, RESET_TYPE_ALL); + } else if (rc) { + netif_err(efx, drv, efx->net_dev, "failed to flush queues\n"); + } else { + netif_dbg(efx, drv, efx->net_dev, + "successfully flushed all queues\n"); + } + + efx_for_each_channel(channel, efx) { efx_for_each_channel_rx_queue(rx_queue, channel) efx_fini_rx_queue(rx_queue); efx_for_each_possible_channel_tx_queue(tx_queue, channel) diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 09e633a..45de5b9 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -23,7 +23,6 @@ extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue); extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue); extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue); extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue); -extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue); extern netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev); extern netdev_tx_t diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index 4492129..c8efcb0 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -2351,6 +2351,7 @@ const struct efx_nic_type falcon_a1_nic_type = { .probe_port = falcon_probe_port, .remove_port = falcon_remove_port, .handle_global_event = falcon_handle_global_event, + .fini_dmaq = efx_farch_fini_dmaq, .prepare_flush = falcon_prepare_flush, .finish_flush = efx_port_dummy_op_void, .update_stats = falcon_update_nic_stats, @@ -2396,6 +2397,7 @@ const struct efx_nic_type falcon_b0_nic_type = { .probe_port = falcon_probe_port, .remove_port = falcon_remove_port, .handle_global_event = falcon_handle_global_event, + .fini_dmaq = efx_farch_fini_dmaq, .prepare_flush = falcon_prepare_flush, .finish_flush = efx_port_dummy_op_void, .update_stats = falcon_update_nic_stats, diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index c9f7989..b382895 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -953,8 +953,11 @@ static inline unsigned int efx_port_num(struct efx_nic *efx) * @probe_port: Probe the MAC and PHY * @remove_port: Free resources allocated by probe_port() * @handle_global_event: Handle a "global" event (may be %NULL) + * @fini_dmaq: Flush and finalise DMA queues (RX and TX queues) * @prepare_flush: Prepare the hardware for flushing the DMA queues - * @finish_flush: Clean up after flushing the DMA queues + * (for Falcon architecture) + * @finish_flush: Clean up after flushing the DMA queues (for Falcon + * architecture) * @update_stats: Update statistics not provided by event handling * @start_stats: Start the regular fetching of statistics * @stop_stats: Stop the regular fetching of statistics @@ -1014,6 +1017,7 @@ struct efx_nic_type { int (*probe_port)(struct efx_nic *efx); void (*remove_port)(struct efx_nic *efx); bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *); + int (*fini_dmaq)(struct efx_nic *efx); void (*prepare_flush)(struct efx_nic *efx); void (*finish_flush)(struct efx_nic *efx); void (*update_stats)(struct efx_nic *efx); diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index deb0ee04..7c52691 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -721,7 +721,7 @@ static bool 
efx_check_tx_flush_complete(struct efx_nic *efx) /* Flush all the transmit queues, and continue flushing receive queues until * they're all flushed. Wait for the DRAIN events to be recieved so that there * are no more RX and TX events left on any channel. */ -int efx_nic_flush_queues(struct efx_nic *efx) +static int efx_farch_do_flush(struct efx_nic *efx) { unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */ struct efx_channel *channel; @@ -729,8 +729,6 @@ int efx_nic_flush_queues(struct efx_nic *efx) struct efx_tx_queue *tx_queue; int rc = 0; - efx->type->prepare_flush(efx); - efx_for_each_channel(channel, efx) { efx_for_each_channel_tx_queue(tx_queue, channel) { atomic_inc(&efx->drain_pending); @@ -791,7 +789,32 @@ int efx_nic_flush_queues(struct efx_nic *efx) atomic_set(&efx->rxq_flush_outstanding, 0); } - efx->type->finish_flush(efx); + return rc; +} + +int efx_farch_fini_dmaq(struct efx_nic *efx) +{ + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + int rc = 0; + + /* Do not attempt to write to the NIC during EEH recovery */ + if (efx->state != STATE_RECOVERY) { + /* Only perform flush if DMA is enabled */ + if (efx->pci_dev->is_busmaster) { + efx->type->prepare_flush(efx); + rc = efx_farch_do_flush(efx); + efx->type->finish_flush(efx); + } + + efx_for_each_channel(channel, efx) { + efx_for_each_channel_rx_queue(rx_queue, channel) + efx_nic_fini_rx(rx_queue); + efx_for_each_channel_tx_queue(tx_queue, channel) + efx_nic_fini_tx(tx_queue); + } + } return rc; } diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 33aa120..21f662c 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -260,14 +260,12 @@ extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info); /* TX data path */ extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue); extern void efx_nic_init_tx(struct efx_tx_queue *tx_queue); -extern void efx_nic_fini_tx(struct efx_tx_queue *tx_queue); extern void efx_nic_remove_tx(struct efx_tx_queue *tx_queue); extern void efx_nic_push_buffers(struct efx_tx_queue *tx_queue); /* RX data path */ extern int efx_nic_probe_rx(struct efx_rx_queue *rx_queue); extern void efx_nic_init_rx(struct efx_rx_queue *rx_queue); -extern void efx_nic_fini_rx(struct efx_rx_queue *rx_queue); extern void efx_nic_remove_rx(struct efx_rx_queue *rx_queue); extern void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue); extern void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue); @@ -319,7 +317,7 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx) } /* Global Resources */ -extern int efx_nic_flush_queues(struct efx_nic *efx); +extern int efx_farch_fini_dmaq(struct efx_nic *efx); extern void siena_prepare_flush(struct efx_nic *efx); extern void siena_finish_flush(struct efx_nic *efx); extern void falcon_start_nic_stats(struct efx_nic *efx); diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 8b482de..f2b78cd 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -757,7 +757,6 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue)); del_timer_sync(&rx_queue->slow_fill); - efx_nic_fini_rx(rx_queue); /* Release RX buffers from the current read ptr to the write ptr */ if (rx_queue->buffer) { diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index d0eeb03..b4c1d43 100644 --- 
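The essence of the sfc rework above is ordering: the new fini_dmaq hook flushes the hardware queues (skipping the device entirely during EEH recovery, and skipping the flush when bus mastering is disabled) before any per-queue software state is released, because the descriptor caches may still reference that memory until the flush completes. A minimal standalone sketch of that ordering, with stand-in helpers rather than the real sfc API, might look like this:

#include <stdbool.h>
#include <stdio.h>

struct fake_nic {
    bool bus_master;   /* DMA (bus mastering) enabled */
    bool in_recovery;  /* EEH/PCI error recovery in progress */
};

/* Stand-ins for the hardware flush, hardware ring teardown and the
 * release of the driver's own buffers. */
static int hw_flush_queues(struct fake_nic *nic)
{
    printf("flush TX/RX DMA queues in hardware\n");
    return 0;
}

static void hw_fini_rings(struct fake_nic *nic)
{
    printf("tear down hardware descriptor rings\n");
}

static void release_sw_buffers(struct fake_nic *nic)
{
    printf("free software buffers and ring state\n");
}

/* Ordering mirrored from the patch: hardware first (via the fini_dmaq
 * hook), software buffers only afterwards. */
static int example_stop_datapath(struct fake_nic *nic)
{
    int rc = 0;

    if (!nic->in_recovery) {            /* never touch a dead device   */
        if (nic->bus_master)            /* flush only if DMA is active */
            rc = hw_flush_queues(nic);
        hw_fini_rings(nic);
    }
    release_sw_buffers(nic);            /* safe now: HW no longer uses it */
    return rc;
}

int main(void)
{
    struct fake_nic nic = { .bus_master = true, .in_recovery = false };
    return example_stop_datapath(&nic);
}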
a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -685,6 +685,7 @@ const struct efx_nic_type siena_a0_nic_type = { .reset = efx_mcdi_reset, .probe_port = efx_mcdi_port_probe, .remove_port = efx_mcdi_port_remove, + .fini_dmaq = efx_farch_fini_dmaq, .prepare_flush = siena_prepare_flush, .finish_flush = siena_finish_flush, .update_stats = siena_update_nic_stats, diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index c0d4040..4903c4f 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -543,10 +543,13 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue) tx_queue->initialised = true; } -void efx_release_tx_buffers(struct efx_tx_queue *tx_queue) +void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) { struct efx_tx_buffer *buffer; + netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, + "shutting down TX queue %d\n", tx_queue->queue); + if (!tx_queue->buffer) return; @@ -561,22 +564,6 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue) netdev_tx_reset_queue(tx_queue->core_txq); } -void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) -{ - if (!tx_queue->initialised) - return; - - netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, - "shutting down TX queue %d\n", tx_queue->queue); - - tx_queue->initialised = false; - - /* Flush TX queue, remove descriptor ring */ - efx_nic_fini_tx(tx_queue); - - efx_release_tx_buffers(tx_queue); -} - void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) { int i; -- cgit v0.10.2 From 012ec81223aa45d2b80aeafb77392fd1a19c7b10 Mon Sep 17 00:00:00 2001 From: Himanshu Madhani Date: Wed, 21 Aug 2013 11:24:10 -0400 Subject: qlcnic: Multi Tx queue support for 82xx Series adapter. o 82xx firmware allows support for multiple Tx queues. This patch will enable multi Tx queue support for 82xx series adapter. Max number of Tx queues supported will be 8. Signed-off-by: Himanshu Madhani Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 3dcc666..2f9985f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -97,6 +97,9 @@ #define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \ + MGMT_CMD_DESC_RESV) #define QLCNIC_MAX_TX_TIMEOUTS 2 +#define QLCNIC_MAX_TX_RINGS 8 +#define QLCNIC_MAX_SDS_RINGS 8 + /* * Following are the states of the Phantom. Phantom will set them and * Host will read to check if the fields are correct. 
@@ -515,6 +518,7 @@ struct qlcnic_host_sds_ring { u32 num_desc; void __iomem *crb_sts_consumer; + struct qlcnic_host_tx_ring *tx_ring; struct status_desc *desc_head; struct qlcnic_adapter *adapter; struct napi_struct napi; @@ -532,9 +536,17 @@ struct qlcnic_host_tx_ring { void __iomem *crb_intr_mask; char name[IFNAMSIZ + 12]; u16 ctx_id; + + u32 state; u32 producer; u32 sw_consumer; u32 num_desc; + + u64 xmit_on; + u64 xmit_off; + u64 xmit_called; + u64 xmit_finished; + void __iomem *crb_cmd_producer; struct cmd_desc_type0 *desc_head; struct qlcnic_adapter *adapter; @@ -559,7 +571,6 @@ struct qlcnic_recv_context { u32 state; u16 context_id; u16 virt_port; - }; /* HW context creation */ @@ -604,6 +615,7 @@ struct qlcnic_recv_context { #define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8) #define QLCNIC_CAP0_VALIDOFF (1 << 11) #define QLCNIC_CAP0_LRO_MSS (1 << 21) +#define QLCNIC_CAP0_TX_MULTI (1 << 22) /* * Context state @@ -631,7 +643,7 @@ struct qlcnic_hostrq_rds_ring { struct qlcnic_hostrq_rx_ctx { __le64 host_rsp_dma_addr; /* Response dma'd here */ - __le32 capabilities[4]; /* Flag bit vector */ + __le32 capabilities[4]; /* Flag bit vector */ __le32 host_int_crb_mode; /* Interrupt crb usage */ __le32 host_rds_crb_mode; /* RDS crb usage */ /* These ring offsets are relative to data[0] below */ @@ -814,6 +826,7 @@ struct qlcnic_mac_list_s { #define QLCNIC_FW_CAPABILITY_BDG BIT_8 #define QLCNIC_FW_CAPABILITY_FVLANTX BIT_9 #define QLCNIC_FW_CAPABILITY_HW_LRO BIT_10 +#define QLCNIC_FW_CAPABILITY_2_MULTI_TX BIT_4 #define QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK BIT_27 #define QLCNIC_FW_CAPABILITY_MORE_CAPS BIT_31 @@ -922,6 +935,7 @@ struct qlcnic_ipaddr { #define QLCNIC_BEACON_DISABLE 0xD #define QLCNIC_DEF_NUM_STS_DESC_RINGS 4 +#define QLCNIC_DEF_NUM_TX_RINGS 4 #define QLCNIC_MSIX_TBL_SPACE 8192 #define QLCNIC_PCI_REG_MSIX_TBL 0x44 #define QLCNIC_MSIX_TBL_PGSIZE 4096 @@ -937,6 +951,7 @@ struct qlcnic_ipaddr { #define __QLCNIC_DIAG_RES_ALLOC 6 #define __QLCNIC_LED_ENABLE 7 #define __QLCNIC_ELB_INPROGRESS 8 +#define __QLCNIC_MULTI_TX_UNIQUE 9 #define __QLCNIC_SRIOV_ENABLE 10 #define __QLCNIC_SRIOV_CAPABLE 11 #define __QLCNIC_MBX_POLL_ENABLE 12 @@ -1482,7 +1497,8 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter); void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter); void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter); -void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter); +void qlcnic_release_tx_buffers(struct qlcnic_adapter *, + struct qlcnic_host_tx_ring *); int qlcnic_check_fw_status(struct qlcnic_adapter *adapter); void qlcnic_watchdog_task(struct work_struct *work); @@ -1543,6 +1559,7 @@ void qlcnic_free_sds_rings(struct qlcnic_recv_context *); void qlcnic_advert_link_change(struct qlcnic_adapter *, int); void qlcnic_free_tx_rings(struct qlcnic_adapter *); int qlcnic_alloc_tx_rings(struct qlcnic_adapter *, struct net_device *); +void qlcnic_dump_mbx(struct qlcnic_adapter *, struct qlcnic_cmd_args *); void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter); void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter); @@ -1605,6 +1622,26 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring) tx_ring->producer; } +static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter, + struct net_device *netdev) +{ + int err, tx_q; + + tx_q = adapter->max_drv_tx_rings; + + netdev->num_tx_queues = tx_q; + netdev->real_num_tx_queues = tx_q; + + err = netif_set_real_num_tx_queues(netdev, tx_q); + if (err) + 
dev_err(&adapter->pdev->dev, "failed to set %d Tx queues\n", + tx_q); + else + dev_info(&adapter->pdev->dev, "set %d Tx queues\n", tx_q); + + return err; +} + struct qlcnic_nic_template { int (*config_bridged_mode) (struct qlcnic_adapter *, u32); int (*config_led) (struct qlcnic_adapter *, u32, u32); @@ -1932,16 +1969,43 @@ static inline void qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, adapter->nic_ops->config_ipaddr(adapter, ip, cmd); } +static inline bool qlcnic_check_multi_tx(struct qlcnic_adapter *adapter) +{ + return test_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state); +} + +static inline void qlcnic_disable_multi_tx(struct qlcnic_adapter *adapter) +{ + test_and_clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state); + adapter->max_drv_tx_rings = 1; +} + +/* When operating in a muti tx mode, driver needs to write 0x1 + * to src register, instead of 0x0 to disable receiving interrupt. + */ static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring) { - writel(0, sds_ring->crb_intr_mask); + struct qlcnic_adapter *adapter = sds_ring->adapter; + + if (qlcnic_check_multi_tx(adapter) && + (adapter->flags & QLCNIC_MSIX_ENABLED)) + writel(0x1, sds_ring->crb_intr_mask); + else + writel(0, sds_ring->crb_intr_mask); } +/* When operating in a muti tx mode, driver needs to write 0x0 + * to src register, instead of 0x1 to enable receiving interrupts. + */ static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring) { struct qlcnic_adapter *adapter = sds_ring->adapter; - writel(0x1, sds_ring->crb_intr_mask); + if (qlcnic_check_multi_tx(adapter) && + (adapter->flags & QLCNIC_MSIX_ENABLED)) + writel(0, sds_ring->crb_intr_mask); + else + writel(0x1, sds_ring->crb_intr_mask); if (!QLCNIC_IS_MSI_FAMILY(adapter)) writel(0xfbff, adapter->tgt_mask_reg); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index c5daf78..f0dc5d4 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -695,8 +695,8 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter, static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter, u32 data[]); -static void qlcnic_dump_mbx(struct qlcnic_adapter *adapter, - struct qlcnic_cmd_args *cmd) +void qlcnic_dump_mbx(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) { int i; @@ -1691,7 +1691,7 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode) /* Make sure carrier is off and queue is stopped during loopback */ if (netif_running(netdev)) { netif_carrier_off(netdev); - netif_stop_queue(netdev); + netif_tx_stop_all_queues(netdev); } ret = qlcnic_do_lb_test(adapter, mode); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c index d09389b..b8d9750 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c @@ -38,6 +38,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = { {QLCNIC_CMD_GET_TEMP_HDR, 4, 1}, {QLCNIC_CMD_82XX_SET_DRV_VER, 4, 1}, {QLCNIC_CMD_GET_LED_STATUS, 4, 2}, + {QLCNIC_CMD_MQ_TX_CONFIG_INTR, 2, 3}, }; static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw) @@ -171,6 +172,7 @@ int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter, break; } dev_err(&pdev->dev, fmt, cmd->rsp.arg[0]); + qlcnic_dump_mbx(adapter, cmd); } else if (rsp == QLCNIC_CDRP_RSP_OK) cmd->rsp.arg[0] = 
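A detail worth calling out in the helpers above is the inverted mask convention: when multi-TX is active under MSI-X, writing 1 to the SDS ring's interrupt-mask register disables the interrupt and 0 enables it, the opposite of the legacy single-queue behaviour. The following standalone sketch models just that selection (plain variables stand in for the driver's CRB register writes):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_sds_ring {
    uint32_t intr_mask;   /* models the crb_intr_mask register */
    bool multi_tx;        /* multi-TX mode negotiated with firmware */
    bool msix;            /* MSI-X interrupts in use */
};

static void sds_disable_int(struct fake_sds_ring *sds)
{
    /* multi-TX + MSI-X: 1 masks the interrupt; otherwise 0 does */
    sds->intr_mask = (sds->multi_tx && sds->msix) ? 1 : 0;
}

static void sds_enable_int(struct fake_sds_ring *sds)
{
    /* multi-TX + MSI-X: 0 unmasks the interrupt; otherwise 1 does */
    sds->intr_mask = (sds->multi_tx && sds->msix) ? 0 : 1;
}

int main(void)
{
    struct fake_sds_ring sds = { .multi_tx = true, .msix = true };

    sds_disable_int(&sds);
    printf("disable -> mask=%u\n", (unsigned)sds.intr_mask);   /* 1 here */
    sds_enable_int(&sds);
    printf("enable  -> mask=%u\n", (unsigned)sds.intr_mask);   /* 0 here */
    return 0;
}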
QLCNIC_RCODE_SUCCESS; @@ -243,40 +245,38 @@ qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu) int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) { - void *addr; - struct qlcnic_hostrq_rx_ctx *prq; - struct qlcnic_cardrsp_rx_ctx *prsp; - struct qlcnic_hostrq_rds_ring *prq_rds; - struct qlcnic_hostrq_sds_ring *prq_sds; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + struct qlcnic_hardware_context *ahw = adapter->ahw; + dma_addr_t hostrq_phys_addr, cardrsp_phys_addr; + struct net_device *netdev = adapter->netdev; + u32 temp_intr_crb_mode, temp_rds_crb_mode; struct qlcnic_cardrsp_rds_ring *prsp_rds; struct qlcnic_cardrsp_sds_ring *prsp_sds; + struct qlcnic_hostrq_rds_ring *prq_rds; + struct qlcnic_hostrq_sds_ring *prq_sds; struct qlcnic_host_rds_ring *rds_ring; struct qlcnic_host_sds_ring *sds_ring; - struct qlcnic_cmd_args cmd; - - dma_addr_t hostrq_phys_addr, cardrsp_phys_addr; - u64 phys_addr; - + struct qlcnic_cardrsp_rx_ctx *prsp; + struct qlcnic_hostrq_rx_ctx *prq; u8 i, nrds_rings, nsds_rings; - u16 temp_u16; + struct qlcnic_cmd_args cmd; size_t rq_size, rsp_size; u32 cap, reg, val, reg2; + u64 phys_addr; + u16 temp_u16; + void *addr; int err; - struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - nrds_rings = adapter->max_rds_rings; nsds_rings = adapter->max_sds_rings; - rq_size = - SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings, - nsds_rings); - rsp_size = - SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings, - nsds_rings); + rq_size = SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings, + nsds_rings); + rsp_size = SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings, + nsds_rings); addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size, - &hostrq_phys_addr, GFP_KERNEL); + &hostrq_phys_addr, GFP_KERNEL); if (addr == NULL) return -ENOMEM; prq = addr; @@ -295,15 +295,19 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) | QLCNIC_CAP0_VALIDOFF); cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS); - temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler); - prq->valid_field_offset = cpu_to_le16(temp_u16); - prq->txrx_sds_binding = nsds_rings - 1; + if (qlcnic_check_multi_tx(adapter)) { + cap |= QLCNIC_CAP0_TX_MULTI; + } else { + temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler); + prq->valid_field_offset = cpu_to_le16(temp_u16); + prq->txrx_sds_binding = nsds_rings - 1; + temp_intr_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED; + prq->host_int_crb_mode = cpu_to_le32(temp_intr_crb_mode); + temp_rds_crb_mode = QLCNIC_HOST_RDS_CRB_MODE_UNIQUE; + prq->host_rds_crb_mode = cpu_to_le32(temp_rds_crb_mode); + } prq->capabilities[0] = cpu_to_le32(cap); - prq->host_int_crb_mode = - cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED); - prq->host_rds_crb_mode = - cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE); prq->num_rds_rings = cpu_to_le16(nrds_rings); prq->num_sds_rings = cpu_to_le16(nsds_rings); @@ -317,10 +321,8 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) le32_to_cpu(prq->rds_ring_offset)); for (i = 0; i < nrds_rings; i++) { - rds_ring = &recv_ctx->rds_rings[i]; rds_ring->producer = 0; - prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr); prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc); prq_rds[i].ring_kind = cpu_to_le32(i); @@ -331,14 +333,15 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) le32_to_cpu(prq->sds_ring_offset)); for (i = 0; i < nsds_rings; i++) { - sds_ring = &recv_ctx->sds_rings[i]; 
sds_ring->consumer = 0; memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring)); - prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr); prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc); - prq_sds[i].msi_index = cpu_to_le16(i); + if (qlcnic_check_multi_tx(adapter)) + prq_sds[i].msi_index = cpu_to_le16(ahw->intr_tbl[i].id); + else + prq_sds[i].msi_index = cpu_to_le16(i); } phys_addr = hostrq_phys_addr; @@ -361,9 +364,8 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) { rds_ring = &recv_ctx->rds_rings[i]; - reg = le32_to_cpu(prsp_rds[i].host_producer_crb); - rds_ring->crb_rcv_producer = adapter->ahw->pci_base0 + reg; + rds_ring->crb_rcv_producer = ahw->pci_base0 + reg; } prsp_sds = ((struct qlcnic_cardrsp_sds_ring *) @@ -371,24 +373,30 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) { sds_ring = &recv_ctx->sds_rings[i]; - reg = le32_to_cpu(prsp_sds[i].host_consumer_crb); - reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb); + if (qlcnic_check_multi_tx(adapter)) + reg2 = ahw->intr_tbl[i].src; + else + reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb); - sds_ring->crb_sts_consumer = adapter->ahw->pci_base0 + reg; - sds_ring->crb_intr_mask = adapter->ahw->pci_base0 + reg2; + sds_ring->crb_intr_mask = ahw->pci_base0 + reg2; + sds_ring->crb_sts_consumer = ahw->pci_base0 + reg; } recv_ctx->state = le32_to_cpu(prsp->host_ctx_state); recv_ctx->context_id = le16_to_cpu(prsp->context_id); recv_ctx->virt_port = prsp->virt_port; + netdev_info(netdev, "Rx Context[%d] Created, state 0x%x\n", + recv_ctx->context_id, recv_ctx->state); qlcnic_free_mbx_args(&cmd); + out_free_rsp: dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp, cardrsp_phys_addr); out_free_rq: dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr); + return err; } @@ -416,16 +424,19 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter, struct qlcnic_host_tx_ring *tx_ring, int ring) { + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct net_device *netdev = adapter->netdev; struct qlcnic_hostrq_tx_ctx *prq; struct qlcnic_hostrq_cds_ring *prq_cds; struct qlcnic_cardrsp_tx_ctx *prsp; - void *rq_addr, *rsp_addr; - size_t rq_size, rsp_size; - u32 temp; struct qlcnic_cmd_args cmd; - int err; - u64 phys_addr; - dma_addr_t rq_phys_addr, rsp_phys_addr; + u32 temp, intr_mask, temp_int_crb_mode; + dma_addr_t rq_phys_addr, rsp_phys_addr; + int temp_nsds_rings, index, err; + void *rq_addr, *rsp_addr; + size_t rq_size, rsp_size; + u64 phys_addr; + u16 msix_id; /* reset host resources */ tx_ring->producer = 0; @@ -447,18 +458,28 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter, } prq = rq_addr; - prsp = rsp_addr; prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr); temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN | - QLCNIC_CAP0_LSO); + QLCNIC_CAP0_LSO); + if (qlcnic_check_multi_tx(adapter)) + temp |= QLCNIC_CAP0_TX_MULTI; + prq->capabilities[0] = cpu_to_le32(temp); - prq->host_int_crb_mode = - cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED); - prq->msi_index = 0; + if (qlcnic_check_multi_tx(adapter) && + (adapter->max_drv_tx_rings > 1)) { + temp_nsds_rings = adapter->max_sds_rings; + index = temp_nsds_rings + ring; + msix_id = ahw->intr_tbl[index].id; + prq->msi_index = cpu_to_le16(msix_id); + } else { + temp_int_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED; + prq->host_int_crb_mode = 
cpu_to_le32(temp_int_crb_mode); + prq->msi_index = 0; + } prq->interrupt_ctl = 0; prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr); @@ -480,15 +501,24 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter, err = qlcnic_issue_cmd(adapter, &cmd); if (err == QLCNIC_RCODE_SUCCESS) { + tx_ring->state = le32_to_cpu(prsp->host_ctx_state); temp = le32_to_cpu(prsp->cds_ring.host_producer_crb); tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp; tx_ring->ctx_id = le16_to_cpu(prsp->context_id); + if (qlcnic_check_multi_tx(adapter) && + (adapter->flags & QLCNIC_MSIX_ENABLED)) { + index = adapter->max_sds_rings + ring; + intr_mask = ahw->intr_tbl[index].src; + tx_ring->crb_intr_mask = ahw->pci_base0 + intr_mask; + } + + netdev_info(netdev, "Tx Context[0x%x] Created, state 0x%x\n", + tx_ring->ctx_id, tx_ring->state); } else { - dev_err(&adapter->pdev->dev, - "Failed to create tx ctx in firmware%d\n", err); + netdev_err(netdev, "Failed to create tx ctx in firmware%d\n", + err); err = -EIO; } - qlcnic_free_mbx_args(&cmd); out_free_rsp: @@ -618,6 +648,13 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev) } } + if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) && + qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test) { + err = qlcnic_82xx_mq_intrpt(dev, 1); + if (err) + return err; + } + err = qlcnic_fw_cmd_create_rx_ctx(dev); if (err) goto err_out; @@ -639,9 +676,14 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev) } set_bit(__QLCNIC_FW_ATTACHED, &dev->state); + return 0; err_out: + if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) && + qlcnic_check_multi_tx(dev)) + qlcnic_82xx_config_intrpt(dev, 0); + if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) { if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST) qlcnic_83xx_config_intrpt(dev, 0); @@ -659,6 +701,11 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter) qlcnic_fw_cmd_del_tx_ctx(adapter, &adapter->tx_ring[ring]); + if (qlcnic_82xx_check(adapter) && + (adapter->flags & QLCNIC_MSIX_ENABLED) && + qlcnic_check_multi_tx(adapter)) + qlcnic_82xx_config_intrpt(adapter, 0); + if (qlcnic_83xx_check(adapter) && (adapter->flags & QLCNIC_MSIX_ENABLED)) { if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) @@ -723,6 +770,51 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter) } } +int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *adapter, u8 op_type) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct net_device *netdev = adapter->netdev; + struct qlcnic_cmd_args cmd; + u32 type, val; + int i, err = 0; + + for (i = 0; i < ahw->num_msix; i++) { + qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_MQ_TX_CONFIG_INTR); + type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL; + val = type | (ahw->intr_tbl[i].type << 4); + if (ahw->intr_tbl[i].type == QLCNIC_INTRPT_MSIX) + val |= (ahw->intr_tbl[i].id << 16); + cmd.req.arg[1] = val; + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) { + netdev_err(netdev, "Failed to %s interrupts %d\n", + op_type == QLCNIC_INTRPT_ADD ? 
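Both context-creation paths above rely on the same MSI-X table layout: the first max_sds_rings entries of intr_tbl[] are consumed by the SDS (receive/status) rings, and each TX ring takes the entry directly after them, so TX ring N maps to slot max_sds_rings + N. A tiny sketch of that index arithmetic with hypothetical ring counts:

#include <stdio.h>

int main(void)
{
    /* Hypothetical layout: 4 SDS rings followed by 4 TX rings. */
    const int max_sds_rings = 4;
    const int max_drv_tx_rings = 4;

    for (int ring = 0; ring < max_drv_tx_rings; ring++) {
        int index = max_sds_rings + ring;   /* slot in intr_tbl[] */
        printf("TX ring %d -> MSI-X table slot %d\n", ring, index);
    }
    return 0;
}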
"Add" : + "Delete", err); + qlcnic_free_mbx_args(&cmd); + return err; + } + val = cmd.rsp.arg[1]; + if (LSB(val)) { + netdev_info(netdev, + "failed to configure interrupt for %d\n", + ahw->intr_tbl[i].id); + continue; + } + if (op_type) { + ahw->intr_tbl[i].id = MSW(val); + ahw->intr_tbl[i].enabled = 1; + ahw->intr_tbl[i].src = cmd.rsp.arg[2]; + } else { + ahw->intr_tbl[i].id = i; + ahw->intr_tbl[i].enabled = 0; + ahw->intr_tbl[i].src = 0; + } + qlcnic_free_mbx_args(&cmd); + } + + return err; +} int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac) { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index 4d5f59b..9e49c15 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -387,7 +387,7 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter, if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) return -EIO; - tx_ring = adapter->tx_ring; + tx_ring = &adapter->tx_ring[0]; __netif_tx_lock_bh(tx_ring->txq); producer = tx_ring->producer; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h index 4a71b28..e9a2225 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h @@ -87,6 +87,7 @@ enum qlcnic_regs { #define QLCNIC_CMD_CONFIG_VPORT 0x32 #define QLCNIC_CMD_GET_MAC_STATS 0x37 #define QLCNIC_CMD_82XX_SET_DRV_VER 0x38 +#define QLCNIC_CMD_MQ_TX_CONFIG_INTR 0x39 #define QLCNIC_CMD_GET_LED_STATUS 0x3C #define QLCNIC_CMD_CONFIGURE_RSS 0x41 #define QLCNIC_CMD_CONFIG_INTR_COAL 0x43 @@ -177,6 +178,8 @@ int qlcnic_82xx_setup_intr(struct qlcnic_adapter *, u8); irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *); int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *); +int qlcnic_82xx_mq_intrpt(struct qlcnic_adapter *, int); +int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *, u8); int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *); int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *, struct qlcnic_host_tx_ring *tx_ring, int); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c index 974d626..240b49f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c @@ -127,12 +127,12 @@ void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter) } } -void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter) +void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *tx_ring) { struct qlcnic_cmd_buffer *cmd_buf; struct qlcnic_skb_frag *buffrag; int i, j; - struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; cmd_buf = tx_ring->cmd_buf_arr; for (i = 0; i < tx_ring->num_desc; i++) { @@ -241,7 +241,12 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter) sds_ring->irq = adapter->msix_entries[ring].vector; sds_ring->adapter = adapter; sds_ring->num_desc = adapter->num_rxd; - + if (qlcnic_82xx_check(adapter)) { + if (qlcnic_check_multi_tx(adapter)) + sds_ring->tx_ring = &adapter->tx_ring[ring]; + else + sds_ring->tx_ring = &adapter->tx_ring[0]; + } for (i = 0; i < NUM_RCV_DESC_RINGS; i++) INIT_LIST_HEAD(&sds_ring->free_list[i]); } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index cec0908..f4b09f4 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ 
b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -127,6 +127,21 @@ struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *, struct qlcnic_host_rds_ring *, u16, u16); +inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *tx_ring) +{ + if (qlcnic_check_multi_tx(adapter)) + writel(0x0, tx_ring->crb_intr_mask); +} + + +static inline void qlcnic_disable_tx_int(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *tx_ring) +{ + if (qlcnic_check_multi_tx(adapter)) + writel(1, tx_ring->crb_intr_mask); +} + inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter, struct qlcnic_host_tx_ring *tx_ring) { @@ -354,14 +369,14 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter, } static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter, - struct cmd_desc_type0 *first_desc, struct sk_buff *skb) + struct cmd_desc_type0 *first_desc, struct sk_buff *skb, + struct qlcnic_host_tx_ring *tx_ring) { u8 l4proto, opcode = 0, hdr_len = 0; u16 flags = 0, vlan_tci = 0; int copied, offset, copy_len, size; struct cmd_desc_type0 *hwdesc; struct vlan_ethhdr *vh; - struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; u16 protocol = ntohs(skb->protocol); u32 producer = tx_ring->producer; @@ -544,7 +559,7 @@ static inline void qlcnic_clear_cmddesc(u64 *desc) netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct qlcnic_adapter *adapter = netdev_priv(netdev); - struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; + struct qlcnic_host_tx_ring *tx_ring; struct qlcnic_cmd_buffer *pbuf; struct qlcnic_skb_frag *buffrag; struct cmd_desc_type0 *hwdesc, *first_desc; @@ -553,10 +568,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) int i, k, frag_count, delta = 0; u32 producer, num_txd; - num_txd = tx_ring->num_desc; - if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) { - netif_stop_queue(netdev); + netif_tx_stop_all_queues(netdev); return NETDEV_TX_BUSY; } @@ -566,7 +579,14 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) goto drop_packet; } + if (qlcnic_check_multi_tx(adapter)) + tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)]; + else + tx_ring = &adapter->tx_ring[0]; + num_txd = tx_ring->num_desc; + frag_count = skb_shinfo(skb)->nr_frags + 1; + /* 14 frags supported for normal packet and * 32 frags supported for TSO packet */ @@ -581,11 +601,12 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) } if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) { - netif_stop_queue(netdev); + netif_tx_stop_queue(tx_ring->txq); if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) { - netif_start_queue(netdev); + netif_tx_start_queue(tx_ring->txq); } else { adapter->stats.xmit_off++; + tx_ring->xmit_off++; return NETDEV_TX_BUSY; } } @@ -640,7 +661,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) tx_ring->producer = get_next_index(producer, num_txd); smp_mb(); - if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb))) + if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb, tx_ring))) goto unwind_buff; if (adapter->drv_mac_learn) @@ -648,6 +669,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) adapter->stats.txbytes += skb->len; adapter->stats.xmitcalled++; + tx_ring->xmit_called++; qlcnic_update_cmd_producer(tx_ring); @@ -670,7 +692,7 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup) adapter->ahw->linkup = 0; if (netif_running(netdev)) { 
netif_carrier_off(netdev); - netif_stop_queue(netdev); + netif_tx_stop_all_queues(netdev); } } else if (!adapter->ahw->linkup && linkup) { netdev_info(netdev, "NIC Link is up\n"); @@ -765,9 +787,6 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter, struct net_device *netdev = adapter->netdev; struct qlcnic_skb_frag *frag; - if (!spin_trylock(&adapter->tx_clean_lock)) - return 1; - sw_consumer = tx_ring->sw_consumer; hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); @@ -785,6 +804,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter, frag->dma = 0ULL; } adapter->stats.xmitfinished++; + tx_ring->xmit_finished++; dev_kfree_skb_any(buffer->skb); buffer->skb = NULL; } @@ -797,10 +817,12 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter, if (count && netif_running(netdev)) { tx_ring->sw_consumer = sw_consumer; smp_mb(); - if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) { + if (netif_tx_queue_stopped(tx_ring->txq) && + netif_carrier_ok(netdev)) { if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) { - netif_wake_queue(netdev); + netif_tx_wake_queue(tx_ring->txq); adapter->stats.xmit_on++; + tx_ring->xmit_on++; } } adapter->tx_timeo_cnt = 0; @@ -820,7 +842,6 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter, */ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); done = (sw_consumer == hw_consumer); - spin_unlock(&adapter->tx_clean_lock); return done; } @@ -830,16 +851,40 @@ static int qlcnic_poll(struct napi_struct *napi, int budget) int tx_complete, work_done; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_adapter *adapter; + struct qlcnic_host_tx_ring *tx_ring; sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi); adapter = sds_ring->adapter; - tx_complete = qlcnic_process_cmd_ring(adapter, adapter->tx_ring, + tx_ring = sds_ring->tx_ring; + + tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget); work_done = qlcnic_process_rcv_ring(sds_ring, budget); if ((work_done < budget) && tx_complete) { napi_complete(&sds_ring->napi); - if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { qlcnic_enable_int(sds_ring); + qlcnic_enable_tx_intr(adapter, tx_ring); + } + } + + return work_done; +} + +static int qlcnic_tx_poll(struct napi_struct *napi, int budget) +{ + struct qlcnic_host_tx_ring *tx_ring; + struct qlcnic_adapter *adapter; + int work_done; + + tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi); + adapter = tx_ring->adapter; + + work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget); + if (work_done) { + napi_complete(&tx_ring->napi); + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) + qlcnic_enable_tx_intr(adapter, tx_ring); } return work_done; @@ -1411,6 +1456,7 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter, int ring, max_sds_rings; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + struct qlcnic_host_tx_ring *tx_ring; if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings)) return -ENOMEM; @@ -1419,12 +1465,21 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter, for (ring = 0; ring < adapter->max_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; - if (ring == adapter->max_sds_rings - 1) - netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll, - QLCNIC_NETDEV_WEIGHT / max_sds_rings); - else + if (qlcnic_check_multi_tx(adapter) && + (adapter->max_drv_tx_rings > 1)) { netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll, - 
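Two changes define the multi-queue transmit path above: the TX ring is chosen from the skb's queue mapping instead of always using ring 0, and flow control becomes per-queue, stopping and waking only the affected ring's queue around the TX_STOP_THRESH watermark while per-ring counters (xmit_on, xmit_off and friends) record those events. A simplified standalone sketch of the selection and stop decision, with a made-up threshold standing in for TX_STOP_THRESH:

#include <stdbool.h>
#include <stdio.h>

#define NUM_TX_RINGS  4
#define STOP_THRESH   16      /* stand-in for the driver's TX_STOP_THRESH */

struct fake_tx_ring {
    int avail;                /* free descriptors in the ring */
    bool stopped;             /* models the per-queue stopped state */
    unsigned long xmit_off;   /* times the queue had to be stopped */
};

/* Ring selection: the stack's queue mapping picks the ring directly. */
static struct fake_tx_ring *select_ring(struct fake_tx_ring *rings,
                                        unsigned int queue_mapping)
{
    return &rings[queue_mapping % NUM_TX_RINGS];
}

/* Per-queue flow control: stop only this ring's queue when it runs low;
 * the completion path later wakes it once descriptors are reclaimed. */
static bool ring_can_transmit(struct fake_tx_ring *ring)
{
    if (ring->avail <= STOP_THRESH) {
        ring->stopped = true;             /* netif_tx_stop_queue() analogue */
        ring->xmit_off++;
        return false;
    }
    return true;
}

int main(void)
{
    struct fake_tx_ring rings[NUM_TX_RINGS] = {
        { .avail = 64 }, { .avail = 8 }, { .avail = 32 }, { .avail = 128 },
    };

    for (unsigned int q = 0; q < NUM_TX_RINGS; q++)
        printf("queue %u -> %s\n", q,
               ring_can_transmit(select_ring(rings, q)) ? "transmit"
                                                        : "stopped");
    return 0;
}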
QLCNIC_NETDEV_WEIGHT*2); + QLCNIC_NETDEV_WEIGHT * 2); + } else { + if (ring == (adapter->max_sds_rings - 1)) + netif_napi_add(netdev, &sds_ring->napi, + qlcnic_poll, + QLCNIC_NETDEV_WEIGHT / + max_sds_rings); + else + netif_napi_add(netdev, &sds_ring->napi, + qlcnic_rx_poll, + QLCNIC_NETDEV_WEIGHT * 2); + } } if (qlcnic_alloc_tx_rings(adapter, netdev)) { @@ -1432,6 +1487,14 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter, return -ENOMEM; } + if (qlcnic_check_multi_tx(adapter)) { + for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + netif_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll, + QLCNIC_NETDEV_WEIGHT); + } + } + return 0; } @@ -1440,6 +1503,7 @@ void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter) int ring; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + struct qlcnic_host_tx_ring *tx_ring; for (ring = 0; ring < adapter->max_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; @@ -1447,6 +1511,14 @@ void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter) } qlcnic_free_sds_rings(adapter->recv_ctx); + + if (qlcnic_check_multi_tx(adapter)) { + for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + netif_napi_del(&tx_ring->napi); + } + } + qlcnic_free_tx_rings(adapter); } @@ -1454,6 +1526,7 @@ void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter) { int ring; struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_host_tx_ring *tx_ring; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) @@ -1464,12 +1537,23 @@ void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter) napi_enable(&sds_ring->napi); qlcnic_enable_int(sds_ring); } + + if (qlcnic_check_multi_tx(adapter) && + (adapter->flags & QLCNIC_MSIX_ENABLED) && + (adapter->max_drv_tx_rings > 1)) { + for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + napi_enable(&tx_ring->napi); + qlcnic_enable_tx_intr(adapter, tx_ring); + } + } } void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter) { int ring; struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_host_tx_ring *tx_ring; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) @@ -1481,6 +1565,16 @@ void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter) napi_synchronize(&sds_ring->napi); napi_disable(&sds_ring->napi); } + + if ((adapter->flags & QLCNIC_MSIX_ENABLED) && + qlcnic_check_multi_tx(adapter)) { + for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + qlcnic_disable_tx_int(adapter, tx_ring); + napi_synchronize(&tx_ring->napi); + napi_disable(&tx_ring->napi); + } + } } #define QLC_83XX_NORMAL_LB_PKT (1ULL << 36) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 1f97a73..af2b2e2 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -261,7 +261,6 @@ static const struct qlcnic_board_info qlcnic_boards[] = { }; #define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards) -#define QLC_MAX_SDS_RINGS 8 static const struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG; @@ -523,11 +522,30 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = { .free_mac_list = qlcnic_82xx_free_mac_list, }; +static void qlcnic_get_multiq_capability(struct qlcnic_adapter *adapter) +{ + 
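With more than one TX ring, RX and TX completions no longer share a single NAPI context: each TX ring gets its own napi instance whose poll routine only reclaims completed transmit descriptors and then re-enables that ring's interrupt. The sketch below shows the general shape of such a TX-only poll loop; it is a generic illustration, not the driver's exact qlcnic_tx_poll logic:

#include <stdbool.h>
#include <stdio.h>

struct fake_tx_ring {
    int pending;              /* descriptors waiting for completion */
    bool irq_enabled;         /* models the per-ring interrupt mask  */
};

/* Reclaim up to 'budget' completed descriptors from one TX ring. */
static int tx_reclaim(struct fake_tx_ring *ring, int budget)
{
    int done = ring->pending < budget ? ring->pending : budget;

    ring->pending -= done;
    return done;
}

/* Generic TX-only poll: do bounded work, and once the ring is drained,
 * leave polling mode and unmask the ring's interrupt again. */
static int example_tx_poll(struct fake_tx_ring *ring, int budget)
{
    int work_done = tx_reclaim(ring, budget);

    if (ring->pending == 0)
        ring->irq_enabled = true;   /* napi_complete() + re-enable in driver */
    return work_done;
}

int main(void)
{
    struct fake_tx_ring ring = { .pending = 90 };

    while (ring.pending)
        printf("poll reclaimed %d descriptors\n", example_tx_poll(&ring, 64));
    return 0;
}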
struct qlcnic_hardware_context *ahw = adapter->ahw; + int num_tx_q; + + if (ahw->msix_supported && + (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_MULTI_TX)) { + num_tx_q = min_t(int, QLCNIC_DEF_NUM_TX_RINGS, + num_online_cpus()); + if (num_tx_q > 1) { + test_and_set_bit(__QLCNIC_MULTI_TX_UNIQUE, + &adapter->state); + adapter->max_drv_tx_rings = num_tx_q; + } + } else { + adapter->max_drv_tx_rings = 1; + } +} + int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix) { struct pci_dev *pdev = adapter->pdev; + int max_tx_rings, max_sds_rings, tx_vector; int err = -1, i; - int max_tx_rings, tx_vector; if (adapter->flags & QLCNIC_TX_INTR_SHARED) { max_tx_rings = 0; @@ -561,7 +579,14 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix) adapter->max_sds_rings = num_msix - max_tx_rings - 1; } else { - adapter->max_sds_rings = num_msix; + adapter->ahw->num_msix = num_msix; + if (qlcnic_check_multi_tx(adapter) && + (adapter->max_drv_tx_rings > 1)) + max_sds_rings = num_msix - max_tx_rings; + else + max_sds_rings = num_msix; + + adapter->max_sds_rings = max_sds_rings; } dev_info(&pdev->dev, "using msi-x interrupts\n"); return err; @@ -577,6 +602,8 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix) num_msix += (max_tx_rings + 1); } else { num_msix = rounddown_pow_of_two(err); + if (qlcnic_check_multi_tx(adapter)) + num_msix += max_tx_rings; } if (num_msix) { @@ -612,6 +639,7 @@ static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter) adapter->msix_entries[0].vector = pdev->irq; return err; } + if (qlcnic_use_msi || qlcnic_use_msi_x) return -EOPNOTSUPP; @@ -630,26 +658,63 @@ static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter) int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr) { + struct qlcnic_hardware_context *ahw = adapter->ahw; int num_msix, err = 0; if (!num_intr) num_intr = QLCNIC_DEF_NUM_STS_DESC_RINGS; - if (adapter->ahw->msix_supported) + if (ahw->msix_supported) { num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(), num_intr)); - else + if (qlcnic_check_multi_tx(adapter)) + num_msix += adapter->max_drv_tx_rings; + } else { num_msix = 1; + } err = qlcnic_enable_msix(adapter, num_msix); - if (err == -ENOMEM || !err) + if (err == -ENOMEM) return err; - err = qlcnic_enable_msi_legacy(adapter); - if (!err) + if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { + qlcnic_disable_multi_tx(adapter); + + err = qlcnic_enable_msi_legacy(adapter); + if (!err) + return err; + } + + return 0; +} + +int qlcnic_82xx_mq_intrpt(struct qlcnic_adapter *adapter, int op_type) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + int err, i; + + if (qlcnic_check_multi_tx(adapter) && + (adapter->flags & QLCNIC_MSIX_ENABLED)) { + ahw->intr_tbl = vzalloc(ahw->num_msix * + sizeof(struct qlcnic_intrpt_config)); + if (!ahw->intr_tbl) + return -ENOMEM; + + for (i = 0; i < ahw->num_msix; i++) { + ahw->intr_tbl[i].type = QLCNIC_INTRPT_MSIX; + ahw->intr_tbl[i].id = i; + ahw->intr_tbl[i].src = 0; + } + + err = qlcnic_82xx_config_intrpt(adapter, 1); + if (err) + dev_err(&adapter->pdev->dev, + "Failed to configure Interrupt for %d vector\n", + ahw->num_msix); return err; + } - return -EIO; + return 0; } void qlcnic_teardown_intr(struct qlcnic_adapter *adapter) @@ -1422,6 +1487,7 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter) for (ring = 0; ring < num_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; if (qlcnic_82xx_check(adapter) && + !qlcnic_check_multi_tx(adapter) && (ring == (num_sds_rings - 1))) { 
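The sizing decisions above combine a firmware capability check with the host topology: multi-TX is enabled only when the firmware advertises it and MSI-X is usable, the TX ring count is then capped at the smaller of the driver default and the number of online CPUs, and the MSI-X request grows by that many extra vectors on top of the power-of-two SDS ring count. A standalone sketch of the arithmetic with hypothetical inputs (the real driver additionally handles the fallback when fewer vectors are granted):

#include <stdbool.h>
#include <stdio.h>

static unsigned int rounddown_pow_of_two(unsigned int n)
{
    unsigned int p = 1;

    while (p * 2 <= n)
        p *= 2;
    return p;
}

static unsigned int min_u(unsigned int a, unsigned int b)
{
    return a < b ? a : b;
}

int main(void)
{
    /* Hypothetical system: 6 online CPUs, firmware advertises multi-TX,
     * MSI-X available, driver defaults of 4 TX and 4 SDS rings. */
    const unsigned int online_cpus = 6;
    const unsigned int def_tx_rings = 4, def_sds_rings = 4;
    const bool fw_multi_tx = true, msix_ok = true;

    unsigned int tx_rings = (fw_multi_tx && msix_ok)
                            ? min_u(def_tx_rings, online_cpus) : 1;
    unsigned int sds_rings =
        rounddown_pow_of_two(min_u(online_cpus, def_sds_rings));
    unsigned int msix_vectors = sds_rings + (tx_rings > 1 ? tx_rings : 0);

    printf("TX rings %u, SDS rings %u, MSI-X vectors requested %u\n",
           tx_rings, sds_rings, msix_vectors);
    return 0;
}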
if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) @@ -1445,9 +1511,11 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter) return err; } } - if (qlcnic_83xx_check(adapter) && - (adapter->flags & QLCNIC_MSIX_ENABLED) && - !(adapter->flags & QLCNIC_TX_INTR_SHARED)) { + if ((qlcnic_82xx_check(adapter) && + qlcnic_check_multi_tx(adapter)) || + (qlcnic_83xx_check(adapter) && + (adapter->flags & QLCNIC_MSIX_ENABLED) && + !(adapter->flags & QLCNIC_TX_INTR_SHARED))) { handler = qlcnic_msix_tx_intr; for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { @@ -1482,8 +1550,10 @@ qlcnic_free_irq(struct qlcnic_adapter *adapter) free_irq(sds_ring->irq, sds_ring); } } - if (qlcnic_83xx_check(adapter) && - !(adapter->flags & QLCNIC_TX_INTR_SHARED)) { + if ((qlcnic_83xx_check(adapter) && + !(adapter->flags & QLCNIC_TX_INTR_SHARED)) || + (qlcnic_82xx_check(adapter) && + qlcnic_check_multi_tx(adapter))) { for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; @@ -1519,8 +1589,10 @@ int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev) if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) return 0; + if (qlcnic_set_eswitch_port_config(adapter)) return -EIO; + qlcnic_get_lro_mss_capability(adapter); if (qlcnic_fw_create_ctx(adapter)) @@ -1567,6 +1639,8 @@ int qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev) void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) { + int ring; + if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) return; @@ -1576,7 +1650,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) if (qlcnic_sriov_vf_check(adapter)) qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc); smp_mb(); - spin_lock(&adapter->tx_clean_lock); netif_carrier_off(netdev); adapter->ahw->linkup = 0; netif_tx_disable(netdev); @@ -1594,8 +1667,9 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) adapter->flags &= ~QLCNIC_FW_LRO_MSS_CAP; qlcnic_reset_rx_buffers_list(adapter); - qlcnic_release_tx_buffers(adapter); - spin_unlock(&adapter->tx_clean_lock); + + for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) + qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]); } /* Usage: During suspend and firmware recovery module */ @@ -1916,6 +1990,12 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, netdev->priv_flags |= IFF_UNICAST_FLT; netdev->irq = adapter->msix_entries[0].vector; + if (qlcnic_82xx_check(adapter) && qlcnic_check_multi_tx(adapter)) { + err = qlcnic_set_real_num_queues(adapter, netdev); + if (err) + return err; + } + err = register_netdev(netdev); if (err) { dev_err(&pdev->dev, "failed to register net device\n"); @@ -1984,7 +2064,8 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter, tx_ring->cmd_buf_arr = cmd_buf_arr; } - if (qlcnic_83xx_check(adapter)) { + if (qlcnic_83xx_check(adapter) || + (qlcnic_82xx_check(adapter) && qlcnic_check_multi_tx(adapter))) { for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; tx_ring->adapter = adapter; @@ -1995,6 +2076,7 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter, } } } + return 0; } @@ -2072,7 +2154,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_out_free_hw_res; - netdev = alloc_etherdev(sizeof(struct qlcnic_adapter)); + netdev = alloc_etherdev_mq(sizeof(struct qlcnic_adapter), + QLCNIC_MAX_TX_RINGS); if (!netdev) { err = -ENOMEM; goto err_out_iounmap; @@ -2102,12 +2185,10 
@@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->fdb_mac_learn = true; else if (qlcnic_mac_learn == DRV_MAC_LEARN) adapter->drv_mac_learn = true; - adapter->max_drv_tx_rings = 1; rwlock_init(&adapter->ahw->crb_lock); mutex_init(&adapter->ahw->mem_lock); - spin_lock_init(&adapter->tx_clean_lock); INIT_LIST_HEAD(&adapter->mac_list); if (qlcnic_82xx_check(adapter)) { @@ -2119,12 +2200,27 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_free_hw; } + qlcnic_get_multiq_capability(adapter); + + if ((adapter->ahw->act_pci_func > 2) && + qlcnic_check_multi_tx(adapter)) { + adapter->max_drv_tx_rings = QLCNIC_DEF_NUM_TX_RINGS; + dev_info(&adapter->pdev->dev, + "vNIC mode enabled, Set max TX rings = %d\n", + adapter->max_drv_tx_rings); + } + + if (!qlcnic_check_multi_tx(adapter)) { + clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state); + adapter->max_drv_tx_rings = 1; + } err = qlcnic_setup_idc_param(adapter); if (err) goto err_out_free_hw; adapter->flags |= QLCNIC_NEED_FLR; } else if (qlcnic_83xx_check(adapter)) { + adapter->max_drv_tx_rings = 1; qlcnic_83xx_check_vf(adapter, ent); adapter->portnum = adapter->ahw->pci_func; err = qlcnic_83xx_init(adapter, pci_using_dac); @@ -2345,7 +2441,7 @@ static int qlcnic_open(struct net_device *netdev) if (err) goto err_out; - netif_start_queue(netdev); + netif_tx_start_all_queues(netdev); return 0; @@ -2477,6 +2573,8 @@ int qlcnic_check_temp(struct qlcnic_adapter *adapter) static void qlcnic_tx_timeout(struct net_device *netdev) { struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_host_tx_ring *tx_ring; + int ring; if (test_bit(__QLCNIC_RESETTING, &adapter->state)) return; @@ -2490,6 +2588,25 @@ static void qlcnic_tx_timeout(struct net_device *netdev) QLCNIC_FORCE_FW_DUMP_KEY); } else { netdev_info(netdev, "Tx timeout, reset adapter context.\n"); + if (qlcnic_82xx_check(adapter)) { + for (ring = 0; ring < adapter->max_drv_tx_rings; + ring++) { + tx_ring = &adapter->tx_ring[ring]; + dev_info(&netdev->dev, "ring=%d\n", ring); + dev_info(&netdev->dev, "crb_intr_mask=%d\n", + readl(tx_ring->crb_intr_mask)); + dev_info(&netdev->dev, "producer=%d\n", + readl(tx_ring->crb_cmd_producer)); + dev_info(&netdev->dev, "sw_consumer = %d\n", + tx_ring->sw_consumer); + dev_info(&netdev->dev, "hw_consumer = %d\n", + le32_to_cpu(*(tx_ring->hw_consumer))); + dev_info(&netdev->dev, "xmit-on=%llu\n", + tx_ring->xmit_on); + dev_info(&netdev->dev, "xmit-off=%llu\n", + tx_ring->xmit_off); + } + } adapter->ahw->reset_context = 1; } } @@ -3380,15 +3497,15 @@ qlcnicvf_start_firmware(struct qlcnic_adapter *adapter) } int qlcnic_validate_max_rss(struct qlcnic_adapter *adapter, - __u32 val) + __u32 val) { struct net_device *netdev = adapter->netdev; u8 max_hw = adapter->ahw->max_rx_ques; u32 max_allowed; - if (val > QLC_MAX_SDS_RINGS) { + if (val > QLCNIC_MAX_SDS_RINGS) { netdev_err(netdev, "RSS value should not be higher than %u\n", - QLC_MAX_SDS_RINGS); + QLCNIC_MAX_SDS_RINGS); return -EINVAL; } -- cgit v0.10.2 From aa4a1f7df7cbb98797c9f4edfde3c726e2b3841f Mon Sep 17 00:00:00 2001 From: Himanshu Madhani Date: Wed, 21 Aug 2013 11:24:11 -0400 Subject: qlcnic: Enable Tx queue changes using ethtool for 82xx Series adapter. o using ethtool {set|get}_channel option, user can change number of Tx queues for 82xx Series adapter. o updated ethtool -S option to display stats from each Tx queue. Signed-off-by: Himanshu Madhani Signed-off-by: David S. 
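At probe time the driver now has to over-allocate: the net_device is created with alloc_etherdev_mq() sized for the hardware maximum (QLCNIC_MAX_TX_RINGS), and the number of queues actually exposed to the stack is trimmed later with netif_set_real_num_tx_queues() once the capability bit and CPU count are known. A conceptual sketch of that two-step sizing, using stand-in functions rather than the kernel API:

#include <stdio.h>

#define MAX_TX_RINGS  8   /* allocate for the maximum up front */

struct fake_netdev {
    unsigned int num_tx_queues;        /* allocated at creation time */
    unsigned int real_num_tx_queues;   /* what the stack may actually use */
};

/* Stand-in for alloc_etherdev_mq(): the queue array is sized once. */
static struct fake_netdev alloc_netdev_mq(unsigned int txqs)
{
    struct fake_netdev nd = { .num_tx_queues = txqs,
                              .real_num_tx_queues = txqs };
    return nd;
}

/* Stand-in for netif_set_real_num_tx_queues(): trim to the usable count. */
static int set_real_num_tx_queues(struct fake_netdev *nd, unsigned int txqs)
{
    if (txqs == 0 || txqs > nd->num_tx_queues)
        return -1;
    nd->real_num_tx_queues = txqs;
    return 0;
}

int main(void)
{
    struct fake_netdev nd = alloc_netdev_mq(MAX_TX_RINGS);

    /* Later, after capability/CPU checks settle on e.g. 4 usable rings: */
    if (set_real_num_tx_queues(&nd, 4) == 0)
        printf("allocated %u TX queues, exposing %u to the stack\n",
               nd.num_tx_queues, nd.real_num_tx_queues);
    return 0;
}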
Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 2f9985f..101b538 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -1531,8 +1531,9 @@ int qlcnic_reset_context(struct qlcnic_adapter *); void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings); int qlcnic_diag_alloc_res(struct net_device *netdev, int test); netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev); -int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, size_t); +int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, int); int qlcnic_validate_max_rss(struct qlcnic_adapter *, __u32); +int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *, int); void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter); void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *); int qlcnic_enable_msix(struct qlcnic_adapter *, u32); @@ -1679,7 +1680,7 @@ struct qlcnic_hardware_ops { int (*write_reg) (struct qlcnic_adapter *, ulong, u32); void (*get_ocm_win) (struct qlcnic_hardware_context *); int (*get_mac_address) (struct qlcnic_adapter *, u8 *); - int (*setup_intr) (struct qlcnic_adapter *, u8); + int (*setup_intr) (struct qlcnic_adapter *, u8, int); int (*alloc_mbx_args)(struct qlcnic_cmd_args *, struct qlcnic_adapter *, u32); int (*mbx_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *); @@ -1745,9 +1746,10 @@ static inline int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, return adapter->ahw->hw_ops->get_mac_address(adapter, mac); } -static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr) +static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter, + u8 num_intr, int txq) { - return adapter->ahw->hw_ops->setup_intr(adapter, num_intr); + return adapter->ahw->hw_ops->setup_intr(adapter, num_intr, txq); } static inline int qlcnic_alloc_mbx_args(struct qlcnic_cmd_args *mbx, diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index f0dc5d4..ea44828 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -261,7 +261,7 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *adapter, ulong addr, } } -int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr) +int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr, int txq) { int err, i, num_msix; struct qlcnic_hardware_context *ahw = adapter->ahw; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index d4c58c6..bfd2741 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -523,7 +523,7 @@ enum qlc_83xx_ext_regs { /* 83xx funcitons */ int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *); int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *, struct qlcnic_cmd_args *); -int qlcnic_83xx_setup_intr(struct qlcnic_adapter *, u8); +int qlcnic_83xx_setup_intr(struct qlcnic_adapter *, u8, int); void qlcnic_83xx_get_func_no(struct qlcnic_adapter *); int qlcnic_83xx_cam_lock(struct qlcnic_adapter *); void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index f23e667..fb0ef36 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ 
b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -2201,7 +2201,7 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac) if (err) goto detach_mbx; - err = qlcnic_setup_intr(adapter, 0); + err = qlcnic_setup_intr(adapter, 0, 0); if (err) { dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n"); goto disable_intr; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 79a5855..583dc2b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -125,6 +125,14 @@ static const char qlcnic_83xx_mac_stats_strings[][ETH_GSTRING_LEN] = { }; #define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats) + +static const char qlcnic_tx_ring_stats_strings[][ETH_GSTRING_LEN] = { + "xmit_on", + "xmit_off", + "xmit_called", + "xmit_finished", +}; + static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = { "ctx_rx_bytes", "ctx_rx_pkts", @@ -630,15 +638,15 @@ qlcnic_set_ringparam(struct net_device *dev, static void qlcnic_get_channels(struct net_device *dev, struct ethtool_channels *channel) { - int min; struct qlcnic_adapter *adapter = netdev_priv(dev); + int min; min = min_t(int, adapter->ahw->max_rx_ques, num_online_cpus()); channel->max_rx = rounddown_pow_of_two(min); - channel->max_tx = adapter->ahw->max_tx_ques; + channel->max_tx = min_t(int, QLCNIC_MAX_TX_RINGS, num_online_cpus()); channel->rx_count = adapter->max_sds_rings; - channel->tx_count = adapter->ahw->max_tx_ques; + channel->tx_count = adapter->max_drv_tx_rings; } static int qlcnic_set_channels(struct net_device *dev, @@ -646,18 +654,27 @@ static int qlcnic_set_channels(struct net_device *dev, { struct qlcnic_adapter *adapter = netdev_priv(dev); int err; + int txq = 0; - if (channel->other_count || channel->combined_count || - channel->tx_count != channel->max_tx) + if (channel->other_count || channel->combined_count) return -EINVAL; - err = qlcnic_validate_max_rss(adapter, channel->rx_count); - if (err) - return err; + if (channel->rx_count) { + err = qlcnic_validate_max_rss(adapter, channel->rx_count); + if (err) + return err; + } + + if (channel->tx_count) { + err = qlcnic_validate_max_tx_rings(adapter, channel->tx_count); + if (err) + return err; + txq = channel->tx_count; + } - err = qlcnic_set_max_rss(adapter, channel->rx_count, 0); - netdev_info(dev, "allocated 0x%x sds rings\n", - adapter->max_sds_rings); + err = qlcnic_set_max_rss(adapter, channel->rx_count, txq); + netdev_info(dev, "allocated 0x%x sds rings and 0x%x tx rings\n", + adapter->max_sds_rings, adapter->max_drv_tx_rings); return err; } @@ -893,6 +910,7 @@ free_diag_res: clear_diag_irq: adapter->max_sds_rings = max_sds_rings; clear_bit(__QLCNIC_RESETTING, &adapter->state); + return ret; } @@ -1077,11 +1095,21 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) QLCNIC_TEST_LEN * ETH_GSTRING_LEN); break; case ETH_SS_STATS: + num_stats = ARRAY_SIZE(qlcnic_tx_ring_stats_strings); + for (i = 0; i < adapter->max_drv_tx_rings; i++) { + for (index = 0; index < num_stats; index++) { + sprintf(data, "tx_ring_%d %s", i, + qlcnic_tx_ring_stats_strings[index]); + data += ETH_GSTRING_LEN; + } + } + for (index = 0; index < QLCNIC_STATS_LEN; index++) { memcpy(data + index * ETH_GSTRING_LEN, qlcnic_gstrings_stats[index].stat_string, ETH_GSTRING_LEN); } + if (qlcnic_83xx_check(adapter)) { num_stats = ARRAY_SIZE(qlcnic_83xx_tx_stats_strings); for (i = 0; i < num_stats; i++, index++) @@ -1173,11 
+1201,22 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct qlcnic_adapter *adapter = netdev_priv(dev); + struct qlcnic_host_tx_ring *tx_ring; struct qlcnic_esw_statistics port_stats; struct qlcnic_mac_statistics mac_stats; - int index, ret, length, size; + int index, ret, length, size, ring; char *p; + memset(data, 0, adapter->max_drv_tx_rings * 4 * sizeof(u64)); + for (ring = 0, index = 0; ring < adapter->max_drv_tx_rings; ring++) { + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { + tx_ring = &adapter->tx_ring[ring]; + *data++ = tx_ring->xmit_on; + *data++ = tx_ring->xmit_off; + *data++ = tx_ring->xmit_called; + *data++ = tx_ring->xmit_finished; + } + } memset(data, 0, stats->n_stats * sizeof(u64)); length = QLCNIC_STATS_LEN; for (index = 0; index < length; index++) { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h index e9a2225..cf35220 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h @@ -150,7 +150,6 @@ struct ethtool_stats; struct pci_device_id; struct qlcnic_host_sds_ring; struct qlcnic_host_tx_ring; -struct qlcnic_host_tx_ring; struct qlcnic_hardware_context; struct qlcnic_adapter; @@ -174,7 +173,7 @@ int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *, u8); void qlcnic_82xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t); void qlcnic_82xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t); void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32); -int qlcnic_82xx_setup_intr(struct qlcnic_adapter *, u8); +int qlcnic_82xx_setup_intr(struct qlcnic_adapter *, u8, int); irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *); int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index af2b2e2..94b3e82 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -656,7 +656,7 @@ static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter) return err; } -int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr) +int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr, int txq) { struct qlcnic_hardware_context *ahw = adapter->ahw; int num_msix, err = 0; @@ -667,8 +667,11 @@ int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr) if (ahw->msix_supported) { num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(), num_intr)); - if (qlcnic_check_multi_tx(adapter)) + if (qlcnic_check_multi_tx(adapter)) { + if (txq) + adapter->max_drv_tx_rings = txq; num_msix += adapter->max_drv_tx_rings; + } } else { num_msix = 1; } @@ -1990,11 +1993,9 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, netdev->priv_flags |= IFF_UNICAST_FLT; netdev->irq = adapter->msix_entries[0].vector; - if (qlcnic_82xx_check(adapter) && qlcnic_check_multi_tx(adapter)) { - err = qlcnic_set_real_num_queues(adapter, netdev); - if (err) - return err; - } + err = qlcnic_set_real_num_queues(adapter, netdev); + if (err) + return err; err = register_netdev(netdev); if (err) { @@ -2253,7 +2254,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) "Device does not support MSI interrupts\n"); if (qlcnic_82xx_check(adapter)) { - err = qlcnic_setup_intr(adapter, 0); + err = qlcnic_setup_intr(adapter, 0, 0); if 
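For the output of ethtool -S to line up, the stat names and the stat values must be emitted in the same order: four per-ring counters (xmit_on, xmit_off, xmit_called, xmit_finished) for each TX ring first, followed by the existing adapter-wide statistics. A small standalone sketch of that naming and ordering scheme:

#include <stdio.h>

static const char *tx_ring_stats[] = {
    "xmit_on", "xmit_off", "xmit_called", "xmit_finished",
};

int main(void)
{
    const int max_drv_tx_rings = 4;   /* hypothetical ring count */
    int index = 0;

    /* Per-ring counters come first, in ring order... */
    for (int ring = 0; ring < max_drv_tx_rings; ring++)
        for (unsigned int i = 0;
             i < sizeof(tx_ring_stats) / sizeof(tx_ring_stats[0]); i++)
            printf("%2d: tx_ring_%d %s\n", index++, ring, tx_ring_stats[i]);

    /* ...followed by the adapter-wide statistics (elided here). */
    printf("%2d: <global stats continue>\n", index);
    return 0;
}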
(err) { dev_err(&pdev->dev, "Failed to setup interrupt\n"); goto err_out_disable_msi; @@ -3371,7 +3372,7 @@ static int qlcnic_attach_func(struct pci_dev *pdev) qlcnic_clr_drv_state(adapter); kfree(adapter->msix_entries); adapter->msix_entries = NULL; - err = qlcnic_setup_intr(adapter, 0); + err = qlcnic_setup_intr(adapter, 0, 0); if (err) { kfree(adapter->msix_entries); @@ -3496,6 +3497,49 @@ qlcnicvf_start_firmware(struct qlcnic_adapter *adapter) return err; } +int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *adapter, int txq) +{ + struct net_device *netdev = adapter->netdev; + u8 max_hw = QLCNIC_MAX_TX_RINGS; + u32 max_allowed; + + if (!qlcnic_82xx_check(adapter)) { + netdev_err(netdev, "No Multi TX-Q support\n"); + return -EINVAL; + } + + if (!qlcnic_use_msi_x && !qlcnic_use_msi) { + netdev_err(netdev, "No Multi TX-Q support in INT-x mode\n"); + return -EINVAL; + } + + if (!qlcnic_check_multi_tx(adapter)) { + netdev_err(netdev, "No Multi TX-Q support\n"); + return -EINVAL; + } + + if (txq > QLCNIC_MAX_TX_RINGS) { + netdev_err(netdev, "Invalid ring count\n"); + return -EINVAL; + } + + max_allowed = rounddown_pow_of_two(min_t(int, max_hw, + num_online_cpus())); + if ((txq > max_allowed) || !is_power_of_2(txq)) { + if (!is_power_of_2(txq)) + netdev_err(netdev, + "TX queue should be a power of 2\n"); + if (txq > num_online_cpus()) + netdev_err(netdev, + "Tx queue should not be higher than [%u], number of online CPUs in the system\n", + num_online_cpus()); + netdev_err(netdev, "Unable to configure %u Tx rings\n", txq); + return -EINVAL; + } + + return 0; +} + int qlcnic_validate_max_rss(struct qlcnic_adapter *adapter, __u32 val) { @@ -3503,6 +3547,12 @@ int qlcnic_validate_max_rss(struct qlcnic_adapter *adapter, u8 max_hw = adapter->ahw->max_rx_ques; u32 max_allowed; + if (qlcnic_82xx_check(adapter) && !qlcnic_use_msi_x && + !qlcnic_use_msi) { + netdev_err(netdev, "No RSS support in INT-x mode\n"); + return -EINVAL; + } + if (val > QLCNIC_MAX_SDS_RINGS) { netdev_err(netdev, "RSS value should not be higher than %u\n", QLCNIC_MAX_SDS_RINGS); @@ -3535,27 +3585,48 @@ int qlcnic_validate_max_rss(struct qlcnic_adapter *adapter, return 0; } -int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, size_t len) +int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, int txq) { int err; struct net_device *netdev = adapter->netdev; + int num_msix; if (test_bit(__QLCNIC_RESETTING, &adapter->state)) return -EBUSY; + if (qlcnic_82xx_check(adapter) && !qlcnic_use_msi_x && + !qlcnic_use_msi) { + netdev_err(netdev, "No RSS support in INT-x mode\n"); + return -EINVAL; + } + netif_device_detach(netdev); if (netif_running(netdev)) __qlcnic_down(adapter, netdev); qlcnic_detach(adapter); + if (qlcnic_82xx_check(adapter)) { + if (txq != 0) + adapter->max_drv_tx_rings = txq; + + if (qlcnic_check_multi_tx(adapter) && + (txq > adapter->max_drv_tx_rings)) + num_msix = adapter->max_drv_tx_rings; + else + num_msix = data; + } + if (qlcnic_83xx_check(adapter)) { qlcnic_83xx_free_mbx_intr(adapter); qlcnic_83xx_enable_mbx_poll(adapter); } + netif_set_real_num_tx_queues(netdev, adapter->max_drv_tx_rings); + qlcnic_teardown_intr(adapter); - err = qlcnic_setup_intr(adapter, data); + + err = qlcnic_setup_intr(adapter, data, txq); if (err) { kfree(adapter->msix_entries); netdev_err(netdev, "failed to setup interrupt\n"); @@ -3583,8 +3654,7 @@ int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, size_t len) goto done; qlcnic_restore_indev_addr(netdev, NETDEV_UP); } - err = len; - done: +done: 
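[Editorial note] The Tx ring validation added above has a simple shape: qlcnic_validate_max_tx_rings() accepts a count only if it is a power of two and no larger than either QLCNIC_MAX_TX_RINGS or the number of online CPUs rounded down to a power of two. A condensed restatement of that rule, assuming the kernel helpers from <linux/log2.h> and <linux/cpumask.h>; the standalone helper name is invented here for illustration and is not driver code:

	/* Illustrative restatement of the checks in qlcnic_validate_max_tx_rings() */
	static bool example_tx_ring_count_ok(u32 txq, u32 hw_max)
	{
		u32 max_allowed = rounddown_pow_of_two(min_t(u32, hw_max,
							     num_online_cpus()));

		return txq && txq <= hw_max &&
		       is_power_of_2(txq) && txq <= max_allowed;
	}

In the ethtool set_channels path above, a tx_count of 0 simply means "leave the Tx ring count unchanged", so the validation is only consulted for non-zero requests.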
netif_device_attach(netdev); clear_bit(__QLCNIC_RESETTING, &adapter->state); return err; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index b2fbefc..2f79ec5 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -512,7 +512,7 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter, dev_warn(&adapter->pdev->dev, "Device does not support MSI interrupts\n"); - err = qlcnic_setup_intr(adapter, 1); + err = qlcnic_setup_intr(adapter, 1, 0); if (err) { dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n"); goto err_out_disable_msi; -- cgit v0.10.2 From c2c5e3a0681bb1945c0cb211a5f4baa22cb2cbb3 Mon Sep 17 00:00:00 2001 From: Himanshu Madhani Date: Wed, 21 Aug 2013 11:24:12 -0400 Subject: qlcnic: Enable diagnostic test for multiple Tx queues. o Enable diagnostic test via ethtool and QConvergeConsole application when Multiple Tx queues are enabled on 82xx series adapters. Signed-off-by: Himanshu Madhani Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 101b538..45a5a2a 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -1990,6 +1990,7 @@ static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring) struct qlcnic_adapter *adapter = sds_ring->adapter; if (qlcnic_check_multi_tx(adapter) && + !adapter->ahw->diag_test && (adapter->flags & QLCNIC_MSIX_ENABLED)) writel(0x1, sds_ring->crb_intr_mask); else @@ -2004,6 +2005,7 @@ static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring) struct qlcnic_adapter *adapter = sds_ring->adapter; if (qlcnic_check_multi_tx(adapter) && + !adapter->ahw->diag_test && (adapter->flags & QLCNIC_MSIX_ENABLED)) writel(0, sds_ring->crb_intr_mask); else diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c index b8d9750..2b68779 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c @@ -295,7 +295,8 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) | QLCNIC_CAP0_VALIDOFF); cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS); - if (qlcnic_check_multi_tx(adapter)) { + if (qlcnic_check_multi_tx(adapter) && + !adapter->ahw->diag_test) { cap |= QLCNIC_CAP0_TX_MULTI; } else { temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler); @@ -338,7 +339,8 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring)); prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr); prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc); - if (qlcnic_check_multi_tx(adapter)) + if (qlcnic_check_multi_tx(adapter) && + !adapter->ahw->diag_test) prq_sds[i].msi_index = cpu_to_le16(ahw->intr_tbl[i].id); else prq_sds[i].msi_index = cpu_to_le16(i); @@ -374,7 +376,7 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) { sds_ring = &recv_ctx->sds_rings[i]; reg = le32_to_cpu(prsp_sds[i].host_consumer_crb); - if (qlcnic_check_multi_tx(adapter)) + if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) reg2 = ahw->intr_tbl[i].src; else reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb); @@ -464,13 +466,13 @@ int 
qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter, temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN | QLCNIC_CAP0_LSO); - if (qlcnic_check_multi_tx(adapter)) + if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) temp |= QLCNIC_CAP0_TX_MULTI; prq->capabilities[0] = cpu_to_le32(temp); if (qlcnic_check_multi_tx(adapter) && - (adapter->max_drv_tx_rings > 1)) { + !adapter->ahw->diag_test) { temp_nsds_rings = adapter->max_sds_rings; index = temp_nsds_rings + ring; msix_id = ahw->intr_tbl[index].id; @@ -506,6 +508,7 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter, tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp; tx_ring->ctx_id = le16_to_cpu(prsp->context_id); if (qlcnic_check_multi_tx(adapter) && + !adapter->ahw->diag_test && (adapter->flags & QLCNIC_MSIX_ENABLED)) { index = adapter->max_sds_rings + ring; intr_mask = ahw->intr_tbl[index].src; @@ -681,13 +684,14 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev) err_out: if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) && - qlcnic_check_multi_tx(dev)) - qlcnic_82xx_config_intrpt(dev, 0); + qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test) + qlcnic_82xx_config_intrpt(dev, 0); if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) { if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST) qlcnic_83xx_config_intrpt(dev, 0); } + return err; } @@ -703,8 +707,9 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter) if (qlcnic_82xx_check(adapter) && (adapter->flags & QLCNIC_MSIX_ENABLED) && - qlcnic_check_multi_tx(adapter)) - qlcnic_82xx_config_intrpt(adapter, 0); + qlcnic_check_multi_tx(adapter) && + !adapter->ahw->diag_test) + qlcnic_82xx_config_intrpt(adapter, 0); if (qlcnic_83xx_check(adapter) && (adapter->flags & QLCNIC_MSIX_ENABLED)) { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 583dc2b..7b0c90e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -984,6 +984,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode) int qlcnic_loopback_test(struct net_device *netdev, u8 mode) { struct qlcnic_adapter *adapter = netdev_priv(netdev); + int max_drv_tx_rings = adapter->max_drv_tx_rings; int max_sds_rings = adapter->max_sds_rings; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_hardware_context *ahw = adapter->ahw; @@ -1043,6 +1044,7 @@ int qlcnic_loopback_test(struct net_device *netdev, u8 mode) clear_it: adapter->max_sds_rings = max_sds_rings; + adapter->max_drv_tx_rings = max_drv_tx_rings; clear_bit(__QLCNIC_RESETTING, &adapter->state); return ret; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c index 240b49f..66c26cf 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c @@ -242,7 +242,8 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter) sds_ring->adapter = adapter; sds_ring->num_desc = adapter->num_rxd; if (qlcnic_82xx_check(adapter)) { - if (qlcnic_check_multi_tx(adapter)) + if (qlcnic_check_multi_tx(adapter) && + !adapter->ahw->diag_test) sds_ring->tx_ring = &adapter->tx_ring[ring]; else sds_ring->tx_ring = &adapter->tx_ring[0]; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index f4b09f4..89f6dff 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ 
b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -130,7 +130,8 @@ struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *, inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter, struct qlcnic_host_tx_ring *tx_ring) { - if (qlcnic_check_multi_tx(adapter)) + if (qlcnic_check_multi_tx(adapter) && + !adapter->ahw->diag_test) writel(0x0, tx_ring->crb_intr_mask); } @@ -138,7 +139,8 @@ inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter, static inline void qlcnic_disable_tx_int(struct qlcnic_adapter *adapter, struct qlcnic_host_tx_ring *tx_ring) { - if (qlcnic_check_multi_tx(adapter)) + if (qlcnic_check_multi_tx(adapter) && + !adapter->ahw->diag_test) writel(1, tx_ring->crb_intr_mask); } @@ -1466,6 +1468,7 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter, for (ring = 0; ring < adapter->max_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; if (qlcnic_check_multi_tx(adapter) && + !adapter->ahw->diag_test && (adapter->max_drv_tx_rings > 1)) { netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT * 2); @@ -1487,7 +1490,7 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter, return -ENOMEM; } - if (qlcnic_check_multi_tx(adapter)) { + if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) { for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; netif_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll, @@ -1512,7 +1515,7 @@ void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter) qlcnic_free_sds_rings(adapter->recv_ctx); - if (qlcnic_check_multi_tx(adapter)) { + if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) { for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; netif_napi_del(&tx_ring->napi); @@ -1540,6 +1543,7 @@ void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter) if (qlcnic_check_multi_tx(adapter) && (adapter->flags & QLCNIC_MSIX_ENABLED) && + !adapter->ahw->diag_test && (adapter->max_drv_tx_rings > 1)) { for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; @@ -1567,6 +1571,7 @@ void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter) } if ((adapter->flags & QLCNIC_MSIX_ENABLED) && + !adapter->ahw->diag_test && qlcnic_check_multi_tx(adapter)) { for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 94b3e82..25a858f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -581,6 +581,7 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix) } else { adapter->ahw->num_msix = num_msix; if (qlcnic_check_multi_tx(adapter) && + !adapter->ahw->diag_test && (adapter->max_drv_tx_rings > 1)) max_sds_rings = num_msix - max_tx_rings; else @@ -697,6 +698,7 @@ int qlcnic_82xx_mq_intrpt(struct qlcnic_adapter *adapter, int op_type) int err, i; if (qlcnic_check_multi_tx(adapter) && + !ahw->diag_test && (adapter->flags & QLCNIC_MSIX_ENABLED)) { ahw->intr_tbl = vzalloc(ahw->num_msix * sizeof(struct qlcnic_intrpt_config)); @@ -1752,6 +1754,7 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_host_sds_ring *sds_ring; + int max_tx_rings = adapter->max_drv_tx_rings; int ring; clear_bit(__QLCNIC_DEV_UP, &adapter->state); @@ -1768,6 +1771,7 @@ void 
qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings) adapter->ahw->diag_test = 0; adapter->max_sds_rings = max_sds_rings; + adapter->max_drv_tx_rings = max_tx_rings; if (qlcnic_attach(adapter)) goto out; @@ -1836,6 +1840,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test) adapter->max_sds_rings = 1; adapter->ahw->diag_test = test; adapter->ahw->linkup = 0; + adapter->max_drv_tx_rings = 1; ret = qlcnic_attach(adapter); if (ret) { -- cgit v0.10.2 From 07a251c80cd11f5c1dec5be900c775d4264bfbe0 Mon Sep 17 00:00:00 2001 From: Shahed Shaikh Date: Wed, 21 Aug 2013 11:24:13 -0400 Subject: qlcnic: Implement ndo_get_phys_port_id for 82xx adapter Each function driver instance uses the MAC address of the lowest function belonging to that physical port as a unique port identifier. This port identifier is read and cached in driver during probe and provided to user space through ndo_get_phys_port_id() Signed-off-by: Shahed Shaikh Signed-off-by: Himanshu Madhani Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 45a5a2a..9d8f80e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -471,6 +471,7 @@ struct qlcnic_hardware_context { u32 mbox_reg[4]; struct qlcnic_mailbox *mailbox; u8 extend_lb_time; + u8 phys_port_id[ETH_ALEN]; }; struct qlcnic_adapter_stats { @@ -926,6 +927,8 @@ struct qlcnic_ipaddr { #define QLCNIC_FW_LRO_MSS_CAP 0x8000 #define QLCNIC_TX_INTR_SHARED 0x10000 #define QLCNIC_APP_CHANGED_FLAGS 0x20000 +#define QLCNIC_HAS_PHYS_PORT_ID 0x40000 + #define QLCNIC_IS_MSI_FAMILY(adapter) \ ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED)) #define QLCNIC_IS_TSO_CAPABLE(adapter) \ @@ -1510,6 +1513,7 @@ void __qlcnic_set_multi(struct net_device *, u16); int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *, u16); int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *); void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter); +int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *); int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu); int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *, u32); @@ -1679,7 +1683,7 @@ struct qlcnic_hardware_ops { int (*read_reg) (struct qlcnic_adapter *, ulong, int *); int (*write_reg) (struct qlcnic_adapter *, ulong, u32); void (*get_ocm_win) (struct qlcnic_hardware_context *); - int (*get_mac_address) (struct qlcnic_adapter *, u8 *); + int (*get_mac_address) (struct qlcnic_adapter *, u8 *, u8); int (*setup_intr) (struct qlcnic_adapter *, u8, int); int (*alloc_mbx_args)(struct qlcnic_cmd_args *, struct qlcnic_adapter *, u32); @@ -1713,6 +1717,7 @@ struct qlcnic_hardware_ops { int (*get_board_info) (struct qlcnic_adapter *); void (*set_mac_filter_count) (struct qlcnic_adapter *); void (*free_mac_list) (struct qlcnic_adapter *); + int (*read_phys_port_id) (struct qlcnic_adapter *); }; extern struct qlcnic_nic_template qlcnic_vf_ops; @@ -1741,9 +1746,9 @@ static inline int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, } static inline int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, - u8 *mac) + u8 *mac, u8 function) { - return adapter->ahw->hw_ops->get_mac_address(adapter, mac); + return adapter->ahw->hw_ops->get_mac_address(adapter, mac, function); } static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter, @@ -1940,6 +1945,12 @@ static inline void qlcnic_set_mac_filter_count(struct qlcnic_adapter *adapter) 
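[Editorial note] As the commit message says, the identifier cached by qlcnic_82xx_read_phys_port_id() is handed to userspace through ndo_get_phys_port_id(); on kernels that expose the matching sysfs attribute it can be read back as a hex string. A minimal userspace sketch — the sysfs path and the "eth0" name are assumptions for illustration, not part of this patch:

	/* Read the physical port id exposed via ndo_get_phys_port_id().
	 * Assumes /sys/class/net/<ifname>/phys_port_id exists; "eth0" is an
	 * example interface name only.
	 */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/class/net/eth0/phys_port_id", "r");
		char id[64];

		if (!f)
			return 1;
		if (fgets(id, sizeof(id), f))
			printf("phys_port_id: %s", id);
		fclose(f);
		return 0;
	}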
adapter->ahw->hw_ops->set_mac_filter_count(adapter); } +static inline void qlcnic_read_phys_port_id(struct qlcnic_adapter *adapter) +{ + if (adapter->ahw->hw_ops->read_phys_port_id) + adapter->ahw->hw_ops->read_phys_port_id(adapter); +} + static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter, u32 key) { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index ea44828..6c059f9 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -2037,12 +2037,14 @@ void qlcnic_83xx_configure_mac(struct qlcnic_adapter *adapter, u8 *mac, cmd->req.arg[1] = type; } -int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac) +int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac, + u8 function) { int err, i; struct qlcnic_cmd_args cmd; u32 mac_low, mac_high; + function = 0; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS); if (err) return err; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index bfd2741..0fc5616 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -564,7 +564,7 @@ int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *, int); void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *); int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *, bool); int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8); -int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *); +int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *, u8); void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8, struct qlcnic_cmd_args *); int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *, diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c index 2b68779..d4f0e95 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c @@ -821,7 +821,8 @@ int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *adapter, u8 op_type) return err; } -int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac) +int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac, + u8 function) { int err, i; struct qlcnic_cmd_args cmd; @@ -831,7 +832,7 @@ int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac) if (err) return err; - cmd.req.arg[1] = adapter->ahw->pci_func | BIT_8; + cmd.req.arg[1] = function | BIT_8; err = qlcnic_issue_cmd(adapter, &cmd); if (err == QLCNIC_RCODE_SUCCESS) { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index 9e49c15..f8adc7b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -740,6 +740,22 @@ int qlcnic_82xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode) return 0; } +int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *adapter) +{ + u8 mac[ETH_ALEN]; + int ret; + + ret = qlcnic_get_mac_address(adapter, mac, + adapter->ahw->physical_port); + if (ret) + return ret; + + memcpy(adapter->ahw->phys_port_id, mac, ETH_ALEN); + adapter->flags |= QLCNIC_HAS_PHYS_PORT_ID; + + return 0; +} + /* * Send the interrupt coalescing parameter set by ethtool to the card. 
*/ diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h index cf35220..786366c 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h @@ -186,7 +186,7 @@ void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *); void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *, struct qlcnic_host_tx_ring *); int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8); -int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *, u8*); +int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *, u8*, u8); int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8); int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *); int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 25a858f..8321d1a 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -284,12 +284,15 @@ void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx) int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter) { - u8 mac_addr[ETH_ALEN]; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; + u8 mac_addr[ETH_ALEN]; + int ret; - if (qlcnic_get_mac_address(adapter, mac_addr) != 0) - return -EIO; + ret = qlcnic_get_mac_address(adapter, mac_addr, + adapter->ahw->pci_func); + if (ret) + return ret; memcpy(netdev->dev_addr, mac_addr, ETH_ALEN); memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len); @@ -431,6 +434,21 @@ static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter) cancel_delayed_work_sync(&adapter->fw_work); } +static int qlcnic_get_phys_port_id(struct net_device *netdev, + struct netdev_phys_port_id *ppid) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_hardware_context *ahw = adapter->ahw; + + if (!(adapter->flags & QLCNIC_HAS_PHYS_PORT_ID)) + return -EOPNOTSUPP; + + ppid->id_len = sizeof(ahw->phys_port_id); + memcpy(ppid->id, ahw->phys_port_id, ppid->id_len); + + return 0; +} + static const struct net_device_ops qlcnic_netdev_ops = { .ndo_open = qlcnic_open, .ndo_stop = qlcnic_close, @@ -448,6 +466,7 @@ static const struct net_device_ops qlcnic_netdev_ops = { .ndo_fdb_add = qlcnic_fdb_add, .ndo_fdb_del = qlcnic_fdb_del, .ndo_fdb_dump = qlcnic_fdb_dump, + .ndo_get_phys_port_id = qlcnic_get_phys_port_id, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = qlcnic_poll_controller, #endif @@ -520,6 +539,7 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = { .get_board_info = qlcnic_82xx_get_board_info, .set_mac_filter_count = qlcnic_82xx_set_mac_filter_count, .free_mac_list = qlcnic_82xx_free_mac_list, + .read_phys_port_id = qlcnic_82xx_read_phys_port_id, }; static void qlcnic_get_multiq_capability(struct qlcnic_adapter *adapter) @@ -2245,6 +2265,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (qlcnic_read_mac_addr(adapter)) dev_warn(&pdev->dev, "failed to read mac addr\n"); + qlcnic_read_phys_port_id(adapter); + if (adapter->portnum == 0) { qlcnic_get_board_name(adapter, board_name); -- cgit v0.10.2 From f6ae5acff5031ca313e29c5006c16e3020d74d23 Mon Sep 17 00:00:00 2001 From: Himanshu Madhani Date: Wed, 21 Aug 2013 11:24:14 -0400 Subject: qlcnic: Update version to 5.3.48 Signed-off-by: Himanshu Madhani Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 9d8f80e..3f03856 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -37,8 +37,8 @@ #define _QLCNIC_LINUX_MAJOR 5 #define _QLCNIC_LINUX_MINOR 3 -#define _QLCNIC_LINUX_SUBVERSION 47 -#define QLCNIC_LINUX_VERSIONID "5.3.47" +#define _QLCNIC_LINUX_SUBVERSION 48 +#define QLCNIC_LINUX_VERSIONID "5.3.48" #define QLCNIC_DRV_IDC_VER 0x01 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) -- cgit v0.10.2 From 86094f7f38ff711f3db8497fcb4d2e109100f497 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Wed, 21 Aug 2013 19:51:04 +0100 Subject: sfc: Move and rename Falcon/Siena common NIC operations Add efx_nic_type operations for the many efx_nic functions that need to be implemented different on EF10. For now, change most of the existing efx_nic_*() functions into inline wrappers. As a later step, we may be able to improve branch prediction for operations used on the fast path by copying the pointers into each queue/channel structure. Move the Falcon/Siena implementations to new file farch.c and rename the functions and static data to use a prefix of 'efx_farch_'. Move efx_may_push_tx_desc() to nic.h, as the EF10 TX code will also use it. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile index 5b31d8a..ef7410f 100644 --- a/drivers/net/ethernet/sfc/Makefile +++ b/drivers/net/ethernet/sfc/Makefile @@ -1,4 +1,5 @@ -sfc-y += efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \ +sfc-y += efx.o nic.o farch.o falcon.o siena.o tx.o rx.o \ + filter.o \ selftest.o ethtool.o qt202x_phy.o mdio_10g.o \ tenxpress.o txc43128_phy.o falcon_boards.o \ mcdi.o mcdi_port.o mcdi_mon.o ptp.o diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 9c6555c..872b9f5 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1386,7 +1386,7 @@ static void efx_enable_interrupts(struct efx_nic *efx) efx->eeh_disabled_legacy_irq = false; } - efx_nic_enable_interrupts(efx); + efx->type->irq_enable_master(efx); efx_for_each_channel(channel, efx) { if (channel->type->keep_eventq) @@ -1407,7 +1407,7 @@ static void efx_disable_interrupts(struct efx_nic *efx) efx_fini_eventq(channel); } - efx_nic_disable_interrupts(efx); + efx->type->irq_disable_non_ev(efx); } static void efx_remove_interrupts(struct efx_nic *efx) diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index c8efcb0..fe83c26 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -346,7 +346,7 @@ static inline void falcon_irq_ack_a1(struct efx_nic *efx) } -irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) +static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) { struct efx_nic *efx = dev_id; efx_oword_t *int_ker = efx->irq_status.addr; @@ -373,7 +373,7 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) /* Check to see if we have a serious error condition */ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); if (unlikely(syserr)) - return efx_nic_fatal_interrupt(efx); + return efx_farch_fatal_interrupt(efx); /* Determine interrupting queues, clear interrupt status * register and acknowledge the device interrupt. 
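[Editorial note] The restructuring described in the commit message above — per-architecture behaviour moved behind efx_nic_type hooks while the existing efx_nic_*() entry points become thin inline wrappers — is the familiar method-table pattern. A trimmed sketch with invented example names, not the full sfc definitions:

	/* Simplified illustration of the efx_nic_type indirection; only two
	 * hooks shown, struct and function names are examples.
	 */
	struct example_nic;

	struct example_nic_type {
		void (*irq_enable_master)(struct example_nic *efx);
		void (*irq_disable_non_ev)(struct example_nic *efx);
	};

	struct example_nic {
		const struct example_nic_type *type;
	};

	/* Callers keep a plain function; it dispatches per NIC generation,
	 * as efx_enable_interrupts() now does via efx->type->irq_enable_master().
	 */
	static inline void example_irq_enable_master(struct example_nic *efx)
	{
		efx->type->irq_enable_master(efx);
	}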
@@ -1558,7 +1558,7 @@ static int falcon_test_nvram(struct efx_nic *efx) return falcon_read_nvram(efx, NULL); } -static const struct efx_nic_register_test falcon_b0_register_tests[] = { +static const struct efx_farch_register_test falcon_b0_register_tests[] = { { FR_AZ_ADR_REGION, EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) }, { FR_AZ_RX_CFG, @@ -1618,8 +1618,8 @@ falcon_b0_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) efx_reset_down(efx, reset_method); tests->registers = - efx_nic_test_registers(efx, falcon_b0_register_tests, - ARRAY_SIZE(falcon_b0_register_tests)) + efx_farch_test_registers(efx, falcon_b0_register_tests, + ARRAY_SIZE(falcon_b0_register_tests)) ? -1 : 1; rc = falcon_reset_hw(efx, reset_method); @@ -1984,7 +1984,7 @@ static int falcon_probe_nic(struct efx_nic *efx) rc = -ENODEV; - if (efx_nic_fpga_ver(efx) != 0) { + if (efx_farch_fpga_ver(efx) != 0) { netif_err(efx, probe, efx->net_dev, "Falcon FPGA not supported\n"); goto fail1; @@ -2218,7 +2218,7 @@ static int falcon_init_nic(struct efx_nic *efx) efx_writeo(efx, &temp, FR_BZ_DP_CTRL); } - efx_nic_init_common(efx); + efx_farch_init_common(efx); return 0; } @@ -2367,6 +2367,28 @@ const struct efx_nic_type falcon_a1_nic_type = { .set_wol = falcon_set_wol, .resume_wol = efx_port_dummy_op_void, .test_nvram = falcon_test_nvram, + .irq_enable_master = efx_farch_irq_enable_master, + .irq_test_generate = efx_farch_irq_test_generate, + .irq_disable_non_ev = efx_farch_irq_disable_master, + .irq_handle_msi = efx_farch_msi_interrupt, + .irq_handle_legacy = falcon_legacy_interrupt_a1, + .tx_probe = efx_farch_tx_probe, + .tx_init = efx_farch_tx_init, + .tx_remove = efx_farch_tx_remove, + .tx_write = efx_farch_tx_write, + .rx_push_indir_table = efx_farch_rx_push_indir_table, + .rx_probe = efx_farch_rx_probe, + .rx_init = efx_farch_rx_init, + .rx_remove = efx_farch_rx_remove, + .rx_write = efx_farch_rx_write, + .rx_defer_refill = efx_farch_rx_defer_refill, + .ev_probe = efx_farch_ev_probe, + .ev_init = efx_farch_ev_init, + .ev_fini = efx_farch_ev_fini, + .ev_remove = efx_farch_ev_remove, + .ev_process = efx_farch_ev_process, + .ev_read_ack = efx_farch_ev_read_ack, + .ev_test_generate = efx_farch_ev_test_generate, .revision = EFX_REV_FALCON_A1, .mem_map_size = 0x20000, @@ -2414,6 +2436,28 @@ const struct efx_nic_type falcon_b0_nic_type = { .resume_wol = efx_port_dummy_op_void, .test_chip = falcon_b0_test_chip, .test_nvram = falcon_test_nvram, + .irq_enable_master = efx_farch_irq_enable_master, + .irq_test_generate = efx_farch_irq_test_generate, + .irq_disable_non_ev = efx_farch_irq_disable_master, + .irq_handle_msi = efx_farch_msi_interrupt, + .irq_handle_legacy = efx_farch_legacy_interrupt, + .tx_probe = efx_farch_tx_probe, + .tx_init = efx_farch_tx_init, + .tx_remove = efx_farch_tx_remove, + .tx_write = efx_farch_tx_write, + .rx_push_indir_table = efx_farch_rx_push_indir_table, + .rx_probe = efx_farch_rx_probe, + .rx_init = efx_farch_rx_init, + .rx_remove = efx_farch_rx_remove, + .rx_write = efx_farch_rx_write, + .rx_defer_refill = efx_farch_rx_defer_refill, + .ev_probe = efx_farch_ev_probe, + .ev_init = efx_farch_ev_init, + .ev_fini = efx_farch_ev_fini, + .ev_remove = efx_farch_ev_remove, + .ev_process = efx_farch_ev_process, + .ev_read_ack = efx_farch_ev_read_ack, + .ev_test_generate = efx_farch_ev_test_generate, .revision = EFX_REV_FALCON_B0, /* Map everything up to and including the RSS indirection diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c new file mode 100644 
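[Editorial note] One detail worth flagging before the new file: the driver-generated event "magic" values defined near the top of farch.c pack an event code and a queue/channel number into one word — code in the upper bits, data in the low 8 bits — and the code is recovered with a right shift. A tiny worked example of that packing, written as standalone checks rather than the in-tree macros; the names are examples, only the 0x000104 code value mirrors the file:

	/* Worked example of the _EFX_CHANNEL_MAGIC() encoding used below:
	 * magic = (code << 8) | data, code recovered with magic >> 8.
	 */
	#define EXAMPLE_MAGIC_TX_DRAIN		0x000104
	#define EXAMPLE_MAGIC(code, data)	(((code) << 8) | (data))
	#define EXAMPLE_MAGIC_CODE(magic)	((magic) >> 8)

	_Static_assert(EXAMPLE_MAGIC(EXAMPLE_MAGIC_TX_DRAIN, 5) == 0x00010405,
		       "TX drain marker for queue 5");
	_Static_assert(EXAMPLE_MAGIC_CODE(0x00010405) == EXAMPLE_MAGIC_TX_DRAIN,
		       "code recovered from the packed value");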
index 0000000..c3d07c5 --- /dev/null +++ b/drivers/net/ethernet/sfc/farch.c @@ -0,0 +1,1781 @@ +/**************************************************************************** + * Driver for Solarflare Solarstorm network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2011 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include +#include +#include +#include +#include +#include +#include "net_driver.h" +#include "bitfield.h" +#include "efx.h" +#include "nic.h" +#include "farch_regs.h" +#include "io.h" +#include "workarounds.h" + +/* Falcon-architecture (SFC4000 and SFC9000-family) support */ + +/************************************************************************** + * + * Configurable values + * + ************************************************************************** + */ + +/* This is set to 16 for a good reason. In summary, if larger than + * 16, the descriptor cache holds more than a default socket + * buffer's worth of packets (for UDP we can only have at most one + * socket buffer's worth outstanding). This combined with the fact + * that we only get 1 TX event per descriptor cache means the NIC + * goes idle. + */ +#define TX_DC_ENTRIES 16 +#define TX_DC_ENTRIES_ORDER 1 + +#define RX_DC_ENTRIES 64 +#define RX_DC_ENTRIES_ORDER 3 + +/* If EFX_MAX_INT_ERRORS internal errors occur within + * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and + * disable it. + */ +#define EFX_INT_ERROR_EXPIRE 3600 +#define EFX_MAX_INT_ERRORS 5 + +/* Depth of RX flush request fifo */ +#define EFX_RX_FLUSH_COUNT 4 + +/* Driver generated events */ +#define _EFX_CHANNEL_MAGIC_TEST 0x000101 +#define _EFX_CHANNEL_MAGIC_FILL 0x000102 +#define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103 +#define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104 + +#define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data)) +#define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8) + +#define EFX_CHANNEL_MAGIC_TEST(_channel) \ + _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel) +#define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \ + _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \ + efx_rx_queue_index(_rx_queue)) +#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \ + _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \ + efx_rx_queue_index(_rx_queue)) +#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \ + _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \ + (_tx_queue)->queue) + +static void efx_farch_magic_event(struct efx_channel *channel, u32 magic); + +/************************************************************************** + * + * Hardware access + * + **************************************************************************/ + +static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value, + unsigned int index) +{ + efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base, + value, index); +} + +static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b, + const efx_oword_t *mask) +{ + return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) || + ((a->u64[1] ^ b->u64[1]) & mask->u64[1]); +} + +int efx_farch_test_registers(struct efx_nic *efx, + const struct efx_farch_register_test *regs, + size_t n_regs) +{ + unsigned address = 0, i, j; + efx_oword_t mask, imask, original, reg, buf; + + for (i = 0; i < n_regs; ++i) { + address = regs[i].address; + 
mask = imask = regs[i].mask; + EFX_INVERT_OWORD(imask); + + efx_reado(efx, &original, address); + + /* bit sweep on and off */ + for (j = 0; j < 128; j++) { + if (!EFX_EXTRACT_OWORD32(mask, j, j)) + continue; + + /* Test this testable bit can be set in isolation */ + EFX_AND_OWORD(reg, original, mask); + EFX_SET_OWORD32(reg, j, j, 1); + + efx_writeo(efx, ®, address); + efx_reado(efx, &buf, address); + + if (efx_masked_compare_oword(®, &buf, &mask)) + goto fail; + + /* Test this testable bit can be cleared in isolation */ + EFX_OR_OWORD(reg, original, mask); + EFX_SET_OWORD32(reg, j, j, 0); + + efx_writeo(efx, ®, address); + efx_reado(efx, &buf, address); + + if (efx_masked_compare_oword(®, &buf, &mask)) + goto fail; + } + + efx_writeo(efx, &original, address); + } + + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, + "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT + " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg), + EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask)); + return -EIO; +} + +/************************************************************************** + * + * Special buffer handling + * Special buffers are used for event queues and the TX and RX + * descriptor rings. + * + *************************************************************************/ + +/* + * Initialise a special buffer + * + * This will define a buffer (previously allocated via + * efx_alloc_special_buffer()) in the buffer table, allowing + * it to be used for event queues, descriptor rings etc. + */ +static void +efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) +{ + efx_qword_t buf_desc; + unsigned int index; + dma_addr_t dma_addr; + int i; + + EFX_BUG_ON_PARANOID(!buffer->buf.addr); + + /* Write buffer descriptors to NIC */ + for (i = 0; i < buffer->entries; i++) { + index = buffer->index + i; + dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE); + netif_dbg(efx, probe, efx->net_dev, + "mapping special buffer %d at %llx\n", + index, (unsigned long long)dma_addr); + EFX_POPULATE_QWORD_3(buf_desc, + FRF_AZ_BUF_ADR_REGION, 0, + FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12, + FRF_AZ_BUF_OWNER_ID_FBUF, 0); + efx_write_buf_tbl(efx, &buf_desc, index); + } +} + +/* Unmaps a buffer and clears the buffer table entries */ +static void +efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) +{ + efx_oword_t buf_tbl_upd; + unsigned int start = buffer->index; + unsigned int end = (buffer->index + buffer->entries - 1); + + if (!buffer->entries) + return; + + netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n", + buffer->index, buffer->index + buffer->entries - 1); + + EFX_POPULATE_OWORD_4(buf_tbl_upd, + FRF_AZ_BUF_UPD_CMD, 0, + FRF_AZ_BUF_CLR_CMD, 1, + FRF_AZ_BUF_CLR_END_ID, end, + FRF_AZ_BUF_CLR_START_ID, start); + efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD); +} + +/* + * Allocate a new special buffer + * + * This allocates memory for a new buffer, clears it and allocates a + * new buffer ID range. It does not write into the buffer table. + * + * This call will allocate 4KB buffers, since 8KB buffers can't be + * used for event queues and descriptor rings. 
+ */ +static int efx_alloc_special_buffer(struct efx_nic *efx, + struct efx_special_buffer *buffer, + unsigned int len) +{ + len = ALIGN(len, EFX_BUF_SIZE); + + if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL)) + return -ENOMEM; + buffer->entries = len / EFX_BUF_SIZE; + BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1)); + + /* Select new buffer ID */ + buffer->index = efx->next_buffer_table; + efx->next_buffer_table += buffer->entries; +#ifdef CONFIG_SFC_SRIOV + BUG_ON(efx_sriov_enabled(efx) && + efx->vf_buftbl_base < efx->next_buffer_table); +#endif + + netif_dbg(efx, probe, efx->net_dev, + "allocating special buffers %d-%d at %llx+%x " + "(virt %p phys %llx)\n", buffer->index, + buffer->index + buffer->entries - 1, + (u64)buffer->buf.dma_addr, len, + buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr)); + + return 0; +} + +static void +efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) +{ + if (!buffer->buf.addr) + return; + + netif_dbg(efx, hw, efx->net_dev, + "deallocating special buffers %d-%d at %llx+%x " + "(virt %p phys %llx)\n", buffer->index, + buffer->index + buffer->entries - 1, + (u64)buffer->buf.dma_addr, buffer->buf.len, + buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr)); + + efx_nic_free_buffer(efx, &buffer->buf); + buffer->entries = 0; +} + +/************************************************************************** + * + * TX path + * + **************************************************************************/ + +/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ +static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue) +{ + unsigned write_ptr; + efx_dword_t reg; + + write_ptr = tx_queue->write_count & tx_queue->ptr_mask; + EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr); + efx_writed_page(tx_queue->efx, ®, + FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue); +} + +/* Write pointer and first descriptor for TX descriptor ring */ +static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue, + const efx_qword_t *txd) +{ + unsigned write_ptr; + efx_oword_t reg; + + BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0); + BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0); + + write_ptr = tx_queue->write_count & tx_queue->ptr_mask; + EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true, + FRF_AZ_TX_DESC_WPTR, write_ptr); + reg.qword[0] = *txd; + efx_writeo_page(tx_queue->efx, ®, + FR_BZ_TX_DESC_UPD_P0, tx_queue->queue); +} + + +/* For each entry inserted into the software descriptor ring, create a + * descriptor in the hardware TX descriptor ring (in host memory), and + * write a doorbell. 
+ */ +void efx_farch_tx_write(struct efx_tx_queue *tx_queue) +{ + + struct efx_tx_buffer *buffer; + efx_qword_t *txd; + unsigned write_ptr; + unsigned old_write_count = tx_queue->write_count; + + BUG_ON(tx_queue->write_count == tx_queue->insert_count); + + do { + write_ptr = tx_queue->write_count & tx_queue->ptr_mask; + buffer = &tx_queue->buffer[write_ptr]; + txd = efx_tx_desc(tx_queue, write_ptr); + ++tx_queue->write_count; + + /* Create TX descriptor ring entry */ + BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); + EFX_POPULATE_QWORD_4(*txd, + FSF_AZ_TX_KER_CONT, + buffer->flags & EFX_TX_BUF_CONT, + FSF_AZ_TX_KER_BYTE_COUNT, buffer->len, + FSF_AZ_TX_KER_BUF_REGION, 0, + FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr); + } while (tx_queue->write_count != tx_queue->insert_count); + + wmb(); /* Ensure descriptors are written before they are fetched */ + + if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) { + txd = efx_tx_desc(tx_queue, + old_write_count & tx_queue->ptr_mask); + efx_farch_push_tx_desc(tx_queue, txd); + ++tx_queue->pushes; + } else { + efx_farch_notify_tx_desc(tx_queue); + } +} + +/* Allocate hardware resources for a TX queue */ +int efx_farch_tx_probe(struct efx_tx_queue *tx_queue) +{ + struct efx_nic *efx = tx_queue->efx; + unsigned entries; + + entries = tx_queue->ptr_mask + 1; + return efx_alloc_special_buffer(efx, &tx_queue->txd, + entries * sizeof(efx_qword_t)); +} + +void efx_farch_tx_init(struct efx_tx_queue *tx_queue) +{ + struct efx_nic *efx = tx_queue->efx; + efx_oword_t reg; + + /* Pin TX descriptor ring */ + efx_init_special_buffer(efx, &tx_queue->txd); + + /* Push TX descriptor ring to card */ + EFX_POPULATE_OWORD_10(reg, + FRF_AZ_TX_DESCQ_EN, 1, + FRF_AZ_TX_ISCSI_DDIG_EN, 0, + FRF_AZ_TX_ISCSI_HDIG_EN, 0, + FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index, + FRF_AZ_TX_DESCQ_EVQ_ID, + tx_queue->channel->channel, + FRF_AZ_TX_DESCQ_OWNER_ID, 0, + FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue, + FRF_AZ_TX_DESCQ_SIZE, + __ffs(tx_queue->txd.entries), + FRF_AZ_TX_DESCQ_TYPE, 0, + FRF_BZ_TX_NON_IP_DROP_DIS, 1); + + if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { + int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; + EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum); + EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS, + !csum); + } + + efx_writeo_table(efx, ®, efx->type->txd_ptr_tbl_base, + tx_queue->queue); + + if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) { + /* Only 128 bits in this register */ + BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128); + + efx_reado(efx, ®, FR_AA_TX_CHKSM_CFG); + if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) + __clear_bit_le(tx_queue->queue, ®); + else + __set_bit_le(tx_queue->queue, ®); + efx_writeo(efx, ®, FR_AA_TX_CHKSM_CFG); + } + + if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { + EFX_POPULATE_OWORD_1(reg, + FRF_BZ_TX_PACE, + (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ? 
+ FFE_BZ_TX_PACE_OFF : + FFE_BZ_TX_PACE_RESERVED); + efx_writeo_table(efx, ®, FR_BZ_TX_PACE_TBL, + tx_queue->queue); + } +} + +static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue) +{ + struct efx_nic *efx = tx_queue->efx; + efx_oword_t tx_flush_descq; + + WARN_ON(atomic_read(&tx_queue->flush_outstanding)); + atomic_set(&tx_queue->flush_outstanding, 1); + + EFX_POPULATE_OWORD_2(tx_flush_descq, + FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, + FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue); + efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ); +} + +void efx_farch_tx_fini(struct efx_tx_queue *tx_queue) +{ + struct efx_nic *efx = tx_queue->efx; + efx_oword_t tx_desc_ptr; + + /* Remove TX descriptor ring from card */ + EFX_ZERO_OWORD(tx_desc_ptr); + efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, + tx_queue->queue); + + /* Unpin TX descriptor ring */ + efx_fini_special_buffer(efx, &tx_queue->txd); +} + +/* Free buffers backing TX queue */ +void efx_farch_tx_remove(struct efx_tx_queue *tx_queue) +{ + efx_free_special_buffer(tx_queue->efx, &tx_queue->txd); +} + +/************************************************************************** + * + * RX path + * + **************************************************************************/ + +/* This creates an entry in the RX descriptor queue */ +static inline void +efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index) +{ + struct efx_rx_buffer *rx_buf; + efx_qword_t *rxd; + + rxd = efx_rx_desc(rx_queue, index); + rx_buf = efx_rx_buffer(rx_queue, index); + EFX_POPULATE_QWORD_3(*rxd, + FSF_AZ_RX_KER_BUF_SIZE, + rx_buf->len - + rx_queue->efx->type->rx_buffer_padding, + FSF_AZ_RX_KER_BUF_REGION, 0, + FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); +} + +/* This writes to the RX_DESC_WPTR register for the specified receive + * descriptor ring. + */ +void efx_farch_rx_write(struct efx_rx_queue *rx_queue) +{ + struct efx_nic *efx = rx_queue->efx; + efx_dword_t reg; + unsigned write_ptr; + + while (rx_queue->notified_count != rx_queue->added_count) { + efx_farch_build_rx_desc( + rx_queue, + rx_queue->notified_count & rx_queue->ptr_mask); + ++rx_queue->notified_count; + } + + wmb(); + write_ptr = rx_queue->added_count & rx_queue->ptr_mask; + EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr); + efx_writed_page(efx, ®, FR_AZ_RX_DESC_UPD_DWORD_P0, + efx_rx_queue_index(rx_queue)); +} + +int efx_farch_rx_probe(struct efx_rx_queue *rx_queue) +{ + struct efx_nic *efx = rx_queue->efx; + unsigned entries; + + entries = rx_queue->ptr_mask + 1; + return efx_alloc_special_buffer(efx, &rx_queue->rxd, + entries * sizeof(efx_qword_t)); +} + +void efx_farch_rx_init(struct efx_rx_queue *rx_queue) +{ + efx_oword_t rx_desc_ptr; + struct efx_nic *efx = rx_queue->efx; + bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0; + bool iscsi_digest_en = is_b0; + bool jumbo_en; + + /* For kernel-mode queues in Falcon A1, the JUMBO flag enables + * DMA to continue after a PCIe page boundary (and scattering + * is not possible). In Falcon B0 and Siena, it enables + * scatter. 
+ */ + jumbo_en = !is_b0 || efx->rx_scatter; + + netif_dbg(efx, hw, efx->net_dev, + "RX queue %d ring in special buffers %d-%d\n", + efx_rx_queue_index(rx_queue), rx_queue->rxd.index, + rx_queue->rxd.index + rx_queue->rxd.entries - 1); + + rx_queue->scatter_n = 0; + + /* Pin RX descriptor ring */ + efx_init_special_buffer(efx, &rx_queue->rxd); + + /* Push RX descriptor ring to card */ + EFX_POPULATE_OWORD_10(rx_desc_ptr, + FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en, + FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en, + FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, + FRF_AZ_RX_DESCQ_EVQ_ID, + efx_rx_queue_channel(rx_queue)->channel, + FRF_AZ_RX_DESCQ_OWNER_ID, 0, + FRF_AZ_RX_DESCQ_LABEL, + efx_rx_queue_index(rx_queue), + FRF_AZ_RX_DESCQ_SIZE, + __ffs(rx_queue->rxd.entries), + FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ , + FRF_AZ_RX_DESCQ_JUMBO, jumbo_en, + FRF_AZ_RX_DESCQ_EN, 1); + efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, + efx_rx_queue_index(rx_queue)); +} + +static void efx_farch_flush_rx_queue(struct efx_rx_queue *rx_queue) +{ + struct efx_nic *efx = rx_queue->efx; + efx_oword_t rx_flush_descq; + + EFX_POPULATE_OWORD_2(rx_flush_descq, + FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, + FRF_AZ_RX_FLUSH_DESCQ, + efx_rx_queue_index(rx_queue)); + efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ); +} + +void efx_farch_rx_fini(struct efx_rx_queue *rx_queue) +{ + efx_oword_t rx_desc_ptr; + struct efx_nic *efx = rx_queue->efx; + + /* Remove RX descriptor ring from card */ + EFX_ZERO_OWORD(rx_desc_ptr); + efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, + efx_rx_queue_index(rx_queue)); + + /* Unpin RX descriptor ring */ + efx_fini_special_buffer(efx, &rx_queue->rxd); +} + +/* Free buffers backing RX queue */ +void efx_farch_rx_remove(struct efx_rx_queue *rx_queue) +{ + efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd); +} + +/************************************************************************** + * + * Flush handling + * + **************************************************************************/ + +/* efx_farch_flush_queues() must be woken up when all flushes are completed, + * or more RX flushes can be kicked off. 
+ */ +static bool efx_farch_flush_wake(struct efx_nic *efx) +{ + /* Ensure that all updates are visible to efx_farch_flush_queues() */ + smp_mb(); + + return (atomic_read(&efx->drain_pending) == 0 || + (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT + && atomic_read(&efx->rxq_flush_pending) > 0)); +} + +static bool efx_check_tx_flush_complete(struct efx_nic *efx) +{ + bool i = true; + efx_oword_t txd_ptr_tbl; + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + + efx_for_each_channel(channel, efx) { + efx_for_each_channel_tx_queue(tx_queue, channel) { + efx_reado_table(efx, &txd_ptr_tbl, + FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue); + if (EFX_OWORD_FIELD(txd_ptr_tbl, + FRF_AZ_TX_DESCQ_FLUSH) || + EFX_OWORD_FIELD(txd_ptr_tbl, + FRF_AZ_TX_DESCQ_EN)) { + netif_dbg(efx, hw, efx->net_dev, + "flush did not complete on TXQ %d\n", + tx_queue->queue); + i = false; + } else if (atomic_cmpxchg(&tx_queue->flush_outstanding, + 1, 0)) { + /* The flush is complete, but we didn't + * receive a flush completion event + */ + netif_dbg(efx, hw, efx->net_dev, + "flush complete on TXQ %d, so drain " + "the queue\n", tx_queue->queue); + /* Don't need to increment drain_pending as it + * has already been incremented for the queues + * which did not drain + */ + efx_farch_magic_event(channel, + EFX_CHANNEL_MAGIC_TX_DRAIN( + tx_queue)); + } + } + } + + return i; +} + +/* Flush all the transmit queues, and continue flushing receive queues until + * they're all flushed. Wait for the DRAIN events to be recieved so that there + * are no more RX and TX events left on any channel. */ +static int efx_farch_do_flush(struct efx_nic *efx) +{ + unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */ + struct efx_channel *channel; + struct efx_rx_queue *rx_queue; + struct efx_tx_queue *tx_queue; + int rc = 0; + + efx_for_each_channel(channel, efx) { + efx_for_each_channel_tx_queue(tx_queue, channel) { + atomic_inc(&efx->drain_pending); + efx_farch_flush_tx_queue(tx_queue); + } + efx_for_each_channel_rx_queue(rx_queue, channel) { + atomic_inc(&efx->drain_pending); + rx_queue->flush_pending = true; + atomic_inc(&efx->rxq_flush_pending); + } + } + + while (timeout && atomic_read(&efx->drain_pending) > 0) { + /* If SRIOV is enabled, then offload receive queue flushing to + * the firmware (though we will still have to poll for + * completion). If that fails, fall back to the old scheme. 
+ */ + if (efx_sriov_enabled(efx)) { + rc = efx_mcdi_flush_rxqs(efx); + if (!rc) + goto wait; + } + + /* The hardware supports four concurrent rx flushes, each of + * which may need to be retried if there is an outstanding + * descriptor fetch + */ + efx_for_each_channel(channel, efx) { + efx_for_each_channel_rx_queue(rx_queue, channel) { + if (atomic_read(&efx->rxq_flush_outstanding) >= + EFX_RX_FLUSH_COUNT) + break; + + if (rx_queue->flush_pending) { + rx_queue->flush_pending = false; + atomic_dec(&efx->rxq_flush_pending); + atomic_inc(&efx->rxq_flush_outstanding); + efx_farch_flush_rx_queue(rx_queue); + } + } + } + + wait: + timeout = wait_event_timeout(efx->flush_wq, + efx_farch_flush_wake(efx), + timeout); + } + + if (atomic_read(&efx->drain_pending) && + !efx_check_tx_flush_complete(efx)) { + netif_err(efx, hw, efx->net_dev, "failed to flush %d queues " + "(rx %d+%d)\n", atomic_read(&efx->drain_pending), + atomic_read(&efx->rxq_flush_outstanding), + atomic_read(&efx->rxq_flush_pending)); + rc = -ETIMEDOUT; + + atomic_set(&efx->drain_pending, 0); + atomic_set(&efx->rxq_flush_pending, 0); + atomic_set(&efx->rxq_flush_outstanding, 0); + } + + return rc; +} + +int efx_farch_fini_dmaq(struct efx_nic *efx) +{ + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + int rc = 0; + + /* Do not attempt to write to the NIC during EEH recovery */ + if (efx->state != STATE_RECOVERY) { + /* Only perform flush if DMA is enabled */ + if (efx->pci_dev->is_busmaster) { + efx->type->prepare_flush(efx); + rc = efx_farch_do_flush(efx); + efx->type->finish_flush(efx); + } + + efx_for_each_channel(channel, efx) { + efx_for_each_channel_rx_queue(rx_queue, channel) + efx_farch_rx_fini(rx_queue); + efx_for_each_channel_tx_queue(tx_queue, channel) + efx_farch_tx_fini(tx_queue); + } + } + + return rc; +} + +/************************************************************************** + * + * Event queue processing + * Event queues are processed by per-channel tasklets. + * + **************************************************************************/ + +/* Update a channel's event queue's read pointer (RPTR) register + * + * This writes the EVQ_RPTR_REG register for the specified channel's + * event queue. + */ +void efx_farch_ev_read_ack(struct efx_channel *channel) +{ + efx_dword_t reg; + struct efx_nic *efx = channel->efx; + + EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, + channel->eventq_read_ptr & channel->eventq_mask); + + /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size + * of 4 bytes, but it is really 16 bytes just like later revisions. 
+ */ + efx_writed(efx, ®, + efx->type->evq_rptr_tbl_base + + FR_BZ_EVQ_RPTR_STEP * channel->channel); +} + +/* Use HW to insert a SW defined event */ +void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq, + efx_qword_t *event) +{ + efx_oword_t drv_ev_reg; + + BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 || + FRF_AZ_DRV_EV_DATA_WIDTH != 64); + drv_ev_reg.u32[0] = event->u32[0]; + drv_ev_reg.u32[1] = event->u32[1]; + drv_ev_reg.u32[2] = 0; + drv_ev_reg.u32[3] = 0; + EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq); + efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV); +} + +static void efx_farch_magic_event(struct efx_channel *channel, u32 magic) +{ + efx_qword_t event; + + EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE, + FSE_AZ_EV_CODE_DRV_GEN_EV, + FSF_AZ_DRV_GEN_EV_MAGIC, magic); + efx_farch_generate_event(channel->efx, channel->channel, &event); +} + +/* Handle a transmit completion event + * + * The NIC batches TX completion events; the message we receive is of + * the form "complete all TX events up to this index". + */ +static int +efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) +{ + unsigned int tx_ev_desc_ptr; + unsigned int tx_ev_q_label; + struct efx_tx_queue *tx_queue; + struct efx_nic *efx = channel->efx; + int tx_packets = 0; + + if (unlikely(ACCESS_ONCE(efx->reset_pending))) + return 0; + + if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { + /* Transmit completion */ + tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR); + tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); + tx_queue = efx_channel_get_tx_queue( + channel, tx_ev_q_label % EFX_TXQ_TYPES); + tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) & + tx_queue->ptr_mask); + efx_xmit_done(tx_queue, tx_ev_desc_ptr); + } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { + /* Rewrite the FIFO write pointer */ + tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); + tx_queue = efx_channel_get_tx_queue( + channel, tx_ev_q_label % EFX_TXQ_TYPES); + + netif_tx_lock(efx->net_dev); + efx_farch_notify_tx_desc(tx_queue); + netif_tx_unlock(efx->net_dev); + } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) && + EFX_WORKAROUND_10727(efx)) { + efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); + } else { + netif_err(efx, tx_err, efx->net_dev, + "channel %d unexpected TX event " + EFX_QWORD_FMT"\n", channel->channel, + EFX_QWORD_VAL(*event)); + } + + return tx_packets; +} + +/* Detect errors included in the rx_evt_pkt_ok bit. 
*/ +static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue, + const efx_qword_t *event) +{ + struct efx_channel *channel = efx_rx_queue_channel(rx_queue); + struct efx_nic *efx = rx_queue->efx; + bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err; + bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; + bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc; + bool rx_ev_other_err, rx_ev_pause_frm; + bool rx_ev_hdr_type, rx_ev_mcast_pkt; + unsigned rx_ev_pkt_type; + + rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); + rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); + rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC); + rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE); + rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event, + FSF_AZ_RX_EV_BUF_OWNER_ID_ERR); + rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event, + FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR); + rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event, + FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR); + rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR); + rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC); + rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ? + 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB)); + rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR); + + /* Every error apart from tobe_disc and pause_frm */ + rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err | + rx_ev_buf_owner_id_err | rx_ev_eth_crc_err | + rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err); + + /* Count errors that are not in MAC stats. Ignore expected + * checksum errors during self-test. */ + if (rx_ev_frm_trunc) + ++channel->n_rx_frm_trunc; + else if (rx_ev_tobe_disc) + ++channel->n_rx_tobe_disc; + else if (!efx->loopback_selftest) { + if (rx_ev_ip_hdr_chksum_err) + ++channel->n_rx_ip_hdr_chksum_err; + else if (rx_ev_tcp_udp_chksum_err) + ++channel->n_rx_tcp_udp_chksum_err; + } + + /* TOBE_DISC is expected on unicast mismatches; don't print out an + * error message. FRM_TRUNC indicates RXDP dropped the packet due + * to a FIFO overflow. + */ +#ifdef DEBUG + if (rx_ev_other_err && net_ratelimit()) { + netif_dbg(efx, rx_err, efx->net_dev, + " RX queue %d unexpected RX event " + EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n", + efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event), + rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", + rx_ev_ip_hdr_chksum_err ? + " [IP_HDR_CHKSUM_ERR]" : "", + rx_ev_tcp_udp_chksum_err ? + " [TCP_UDP_CHKSUM_ERR]" : "", + rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "", + rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", + rx_ev_drib_nib ? " [DRIB_NIB]" : "", + rx_ev_tobe_disc ? " [TOBE_DISC]" : "", + rx_ev_pause_frm ? " [PAUSE]" : ""); + } +#endif + + /* The frame must be discarded if any of these are true. */ + return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib | + rx_ev_tobe_disc | rx_ev_pause_frm) ? + EFX_RX_PKT_DISCARD : 0; +} + +/* Handle receive events that are not in-order. Return true if this + * can be handled as a partial packet discard, false if it's more + * serious. 
+ */ +static bool +efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) +{ + struct efx_channel *channel = efx_rx_queue_channel(rx_queue); + struct efx_nic *efx = rx_queue->efx; + unsigned expected, dropped; + + if (rx_queue->scatter_n && + index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) & + rx_queue->ptr_mask)) { + ++channel->n_rx_nodesc_trunc; + return true; + } + + expected = rx_queue->removed_count & rx_queue->ptr_mask; + dropped = (index - expected) & rx_queue->ptr_mask; + netif_info(efx, rx_err, efx->net_dev, + "dropped %d events (index=%d expected=%d)\n", + dropped, index, expected); + + efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ? + RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); + return false; +} + +/* Handle a packet received event + * + * The NIC gives a "discard" flag if it's a unicast packet with the + * wrong destination address + * Also "is multicast" and "matches multicast filter" flags can be used to + * discard non-matching multicast packets. + */ +static void +efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) +{ + unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; + unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; + unsigned expected_ptr; + bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont; + u16 flags; + struct efx_rx_queue *rx_queue; + struct efx_nic *efx = channel->efx; + + if (unlikely(ACCESS_ONCE(efx->reset_pending))) + return; + + rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); + rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP); + WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != + channel->channel); + + rx_queue = efx_channel_get_rx_queue(channel); + + rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); + expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) & + rx_queue->ptr_mask); + + /* Check for partial drops and other errors */ + if (unlikely(rx_ev_desc_ptr != expected_ptr) || + unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) { + if (rx_ev_desc_ptr != expected_ptr && + !efx_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr)) + return; + + /* Discard all pending fragments */ + if (rx_queue->scatter_n) { + efx_rx_packet( + rx_queue, + rx_queue->removed_count & rx_queue->ptr_mask, + rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD); + rx_queue->removed_count += rx_queue->scatter_n; + rx_queue->scatter_n = 0; + } + + /* Return if there is no new fragment */ + if (rx_ev_desc_ptr != expected_ptr) + return; + + /* Discard new fragment if not SOP */ + if (!rx_ev_sop) { + efx_rx_packet( + rx_queue, + rx_queue->removed_count & rx_queue->ptr_mask, + 1, 0, EFX_RX_PKT_DISCARD); + ++rx_queue->removed_count; + return; + } + } + + ++rx_queue->scatter_n; + if (rx_ev_cont) + return; + + rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); + rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK); + rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); + + if (likely(rx_ev_pkt_ok)) { + /* If packet is marked as OK then we can rely on the + * hardware checksum and classification. 
+ */ + flags = 0; + switch (rx_ev_hdr_type) { + case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP: + flags |= EFX_RX_PKT_TCP; + /* fall through */ + case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP: + flags |= EFX_RX_PKT_CSUMMED; + /* fall through */ + case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER: + case FSE_AZ_RX_EV_HDR_TYPE_OTHER: + break; + } + } else { + flags = efx_farch_handle_rx_not_ok(rx_queue, event); + } + + /* Detect multicast packets that didn't match the filter */ + rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); + if (rx_ev_mcast_pkt) { + unsigned int rx_ev_mcast_hash_match = + EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH); + + if (unlikely(!rx_ev_mcast_hash_match)) { + ++channel->n_rx_mcast_mismatch; + flags |= EFX_RX_PKT_DISCARD; + } + } + + channel->irq_mod_score += 2; + + /* Handle received packet */ + efx_rx_packet(rx_queue, + rx_queue->removed_count & rx_queue->ptr_mask, + rx_queue->scatter_n, rx_ev_byte_cnt, flags); + rx_queue->removed_count += rx_queue->scatter_n; + rx_queue->scatter_n = 0; +} + +/* If this flush done event corresponds to a &struct efx_tx_queue, then + * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue + * of all transmit completions. + */ +static void +efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) +{ + struct efx_tx_queue *tx_queue; + int qid; + + qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); + if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) { + tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES, + qid % EFX_TXQ_TYPES); + if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) { + efx_farch_magic_event(tx_queue->channel, + EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue)); + } + } +} + +/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush + * was succesful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add + * the RX queue back to the mask of RX queues in need of flushing. + */ +static void +efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) +{ + struct efx_channel *channel; + struct efx_rx_queue *rx_queue; + int qid; + bool failed; + + qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); + failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); + if (qid >= efx->n_channels) + return; + channel = efx_get_channel(efx, qid); + if (!efx_channel_has_rx_queue(channel)) + return; + rx_queue = efx_channel_get_rx_queue(channel); + + if (failed) { + netif_info(efx, hw, efx->net_dev, + "RXQ %d flush retry\n", qid); + rx_queue->flush_pending = true; + atomic_inc(&efx->rxq_flush_pending); + } else { + efx_farch_magic_event(efx_rx_queue_channel(rx_queue), + EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)); + } + atomic_dec(&efx->rxq_flush_outstanding); + if (efx_farch_flush_wake(efx)) + wake_up(&efx->flush_wq); +} + +static void +efx_farch_handle_drain_event(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + + WARN_ON(atomic_read(&efx->drain_pending) == 0); + atomic_dec(&efx->drain_pending); + if (efx_farch_flush_wake(efx)) + wake_up(&efx->flush_wq); +} + +static void efx_farch_handle_generated_event(struct efx_channel *channel, + efx_qword_t *event) +{ + struct efx_nic *efx = channel->efx; + struct efx_rx_queue *rx_queue = + efx_channel_has_rx_queue(channel) ? 
+ efx_channel_get_rx_queue(channel) : NULL; + unsigned magic, code; + + magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); + code = _EFX_CHANNEL_MAGIC_CODE(magic); + + if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) { + channel->event_test_cpu = raw_smp_processor_id(); + } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) { + /* The queue must be empty, so we won't receive any rx + * events, so efx_process_channel() won't refill the + * queue. Refill it here */ + efx_fast_push_rx_descriptors(rx_queue); + } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) { + efx_farch_handle_drain_event(channel); + } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) { + efx_farch_handle_drain_event(channel); + } else { + netif_dbg(efx, hw, efx->net_dev, "channel %d received " + "generated event "EFX_QWORD_FMT"\n", + channel->channel, EFX_QWORD_VAL(*event)); + } +} + +static void +efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) +{ + struct efx_nic *efx = channel->efx; + unsigned int ev_sub_code; + unsigned int ev_sub_data; + + ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE); + ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); + + switch (ev_sub_code) { + case FSE_AZ_TX_DESCQ_FLS_DONE_EV: + netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", + channel->channel, ev_sub_data); + efx_farch_handle_tx_flush_done(efx, event); + efx_sriov_tx_flush_done(efx, event); + break; + case FSE_AZ_RX_DESCQ_FLS_DONE_EV: + netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", + channel->channel, ev_sub_data); + efx_farch_handle_rx_flush_done(efx, event); + efx_sriov_rx_flush_done(efx, event); + break; + case FSE_AZ_EVQ_INIT_DONE_EV: + netif_dbg(efx, hw, efx->net_dev, + "channel %d EVQ %d initialised\n", + channel->channel, ev_sub_data); + break; + case FSE_AZ_SRM_UPD_DONE_EV: + netif_vdbg(efx, hw, efx->net_dev, + "channel %d SRAM update done\n", channel->channel); + break; + case FSE_AZ_WAKE_UP_EV: + netif_vdbg(efx, hw, efx->net_dev, + "channel %d RXQ %d wakeup event\n", + channel->channel, ev_sub_data); + break; + case FSE_AZ_TIMER_EV: + netif_vdbg(efx, hw, efx->net_dev, + "channel %d RX queue %d timer expired\n", + channel->channel, ev_sub_data); + break; + case FSE_AA_RX_RECOVER_EV: + netif_err(efx, rx_err, efx->net_dev, + "channel %d seen DRIVER RX_RESET event. " + "Resetting.\n", channel->channel); + atomic_inc(&efx->rx_reset); + efx_schedule_reset(efx, + EFX_WORKAROUND_6555(efx) ? + RESET_TYPE_RX_RECOVERY : + RESET_TYPE_DISABLE); + break; + case FSE_BZ_RX_DSC_ERROR_EV: + if (ev_sub_data < EFX_VI_BASE) { + netif_err(efx, rx_err, efx->net_dev, + "RX DMA Q %d reports descriptor fetch error." + " RX Q %d is disabled.\n", ev_sub_data, + ev_sub_data); + efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH); + } else + efx_sriov_desc_fetch_err(efx, ev_sub_data); + break; + case FSE_BZ_TX_DSC_ERROR_EV: + if (ev_sub_data < EFX_VI_BASE) { + netif_err(efx, tx_err, efx->net_dev, + "TX DMA Q %d reports descriptor fetch error." 
+ " TX Q %d is disabled.\n", ev_sub_data, + ev_sub_data); + efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); + } else + efx_sriov_desc_fetch_err(efx, ev_sub_data); + break; + default: + netif_vdbg(efx, hw, efx->net_dev, + "channel %d unknown driver event code %d " + "data %04x\n", channel->channel, ev_sub_code, + ev_sub_data); + break; + } +} + +int efx_farch_ev_process(struct efx_channel *channel, int budget) +{ + struct efx_nic *efx = channel->efx; + unsigned int read_ptr; + efx_qword_t event, *p_event; + int ev_code; + int tx_packets = 0; + int spent = 0; + + read_ptr = channel->eventq_read_ptr; + + for (;;) { + p_event = efx_event(channel, read_ptr); + event = *p_event; + + if (!efx_event_present(&event)) + /* End of events */ + break; + + netif_vdbg(channel->efx, intr, channel->efx->net_dev, + "channel %d event is "EFX_QWORD_FMT"\n", + channel->channel, EFX_QWORD_VAL(event)); + + /* Clear this event by marking it all ones */ + EFX_SET_QWORD(*p_event); + + ++read_ptr; + + ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); + + switch (ev_code) { + case FSE_AZ_EV_CODE_RX_EV: + efx_farch_handle_rx_event(channel, &event); + if (++spent == budget) + goto out; + break; + case FSE_AZ_EV_CODE_TX_EV: + tx_packets += efx_farch_handle_tx_event(channel, + &event); + if (tx_packets > efx->txq_entries) { + spent = budget; + goto out; + } + break; + case FSE_AZ_EV_CODE_DRV_GEN_EV: + efx_farch_handle_generated_event(channel, &event); + break; + case FSE_AZ_EV_CODE_DRIVER_EV: + efx_farch_handle_driver_event(channel, &event); + break; + case FSE_CZ_EV_CODE_USER_EV: + efx_sriov_event(channel, &event); + break; + case FSE_CZ_EV_CODE_MCDI_EV: + efx_mcdi_process_event(channel, &event); + break; + case FSE_AZ_EV_CODE_GLOBAL_EV: + if (efx->type->handle_global_event && + efx->type->handle_global_event(channel, &event)) + break; + /* else fall through */ + default: + netif_err(channel->efx, hw, channel->efx->net_dev, + "channel %d unknown event type %d (data " + EFX_QWORD_FMT ")\n", channel->channel, + ev_code, EFX_QWORD_VAL(event)); + } + } + +out: + channel->eventq_read_ptr = read_ptr; + return spent; +} + +/* Allocate buffer table entries for event queue */ +int efx_farch_ev_probe(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + unsigned entries; + + entries = channel->eventq_mask + 1; + return efx_alloc_special_buffer(efx, &channel->eventq, + entries * sizeof(efx_qword_t)); +} + +void efx_farch_ev_init(struct efx_channel *channel) +{ + efx_oword_t reg; + struct efx_nic *efx = channel->efx; + + netif_dbg(efx, hw, efx->net_dev, + "channel %d event queue in special buffers %d-%d\n", + channel->channel, channel->eventq.index, + channel->eventq.index + channel->eventq.entries - 1); + + if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { + EFX_POPULATE_OWORD_3(reg, + FRF_CZ_TIMER_Q_EN, 1, + FRF_CZ_HOST_NOTIFY_MODE, 0, + FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); + efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); + } + + /* Pin event queue buffer */ + efx_init_special_buffer(efx, &channel->eventq); + + /* Fill event queue with all ones (i.e. 
empty events) */ + memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len); + + /* Push event queue to card */ + EFX_POPULATE_OWORD_3(reg, + FRF_AZ_EVQ_EN, 1, + FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries), + FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index); + efx_writeo_table(efx, ®, efx->type->evq_ptr_tbl_base, + channel->channel); + + efx->type->push_irq_moderation(channel); +} + +void efx_farch_ev_fini(struct efx_channel *channel) +{ + efx_oword_t reg; + struct efx_nic *efx = channel->efx; + + /* Remove event queue from card */ + EFX_ZERO_OWORD(reg); + efx_writeo_table(efx, ®, efx->type->evq_ptr_tbl_base, + channel->channel); + if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) + efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); + + /* Unpin event queue */ + efx_fini_special_buffer(efx, &channel->eventq); +} + +/* Free buffers backing event queue */ +void efx_farch_ev_remove(struct efx_channel *channel) +{ + efx_free_special_buffer(channel->efx, &channel->eventq); +} + + +void efx_farch_ev_test_generate(struct efx_channel *channel) +{ + efx_farch_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel)); +} + +void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue) +{ + efx_farch_magic_event(efx_rx_queue_channel(rx_queue), + EFX_CHANNEL_MAGIC_FILL(rx_queue)); +} + +/************************************************************************** + * + * Hardware interrupts + * The hardware interrupt handler does very little work; all the event + * queue processing is carried out by per-channel tasklets. + * + **************************************************************************/ + +/* Enable/disable/generate interrupts */ +static inline void efx_farch_interrupts(struct efx_nic *efx, + bool enabled, bool force) +{ + efx_oword_t int_en_reg_ker; + + EFX_POPULATE_OWORD_3(int_en_reg_ker, + FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level, + FRF_AZ_KER_INT_KER, force, + FRF_AZ_DRV_INT_EN_KER, enabled); + efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); +} + +void efx_farch_irq_enable_master(struct efx_nic *efx) +{ + EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr)); + wmb(); /* Ensure interrupt vector is clear before interrupts enabled */ + + efx_farch_interrupts(efx, true, false); +} + +void efx_farch_irq_disable_master(struct efx_nic *efx) +{ + /* Disable interrupts */ + efx_farch_interrupts(efx, false, false); +} + +/* Generate a test interrupt + * Interrupt must already have been enabled, otherwise nasty things + * may happen. + */ +void efx_farch_irq_test_generate(struct efx_nic *efx) +{ + efx_farch_interrupts(efx, true, true); +} + +/* Process a fatal interrupt + * Disable bus mastering ASAP and schedule a reset + */ +irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx) +{ + struct falcon_nic_data *nic_data = efx->nic_data; + efx_oword_t *int_ker = efx->irq_status.addr; + efx_oword_t fatal_intr; + int error, mem_perr; + + efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER); + error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR); + + netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status " + EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), + EFX_OWORD_VAL(fatal_intr), + error ? 
"disabling bus mastering" : "no recognised error"); + + /* If this is a memory parity error dump which blocks are offending */ + mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) || + EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER)); + if (mem_perr) { + efx_oword_t reg; + efx_reado(efx, ®, FR_AZ_MEM_STAT); + netif_err(efx, hw, efx->net_dev, + "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n", + EFX_OWORD_VAL(reg)); + } + + /* Disable both devices */ + pci_clear_master(efx->pci_dev); + if (efx_nic_is_dual_func(efx)) + pci_clear_master(nic_data->pci_dev2); + efx_farch_irq_disable_master(efx); + + /* Count errors and reset or disable the NIC accordingly */ + if (efx->int_error_count == 0 || + time_after(jiffies, efx->int_error_expire)) { + efx->int_error_count = 0; + efx->int_error_expire = + jiffies + EFX_INT_ERROR_EXPIRE * HZ; + } + if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { + netif_err(efx, hw, efx->net_dev, + "SYSTEM ERROR - reset scheduled\n"); + efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); + } else { + netif_err(efx, hw, efx->net_dev, + "SYSTEM ERROR - max number of errors seen." + "NIC will be disabled\n"); + efx_schedule_reset(efx, RESET_TYPE_DISABLE); + } + + return IRQ_HANDLED; +} + +/* Handle a legacy interrupt + * Acknowledges the interrupt and schedule event queue processing. + */ +irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id) +{ + struct efx_nic *efx = dev_id; + bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); + efx_oword_t *int_ker = efx->irq_status.addr; + irqreturn_t result = IRQ_NONE; + struct efx_channel *channel; + efx_dword_t reg; + u32 queues; + int syserr; + + /* Read the ISR which also ACKs the interrupts */ + efx_readd(efx, ®, FR_BZ_INT_ISR0); + queues = EFX_EXTRACT_DWORD(reg, 0, 31); + + /* Legacy interrupts are disabled too late by the EEH kernel + * code. Disable them earlier. + * If an EEH error occurred, the read will have returned all ones. + */ + if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) && + !efx->eeh_disabled_legacy_irq) { + disable_irq_nosync(efx->legacy_irq); + efx->eeh_disabled_legacy_irq = true; + } + + /* Handle non-event-queue sources */ + if (queues & (1U << efx->irq_level) && soft_enabled) { + syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); + if (unlikely(syserr)) + return efx_farch_fatal_interrupt(efx); + efx->last_irq_cpu = raw_smp_processor_id(); + } + + if (queues != 0) { + if (EFX_WORKAROUND_15783(efx)) + efx->irq_zero_count = 0; + + /* Schedule processing of any interrupting queues */ + if (likely(soft_enabled)) { + efx_for_each_channel(channel, efx) { + if (queues & 1) + efx_schedule_channel_irq(channel); + queues >>= 1; + } + } + result = IRQ_HANDLED; + + } else if (EFX_WORKAROUND_15783(efx)) { + efx_qword_t *event; + + /* We can't return IRQ_HANDLED more than once on seeing ISR=0 + * because this might be a shared interrupt. */ + if (efx->irq_zero_count++ == 0) + result = IRQ_HANDLED; + + /* Ensure we schedule or rearm all event queues */ + if (likely(soft_enabled)) { + efx_for_each_channel(channel, efx) { + event = efx_event(channel, + channel->eventq_read_ptr); + if (efx_event_present(event)) + efx_schedule_channel_irq(channel); + else + efx_farch_ev_read_ack(channel); + } + } + } + + if (result == IRQ_HANDLED) + netif_vdbg(efx, intr, efx->net_dev, + "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", + irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); + + return result; +} + +/* Handle an MSI interrupt + * + * Handle an MSI hardware interrupt. 
This routine schedules event + * queue processing. No interrupt acknowledgement cycle is necessary. + * Also, we never need to check that the interrupt is for us, since + * MSI interrupts cannot be shared. + */ +irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id) +{ + struct efx_msi_context *context = dev_id; + struct efx_nic *efx = context->efx; + efx_oword_t *int_ker = efx->irq_status.addr; + int syserr; + + netif_vdbg(efx, intr, efx->net_dev, + "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", + irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); + + if (!likely(ACCESS_ONCE(efx->irq_soft_enabled))) + return IRQ_HANDLED; + + /* Handle non-event-queue sources */ + if (context->index == efx->irq_level) { + syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); + if (unlikely(syserr)) + return efx_farch_fatal_interrupt(efx); + efx->last_irq_cpu = raw_smp_processor_id(); + } + + /* Schedule processing of the channel */ + efx_schedule_channel_irq(efx->channel[context->index]); + + return IRQ_HANDLED; +} + + +/* Setup RSS indirection table. + * This maps from the hash value of the packet to RXQ + */ +void efx_farch_rx_push_indir_table(struct efx_nic *efx) +{ + size_t i = 0; + efx_dword_t dword; + + if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) + return; + + BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != + FR_BZ_RX_INDIRECTION_TBL_ROWS); + + for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { + EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, + efx->rx_indir_table[i]); + efx_writed(efx, &dword, + FR_BZ_RX_INDIRECTION_TBL + + FR_BZ_RX_INDIRECTION_TBL_STEP * i); + } +} + +/* Looks at available SRAM resources and works out how many queues we + * can support, and where things like descriptor caches should live. + * + * SRAM is split up as follows: + * 0 buftbl entries for channels + * efx->vf_buftbl_base buftbl entries for SR-IOV + * efx->rx_dc_base RX descriptor caches + * efx->tx_dc_base TX descriptor caches + */ +void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw) +{ + unsigned vi_count, buftbl_min; + + /* Account for the buffer table entries backing the datapath channels + * and the descriptor caches for those channels. 
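The indirection-table write-out above is what gives RSS its spread: each row maps some low bits of the flow hash to an RX queue. A minimal user-space sketch of that table fill and lookup follows; the table size and round-robin spreading policy here are assumptions for illustration, not the driver's exact values.

    #include <stdio.h>

    #define INDIR_ROWS 128            /* assumed table size for illustration */

    static unsigned indir_table[INDIR_ROWS];

    /* Spread the RX queues evenly over the table (round-robin fill). */
    static void fill_indir_table(unsigned n_rx_queues)
    {
        unsigned i;

        for (i = 0; i < INDIR_ROWS; i++)
            indir_table[i] = i % n_rx_queues;
    }

    /* The hardware does the equivalent of this lookup: low bits of the
     * RSS hash select a row, and the row names the RX queue. */
    static unsigned rxq_for_hash(unsigned rss_hash)
    {
        return indir_table[rss_hash % INDIR_ROWS];
    }

    int main(void)
    {
        fill_indir_table(4);
        printf("hash 0x12345 -> RXQ %u\n", rxq_for_hash(0x12345));
        return 0;
    }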
+ */ + buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE + + efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE + + efx->n_channels * EFX_MAX_EVQ_SIZE) + * sizeof(efx_qword_t) / EFX_BUF_SIZE); + vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); + +#ifdef CONFIG_SFC_SRIOV + if (efx_sriov_wanted(efx)) { + unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit; + + efx->vf_buftbl_base = buftbl_min; + + vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES; + vi_count = max(vi_count, EFX_VI_BASE); + buftbl_free = (sram_lim_qw - buftbl_min - + vi_count * vi_dc_entries); + + entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) * + efx_vf_size(efx)); + vf_limit = min(buftbl_free / entries_per_vf, + (1024U - EFX_VI_BASE) >> efx->vi_scale); + + if (efx->vf_count > vf_limit) { + netif_err(efx, probe, efx->net_dev, + "Reducing VF count from from %d to %d\n", + efx->vf_count, vf_limit); + efx->vf_count = vf_limit; + } + vi_count += efx->vf_count * efx_vf_size(efx); + } +#endif + + efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES; + efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES; +} + +u32 efx_farch_fpga_ver(struct efx_nic *efx) +{ + efx_oword_t altera_build; + efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD); + return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER); +} + +void efx_farch_init_common(struct efx_nic *efx) +{ + efx_oword_t temp; + + /* Set positions of descriptor caches in SRAM. */ + EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base); + efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG); + EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base); + efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG); + + /* Set TX descriptor cache size. */ + BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER)); + EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER); + efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG); + + /* Set RX descriptor cache size. Set low watermark to size-8, as + * this allows most efficient prefetching. + */ + BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER)); + EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER); + efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG); + EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8); + efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM); + + /* Program INT_KER address */ + EFX_POPULATE_OWORD_2(temp, + FRF_AZ_NORM_INT_VEC_DIS_KER, + EFX_INT_MODE_USE_MSI(efx), + FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); + efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER); + + if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx)) + /* Use an interrupt level unused by event queues */ + efx->irq_level = 0x1f; + else + /* Use a valid MSI-X vector */ + efx->irq_level = 0; + + /* Enable all the genuinely fatal interrupts. (They are still + * masked by the overall interrupt mask, controlled by + * falcon_interrupts()). + * + * Note: All other fatal interrupts are enabled + */ + EFX_POPULATE_OWORD_3(temp, + FRF_AZ_ILL_ADR_INT_KER_EN, 1, + FRF_AZ_RBUF_OWN_INT_KER_EN, 1, + FRF_AZ_TBUF_OWN_INT_KER_EN, 1); + if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) + EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1); + EFX_INVERT_OWORD(temp); + efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); + + efx_farch_rx_push_indir_table(efx); + + /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be + * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. 
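To make the dimensioning arithmetic above concrete, a small sketch with invented sizes; the real EFX_* constants, the SR-IOV adjustments and the VF limit calculation are omitted.

    #include <stdio.h>

    /* Illustrative sizes only; the driver's constants differ. */
    #define BUF_SIZE      4096u   /* bytes covered by one buffer-table entry */
    #define QWORD_SIZE    8u
    #define MAX_DMAQ_SIZE 4096u   /* descriptors per TX/RX ring */
    #define MAX_EVQ_SIZE  8192u   /* events per event queue */
    #define TXQ_TYPES     4u
    #define TX_DC_ENTRIES 16u
    #define RX_DC_ENTRIES 64u

    int main(void)
    {
        unsigned n_channels = 4, n_rx = 4, n_tx = 4;
        unsigned sram_lim_qw = 1u << 20;       /* assumed SRAM size in qwords */

        /* Buffer-table entries needed to back every ring and event queue. */
        unsigned buftbl_min =
            (n_rx * MAX_DMAQ_SIZE +
             n_tx * TXQ_TYPES * MAX_DMAQ_SIZE +
             n_channels * MAX_EVQ_SIZE) * QWORD_SIZE / BUF_SIZE;

        /* One virtual interface per channel or per TX queue, whichever is
         * larger; each owns a TX and an RX descriptor cache carved from
         * the top of SRAM downwards. */
        unsigned vi_count = n_channels > n_tx * TXQ_TYPES ?
                            n_channels : n_tx * TXQ_TYPES;
        unsigned tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
        unsigned rx_dc_base = tx_dc_base - vi_count * RX_DC_ENTRIES;

        printf("buftbl_min=%u vi_count=%u tx_dc_base=%u rx_dc_base=%u\n",
               buftbl_min, vi_count, tx_dc_base, rx_dc_base);
        return 0;
    }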
+ */ + efx_reado(efx, &temp, FR_AZ_TX_RESERVED); + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1); + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); + /* Enable SW_EV to inherit in char driver - assume harmless here */ + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); + /* Prefetch threshold 2 => fetch when descriptor cache half empty */ + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); + /* Disable hardware watchdog which can misfire */ + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); + /* Squash TX of packets of 16 bytes or less */ + if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) + EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); + efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); + + if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { + EFX_POPULATE_OWORD_4(temp, + /* Default values */ + FRF_BZ_TX_PACE_SB_NOT_AF, 0x15, + FRF_BZ_TX_PACE_SB_AF, 0xb, + FRF_BZ_TX_PACE_FB_BASE, 0, + /* Allow large pace values in the + * fast bin. */ + FRF_BZ_TX_PACE_BIN_TH, + FFE_BZ_TX_PACE_RESERVED); + efx_writeo(efx, &temp, FR_BZ_TX_PACE); + } +} diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index b382895..7283cc1 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -971,7 +971,7 @@ static inline unsigned int efx_port_num(struct efx_nic *efx) * @get_wol: Get WoL configuration from driver state * @set_wol: Push WoL configuration to the NIC * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume) - * @test_chip: Test registers. Should use efx_nic_test_registers(), and is + * @test_chip: Test registers. May use efx_farch_test_registers(), and is * expected to reset the NIC. * @test_nvram: Test validity of NVRAM contents * @mcdi_request: Send an MCDI request with the given header and SDU. @@ -985,6 +985,32 @@ static inline unsigned int efx_port_num(struct efx_nic *efx) * @mcdi_poll_reboot: Test whether the MCDI has rebooted. If so, * return an appropriate error code for aborting any current * request; otherwise return 0. + * @irq_enable_master: Enable IRQs on the NIC. Each event queue must + * be separately enabled after this. + * @irq_test_generate: Generate a test IRQ + * @irq_disable_non_ev: Disable non-event IRQs on the NIC. Each event + * queue must be separately disabled before this. + * @irq_handle_msi: Handle MSI for a channel. The @dev_id argument is + * a pointer to the &struct efx_msi_context for the channel. + * @irq_handle_legacy: Handle legacy interrupt. The @dev_id argument + * is a pointer to the &struct efx_nic. 
+ * @tx_probe: Allocate resources for TX queue + * @tx_init: Initialise TX queue on the NIC + * @tx_remove: Free resources for TX queue + * @tx_write: Write TX descriptors and doorbell + * @rx_push_indir_table: Write RSS indirection table to the NIC + * @rx_probe: Allocate resources for RX queue + * @rx_init: Initialise RX queue on the NIC + * @rx_remove: Free resources for RX queue + * @rx_write: Write RX descriptors and doorbell + * @rx_defer_refill: Generate a refill reminder event + * @ev_probe: Allocate resources for event queue + * @ev_init: Initialise event queue on the NIC + * @ev_fini: Deinitialise event queue on the NIC + * @ev_remove: Free resources for event queue + * @ev_process: Process events for a queue, up to the given NAPI quota + * @ev_read_ack: Acknowledge read events on a queue, rearming its IRQ + * @ev_test_generate: Generate a test event * @revision: Hardware architecture revision * @mem_map_size: Memory BAR mapped size * @txd_ptr_tbl_base: TX descriptor ring base address @@ -1041,6 +1067,28 @@ struct efx_nic_type { void (*mcdi_read_response)(struct efx_nic *efx, efx_dword_t *pdu, size_t pdu_offset, size_t pdu_len); int (*mcdi_poll_reboot)(struct efx_nic *efx); + void (*irq_enable_master)(struct efx_nic *efx); + void (*irq_test_generate)(struct efx_nic *efx); + void (*irq_disable_non_ev)(struct efx_nic *efx); + irqreturn_t (*irq_handle_msi)(int irq, void *dev_id); + irqreturn_t (*irq_handle_legacy)(int irq, void *dev_id); + int (*tx_probe)(struct efx_tx_queue *tx_queue); + void (*tx_init)(struct efx_tx_queue *tx_queue); + void (*tx_remove)(struct efx_tx_queue *tx_queue); + void (*tx_write)(struct efx_tx_queue *tx_queue); + void (*rx_push_indir_table)(struct efx_nic *efx); + int (*rx_probe)(struct efx_rx_queue *rx_queue); + void (*rx_init)(struct efx_rx_queue *rx_queue); + void (*rx_remove)(struct efx_rx_queue *rx_queue); + void (*rx_write)(struct efx_rx_queue *rx_queue); + void (*rx_defer_refill)(struct efx_rx_queue *rx_queue); + int (*ev_probe)(struct efx_channel *channel); + void (*ev_init)(struct efx_channel *channel); + void (*ev_fini)(struct efx_channel *channel); + void (*ev_remove)(struct efx_channel *channel); + int (*ev_process)(struct efx_channel *channel, int quota); + void (*ev_read_ack)(struct efx_channel *channel); + void (*ev_test_generate)(struct efx_channel *channel); int revision; unsigned int mem_map_size; diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index 7c52691..66c71ed 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -25,273 +25,6 @@ /************************************************************************** * - * Configurable values - * - ************************************************************************** - */ - -/* This is set to 16 for a good reason. In summary, if larger than - * 16, the descriptor cache holds more than a default socket - * buffer's worth of packets (for UDP we can only have at most one - * socket buffer's worth outstanding). This combined with the fact - * that we only get 1 TX event per descriptor cache means the NIC - * goes idle. - */ -#define TX_DC_ENTRIES 16 -#define TX_DC_ENTRIES_ORDER 1 - -#define RX_DC_ENTRIES 64 -#define RX_DC_ENTRIES_ORDER 3 - -/* If EFX_MAX_INT_ERRORS internal errors occur within - * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and - * disable it. 
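The error policy described just above (reset on an isolated fault, disable once too many faults land inside one expiry window) can be sketched on its own with plain counters; the thresholds and the time source below are stand-ins, not the driver's jiffies-based ones.

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative thresholds; EFX_MAX_INT_ERRORS / EFX_INT_ERROR_EXPIRE
     * play the same role in the driver. */
    #define MAX_ERRORS  5
    #define WINDOW_SECS 3600

    struct err_window {
        int       count;
        long long expire;   /* absolute time the current window ends */
    };

    /* Returns true while a reset is still worth trying, false once too
     * many errors have hit inside one window and the device should be
     * disabled. */
    static bool note_error(struct err_window *w, long long now)
    {
        if (w->count == 0 || now > w->expire) {
            w->count = 0;
            w->expire = now + WINDOW_SECS;
        }
        return ++w->count < MAX_ERRORS;
    }

    int main(void)
    {
        struct err_window w = { 0, 0 };
        int i;

        for (i = 0; i < 6; i++)
            printf("error %d -> %s\n", i,
                   note_error(&w, 0) ? "reset" : "disable");
        return 0;
    }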
- */ -#define EFX_INT_ERROR_EXPIRE 3600 -#define EFX_MAX_INT_ERRORS 5 - -/* Depth of RX flush request fifo */ -#define EFX_RX_FLUSH_COUNT 4 - -/* Driver generated events */ -#define _EFX_CHANNEL_MAGIC_TEST 0x000101 -#define _EFX_CHANNEL_MAGIC_FILL 0x000102 -#define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103 -#define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104 - -#define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data)) -#define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8) - -#define EFX_CHANNEL_MAGIC_TEST(_channel) \ - _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel) -#define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \ - _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \ - efx_rx_queue_index(_rx_queue)) -#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \ - _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \ - efx_rx_queue_index(_rx_queue)) -#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \ - _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \ - (_tx_queue)->queue) - -static void efx_magic_event(struct efx_channel *channel, u32 magic); - -/************************************************************************** - * - * Solarstorm hardware access - * - **************************************************************************/ - -static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value, - unsigned int index) -{ - efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base, - value, index); -} - -/* Read the current event from the event queue */ -static inline efx_qword_t *efx_event(struct efx_channel *channel, - unsigned int index) -{ - return ((efx_qword_t *) (channel->eventq.buf.addr)) + - (index & channel->eventq_mask); -} - -/* See if an event is present - * - * We check both the high and low dword of the event for all ones. We - * wrote all ones when we cleared the event, and no valid event can - * have all ones in either its high or low dwords. This approach is - * robust against reordering. - * - * Note that using a single 64-bit comparison is incorrect; even - * though the CPU read will be atomic, the DMA write may not be. 
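The channel magic macros above pack a driver-generated event code and a queue or channel index into a single word. A tiny sketch of that encoding; the low-byte extractor is an inference from the shift, the driver itself mostly compares whole magic values.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same shape as the _EFX_CHANNEL_MAGIC macros: event code in the
     * high bits, queue/channel index in the low 8 bits. */
    #define MAGIC_TEST 0x000101u

    static uint32_t magic_encode(uint32_t code, uint32_t data)
    {
        return code << 8 | data;
    }

    static uint32_t magic_code(uint32_t magic)
    {
        return magic >> 8;
    }

    static uint32_t magic_data(uint32_t magic)
    {
        return magic & 0xff;
    }

    int main(void)
    {
        uint32_t magic = magic_encode(MAGIC_TEST, 3);   /* channel 3 */

        assert(magic_code(magic) == MAGIC_TEST);
        assert(magic_data(magic) == 3);
        printf("magic=0x%x\n", (unsigned)magic);
        return 0;
    }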
- */ -static inline int efx_event_present(efx_qword_t *event) -{ - return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) | - EFX_DWORD_IS_ALL_ONES(event->dword[1])); -} - -static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b, - const efx_oword_t *mask) -{ - return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) || - ((a->u64[1] ^ b->u64[1]) & mask->u64[1]); -} - -int efx_nic_test_registers(struct efx_nic *efx, - const struct efx_nic_register_test *regs, - size_t n_regs) -{ - unsigned address = 0, i, j; - efx_oword_t mask, imask, original, reg, buf; - - for (i = 0; i < n_regs; ++i) { - address = regs[i].address; - mask = imask = regs[i].mask; - EFX_INVERT_OWORD(imask); - - efx_reado(efx, &original, address); - - /* bit sweep on and off */ - for (j = 0; j < 128; j++) { - if (!EFX_EXTRACT_OWORD32(mask, j, j)) - continue; - - /* Test this testable bit can be set in isolation */ - EFX_AND_OWORD(reg, original, mask); - EFX_SET_OWORD32(reg, j, j, 1); - - efx_writeo(efx, ®, address); - efx_reado(efx, &buf, address); - - if (efx_masked_compare_oword(®, &buf, &mask)) - goto fail; - - /* Test this testable bit can be cleared in isolation */ - EFX_OR_OWORD(reg, original, mask); - EFX_SET_OWORD32(reg, j, j, 0); - - efx_writeo(efx, ®, address); - efx_reado(efx, &buf, address); - - if (efx_masked_compare_oword(®, &buf, &mask)) - goto fail; - } - - efx_writeo(efx, &original, address); - } - - return 0; - -fail: - netif_err(efx, hw, efx->net_dev, - "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT - " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg), - EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask)); - return -EIO; -} - -/************************************************************************** - * - * Special buffer handling - * Special buffers are used for event queues and the TX and RX - * descriptor rings. - * - *************************************************************************/ - -/* - * Initialise a special buffer - * - * This will define a buffer (previously allocated via - * efx_alloc_special_buffer()) in the buffer table, allowing - * it to be used for event queues, descriptor rings etc. 
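The register self-test above walks every testable bit, setting and clearing it in isolation and judging the readback only under the mask. The same idea on a plain 32-bit register model; the fake register and mask below are illustrative only.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Fake 32-bit register used in place of real MMIO for the sketch. */
    static uint32_t fake_reg;
    static void     reg_write(uint32_t v) { fake_reg = v; }
    static uint32_t reg_read(void)        { return fake_reg; }

    /* Walking-bit test: every bit covered by mask must be individually
     * settable and clearable; only masked bits count in the compare. */
    static bool test_register(uint32_t mask)
    {
        uint32_t original = reg_read();
        int bit;

        for (bit = 0; bit < 32; bit++) {
            uint32_t probe;

            if (!(mask & (1u << bit)))
                continue;

            /* This bit set, the other masked bits left at their original value. */
            probe = (original & mask) | (1u << bit);
            reg_write(probe);
            if ((reg_read() ^ probe) & mask)
                return false;

            /* This bit cleared, the other masked bits forced on. */
            probe = (original | mask) & ~(1u << bit);
            reg_write(probe);
            if ((reg_read() ^ probe) & mask)
                return false;
        }
        reg_write(original);
        return true;
    }

    int main(void)
    {
        printf("register test %s\n",
               test_register(0x00ff00ffu) ? "passed" : "FAILED");
        return 0;
    }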
- */ -static void -efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) -{ - efx_qword_t buf_desc; - unsigned int index; - dma_addr_t dma_addr; - int i; - - EFX_BUG_ON_PARANOID(!buffer->buf.addr); - - /* Write buffer descriptors to NIC */ - for (i = 0; i < buffer->entries; i++) { - index = buffer->index + i; - dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE); - netif_dbg(efx, probe, efx->net_dev, - "mapping special buffer %d at %llx\n", - index, (unsigned long long)dma_addr); - EFX_POPULATE_QWORD_3(buf_desc, - FRF_AZ_BUF_ADR_REGION, 0, - FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12, - FRF_AZ_BUF_OWNER_ID_FBUF, 0); - efx_write_buf_tbl(efx, &buf_desc, index); - } -} - -/* Unmaps a buffer and clears the buffer table entries */ -static void -efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) -{ - efx_oword_t buf_tbl_upd; - unsigned int start = buffer->index; - unsigned int end = (buffer->index + buffer->entries - 1); - - if (!buffer->entries) - return; - - netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n", - buffer->index, buffer->index + buffer->entries - 1); - - EFX_POPULATE_OWORD_4(buf_tbl_upd, - FRF_AZ_BUF_UPD_CMD, 0, - FRF_AZ_BUF_CLR_CMD, 1, - FRF_AZ_BUF_CLR_END_ID, end, - FRF_AZ_BUF_CLR_START_ID, start); - efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD); -} - -/* - * Allocate a new special buffer - * - * This allocates memory for a new buffer, clears it and allocates a - * new buffer ID range. It does not write into the buffer table. - * - * This call will allocate 4KB buffers, since 8KB buffers can't be - * used for event queues and descriptor rings. - */ -static int efx_alloc_special_buffer(struct efx_nic *efx, - struct efx_special_buffer *buffer, - unsigned int len) -{ - len = ALIGN(len, EFX_BUF_SIZE); - - if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL)) - return -ENOMEM; - buffer->entries = len / EFX_BUF_SIZE; - BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1)); - - /* Select new buffer ID */ - buffer->index = efx->next_buffer_table; - efx->next_buffer_table += buffer->entries; -#ifdef CONFIG_SFC_SRIOV - BUG_ON(efx_sriov_enabled(efx) && - efx->vf_buftbl_base < efx->next_buffer_table); -#endif - - netif_dbg(efx, probe, efx->net_dev, - "allocating special buffers %d-%d at %llx+%x " - "(virt %p phys %llx)\n", buffer->index, - buffer->index + buffer->entries - 1, - (u64)buffer->buf.dma_addr, len, - buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr)); - - return 0; -} - -static void -efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) -{ - if (!buffer->buf.addr) - return; - - netif_dbg(efx, hw, efx->net_dev, - "deallocating special buffers %d-%d at %llx+%x " - "(virt %p phys %llx)\n", buffer->index, - buffer->index + buffer->entries - 1, - (u64)buffer->buf.dma_addr, buffer->buf.len, - buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr)); - - efx_nic_free_buffer(efx, &buffer->buf); - buffer->entries = 0; -} - -/************************************************************************** - * * Generic buffer handling * These buffers are used for interrupt status, MAC stats, etc. * @@ -318,1079 +51,6 @@ void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer) } } -/************************************************************************** - * - * TX path - * - **************************************************************************/ - -/* Returns a pointer to the specified transmit descriptor in the TX - * descriptor queue belonging to the specified channel. 
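Special buffers above claim whole 4 KB chunks and a contiguous run of buffer-table IDs. A compact sketch of that size rounding and index allocation; no DMA or table programming, just the arithmetic.

    #include <stdio.h>

    #define BUF_SIZE 4096u              /* one buffer-table entry covers 4 KB */
    #define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

    static unsigned next_buffer_table;  /* next free buffer-table index */

    struct special_buffer {
        unsigned index;    /* first buffer-table entry owned */
        unsigned entries;  /* number of 4 KB chunks */
    };

    /* Round the requested length up to whole 4 KB chunks and claim a
     * contiguous range of buffer-table IDs for them. */
    static void alloc_special(struct special_buffer *b, unsigned len)
    {
        len = ALIGN_UP(len, BUF_SIZE);
        b->entries = len / BUF_SIZE;
        b->index = next_buffer_table;
        next_buffer_table += b->entries;
    }

    int main(void)
    {
        struct special_buffer evq, txd;

        alloc_special(&evq, 8192 * 8);   /* e.g. 8192 eight-byte events */
        alloc_special(&txd, 1024 * 8);   /* e.g. 1024 eight-byte descriptors */
        printf("evq: entries %u-%u, txd: entries %u-%u\n",
               evq.index, evq.index + evq.entries - 1,
               txd.index, txd.index + txd.entries - 1);
        return 0;
    }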
- */ -static inline efx_qword_t * -efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) -{ - return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index; -} - -/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ -static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue) -{ - unsigned write_ptr; - efx_dword_t reg; - - write_ptr = tx_queue->write_count & tx_queue->ptr_mask; - EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr); - efx_writed_page(tx_queue->efx, ®, - FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue); -} - -/* Write pointer and first descriptor for TX descriptor ring */ -static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue, - const efx_qword_t *txd) -{ - unsigned write_ptr; - efx_oword_t reg; - - BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0); - BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0); - - write_ptr = tx_queue->write_count & tx_queue->ptr_mask; - EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true, - FRF_AZ_TX_DESC_WPTR, write_ptr); - reg.qword[0] = *txd; - efx_writeo_page(tx_queue->efx, ®, - FR_BZ_TX_DESC_UPD_P0, tx_queue->queue); -} - -static inline bool -efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count) -{ - unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count); - - if (empty_read_count == 0) - return false; - - tx_queue->empty_read_count = 0; - return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0 - && tx_queue->write_count - write_count == 1; -} - -/* For each entry inserted into the software descriptor ring, create a - * descriptor in the hardware TX descriptor ring (in host memory), and - * write a doorbell. - */ -void efx_nic_push_buffers(struct efx_tx_queue *tx_queue) -{ - - struct efx_tx_buffer *buffer; - efx_qword_t *txd; - unsigned write_ptr; - unsigned old_write_count = tx_queue->write_count; - - BUG_ON(tx_queue->write_count == tx_queue->insert_count); - - do { - write_ptr = tx_queue->write_count & tx_queue->ptr_mask; - buffer = &tx_queue->buffer[write_ptr]; - txd = efx_tx_desc(tx_queue, write_ptr); - ++tx_queue->write_count; - - /* Create TX descriptor ring entry */ - BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); - EFX_POPULATE_QWORD_4(*txd, - FSF_AZ_TX_KER_CONT, - buffer->flags & EFX_TX_BUF_CONT, - FSF_AZ_TX_KER_BYTE_COUNT, buffer->len, - FSF_AZ_TX_KER_BUF_REGION, 0, - FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr); - } while (tx_queue->write_count != tx_queue->insert_count); - - wmb(); /* Ensure descriptors are written before they are fetched */ - - if (efx_may_push_tx_desc(tx_queue, old_write_count)) { - txd = efx_tx_desc(tx_queue, - old_write_count & tx_queue->ptr_mask); - efx_push_tx_desc(tx_queue, txd); - ++tx_queue->pushes; - } else { - efx_notify_tx_desc(tx_queue); - } -} - -/* Allocate hardware resources for a TX queue */ -int efx_nic_probe_tx(struct efx_tx_queue *tx_queue) -{ - struct efx_nic *efx = tx_queue->efx; - unsigned entries; - - entries = tx_queue->ptr_mask + 1; - return efx_alloc_special_buffer(efx, &tx_queue->txd, - entries * sizeof(efx_qword_t)); -} - -void efx_nic_init_tx(struct efx_tx_queue *tx_queue) -{ - struct efx_nic *efx = tx_queue->efx; - efx_oword_t reg; - - /* Pin TX descriptor ring */ - efx_init_special_buffer(efx, &tx_queue->txd); - - /* Push TX descriptor ring to card */ - EFX_POPULATE_OWORD_10(reg, - FRF_AZ_TX_DESCQ_EN, 1, - FRF_AZ_TX_ISCSI_DDIG_EN, 0, - FRF_AZ_TX_ISCSI_HDIG_EN, 0, - FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index, - FRF_AZ_TX_DESCQ_EVQ_ID, - tx_queue->channel->channel, - 
FRF_AZ_TX_DESCQ_OWNER_ID, 0, - FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue, - FRF_AZ_TX_DESCQ_SIZE, - __ffs(tx_queue->txd.entries), - FRF_AZ_TX_DESCQ_TYPE, 0, - FRF_BZ_TX_NON_IP_DROP_DIS, 1); - - if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { - int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; - EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum); - EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS, - !csum); - } - - efx_writeo_table(efx, ®, efx->type->txd_ptr_tbl_base, - tx_queue->queue); - - if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) { - /* Only 128 bits in this register */ - BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128); - - efx_reado(efx, ®, FR_AA_TX_CHKSM_CFG); - if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) - __clear_bit_le(tx_queue->queue, ®); - else - __set_bit_le(tx_queue->queue, ®); - efx_writeo(efx, ®, FR_AA_TX_CHKSM_CFG); - } - - if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { - EFX_POPULATE_OWORD_1(reg, - FRF_BZ_TX_PACE, - (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ? - FFE_BZ_TX_PACE_OFF : - FFE_BZ_TX_PACE_RESERVED); - efx_writeo_table(efx, ®, FR_BZ_TX_PACE_TBL, - tx_queue->queue); - } -} - -static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue) -{ - struct efx_nic *efx = tx_queue->efx; - efx_oword_t tx_flush_descq; - - WARN_ON(atomic_read(&tx_queue->flush_outstanding)); - atomic_set(&tx_queue->flush_outstanding, 1); - - EFX_POPULATE_OWORD_2(tx_flush_descq, - FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, - FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue); - efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ); -} - -void efx_nic_fini_tx(struct efx_tx_queue *tx_queue) -{ - struct efx_nic *efx = tx_queue->efx; - efx_oword_t tx_desc_ptr; - - /* Remove TX descriptor ring from card */ - EFX_ZERO_OWORD(tx_desc_ptr); - efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, - tx_queue->queue); - - /* Unpin TX descriptor ring */ - efx_fini_special_buffer(efx, &tx_queue->txd); -} - -/* Free buffers backing TX queue */ -void efx_nic_remove_tx(struct efx_tx_queue *tx_queue) -{ - efx_free_special_buffer(tx_queue->efx, &tx_queue->txd); -} - -/************************************************************************** - * - * RX path - * - **************************************************************************/ - -/* Returns a pointer to the specified descriptor in the RX descriptor queue */ -static inline efx_qword_t * -efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) -{ - return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index; -} - -/* This creates an entry in the RX descriptor queue */ -static inline void -efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index) -{ - struct efx_rx_buffer *rx_buf; - efx_qword_t *rxd; - - rxd = efx_rx_desc(rx_queue, index); - rx_buf = efx_rx_buffer(rx_queue, index); - EFX_POPULATE_QWORD_3(*rxd, - FSF_AZ_RX_KER_BUF_SIZE, - rx_buf->len - - rx_queue->efx->type->rx_buffer_padding, - FSF_AZ_RX_KER_BUF_REGION, 0, - FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); -} - -/* This writes to the RX_DESC_WPTR register for the specified receive - * descriptor ring. 
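The DESCQ_SIZE / EVQ_SIZE fields above are programmed with __ffs(entries), i.e. log2 of a power-of-two ring size. In user space the equivalent looks like this, with __builtin_ctz standing in for the kernel's __ffs.

    #include <assert.h>
    #include <stdio.h>

    /* For a power-of-two ring, the hardware SIZE field holds log2 of the
     * entry count; counting trailing zeros gives exactly that. */
    static unsigned ring_size_field(unsigned entries)
    {
        assert(entries && (entries & (entries - 1)) == 0);
        return (unsigned)__builtin_ctz(entries);
    }

    int main(void)
    {
        printf("512 entries  -> SIZE field %u\n", ring_size_field(512));
        printf("4096 entries -> SIZE field %u\n", ring_size_field(4096));
        return 0;
    }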
- */ -void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue) -{ - struct efx_nic *efx = rx_queue->efx; - efx_dword_t reg; - unsigned write_ptr; - - while (rx_queue->notified_count != rx_queue->added_count) { - efx_build_rx_desc( - rx_queue, - rx_queue->notified_count & rx_queue->ptr_mask); - ++rx_queue->notified_count; - } - - wmb(); - write_ptr = rx_queue->added_count & rx_queue->ptr_mask; - EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr); - efx_writed_page(efx, ®, FR_AZ_RX_DESC_UPD_DWORD_P0, - efx_rx_queue_index(rx_queue)); -} - -int efx_nic_probe_rx(struct efx_rx_queue *rx_queue) -{ - struct efx_nic *efx = rx_queue->efx; - unsigned entries; - - entries = rx_queue->ptr_mask + 1; - return efx_alloc_special_buffer(efx, &rx_queue->rxd, - entries * sizeof(efx_qword_t)); -} - -void efx_nic_init_rx(struct efx_rx_queue *rx_queue) -{ - efx_oword_t rx_desc_ptr; - struct efx_nic *efx = rx_queue->efx; - bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0; - bool iscsi_digest_en = is_b0; - bool jumbo_en; - - /* For kernel-mode queues in Falcon A1, the JUMBO flag enables - * DMA to continue after a PCIe page boundary (and scattering - * is not possible). In Falcon B0 and Siena, it enables - * scatter. - */ - jumbo_en = !is_b0 || efx->rx_scatter; - - netif_dbg(efx, hw, efx->net_dev, - "RX queue %d ring in special buffers %d-%d\n", - efx_rx_queue_index(rx_queue), rx_queue->rxd.index, - rx_queue->rxd.index + rx_queue->rxd.entries - 1); - - rx_queue->scatter_n = 0; - - /* Pin RX descriptor ring */ - efx_init_special_buffer(efx, &rx_queue->rxd); - - /* Push RX descriptor ring to card */ - EFX_POPULATE_OWORD_10(rx_desc_ptr, - FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en, - FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en, - FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, - FRF_AZ_RX_DESCQ_EVQ_ID, - efx_rx_queue_channel(rx_queue)->channel, - FRF_AZ_RX_DESCQ_OWNER_ID, 0, - FRF_AZ_RX_DESCQ_LABEL, - efx_rx_queue_index(rx_queue), - FRF_AZ_RX_DESCQ_SIZE, - __ffs(rx_queue->rxd.entries), - FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ , - FRF_AZ_RX_DESCQ_JUMBO, jumbo_en, - FRF_AZ_RX_DESCQ_EN, 1); - efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, - efx_rx_queue_index(rx_queue)); -} - -static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue) -{ - struct efx_nic *efx = rx_queue->efx; - efx_oword_t rx_flush_descq; - - EFX_POPULATE_OWORD_2(rx_flush_descq, - FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, - FRF_AZ_RX_FLUSH_DESCQ, - efx_rx_queue_index(rx_queue)); - efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ); -} - -void efx_nic_fini_rx(struct efx_rx_queue *rx_queue) -{ - efx_oword_t rx_desc_ptr; - struct efx_nic *efx = rx_queue->efx; - - /* Remove RX descriptor ring from card */ - EFX_ZERO_OWORD(rx_desc_ptr); - efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, - efx_rx_queue_index(rx_queue)); - - /* Unpin RX descriptor ring */ - efx_fini_special_buffer(efx, &rx_queue->rxd); -} - -/* Free buffers backing RX queue */ -void efx_nic_remove_rx(struct efx_rx_queue *rx_queue) -{ - efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd); -} - -/************************************************************************** - * - * Flush handling - * - **************************************************************************/ - -/* efx_nic_flush_queues() must be woken up when all flushes are completed, - * or more RX flushes can be kicked off. 
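The RX doorbell path above writes one descriptor for every buffer added since the last notify and then publishes the new write pointer. A stripped-down sketch of that producer pattern; the memory barrier and the real descriptor layout are omitted.

    #include <stdio.h>

    #define RING_SIZE 8u
    #define PTR_MASK  (RING_SIZE - 1u)

    /* Stand-ins for the software descriptor ring and the hardware
     * write-pointer register. */
    static unsigned ring[RING_SIZE];
    static unsigned added_count, notified_count;   /* free-running counters */

    static void write_wptr(unsigned wptr)
    {
        printf("doorbell: RX_DESC_WPTR <- %u\n", wptr);
    }

    /* Publish every buffer added since the last doorbell: build one
     * descriptor per new buffer, then tell the NIC the write pointer. */
    static void notify_rx_desc(void)
    {
        while (notified_count != added_count) {
            ring[notified_count & PTR_MASK] = notified_count; /* build descriptor */
            ++notified_count;
        }
        write_wptr(added_count & PTR_MASK);
    }

    int main(void)
    {
        added_count = 5;       /* five buffers queued by the refill path */
        notify_rx_desc();      /* descriptors 0-4 written, doorbell = 5 */
        return 0;
    }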
- */ -static bool efx_flush_wake(struct efx_nic *efx) -{ - /* Ensure that all updates are visible to efx_nic_flush_queues() */ - smp_mb(); - - return (atomic_read(&efx->drain_pending) == 0 || - (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT - && atomic_read(&efx->rxq_flush_pending) > 0)); -} - -static bool efx_check_tx_flush_complete(struct efx_nic *efx) -{ - bool i = true; - efx_oword_t txd_ptr_tbl; - struct efx_channel *channel; - struct efx_tx_queue *tx_queue; - - efx_for_each_channel(channel, efx) { - efx_for_each_channel_tx_queue(tx_queue, channel) { - efx_reado_table(efx, &txd_ptr_tbl, - FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue); - if (EFX_OWORD_FIELD(txd_ptr_tbl, - FRF_AZ_TX_DESCQ_FLUSH) || - EFX_OWORD_FIELD(txd_ptr_tbl, - FRF_AZ_TX_DESCQ_EN)) { - netif_dbg(efx, hw, efx->net_dev, - "flush did not complete on TXQ %d\n", - tx_queue->queue); - i = false; - } else if (atomic_cmpxchg(&tx_queue->flush_outstanding, - 1, 0)) { - /* The flush is complete, but we didn't - * receive a flush completion event - */ - netif_dbg(efx, hw, efx->net_dev, - "flush complete on TXQ %d, so drain " - "the queue\n", tx_queue->queue); - /* Don't need to increment drain_pending as it - * has already been incremented for the queues - * which did not drain - */ - efx_magic_event(channel, - EFX_CHANNEL_MAGIC_TX_DRAIN( - tx_queue)); - } - } - } - - return i; -} - -/* Flush all the transmit queues, and continue flushing receive queues until - * they're all flushed. Wait for the DRAIN events to be recieved so that there - * are no more RX and TX events left on any channel. */ -static int efx_farch_do_flush(struct efx_nic *efx) -{ - unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */ - struct efx_channel *channel; - struct efx_rx_queue *rx_queue; - struct efx_tx_queue *tx_queue; - int rc = 0; - - efx_for_each_channel(channel, efx) { - efx_for_each_channel_tx_queue(tx_queue, channel) { - atomic_inc(&efx->drain_pending); - efx_flush_tx_queue(tx_queue); - } - efx_for_each_channel_rx_queue(rx_queue, channel) { - atomic_inc(&efx->drain_pending); - rx_queue->flush_pending = true; - atomic_inc(&efx->rxq_flush_pending); - } - } - - while (timeout && atomic_read(&efx->drain_pending) > 0) { - /* If SRIOV is enabled, then offload receive queue flushing to - * the firmware (though we will still have to poll for - * completion). If that fails, fall back to the old scheme. 
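The flush logic above never keeps more than EFX_RX_FLUSH_COUNT receive flushes in flight and starts the next pending one as completions arrive. Sketched here with plain counters and no locking; the real code uses atomics and a wait queue.

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_IN_FLIGHT 4    /* mirrors the role of EFX_RX_FLUSH_COUNT */
    #define N_QUEUES      10

    static bool flush_pending[N_QUEUES];
    static int  in_flight;

    /* Start as many pending flushes as the in-flight budget allows;
     * the remainder wait for an earlier flush to complete. */
    static void kick_flushes(void)
    {
        int q;

        for (q = 0; q < N_QUEUES; q++) {
            if (in_flight >= MAX_IN_FLIGHT)
                break;
            if (flush_pending[q]) {
                flush_pending[q] = false;
                in_flight++;
                printf("flushing RXQ %d (in flight %d)\n", q, in_flight);
            }
        }
    }

    static void flush_done(int q)
    {
        in_flight--;
        printf("RXQ %d flushed (in flight %d)\n", q, in_flight);
        kick_flushes();        /* room for another pending flush */
    }

    int main(void)
    {
        int q;

        for (q = 0; q < N_QUEUES; q++)
            flush_pending[q] = true;
        kick_flushes();        /* only the first 4 start */
        flush_done(0);         /* completion lets queue 4 start */
        return 0;
    }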
- */ - if (efx_sriov_enabled(efx)) { - rc = efx_mcdi_flush_rxqs(efx); - if (!rc) - goto wait; - } - - /* The hardware supports four concurrent rx flushes, each of - * which may need to be retried if there is an outstanding - * descriptor fetch - */ - efx_for_each_channel(channel, efx) { - efx_for_each_channel_rx_queue(rx_queue, channel) { - if (atomic_read(&efx->rxq_flush_outstanding) >= - EFX_RX_FLUSH_COUNT) - break; - - if (rx_queue->flush_pending) { - rx_queue->flush_pending = false; - atomic_dec(&efx->rxq_flush_pending); - atomic_inc(&efx->rxq_flush_outstanding); - efx_flush_rx_queue(rx_queue); - } - } - } - - wait: - timeout = wait_event_timeout(efx->flush_wq, efx_flush_wake(efx), - timeout); - } - - if (atomic_read(&efx->drain_pending) && - !efx_check_tx_flush_complete(efx)) { - netif_err(efx, hw, efx->net_dev, "failed to flush %d queues " - "(rx %d+%d)\n", atomic_read(&efx->drain_pending), - atomic_read(&efx->rxq_flush_outstanding), - atomic_read(&efx->rxq_flush_pending)); - rc = -ETIMEDOUT; - - atomic_set(&efx->drain_pending, 0); - atomic_set(&efx->rxq_flush_pending, 0); - atomic_set(&efx->rxq_flush_outstanding, 0); - } - - return rc; -} - -int efx_farch_fini_dmaq(struct efx_nic *efx) -{ - struct efx_channel *channel; - struct efx_tx_queue *tx_queue; - struct efx_rx_queue *rx_queue; - int rc = 0; - - /* Do not attempt to write to the NIC during EEH recovery */ - if (efx->state != STATE_RECOVERY) { - /* Only perform flush if DMA is enabled */ - if (efx->pci_dev->is_busmaster) { - efx->type->prepare_flush(efx); - rc = efx_farch_do_flush(efx); - efx->type->finish_flush(efx); - } - - efx_for_each_channel(channel, efx) { - efx_for_each_channel_rx_queue(rx_queue, channel) - efx_nic_fini_rx(rx_queue); - efx_for_each_channel_tx_queue(tx_queue, channel) - efx_nic_fini_tx(tx_queue); - } - } - - return rc; -} - -/************************************************************************** - * - * Event queue processing - * Event queues are processed by per-channel tasklets. - * - **************************************************************************/ - -/* Update a channel's event queue's read pointer (RPTR) register - * - * This writes the EVQ_RPTR_REG register for the specified channel's - * event queue. - */ -void efx_nic_eventq_read_ack(struct efx_channel *channel) -{ - efx_dword_t reg; - struct efx_nic *efx = channel->efx; - - EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, - channel->eventq_read_ptr & channel->eventq_mask); - - /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size - * of 4 bytes, but it is really 16 bytes just like later revisions. 
- */ - efx_writed(efx, ®, - efx->type->evq_rptr_tbl_base + - FR_BZ_EVQ_RPTR_STEP * channel->channel); -} - -/* Use HW to insert a SW defined event */ -void efx_generate_event(struct efx_nic *efx, unsigned int evq, - efx_qword_t *event) -{ - efx_oword_t drv_ev_reg; - - BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 || - FRF_AZ_DRV_EV_DATA_WIDTH != 64); - drv_ev_reg.u32[0] = event->u32[0]; - drv_ev_reg.u32[1] = event->u32[1]; - drv_ev_reg.u32[2] = 0; - drv_ev_reg.u32[3] = 0; - EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq); - efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV); -} - -static void efx_magic_event(struct efx_channel *channel, u32 magic) -{ - efx_qword_t event; - - EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE, - FSE_AZ_EV_CODE_DRV_GEN_EV, - FSF_AZ_DRV_GEN_EV_MAGIC, magic); - efx_generate_event(channel->efx, channel->channel, &event); -} - -/* Handle a transmit completion event - * - * The NIC batches TX completion events; the message we receive is of - * the form "complete all TX events up to this index". - */ -static int -efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) -{ - unsigned int tx_ev_desc_ptr; - unsigned int tx_ev_q_label; - struct efx_tx_queue *tx_queue; - struct efx_nic *efx = channel->efx; - int tx_packets = 0; - - if (unlikely(ACCESS_ONCE(efx->reset_pending))) - return 0; - - if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { - /* Transmit completion */ - tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR); - tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); - tx_queue = efx_channel_get_tx_queue( - channel, tx_ev_q_label % EFX_TXQ_TYPES); - tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) & - tx_queue->ptr_mask); - efx_xmit_done(tx_queue, tx_ev_desc_ptr); - } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { - /* Rewrite the FIFO write pointer */ - tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); - tx_queue = efx_channel_get_tx_queue( - channel, tx_ev_q_label % EFX_TXQ_TYPES); - - netif_tx_lock(efx->net_dev); - efx_notify_tx_desc(tx_queue); - netif_tx_unlock(efx->net_dev); - } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) && - EFX_WORKAROUND_10727(efx)) { - efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); - } else { - netif_err(efx, tx_err, efx->net_dev, - "channel %d unexpected TX event " - EFX_QWORD_FMT"\n", channel->channel, - EFX_QWORD_VAL(*event)); - } - - return tx_packets; -} - -/* Detect errors included in the rx_evt_pkt_ok bit. 
*/ -static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue, - const efx_qword_t *event) -{ - struct efx_channel *channel = efx_rx_queue_channel(rx_queue); - struct efx_nic *efx = rx_queue->efx; - bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err; - bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; - bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc; - bool rx_ev_other_err, rx_ev_pause_frm; - bool rx_ev_hdr_type, rx_ev_mcast_pkt; - unsigned rx_ev_pkt_type; - - rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); - rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); - rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC); - rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE); - rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event, - FSF_AZ_RX_EV_BUF_OWNER_ID_ERR); - rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event, - FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR); - rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event, - FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR); - rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR); - rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC); - rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ? - 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB)); - rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR); - - /* Every error apart from tobe_disc and pause_frm */ - rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err | - rx_ev_buf_owner_id_err | rx_ev_eth_crc_err | - rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err); - - /* Count errors that are not in MAC stats. Ignore expected - * checksum errors during self-test. */ - if (rx_ev_frm_trunc) - ++channel->n_rx_frm_trunc; - else if (rx_ev_tobe_disc) - ++channel->n_rx_tobe_disc; - else if (!efx->loopback_selftest) { - if (rx_ev_ip_hdr_chksum_err) - ++channel->n_rx_ip_hdr_chksum_err; - else if (rx_ev_tcp_udp_chksum_err) - ++channel->n_rx_tcp_udp_chksum_err; - } - - /* TOBE_DISC is expected on unicast mismatches; don't print out an - * error message. FRM_TRUNC indicates RXDP dropped the packet due - * to a FIFO overflow. - */ -#ifdef DEBUG - if (rx_ev_other_err && net_ratelimit()) { - netif_dbg(efx, rx_err, efx->net_dev, - " RX queue %d unexpected RX event " - EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n", - efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event), - rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", - rx_ev_ip_hdr_chksum_err ? - " [IP_HDR_CHKSUM_ERR]" : "", - rx_ev_tcp_udp_chksum_err ? - " [TCP_UDP_CHKSUM_ERR]" : "", - rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "", - rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", - rx_ev_drib_nib ? " [DRIB_NIB]" : "", - rx_ev_tobe_disc ? " [TOBE_DISC]" : "", - rx_ev_pause_frm ? " [PAUSE]" : ""); - } -#endif - - /* The frame must be discarded if any of these are true. */ - return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib | - rx_ev_tobe_disc | rx_ev_pause_frm) ? - EFX_RX_PKT_DISCARD : 0; -} - -/* Handle receive events that are not in-order. Return true if this - * can be handled as a partial packet discard, false if it's more - * serious. 
- */ -static bool -efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) -{ - struct efx_channel *channel = efx_rx_queue_channel(rx_queue); - struct efx_nic *efx = rx_queue->efx; - unsigned expected, dropped; - - if (rx_queue->scatter_n && - index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) & - rx_queue->ptr_mask)) { - ++channel->n_rx_nodesc_trunc; - return true; - } - - expected = rx_queue->removed_count & rx_queue->ptr_mask; - dropped = (index - expected) & rx_queue->ptr_mask; - netif_info(efx, rx_err, efx->net_dev, - "dropped %d events (index=%d expected=%d)\n", - dropped, index, expected); - - efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ? - RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); - return false; -} - -/* Handle a packet received event - * - * The NIC gives a "discard" flag if it's a unicast packet with the - * wrong destination address - * Also "is multicast" and "matches multicast filter" flags can be used to - * discard non-matching multicast packets. - */ -static void -efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) -{ - unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; - unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; - unsigned expected_ptr; - bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont; - u16 flags; - struct efx_rx_queue *rx_queue; - struct efx_nic *efx = channel->efx; - - if (unlikely(ACCESS_ONCE(efx->reset_pending))) - return; - - rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); - rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP); - WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != - channel->channel); - - rx_queue = efx_channel_get_rx_queue(channel); - - rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); - expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) & - rx_queue->ptr_mask); - - /* Check for partial drops and other errors */ - if (unlikely(rx_ev_desc_ptr != expected_ptr) || - unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) { - if (rx_ev_desc_ptr != expected_ptr && - !efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr)) - return; - - /* Discard all pending fragments */ - if (rx_queue->scatter_n) { - efx_rx_packet( - rx_queue, - rx_queue->removed_count & rx_queue->ptr_mask, - rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD); - rx_queue->removed_count += rx_queue->scatter_n; - rx_queue->scatter_n = 0; - } - - /* Return if there is no new fragment */ - if (rx_ev_desc_ptr != expected_ptr) - return; - - /* Discard new fragment if not SOP */ - if (!rx_ev_sop) { - efx_rx_packet( - rx_queue, - rx_queue->removed_count & rx_queue->ptr_mask, - 1, 0, EFX_RX_PKT_DISCARD); - ++rx_queue->removed_count; - return; - } - } - - ++rx_queue->scatter_n; - if (rx_ev_cont) - return; - - rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); - rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK); - rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); - - if (likely(rx_ev_pkt_ok)) { - /* If packet is marked as OK then we can rely on the - * hardware checksum and classification. 
- */ - flags = 0; - switch (rx_ev_hdr_type) { - case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP: - flags |= EFX_RX_PKT_TCP; - /* fall through */ - case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP: - flags |= EFX_RX_PKT_CSUMMED; - /* fall through */ - case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER: - case FSE_AZ_RX_EV_HDR_TYPE_OTHER: - break; - } - } else { - flags = efx_handle_rx_not_ok(rx_queue, event); - } - - /* Detect multicast packets that didn't match the filter */ - rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); - if (rx_ev_mcast_pkt) { - unsigned int rx_ev_mcast_hash_match = - EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH); - - if (unlikely(!rx_ev_mcast_hash_match)) { - ++channel->n_rx_mcast_mismatch; - flags |= EFX_RX_PKT_DISCARD; - } - } - - channel->irq_mod_score += 2; - - /* Handle received packet */ - efx_rx_packet(rx_queue, - rx_queue->removed_count & rx_queue->ptr_mask, - rx_queue->scatter_n, rx_ev_byte_cnt, flags); - rx_queue->removed_count += rx_queue->scatter_n; - rx_queue->scatter_n = 0; -} - -/* If this flush done event corresponds to a &struct efx_tx_queue, then - * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue - * of all transmit completions. - */ -static void -efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) -{ - struct efx_tx_queue *tx_queue; - int qid; - - qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); - if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) { - tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES, - qid % EFX_TXQ_TYPES); - if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) { - efx_magic_event(tx_queue->channel, - EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue)); - } - } -} - -/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush - * was succesful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add - * the RX queue back to the mask of RX queues in need of flushing. - */ -static void -efx_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) -{ - struct efx_channel *channel; - struct efx_rx_queue *rx_queue; - int qid; - bool failed; - - qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); - failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); - if (qid >= efx->n_channels) - return; - channel = efx_get_channel(efx, qid); - if (!efx_channel_has_rx_queue(channel)) - return; - rx_queue = efx_channel_get_rx_queue(channel); - - if (failed) { - netif_info(efx, hw, efx->net_dev, - "RXQ %d flush retry\n", qid); - rx_queue->flush_pending = true; - atomic_inc(&efx->rxq_flush_pending); - } else { - efx_magic_event(efx_rx_queue_channel(rx_queue), - EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)); - } - atomic_dec(&efx->rxq_flush_outstanding); - if (efx_flush_wake(efx)) - wake_up(&efx->flush_wq); -} - -static void -efx_handle_drain_event(struct efx_channel *channel) -{ - struct efx_nic *efx = channel->efx; - - WARN_ON(atomic_read(&efx->drain_pending) == 0); - atomic_dec(&efx->drain_pending); - if (efx_flush_wake(efx)) - wake_up(&efx->flush_wq); -} - -static void -efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event) -{ - struct efx_nic *efx = channel->efx; - struct efx_rx_queue *rx_queue = - efx_channel_has_rx_queue(channel) ? 
- efx_channel_get_rx_queue(channel) : NULL; - unsigned magic, code; - - magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); - code = _EFX_CHANNEL_MAGIC_CODE(magic); - - if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) { - channel->event_test_cpu = raw_smp_processor_id(); - } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) { - /* The queue must be empty, so we won't receive any rx - * events, so efx_process_channel() won't refill the - * queue. Refill it here */ - efx_fast_push_rx_descriptors(rx_queue); - } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) { - efx_handle_drain_event(channel); - } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) { - efx_handle_drain_event(channel); - } else { - netif_dbg(efx, hw, efx->net_dev, "channel %d received " - "generated event "EFX_QWORD_FMT"\n", - channel->channel, EFX_QWORD_VAL(*event)); - } -} - -static void -efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) -{ - struct efx_nic *efx = channel->efx; - unsigned int ev_sub_code; - unsigned int ev_sub_data; - - ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE); - ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); - - switch (ev_sub_code) { - case FSE_AZ_TX_DESCQ_FLS_DONE_EV: - netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", - channel->channel, ev_sub_data); - efx_handle_tx_flush_done(efx, event); - efx_sriov_tx_flush_done(efx, event); - break; - case FSE_AZ_RX_DESCQ_FLS_DONE_EV: - netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", - channel->channel, ev_sub_data); - efx_handle_rx_flush_done(efx, event); - efx_sriov_rx_flush_done(efx, event); - break; - case FSE_AZ_EVQ_INIT_DONE_EV: - netif_dbg(efx, hw, efx->net_dev, - "channel %d EVQ %d initialised\n", - channel->channel, ev_sub_data); - break; - case FSE_AZ_SRM_UPD_DONE_EV: - netif_vdbg(efx, hw, efx->net_dev, - "channel %d SRAM update done\n", channel->channel); - break; - case FSE_AZ_WAKE_UP_EV: - netif_vdbg(efx, hw, efx->net_dev, - "channel %d RXQ %d wakeup event\n", - channel->channel, ev_sub_data); - break; - case FSE_AZ_TIMER_EV: - netif_vdbg(efx, hw, efx->net_dev, - "channel %d RX queue %d timer expired\n", - channel->channel, ev_sub_data); - break; - case FSE_AA_RX_RECOVER_EV: - netif_err(efx, rx_err, efx->net_dev, - "channel %d seen DRIVER RX_RESET event. " - "Resetting.\n", channel->channel); - atomic_inc(&efx->rx_reset); - efx_schedule_reset(efx, - EFX_WORKAROUND_6555(efx) ? - RESET_TYPE_RX_RECOVERY : - RESET_TYPE_DISABLE); - break; - case FSE_BZ_RX_DSC_ERROR_EV: - if (ev_sub_data < EFX_VI_BASE) { - netif_err(efx, rx_err, efx->net_dev, - "RX DMA Q %d reports descriptor fetch error." - " RX Q %d is disabled.\n", ev_sub_data, - ev_sub_data); - efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH); - } else - efx_sriov_desc_fetch_err(efx, ev_sub_data); - break; - case FSE_BZ_TX_DSC_ERROR_EV: - if (ev_sub_data < EFX_VI_BASE) { - netif_err(efx, tx_err, efx->net_dev, - "TX DMA Q %d reports descriptor fetch error." 
- " TX Q %d is disabled.\n", ev_sub_data, - ev_sub_data); - efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); - } else - efx_sriov_desc_fetch_err(efx, ev_sub_data); - break; - default: - netif_vdbg(efx, hw, efx->net_dev, - "channel %d unknown driver event code %d " - "data %04x\n", channel->channel, ev_sub_code, - ev_sub_data); - break; - } -} - -int efx_nic_process_eventq(struct efx_channel *channel, int budget) -{ - struct efx_nic *efx = channel->efx; - unsigned int read_ptr; - efx_qword_t event, *p_event; - int ev_code; - int tx_packets = 0; - int spent = 0; - - read_ptr = channel->eventq_read_ptr; - - for (;;) { - p_event = efx_event(channel, read_ptr); - event = *p_event; - - if (!efx_event_present(&event)) - /* End of events */ - break; - - netif_vdbg(channel->efx, intr, channel->efx->net_dev, - "channel %d event is "EFX_QWORD_FMT"\n", - channel->channel, EFX_QWORD_VAL(event)); - - /* Clear this event by marking it all ones */ - EFX_SET_QWORD(*p_event); - - ++read_ptr; - - ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); - - switch (ev_code) { - case FSE_AZ_EV_CODE_RX_EV: - efx_handle_rx_event(channel, &event); - if (++spent == budget) - goto out; - break; - case FSE_AZ_EV_CODE_TX_EV: - tx_packets += efx_handle_tx_event(channel, &event); - if (tx_packets > efx->txq_entries) { - spent = budget; - goto out; - } - break; - case FSE_AZ_EV_CODE_DRV_GEN_EV: - efx_handle_generated_event(channel, &event); - break; - case FSE_AZ_EV_CODE_DRIVER_EV: - efx_handle_driver_event(channel, &event); - break; - case FSE_CZ_EV_CODE_USER_EV: - efx_sriov_event(channel, &event); - break; - case FSE_CZ_EV_CODE_MCDI_EV: - efx_mcdi_process_event(channel, &event); - break; - case FSE_AZ_EV_CODE_GLOBAL_EV: - if (efx->type->handle_global_event && - efx->type->handle_global_event(channel, &event)) - break; - /* else fall through */ - default: - netif_err(channel->efx, hw, channel->efx->net_dev, - "channel %d unknown event type %d (data " - EFX_QWORD_FMT ")\n", channel->channel, - ev_code, EFX_QWORD_VAL(event)); - } - } - -out: - channel->eventq_read_ptr = read_ptr; - return spent; -} - /* Check whether an event is present in the eventq at the current * read pointer. Only useful for self-test. */ @@ -1399,326 +59,18 @@ bool efx_nic_event_present(struct efx_channel *channel) return efx_event_present(efx_event(channel, channel->eventq_read_ptr)); } -/* Allocate buffer table entries for event queue */ -int efx_nic_probe_eventq(struct efx_channel *channel) -{ - struct efx_nic *efx = channel->efx; - unsigned entries; - - entries = channel->eventq_mask + 1; - return efx_alloc_special_buffer(efx, &channel->eventq, - entries * sizeof(efx_qword_t)); -} - -void efx_nic_init_eventq(struct efx_channel *channel) -{ - efx_oword_t reg; - struct efx_nic *efx = channel->efx; - - netif_dbg(efx, hw, efx->net_dev, - "channel %d event queue in special buffers %d-%d\n", - channel->channel, channel->eventq.index, - channel->eventq.index + channel->eventq.entries - 1); - - if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { - EFX_POPULATE_OWORD_3(reg, - FRF_CZ_TIMER_Q_EN, 1, - FRF_CZ_HOST_NOTIFY_MODE, 0, - FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); - efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); - } - - /* Pin event queue buffer */ - efx_init_special_buffer(efx, &channel->eventq); - - /* Fill event queue with all ones (i.e. 
empty events) */ - memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len); - - /* Push event queue to card */ - EFX_POPULATE_OWORD_3(reg, - FRF_AZ_EVQ_EN, 1, - FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries), - FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index); - efx_writeo_table(efx, ®, efx->type->evq_ptr_tbl_base, - channel->channel); - - efx->type->push_irq_moderation(channel); -} - -void efx_nic_fini_eventq(struct efx_channel *channel) -{ - efx_oword_t reg; - struct efx_nic *efx = channel->efx; - - /* Remove event queue from card */ - EFX_ZERO_OWORD(reg); - efx_writeo_table(efx, ®, efx->type->evq_ptr_tbl_base, - channel->channel); - if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) - efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); - - /* Unpin event queue */ - efx_fini_special_buffer(efx, &channel->eventq); -} - -/* Free buffers backing event queue */ -void efx_nic_remove_eventq(struct efx_channel *channel) -{ - efx_free_special_buffer(channel->efx, &channel->eventq); -} - - void efx_nic_event_test_start(struct efx_channel *channel) { channel->event_test_cpu = -1; smp_wmb(); - efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel)); -} - -void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue) -{ - efx_magic_event(efx_rx_queue_channel(rx_queue), - EFX_CHANNEL_MAGIC_FILL(rx_queue)); -} - -/************************************************************************** - * - * Hardware interrupts - * The hardware interrupt handler does very little work; all the event - * queue processing is carried out by per-channel tasklets. - * - **************************************************************************/ - -/* Enable/disable/generate interrupts */ -static inline void efx_nic_interrupts(struct efx_nic *efx, - bool enabled, bool force) -{ - efx_oword_t int_en_reg_ker; - - EFX_POPULATE_OWORD_3(int_en_reg_ker, - FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level, - FRF_AZ_KER_INT_KER, force, - FRF_AZ_DRV_INT_EN_KER, enabled); - efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); + channel->efx->type->ev_test_generate(channel); } -void efx_nic_enable_interrupts(struct efx_nic *efx) -{ - EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr)); - wmb(); /* Ensure interrupt vector is clear before interrupts enabled */ - - efx_nic_interrupts(efx, true, false); -} - -void efx_nic_disable_interrupts(struct efx_nic *efx) -{ - /* Disable interrupts */ - efx_nic_interrupts(efx, false, false); -} - -/* Generate a test interrupt - * Interrupt must already have been enabled, otherwise nasty things - * may happen. - */ void efx_nic_irq_test_start(struct efx_nic *efx) { efx->last_irq_cpu = -1; smp_wmb(); - efx_nic_interrupts(efx, true, true); -} - -/* Process a fatal interrupt - * Disable bus mastering ASAP and schedule a reset - */ -irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx) -{ - struct falcon_nic_data *nic_data = efx->nic_data; - efx_oword_t *int_ker = efx->irq_status.addr; - efx_oword_t fatal_intr; - int error, mem_perr; - - efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER); - error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR); - - netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status " - EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), - EFX_OWORD_VAL(fatal_intr), - error ? 
"disabling bus mastering" : "no recognised error"); - - /* If this is a memory parity error dump which blocks are offending */ - mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) || - EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER)); - if (mem_perr) { - efx_oword_t reg; - efx_reado(efx, ®, FR_AZ_MEM_STAT); - netif_err(efx, hw, efx->net_dev, - "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n", - EFX_OWORD_VAL(reg)); - } - - /* Disable both devices */ - pci_clear_master(efx->pci_dev); - if (efx_nic_is_dual_func(efx)) - pci_clear_master(nic_data->pci_dev2); - efx_nic_disable_interrupts(efx); - - /* Count errors and reset or disable the NIC accordingly */ - if (efx->int_error_count == 0 || - time_after(jiffies, efx->int_error_expire)) { - efx->int_error_count = 0; - efx->int_error_expire = - jiffies + EFX_INT_ERROR_EXPIRE * HZ; - } - if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { - netif_err(efx, hw, efx->net_dev, - "SYSTEM ERROR - reset scheduled\n"); - efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); - } else { - netif_err(efx, hw, efx->net_dev, - "SYSTEM ERROR - max number of errors seen." - "NIC will be disabled\n"); - efx_schedule_reset(efx, RESET_TYPE_DISABLE); - } - - return IRQ_HANDLED; -} - -/* Handle a legacy interrupt - * Acknowledges the interrupt and schedule event queue processing. - */ -static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id) -{ - struct efx_nic *efx = dev_id; - bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); - efx_oword_t *int_ker = efx->irq_status.addr; - irqreturn_t result = IRQ_NONE; - struct efx_channel *channel; - efx_dword_t reg; - u32 queues; - int syserr; - - /* Read the ISR which also ACKs the interrupts */ - efx_readd(efx, ®, FR_BZ_INT_ISR0); - queues = EFX_EXTRACT_DWORD(reg, 0, 31); - - /* Legacy interrupts are disabled too late by the EEH kernel - * code. Disable them earlier. - * If an EEH error occurred, the read will have returned all ones. - */ - if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) && - !efx->eeh_disabled_legacy_irq) { - disable_irq_nosync(efx->legacy_irq); - efx->eeh_disabled_legacy_irq = true; - } - - /* Handle non-event-queue sources */ - if (queues & (1U << efx->irq_level) && soft_enabled) { - syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); - if (unlikely(syserr)) - return efx_nic_fatal_interrupt(efx); - efx->last_irq_cpu = raw_smp_processor_id(); - } - - if (queues != 0) { - if (EFX_WORKAROUND_15783(efx)) - efx->irq_zero_count = 0; - - /* Schedule processing of any interrupting queues */ - if (likely(soft_enabled)) { - efx_for_each_channel(channel, efx) { - if (queues & 1) - efx_schedule_channel_irq(channel); - queues >>= 1; - } - } - result = IRQ_HANDLED; - - } else if (EFX_WORKAROUND_15783(efx)) { - efx_qword_t *event; - - /* We can't return IRQ_HANDLED more than once on seeing ISR=0 - * because this might be a shared interrupt. */ - if (efx->irq_zero_count++ == 0) - result = IRQ_HANDLED; - - /* Ensure we schedule or rearm all event queues */ - if (likely(soft_enabled)) { - efx_for_each_channel(channel, efx) { - event = efx_event(channel, - channel->eventq_read_ptr); - if (efx_event_present(event)) - efx_schedule_channel_irq(channel); - else - efx_nic_eventq_read_ack(channel); - } - } - } - - if (result == IRQ_HANDLED) - netif_vdbg(efx, intr, efx->net_dev, - "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", - irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); - - return result; -} - -/* Handle an MSI interrupt - * - * Handle an MSI hardware interrupt. 
This routine schedules event - * queue processing. No interrupt acknowledgement cycle is necessary. - * Also, we never need to check that the interrupt is for us, since - * MSI interrupts cannot be shared. - */ -static irqreturn_t efx_msi_interrupt(int irq, void *dev_id) -{ - struct efx_msi_context *context = dev_id; - struct efx_nic *efx = context->efx; - efx_oword_t *int_ker = efx->irq_status.addr; - int syserr; - - netif_vdbg(efx, intr, efx->net_dev, - "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", - irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); - - if (!likely(ACCESS_ONCE(efx->irq_soft_enabled))) - return IRQ_HANDLED; - - /* Handle non-event-queue sources */ - if (context->index == efx->irq_level) { - syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); - if (unlikely(syserr)) - return efx_nic_fatal_interrupt(efx); - efx->last_irq_cpu = raw_smp_processor_id(); - } - - /* Schedule processing of the channel */ - efx_schedule_channel_irq(efx->channel[context->index]); - - return IRQ_HANDLED; -} - - -/* Setup RSS indirection table. - * This maps from the hash value of the packet to RXQ - */ -void efx_nic_push_rx_indir_table(struct efx_nic *efx) -{ - size_t i = 0; - efx_dword_t dword; - - if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) - return; - - BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != - FR_BZ_RX_INDIRECTION_TBL_ROWS); - - for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { - EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, - efx->rx_indir_table[i]); - efx_writed(efx, &dword, - FR_BZ_RX_INDIRECTION_TBL + - FR_BZ_RX_INDIRECTION_TBL_STEP * i); - } + efx->type->irq_test_generate(efx); } /* Hook interrupt handler(s) @@ -1731,13 +83,8 @@ int efx_nic_init_interrupt(struct efx_nic *efx) int rc; if (!EFX_INT_MODE_USE_MSI(efx)) { - irq_handler_t handler; - if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) - handler = efx_legacy_interrupt; - else - handler = falcon_legacy_interrupt_a1; - - rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED, + rc = request_irq(efx->legacy_irq, + efx->type->irq_handle_legacy, IRQF_SHARED, efx->name, efx); if (rc) { netif_err(efx, drv, efx->net_dev, @@ -1762,7 +109,7 @@ int efx_nic_init_interrupt(struct efx_nic *efx) /* Hook MSI or MSI-X interrupt */ n_irqs = 0; efx_for_each_channel(channel, efx) { - rc = request_irq(channel->irq, efx_msi_interrupt, + rc = request_irq(channel->irq, efx->type->irq_handle_msi, IRQF_PROBE_SHARED, /* Not shared */ efx->msi_context[channel->channel].name, &efx->msi_context[channel->channel]); @@ -1818,154 +165,6 @@ void efx_nic_fini_interrupt(struct efx_nic *efx) free_irq(efx->legacy_irq, efx); } -/* Looks at available SRAM resources and works out how many queues we - * can support, and where things like descriptor caches should live. - * - * SRAM is split up as follows: - * 0 buftbl entries for channels - * efx->vf_buftbl_base buftbl entries for SR-IOV - * efx->rx_dc_base RX descriptor caches - * efx->tx_dc_base TX descriptor caches - */ -void efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw) -{ - unsigned vi_count, buftbl_min; - - /* Account for the buffer table entries backing the datapath channels - * and the descriptor caches for those channels. 
- */ - buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE + - efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE + - efx->n_channels * EFX_MAX_EVQ_SIZE) - * sizeof(efx_qword_t) / EFX_BUF_SIZE); - vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); - -#ifdef CONFIG_SFC_SRIOV - if (efx_sriov_wanted(efx)) { - unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit; - - efx->vf_buftbl_base = buftbl_min; - - vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES; - vi_count = max(vi_count, EFX_VI_BASE); - buftbl_free = (sram_lim_qw - buftbl_min - - vi_count * vi_dc_entries); - - entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) * - efx_vf_size(efx)); - vf_limit = min(buftbl_free / entries_per_vf, - (1024U - EFX_VI_BASE) >> efx->vi_scale); - - if (efx->vf_count > vf_limit) { - netif_err(efx, probe, efx->net_dev, - "Reducing VF count from from %d to %d\n", - efx->vf_count, vf_limit); - efx->vf_count = vf_limit; - } - vi_count += efx->vf_count * efx_vf_size(efx); - } -#endif - - efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES; - efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES; -} - -u32 efx_nic_fpga_ver(struct efx_nic *efx) -{ - efx_oword_t altera_build; - efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD); - return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER); -} - -void efx_nic_init_common(struct efx_nic *efx) -{ - efx_oword_t temp; - - /* Set positions of descriptor caches in SRAM. */ - EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base); - efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG); - EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base); - efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG); - - /* Set TX descriptor cache size. */ - BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER)); - EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER); - efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG); - - /* Set RX descriptor cache size. Set low watermark to size-8, as - * this allows most efficient prefetching. - */ - BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER)); - EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER); - efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG); - EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8); - efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM); - - /* Program INT_KER address */ - EFX_POPULATE_OWORD_2(temp, - FRF_AZ_NORM_INT_VEC_DIS_KER, - EFX_INT_MODE_USE_MSI(efx), - FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); - efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER); - - if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx)) - /* Use an interrupt level unused by event queues */ - efx->irq_level = 0x1f; - else - /* Use a valid MSI-X vector */ - efx->irq_level = 0; - - /* Enable all the genuinely fatal interrupts. (They are still - * masked by the overall interrupt mask, controlled by - * falcon_interrupts()). - * - * Note: All other fatal interrupts are enabled - */ - EFX_POPULATE_OWORD_3(temp, - FRF_AZ_ILL_ADR_INT_KER_EN, 1, - FRF_AZ_RBUF_OWN_INT_KER_EN, 1, - FRF_AZ_TBUF_OWN_INT_KER_EN, 1); - if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) - EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1); - EFX_INVERT_OWORD(temp); - efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); - - efx_nic_push_rx_indir_table(efx); - - /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be - * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. 
- */ - efx_reado(efx, &temp, FR_AZ_TX_RESERVED); - EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); - EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); - EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); - EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1); - EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); - /* Enable SW_EV to inherit in char driver - assume harmless here */ - EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); - /* Prefetch threshold 2 => fetch when descriptor cache half empty */ - EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); - /* Disable hardware watchdog which can misfire */ - EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); - /* Squash TX of packets of 16 bytes or less */ - if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) - EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); - efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); - - if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { - EFX_POPULATE_OWORD_4(temp, - /* Default values */ - FRF_BZ_TX_PACE_SB_NOT_AF, 0x15, - FRF_BZ_TX_PACE_SB_AF, 0xb, - FRF_BZ_TX_PACE_FB_BASE, 0, - /* Allow large pace values in the - * fast bin. */ - FRF_BZ_TX_PACE_BIN_TH, - FFE_BZ_TX_PACE_RESERVED); - efx_writeo(efx, &temp, FR_BZ_TX_PACE); - } -} - /* Register dump */ #define REGISTER_REVISION_A 1 diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 21f662c..25e25b6 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -34,7 +34,7 @@ static inline int efx_nic_rev(struct efx_nic *efx) return efx->type->revision; } -extern u32 efx_nic_fpga_ver(struct efx_nic *efx); +extern u32 efx_farch_fpga_ver(struct efx_nic *efx); /* NIC has two interlinked PCI functions for the same port. */ static inline bool efx_nic_is_dual_func(struct efx_nic *efx) @@ -42,6 +42,65 @@ static inline bool efx_nic_is_dual_func(struct efx_nic *efx) return efx_nic_rev(efx) < EFX_REV_FALCON_B0; } +/* Read the current event from the event queue */ +static inline efx_qword_t *efx_event(struct efx_channel *channel, + unsigned int index) +{ + return ((efx_qword_t *) (channel->eventq.buf.addr)) + + (index & channel->eventq_mask); +} + +/* See if an event is present + * + * We check both the high and low dword of the event for all ones. We + * wrote all ones when we cleared the event, and no valid event can + * have all ones in either its high or low dwords. This approach is + * robust against reordering. + * + * Note that using a single 64-bit comparison is incorrect; even + * though the CPU read will be atomic, the DMA write may not be. + */ +static inline int efx_event_present(efx_qword_t *event) +{ + return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) | + EFX_DWORD_IS_ALL_ONES(event->dword[1])); +} + +/* Returns a pointer to the specified transmit descriptor in the TX + * descriptor queue belonging to the specified channel. + */ +static inline efx_qword_t * +efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) +{ + return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index; +} + +/* Decide whether to push a TX descriptor to the NIC vs merely writing + * the doorbell. This can reduce latency when we are adding a single + * descriptor to an empty queue, but is otherwise pointless. Further, + * Falcon and Siena have hardware bugs (SF bug 33851) that may be + * triggered if we don't check this. 
+ */ +static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue, + unsigned int write_count) +{ + unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count); + + if (empty_read_count == 0) + return false; + + tx_queue->empty_read_count = 0; + return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0 + && tx_queue->write_count - write_count == 1; +} + +/* Returns a pointer to the specified descriptor in the RX descriptor queue */ +static inline efx_qword_t * +efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) +{ + return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index; +} + enum { PHY_TYPE_NONE = 0, PHY_TYPE_TXC43128 = 1, @@ -258,25 +317,93 @@ extern const struct efx_nic_type siena_a0_nic_type; extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info); /* TX data path */ -extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue); -extern void efx_nic_init_tx(struct efx_tx_queue *tx_queue); -extern void efx_nic_remove_tx(struct efx_tx_queue *tx_queue); -extern void efx_nic_push_buffers(struct efx_tx_queue *tx_queue); +static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue) +{ + return tx_queue->efx->type->tx_probe(tx_queue); +} +static inline void efx_nic_init_tx(struct efx_tx_queue *tx_queue) +{ + tx_queue->efx->type->tx_init(tx_queue); +} +static inline void efx_nic_remove_tx(struct efx_tx_queue *tx_queue) +{ + tx_queue->efx->type->tx_remove(tx_queue); +} +static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue) +{ + tx_queue->efx->type->tx_write(tx_queue); +} /* RX data path */ -extern int efx_nic_probe_rx(struct efx_rx_queue *rx_queue); -extern void efx_nic_init_rx(struct efx_rx_queue *rx_queue); -extern void efx_nic_remove_rx(struct efx_rx_queue *rx_queue); -extern void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue); -extern void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue); +static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue) +{ + return rx_queue->efx->type->rx_probe(rx_queue); +} +static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue) +{ + rx_queue->efx->type->rx_init(rx_queue); +} +static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue) +{ + rx_queue->efx->type->rx_remove(rx_queue); +} +static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue) +{ + rx_queue->efx->type->rx_write(rx_queue); +} +static inline void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue) +{ + rx_queue->efx->type->rx_defer_refill(rx_queue); +} /* Event data path */ -extern int efx_nic_probe_eventq(struct efx_channel *channel); -extern void efx_nic_init_eventq(struct efx_channel *channel); -extern void efx_nic_fini_eventq(struct efx_channel *channel); -extern void efx_nic_remove_eventq(struct efx_channel *channel); -extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota); -extern void efx_nic_eventq_read_ack(struct efx_channel *channel); +static inline int efx_nic_probe_eventq(struct efx_channel *channel) +{ + return channel->efx->type->ev_probe(channel); +} +static inline void efx_nic_init_eventq(struct efx_channel *channel) +{ + channel->efx->type->ev_init(channel); +} +static inline void efx_nic_fini_eventq(struct efx_channel *channel) +{ + channel->efx->type->ev_fini(channel); +} +static inline void efx_nic_remove_eventq(struct efx_channel *channel) +{ + channel->efx->type->ev_remove(channel); +} +static inline int +efx_nic_process_eventq(struct efx_channel *channel, int quota) +{ + return 
channel->efx->type->ev_process(channel, quota); +} +static inline void efx_nic_eventq_read_ack(struct efx_channel *channel) +{ + channel->efx->type->ev_read_ack(channel); +} +extern void efx_nic_event_test_start(struct efx_channel *channel); + +/* Falcon/Siena queue operations */ +extern int efx_farch_tx_probe(struct efx_tx_queue *tx_queue); +extern void efx_farch_tx_init(struct efx_tx_queue *tx_queue); +extern void efx_farch_tx_fini(struct efx_tx_queue *tx_queue); +extern void efx_farch_tx_remove(struct efx_tx_queue *tx_queue); +extern void efx_farch_tx_write(struct efx_tx_queue *tx_queue); +extern int efx_farch_rx_probe(struct efx_rx_queue *rx_queue); +extern void efx_farch_rx_init(struct efx_rx_queue *rx_queue); +extern void efx_farch_rx_fini(struct efx_rx_queue *rx_queue); +extern void efx_farch_rx_remove(struct efx_rx_queue *rx_queue); +extern void efx_farch_rx_write(struct efx_rx_queue *rx_queue); +extern void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue); +extern int efx_farch_ev_probe(struct efx_channel *channel); +extern void efx_farch_ev_init(struct efx_channel *channel); +extern void efx_farch_ev_fini(struct efx_channel *channel); +extern void efx_farch_ev_remove(struct efx_channel *channel); +extern int efx_farch_ev_process(struct efx_channel *channel, int quota); +extern void efx_farch_ev_read_ack(struct efx_channel *channel); +extern void efx_farch_ev_test_generate(struct efx_channel *channel); + extern bool efx_nic_event_present(struct efx_channel *channel); /* Some statistics are computed as A - B where A and B each increase @@ -297,15 +424,18 @@ static inline void efx_update_diff_stat(u64 *stat, u64 diff) *stat = diff; } -/* Interrupts and test events */ +/* Interrupts */ extern int efx_nic_init_interrupt(struct efx_nic *efx); -extern void efx_nic_enable_interrupts(struct efx_nic *efx); -extern void efx_nic_event_test_start(struct efx_channel *channel); extern void efx_nic_irq_test_start(struct efx_nic *efx); -extern void efx_nic_disable_interrupts(struct efx_nic *efx); extern void efx_nic_fini_interrupt(struct efx_nic *efx); -extern irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx); -extern irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id); + +/* Falcon/Siena interrupts */ +extern void efx_farch_irq_enable_master(struct efx_nic *efx); +extern void efx_farch_irq_test_generate(struct efx_nic *efx); +extern void efx_farch_irq_disable_master(struct efx_nic *efx); +extern irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id); +extern irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id); +extern irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx); static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel) { @@ -317,36 +447,40 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx) } /* Global Resources */ -extern int efx_farch_fini_dmaq(struct efx_nic *efx); +extern int efx_nic_flush_queues(struct efx_nic *efx); extern void siena_prepare_flush(struct efx_nic *efx); +extern int efx_farch_fini_dmaq(struct efx_nic *efx); extern void siena_finish_flush(struct efx_nic *efx); extern void falcon_start_nic_stats(struct efx_nic *efx); extern void falcon_stop_nic_stats(struct efx_nic *efx); extern int falcon_reset_xaui(struct efx_nic *efx); -extern void -efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw); -extern void efx_nic_init_common(struct efx_nic *efx); -extern void efx_nic_push_rx_indir_table(struct efx_nic *efx); +extern void efx_farch_dimension_resources(struct efx_nic *efx, unsigned 
sram_lim_qw); +extern void efx_farch_init_common(struct efx_nic *efx); +static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx) +{ + efx->type->rx_push_indir_table(efx); +} +extern void efx_farch_rx_push_indir_table(struct efx_nic *efx); int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, unsigned int len, gfp_t gfp_flags); void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer); /* Tests */ -struct efx_nic_register_test { +struct efx_farch_register_test { unsigned address; efx_oword_t mask; }; -extern int efx_nic_test_registers(struct efx_nic *efx, - const struct efx_nic_register_test *regs, - size_t n_regs); +extern int efx_farch_test_registers(struct efx_nic *efx, + const struct efx_farch_register_test *regs, + size_t n_regs); extern size_t efx_nic_get_regs_len(struct efx_nic *efx); extern void efx_nic_get_regs(struct efx_nic *efx, void *buf); #define EFX_MAX_FLUSH_TIME 5000 -extern void efx_generate_event(struct efx_nic *efx, unsigned int evq, - efx_qword_t *event); +extern void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq, + efx_qword_t *event); #endif /* EFX_NIC_H */ diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index b4c1d43..6a833d5 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -63,7 +63,7 @@ void siena_finish_flush(struct efx_nic *efx) efx_mcdi_set_mac(efx); } -static const struct efx_nic_register_test siena_register_tests[] = { +static const struct efx_farch_register_test siena_register_tests[] = { { FR_AZ_ADR_REGION, EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) }, { FR_CZ_USR_EV_CFG, @@ -107,8 +107,8 @@ static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) goto out; tests->registers = - efx_nic_test_registers(efx, siena_register_tests, - ARRAY_SIZE(siena_register_tests)) + efx_farch_test_registers(efx, siena_register_tests, + ARRAY_SIZE(siena_register_tests)) ? -1 : 1; rc = efx_mcdi_reset(efx, reset_method); @@ -184,7 +184,7 @@ static void siena_dimension_resources(struct efx_nic *efx) * the buffer table and descriptor caches. In theory we can * map both blocks to one port, but we don't. 
*/ - efx_nic_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2); + efx_farch_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2); } static int siena_probe_nic(struct efx_nic *efx) @@ -200,7 +200,7 @@ static int siena_probe_nic(struct efx_nic *efx) return -ENOMEM; efx->nic_data = nic_data; - if (efx_nic_fpga_ver(efx) != 0) { + if (efx_farch_fpga_ver(efx) != 0) { netif_err(efx, probe, efx->net_dev, "Siena FPGA not supported\n"); rc = -ENODEV; @@ -351,7 +351,7 @@ static int siena_init_nic(struct efx_nic *efx) EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1); efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG); - efx_nic_init_common(efx); + efx_farch_init_common(efx); return 0; } @@ -705,6 +705,28 @@ const struct efx_nic_type siena_a0_nic_type = { .mcdi_poll_response = siena_mcdi_poll_response, .mcdi_read_response = siena_mcdi_read_response, .mcdi_poll_reboot = siena_mcdi_poll_reboot, + .irq_enable_master = efx_farch_irq_enable_master, + .irq_test_generate = efx_farch_irq_test_generate, + .irq_disable_non_ev = efx_farch_irq_disable_master, + .irq_handle_msi = efx_farch_msi_interrupt, + .irq_handle_legacy = efx_farch_legacy_interrupt, + .tx_probe = efx_farch_tx_probe, + .tx_init = efx_farch_tx_init, + .tx_remove = efx_farch_tx_remove, + .tx_write = efx_farch_tx_write, + .rx_push_indir_table = efx_farch_rx_push_indir_table, + .rx_probe = efx_farch_rx_probe, + .rx_init = efx_farch_rx_init, + .rx_remove = efx_farch_rx_remove, + .rx_write = efx_farch_rx_write, + .rx_defer_refill = efx_farch_rx_defer_refill, + .ev_probe = efx_farch_ev_probe, + .ev_init = efx_farch_ev_init, + .ev_fini = efx_farch_ev_fini, + .ev_remove = efx_farch_ev_remove, + .ev_process = efx_farch_ev_process, + .ev_read_ack = efx_farch_ev_read_ack, + .ev_test_generate = efx_farch_ev_test_generate, .revision = EFX_REV_SIENA_A0, .mem_map_size = (FR_CZ_MC_TREG_SMEM + diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c index 4d214bc..4b8eef9 100644 --- a/drivers/net/ethernet/sfc/siena_sriov.c +++ b/drivers/net/ethernet/sfc/siena_sriov.c @@ -464,8 +464,9 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf) VFDI_EV_SEQ, (vf->msg_seqno & 0xff), VFDI_EV_TYPE, VFDI_EV_TYPE_STATUS); ++vf->msg_seqno; - efx_generate_event(efx, EFX_VI_BASE + vf->index * efx_vf_size(efx), - &event); + efx_farch_generate_event(efx, + EFX_VI_BASE + vf->index * efx_vf_size(efx), + &event); } static void efx_sriov_bufs(struct efx_nic *efx, unsigned offset, -- cgit v0.10.2 From 5c044e6368995e6590b616aa0043fe03670aefaa Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Wed, 21 Aug 2013 10:08:55 +0300 Subject: net/mlx4_en: Coding style cleanup in mlx4_en_dcbnl_ieee_setpfc() Fix some coding style issues in this function. Signed-off-by: Amir Vadai Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c index 9d4a1ea..f71fba9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c @@ -160,6 +160,7 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc) { struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_port_profile *prof = priv->prof; struct mlx4_en_dev *mdev = priv->mdev; int err; @@ -169,15 +170,17 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev, pfc->mbc, pfc->delay); - priv->prof->rx_pause = priv->prof->tx_pause = !!pfc->pfc_en; - priv->prof->rx_ppp = priv->prof->tx_ppp = pfc->pfc_en; + prof->rx_pause = !!pfc->pfc_en; + prof->tx_pause = !!pfc->pfc_en; + prof->rx_ppp = pfc->pfc_en; + prof->tx_ppp = pfc->pfc_en; err = mlx4_SET_PORT_general(mdev->dev, priv->port, priv->rx_skb_size + ETH_FCS_LEN, - priv->prof->tx_pause, - priv->prof->tx_ppp, - priv->prof->rx_pause, - priv->prof->rx_ppp); + prof->tx_pause, + prof->tx_ppp, + prof->rx_pause, + prof->rx_ppp); if (err) en_err(priv, "Failed setting pause params\n"); -- cgit v0.10.2 From 0e316f1b9bd86261fb3283b3ac2eeebd5ef56f1b Mon Sep 17 00:00:00 2001 From: Eugenia Emantayev Date: Wed, 21 Aug 2013 10:08:56 +0300 Subject: net/mlx4_en: Disable global flow control when PFC enabled Fix a bug when FC and PFC are enabled/disabled at the same time. According to the ConnectX-3 Programmer Manual these two features are mutually exclusive. So make sure that enabling PFC turns off global FC and vice versa. Otherwise it hurts performance. Signed-off-by: Eugenia Emantayev Signed-off-by: Amir Vadai Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c index f71fba9..b4881b6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c @@ -170,8 +170,8 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev, pfc->mbc, pfc->delay); - prof->rx_pause = !!pfc->pfc_en; - prof->tx_pause = !!pfc->pfc_en; + prof->rx_pause = !pfc->pfc_en; + prof->tx_pause = !pfc->pfc_en; prof->rx_ppp = pfc->pfc_en; prof->tx_ppp = pfc->pfc_en; -- cgit v0.10.2 From bd2f631d7c60502e24e801a3eea1806868951bcc Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Wed, 21 Aug 2013 10:08:57 +0300 Subject: net/mlx4_en: Notify user when TX ring in error state When hardware gets into an error state, the user must be notified about it. When a QP is in error state, no traffic will be tx'ed from the attached tx_ring. The driver should know how to recover from this unexpected state. I will send the recovery flow later on, but having the print shouldn't be delayed. Signed-off-by: Amir Vadai Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 6dcca98..0d691e3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -362,6 +362,15 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq) */ rmb(); + if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == + MLX4_CQE_OPCODE_ERROR)) { + struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe; + + en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n", + cqe_err->vendor_err_syndrome, + cqe_err->syndrome); + } + /* Skip over last polled CQE */ new_index = be16_to_cpu(cqe->wqe_index) & size_mask; -- cgit v0.10.2 From 237a3a3b1544574ead3dc459063424de8c0319e0 Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Wed, 21 Aug 2013 10:08:58 +0300 Subject: net/mlx4_en: Fix handling of dma_map failure Result of skb_frag_dma_map() and dma_map_single() wasn't checked. Added a check and proper handling in case of failure. Moved the mapping to the beginning of mlx4_en_xmit(), before updating the ring data structure to make error handling easier. Signed-off-by: Amir Vadai Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 0d691e3..c28868b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -588,6 +588,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; + struct device *ddev = priv->ddev; struct mlx4_en_tx_ring *ring; struct mlx4_en_tx_desc *tx_desc; struct mlx4_wqe_data_seg *data; @@ -674,6 +675,56 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) tx_info->skb = skb; tx_info->nr_txbb = nr_txbb; + if (lso_header_size) + data = ((void *)&tx_desc->lso + ALIGN(lso_header_size + 4, + DS_SIZE)); + else + data = &tx_desc->data; + + /* valid only for none inline segments */ + tx_info->data_offset = (void *)data - (void *)tx_desc; + + tx_info->linear = (lso_header_size < skb_headlen(skb) && + !is_inline(skb, NULL)) ? 
1 : 0; + + data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1; + + if (is_inline(skb, &fragptr)) { + tx_info->inl = 1; + } else { + /* Map fragments */ + for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) { + frag = &skb_shinfo(skb)->frags[i]; + dma = skb_frag_dma_map(ddev, frag, + 0, skb_frag_size(frag), + DMA_TO_DEVICE); + if (dma_mapping_error(ddev, dma)) + goto tx_drop_unmap; + + data->addr = cpu_to_be64(dma); + data->lkey = cpu_to_be32(mdev->mr.key); + wmb(); + data->byte_count = cpu_to_be32(skb_frag_size(frag)); + --data; + } + + /* Map linear part */ + if (tx_info->linear) { + u32 byte_count = skb_headlen(skb) - lso_header_size; + dma = dma_map_single(ddev, skb->data + + lso_header_size, byte_count, + PCI_DMA_TODEVICE); + if (dma_mapping_error(ddev, dma)) + goto tx_drop_unmap; + + data->addr = cpu_to_be64(dma); + data->lkey = cpu_to_be32(mdev->mr.key); + wmb(); + data->byte_count = cpu_to_be32(byte_count); + } + tx_info->inl = 0; + } + /* * For timestamping add flag to skb_shinfo and * set flag for further reference @@ -720,8 +771,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) /* Copy headers; * note that we already verified that it is linear */ memcpy(tx_desc->lso.header, skb->data, lso_header_size); - data = ((void *) &tx_desc->lso + - ALIGN(lso_header_size + 4, DS_SIZE)); priv->port_stats.tso_packets++; i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) + @@ -733,7 +782,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) op_own = cpu_to_be32(MLX4_OPCODE_SEND) | ((ring->prod & ring->size) ? cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0); - data = &tx_desc->data; tx_info->nr_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); ring->packets++; @@ -742,38 +790,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes); AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len); - - /* valid only for none inline segments */ - tx_info->data_offset = (void *) data - (void *) tx_desc; - - tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 
1 : 0; - data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1; - - if (!is_inline(skb, &fragptr)) { - /* Map fragments */ - for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) { - frag = &skb_shinfo(skb)->frags[i]; - dma = skb_frag_dma_map(priv->ddev, frag, - 0, skb_frag_size(frag), - DMA_TO_DEVICE); - data->addr = cpu_to_be64(dma); - data->lkey = cpu_to_be32(mdev->mr.key); - wmb(); - data->byte_count = cpu_to_be32(skb_frag_size(frag)); - --data; - } - - /* Map linear part */ - if (tx_info->linear) { - dma = dma_map_single(priv->ddev, skb->data + lso_header_size, - skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE); - data->addr = cpu_to_be64(dma); - data->lkey = cpu_to_be32(mdev->mr.key); - wmb(); - data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size); - } - tx_info->inl = 0; - } else { + if (tx_info->inl) { build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr); tx_info->inl = 1; } @@ -813,6 +830,16 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; +tx_drop_unmap: + en_err(priv, "DMA mapping error\n"); + + for (i++; i < skb_shinfo(skb)->nr_frags; i++) { + data++; + dma_unmap_page(ddev, (dma_addr_t) be64_to_cpu(data->addr), + be32_to_cpu(data->byte_count), + PCI_DMA_TODEVICE); + } + tx_drop: dev_kfree_skb_any(skb); priv->stats.tx_dropped++; -- cgit v0.10.2 From 5f1cd200c4e4e1a6bce946aaac40c7a10427f3ed Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Wed, 21 Aug 2013 10:08:59 +0300 Subject: net/mlx4_en: Reduce scope of local variables in mlx4_en_xmit Some variables could have their scope reduced. Signed-off-by: Amir Vadai Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index c28868b..0698c82 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -592,14 +592,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) struct mlx4_en_tx_ring *ring; struct mlx4_en_tx_desc *tx_desc; struct mlx4_wqe_data_seg *data; - struct skb_frag_struct *frag; struct mlx4_en_tx_info *tx_info; - struct ethhdr *ethh; int tx_ind = 0; int nr_txbb; int desc_size; int real_size; - dma_addr_t dma; u32 index, bf_index; __be32 op_own; u16 vlan_tag = 0; @@ -694,6 +691,9 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) } else { /* Map fragments */ for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) { + struct skb_frag_struct *frag; + dma_addr_t dma; + frag = &skb_shinfo(skb)->frags[i]; dma = skb_frag_dma_map(ddev, frag, 0, skb_frag_size(frag), @@ -711,6 +711,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) /* Map linear part */ if (tx_info->linear) { u32 byte_count = skb_headlen(skb) - lso_header_size; + dma_addr_t dma; + dma = dma_map_single(ddev, skb->data + lso_header_size, byte_count, PCI_DMA_TODEVICE); @@ -749,6 +751,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) } if (priv->flags & MLX4_EN_FLAG_ENABLE_HW_LOOPBACK) { + struct ethhdr *ethh; + /* Copy dst mac address to wqe. This allows loopback in eSwitch, * so that VFs and PF can communicate with each other */ -- cgit v0.10.2 From 5bc283e51327e249459caab1aff505000ae2beeb Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Mon, 8 Oct 2012 21:43:00 +0100 Subject: sfc: Translate MCDI error numbers received in events Currently we only translate error codes in efx_mcdi_poll(), but we also need to do so in efx_mcdi_ev_cpl(). 
The reason we didn't notice before is that the MC firmware error codes are mostly taken from Unix/Linux and no translation is necessary on most architectures. Make sure we notice any future failure by changing the sign of resprc (matching the kernel convention) and BUG if it's ever positive at command completion. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index dffadb2..4f3301d 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -105,16 +105,39 @@ efx_mcdi_copyout(struct efx_nic *efx, efx_dword_t *outbuf, size_t outlen) efx->type->mcdi_read_response(efx, outbuf, 4, outlen); } +static int efx_mcdi_errno(unsigned int mcdi_err) +{ + switch (mcdi_err) { + case 0: + return 0; +#define TRANSLATE_ERROR(name) \ + case MC_CMD_ERR_ ## name: \ + return -name; + TRANSLATE_ERROR(ENOENT); + TRANSLATE_ERROR(EINTR); + TRANSLATE_ERROR(EACCES); + TRANSLATE_ERROR(EBUSY); + TRANSLATE_ERROR(EINVAL); + TRANSLATE_ERROR(EDEADLK); + TRANSLATE_ERROR(ENOSYS); + TRANSLATE_ERROR(ETIME); +#undef TRANSLATE_ERROR + default: + return -EIO; + } +} + static int efx_mcdi_poll(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); unsigned long time, finish; unsigned int respseq, respcmd, error; - unsigned int rc, spins; + unsigned int spins; efx_dword_t reg; + int rc; /* Check for a reboot atomically with respect to efx_mcdi_copyout() */ - rc = -efx_mcdi_poll_reboot(efx); + rc = efx_mcdi_poll_reboot(efx); if (rc) goto out; @@ -151,32 +174,15 @@ static int efx_mcdi_poll(struct efx_nic *efx) if (error && mcdi->resplen == 0) { netif_err(efx, hw, efx->net_dev, "MC rebooted\n"); - rc = EIO; + rc = -EIO; } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) { netif_err(efx, hw, efx->net_dev, "MC response mismatch tx seq 0x%x rx seq 0x%x\n", respseq, mcdi->seqno); - rc = EIO; + rc = -EIO; } else if (error) { efx->type->mcdi_read_response(efx, ®, 4, 4); - switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { -#define TRANSLATE_ERROR(name) \ - case MC_CMD_ERR_ ## name: \ - rc = name; \ - break - TRANSLATE_ERROR(ENOENT); - TRANSLATE_ERROR(EINTR); - TRANSLATE_ERROR(EACCES); - TRANSLATE_ERROR(EBUSY); - TRANSLATE_ERROR(EINVAL); - TRANSLATE_ERROR(EDEADLK); - TRANSLATE_ERROR(ENOSYS); - TRANSLATE_ERROR(ETIME); -#undef TRANSLATE_ERROR - default: - rc = EIO; - break; - } + rc = efx_mcdi_errno(EFX_DWORD_FIELD(reg, EFX_DWORD_0)); } else rc = 0; @@ -271,7 +277,7 @@ static void efx_mcdi_release(struct efx_mcdi_iface *mcdi) } static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno, - unsigned int datalen, unsigned int errno) + unsigned int datalen, unsigned int mcdi_err) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); bool wake = false; @@ -287,7 +293,7 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno, "MC response mismatch tx seq 0x%x rx " "seq 0x%x\n", seqno, mcdi->seqno); } else { - mcdi->resprc = errno; + mcdi->resprc = efx_mcdi_errno(mcdi_err); mcdi->resplen = datalen; wake = true; @@ -357,10 +363,12 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, * a spurious efx_mcdi_ev_cpl() running concurrently by * acquiring the iface_lock. 
*/ spin_lock_bh(&mcdi->iface_lock); - rc = -mcdi->resprc; + rc = mcdi->resprc; resplen = mcdi->resplen; spin_unlock_bh(&mcdi->iface_lock); + BUG_ON(rc > 0); + if (rc == 0) { efx_mcdi_copyout(efx, outbuf, min(outlen, mcdi->resplen)); @@ -491,7 +499,7 @@ void efx_mcdi_process_event(struct efx_channel *channel, case MCDI_EVENT_CODE_BADSSERT: netif_err(efx, hw, efx->net_dev, "MC watchdog or assertion failure at 0x%x\n", data); - efx_mcdi_ev_death(efx, EINTR); + efx_mcdi_ev_death(efx, -EINTR); break; case MCDI_EVENT_CODE_PMNOTICE: @@ -517,7 +525,7 @@ void efx_mcdi_process_event(struct efx_channel *channel, break; case MCDI_EVENT_CODE_REBOOT: netif_info(efx, hw, efx->net_dev, "MC Reboot\n"); - efx_mcdi_ev_death(efx, EIO); + efx_mcdi_ev_death(efx, -EIO); break; case MCDI_EVENT_CODE_MAC_STATS_DMA: /* MAC stats are gather lazily. We can ignore this. */ diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 0bfed2a..9b536d0 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -42,7 +42,7 @@ enum efx_mcdi_mode { * Serialised by @lock * @credits: Number of spurious MCDI completion events allowed before we * trigger a fatal error. Protected by @lock - * @resprc: Returned MCDI completion + * @resprc: Response error/success code (Linux numbering) * @resplen: Returned payload length */ struct efx_mcdi_iface { @@ -52,7 +52,7 @@ struct efx_mcdi_iface { enum efx_mcdi_mode mode; unsigned int credits; unsigned int seqno; - unsigned int resprc; + int resprc; size_t resplen; }; -- cgit v0.10.2 From f2b0befd1dd2f0b08feff8e66741d56b239af7b8 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Tue, 20 Aug 2013 20:35:50 +0100 Subject: sfc: Update MCDI protocol definitions for EF10 EF10 controllers do not have shared memory for communication with the MC; instead it reads requests and writes responses in host memory, which allows for longer messages. It is also responsible for all datapath control operations and hardware resource allocation, which requires a large number of new commands and adds more possible error cases. MCDI v2 extends the message header to support this. Update the MCDI protocol definition header to include v2 lengths, errors and messages, and a few definitions specific to the SFC9100 family (codenames Farmingdale and Huntington) which is the first generation of EF10. Some messages have been extended, so adjust the code accordingly: - The request for MC_CMD_DRV_ATTACH now includes a datapath firmware ID. This is ignored by Siena but we should fill it in anyway, initially always specifying low-latency datapath. - The response for MC_CMD_GET_LOOPBACK_MODES now includes a 40G field. Accept shorter responses that don't include it. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 4f3301d..c6c5830 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -593,6 +593,7 @@ int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE, driver_operating ? 
1 : 0); MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1); + MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY); rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index 5f2846c..9e824f7 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -1,6 +1,6 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards - * Copyright 2009-2011 Solarflare Communications Inc. + * Driver for Solarflare network controllers and boards + * Copyright 2009-2013 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published @@ -21,6 +21,13 @@ #define MC_FW_STATE_BOOTING (4) /* The Scheduler has started. */ #define MC_FW_STATE_SCHED (8) +/* If this is set in MC_RESET_STATE_REG then it should be + * possible to jump into IMEM without loading code from flash. + * Unlike a warm boot, assume DMEM has been reloaded, so that + * the MC persistent data must be reinitialised. */ +#define MC_FW_TEPID_BOOT_OK (16) +/* BIST state has been initialized */ +#define MC_FW_BIST_INIT_OK (128) /* Siena MC shared memmory offsets */ /* The 'doorbell' addresses are hard-wired to alert the MC when written */ @@ -39,18 +46,21 @@ #define MC_STATUS_DWORD_REBOOT (0xb007b007) #define MC_STATUS_DWORD_ASSERT (0xdeaddead) +/* Check whether an mcfw version (in host order) belongs to a bootloader */ +#define MC_FW_VERSION_IS_BOOTLOADER(_v) (((_v) >> 16) == 0xb007) + /* The current version of the MCDI protocol. * * Note that the ROM burnt into the card only talks V0, so at the very * least every driver must support version 0 and MCDI_PCOL_VERSION */ -#define MCDI_PCOL_VERSION 1 +#define MCDI_PCOL_VERSION 2 /* Unused commands: 0x23, 0x27, 0x30, 0x31 */ /* MCDI version 1 * - * Each MCDI request starts with an MCDI_HEADER, which is a 32byte + * Each MCDI request starts with an MCDI_HEADER, which is a 32bit * structure, filled in by the client. * * 0 7 8 16 20 22 23 24 31 @@ -87,9 +97,11 @@ #define MCDI_HEADER_DATALEN_LBN 8 #define MCDI_HEADER_DATALEN_WIDTH 8 #define MCDI_HEADER_SEQ_LBN 16 -#define MCDI_HEADER_RSVD_LBN 20 -#define MCDI_HEADER_RSVD_WIDTH 2 #define MCDI_HEADER_SEQ_WIDTH 4 +#define MCDI_HEADER_RSVD_LBN 20 +#define MCDI_HEADER_RSVD_WIDTH 1 +#define MCDI_HEADER_NOT_EPOCH_LBN 21 +#define MCDI_HEADER_NOT_EPOCH_WIDTH 1 #define MCDI_HEADER_ERROR_LBN 22 #define MCDI_HEADER_ERROR_WIDTH 1 #define MCDI_HEADER_RESPONSE_LBN 23 @@ -101,8 +113,10 @@ /* Maximum number of payload bytes */ #define MCDI_CTL_SDU_LEN_MAX_V1 0xfc +#define MCDI_CTL_SDU_LEN_MAX_V2 0x400 + +#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V2 -#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V1 /* The MC can generate events for two reasons: * - To complete a shared memory request if XFLAGS_EVREQ was set @@ -147,22 +161,69 @@ #define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc +/* Operation not permitted. 
*/ +#define MC_CMD_ERR_EPERM 1 /* Non-existent command target */ #define MC_CMD_ERR_ENOENT 2 /* assert() has killed the MC */ #define MC_CMD_ERR_EINTR 4 +/* I/O failure */ +#define MC_CMD_ERR_EIO 5 +/* Try again */ +#define MC_CMD_ERR_EAGAIN 11 +/* Out of memory */ +#define MC_CMD_ERR_ENOMEM 12 /* Caller does not hold required locks */ #define MC_CMD_ERR_EACCES 13 /* Resource is currently unavailable (e.g. lock contention) */ #define MC_CMD_ERR_EBUSY 16 +/* No such device */ +#define MC_CMD_ERR_ENODEV 19 /* Invalid argument to target */ #define MC_CMD_ERR_EINVAL 22 +/* Out of range */ +#define MC_CMD_ERR_ERANGE 34 /* Non-recursive resource is already acquired */ #define MC_CMD_ERR_EDEADLK 35 /* Operation not implemented */ #define MC_CMD_ERR_ENOSYS 38 /* Operation timed out */ #define MC_CMD_ERR_ETIME 62 +/* Link has been severed */ +#define MC_CMD_ERR_ENOLINK 67 +/* Protocol error */ +#define MC_CMD_ERR_EPROTO 71 +/* Operation not supported */ +#define MC_CMD_ERR_ENOTSUP 95 +/* Address not available */ +#define MC_CMD_ERR_EADDRNOTAVAIL 99 +/* Not connected */ +#define MC_CMD_ERR_ENOTCONN 107 +/* Operation already in progress */ +#define MC_CMD_ERR_EALREADY 114 + +/* Resource allocation failed. */ +#define MC_CMD_ERR_ALLOC_FAIL 0x1000 +/* V-adaptor not found. */ +#define MC_CMD_ERR_NO_VADAPTOR 0x1001 +/* EVB port not found. */ +#define MC_CMD_ERR_NO_EVB_PORT 0x1002 +/* V-switch not found. */ +#define MC_CMD_ERR_NO_VSWITCH 0x1003 +/* Too many VLAN tags. */ +#define MC_CMD_ERR_VLAN_LIMIT 0x1004 +/* Bad PCI function number. */ +#define MC_CMD_ERR_BAD_PCI_FUNC 0x1005 +/* Invalid VLAN mode. */ +#define MC_CMD_ERR_BAD_VLAN_MODE 0x1006 +/* Invalid v-switch type. */ +#define MC_CMD_ERR_BAD_VSWITCH_TYPE 0x1007 +/* Invalid v-port type. */ +#define MC_CMD_ERR_BAD_VPORT_TYPE 0x1008 +/* MAC address exists. */ +#define MC_CMD_ERR_MAC_EXIST 0x1009 +/* Slave core not present */ +#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a #define MC_CMD_ERR_CODE_OFST 0 @@ -180,9 +241,11 @@ /* Vectors in the boot ROM */ /* Point to the copycode entry point. */ -#define MC_BOOTROM_COPYCODE_VEC (0x7f4) +#define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4) +#define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4) /* Points to the recovery mode entry point. */ -#define MC_BOOTROM_NOFLASH_VEC (0x7f8) +#define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4) +#define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4) /* The command set exported by the boot ROM (MCDI v0) */ #define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \ @@ -211,16 +274,29 @@ (n) * MC_CMD_DBIWROP_TYPEDEF_LEN) +/* Version 2 adds an optional argument to error returns: the errno value + * may be followed by the (0-based) number of the first argument that + * could not be processed. + */ +#define MC_CMD_ERR_ARG_OFST 4 + +/* No space */ +#define MC_CMD_ERR_ENOSPC 28 + /* MCDI_EVENT structuredef */ #define MCDI_EVENT_LEN 8 #define MCDI_EVENT_CONT_LBN 32 #define MCDI_EVENT_CONT_WIDTH 1 #define MCDI_EVENT_LEVEL_LBN 33 #define MCDI_EVENT_LEVEL_WIDTH 3 -#define MCDI_EVENT_LEVEL_INFO 0x0 /* enum */ -#define MCDI_EVENT_LEVEL_WARN 0x1 /* enum */ -#define MCDI_EVENT_LEVEL_ERR 0x2 /* enum */ -#define MCDI_EVENT_LEVEL_FATAL 0x3 /* enum */ +/* enum: Info. */ +#define MCDI_EVENT_LEVEL_INFO 0x0 +/* enum: Warning. */ +#define MCDI_EVENT_LEVEL_WARN 0x1 +/* enum: Error. */ +#define MCDI_EVENT_LEVEL_ERR 0x2 +/* enum: Fatal. 
*/ +#define MCDI_EVENT_LEVEL_FATAL 0x3 #define MCDI_EVENT_DATA_OFST 0 #define MCDI_EVENT_CMDDONE_SEQ_LBN 0 #define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8 @@ -232,9 +308,14 @@ #define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16 #define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16 #define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4 -#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1 /* enum */ -#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2 /* enum */ -#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3 /* enum */ +/* enum: 100Mbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1 +/* enum: 1Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2 +/* enum: 10Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3 +/* enum: 40Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4 #define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20 #define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4 #define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24 @@ -249,26 +330,80 @@ #define MCDI_EVENT_FWALERT_DATA_WIDTH 24 #define MCDI_EVENT_FWALERT_REASON_LBN 0 #define MCDI_EVENT_FWALERT_REASON_WIDTH 8 -#define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1 /* enum */ +/* enum: SRAM Access. */ +#define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1 #define MCDI_EVENT_FLR_VF_LBN 0 #define MCDI_EVENT_FLR_VF_WIDTH 8 #define MCDI_EVENT_TX_ERR_TXQ_LBN 0 #define MCDI_EVENT_TX_ERR_TXQ_WIDTH 12 #define MCDI_EVENT_TX_ERR_TYPE_LBN 12 #define MCDI_EVENT_TX_ERR_TYPE_WIDTH 4 -#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1 /* enum */ -#define MCDI_EVENT_TX_ERR_NO_EOP 0x2 /* enum */ -#define MCDI_EVENT_TX_ERR_2BIG 0x3 /* enum */ +/* enum: Descriptor loader reported failure */ +#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1 +/* enum: Descriptor ring empty and no EOP seen for packet */ +#define MCDI_EVENT_TX_ERR_NO_EOP 0x2 +/* enum: Overlength packet */ +#define MCDI_EVENT_TX_ERR_2BIG 0x3 +/* enum: Malformed option descriptor */ +#define MCDI_EVENT_TX_BAD_OPTDESC 0x5 +/* enum: Option descriptor part way through a packet */ +#define MCDI_EVENT_TX_OPT_IN_PKT 0x8 +/* enum: DMA or PIO data access error */ +#define MCDI_EVENT_TX_ERR_BAD_DMA_OR_PIO 0x9 #define MCDI_EVENT_TX_ERR_INFO_LBN 16 #define MCDI_EVENT_TX_ERR_INFO_WIDTH 16 +#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN 12 +#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_WIDTH 1 #define MCDI_EVENT_TX_FLUSH_TXQ_LBN 0 #define MCDI_EVENT_TX_FLUSH_TXQ_WIDTH 12 #define MCDI_EVENT_PTP_ERR_TYPE_LBN 0 #define MCDI_EVENT_PTP_ERR_TYPE_WIDTH 8 -#define MCDI_EVENT_PTP_ERR_PLL_LOST 0x1 /* enum */ -#define MCDI_EVENT_PTP_ERR_FILTER 0x2 /* enum */ -#define MCDI_EVENT_PTP_ERR_FIFO 0x3 /* enum */ -#define MCDI_EVENT_PTP_ERR_QUEUE 0x4 /* enum */ +/* enum: PLL lost lock */ +#define MCDI_EVENT_PTP_ERR_PLL_LOST 0x1 +/* enum: Filter overflow (PDMA) */ +#define MCDI_EVENT_PTP_ERR_FILTER 0x2 +/* enum: FIFO overflow (FPGA) */ +#define MCDI_EVENT_PTP_ERR_FIFO 0x3 +/* enum: Merge queue overflow */ +#define MCDI_EVENT_PTP_ERR_QUEUE 0x4 +#define MCDI_EVENT_AOE_ERR_TYPE_LBN 0 +#define MCDI_EVENT_AOE_ERR_TYPE_WIDTH 8 +/* enum: AOE failed to load - no valid image? */ +#define MCDI_EVENT_AOE_NO_LOAD 0x1 +/* enum: AOE FC reported an exception */ +#define MCDI_EVENT_AOE_FC_ASSERT 0x2 +/* enum: AOE FC watchdogged */ +#define MCDI_EVENT_AOE_FC_WATCHDOG 0x3 +/* enum: AOE FC failed to start */ +#define MCDI_EVENT_AOE_FC_NO_START 0x4 +/* enum: Generic AOE fault - likely to have been reported via other means too + * but intended for use by aoex driver. 
+ */ +#define MCDI_EVENT_AOE_FAULT 0x5 +/* enum: Results of reprogramming the CPLD (status in AOE_ERR_DATA) */ +#define MCDI_EVENT_AOE_CPLD_REPROGRAMMED 0x6 +/* enum: AOE loaded successfully */ +#define MCDI_EVENT_AOE_LOAD 0x7 +/* enum: AOE DMA operation completed (LSB of HOST_HANDLE in AOE_ERR_DATA) */ +#define MCDI_EVENT_AOE_DMA 0x8 +/* enum: AOE byteblaster connected/disconnected (Connection status in + * AOE_ERR_DATA) + */ +#define MCDI_EVENT_AOE_BYTEBLASTER 0x9 +#define MCDI_EVENT_AOE_ERR_DATA_LBN 8 +#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8 +#define MCDI_EVENT_RX_ERR_RXQ_LBN 0 +#define MCDI_EVENT_RX_ERR_RXQ_WIDTH 12 +#define MCDI_EVENT_RX_ERR_TYPE_LBN 12 +#define MCDI_EVENT_RX_ERR_TYPE_WIDTH 4 +#define MCDI_EVENT_RX_ERR_INFO_LBN 16 +#define MCDI_EVENT_RX_ERR_INFO_WIDTH 16 +#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN 12 +#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_WIDTH 1 +#define MCDI_EVENT_RX_FLUSH_RXQ_LBN 0 +#define MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12 +#define MCDI_EVENT_MC_REBOOT_COUNT_LBN 0 +#define MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16 #define MCDI_EVENT_DATA_LBN 0 #define MCDI_EVENT_DATA_WIDTH 32 #define MCDI_EVENT_SRC_LBN 36 @@ -277,21 +412,60 @@ #define MCDI_EVENT_EV_CODE_WIDTH 4 #define MCDI_EVENT_CODE_LBN 44 #define MCDI_EVENT_CODE_WIDTH 8 -#define MCDI_EVENT_CODE_BADSSERT 0x1 /* enum */ -#define MCDI_EVENT_CODE_PMNOTICE 0x2 /* enum */ -#define MCDI_EVENT_CODE_CMDDONE 0x3 /* enum */ -#define MCDI_EVENT_CODE_LINKCHANGE 0x4 /* enum */ -#define MCDI_EVENT_CODE_SENSOREVT 0x5 /* enum */ -#define MCDI_EVENT_CODE_SCHEDERR 0x6 /* enum */ -#define MCDI_EVENT_CODE_REBOOT 0x7 /* enum */ -#define MCDI_EVENT_CODE_MAC_STATS_DMA 0x8 /* enum */ -#define MCDI_EVENT_CODE_FWALERT 0x9 /* enum */ -#define MCDI_EVENT_CODE_FLR 0xa /* enum */ -#define MCDI_EVENT_CODE_TX_ERR 0xb /* enum */ -#define MCDI_EVENT_CODE_TX_FLUSH 0xc /* enum */ -#define MCDI_EVENT_CODE_PTP_RX 0xd /* enum */ -#define MCDI_EVENT_CODE_PTP_FAULT 0xe /* enum */ -#define MCDI_EVENT_CODE_PTP_PPS 0xf /* enum */ +/* enum: Bad assert. */ +#define MCDI_EVENT_CODE_BADSSERT 0x1 +/* enum: PM Notice. */ +#define MCDI_EVENT_CODE_PMNOTICE 0x2 +/* enum: Command done. */ +#define MCDI_EVENT_CODE_CMDDONE 0x3 +/* enum: Link change. */ +#define MCDI_EVENT_CODE_LINKCHANGE 0x4 +/* enum: Sensor Event. */ +#define MCDI_EVENT_CODE_SENSOREVT 0x5 +/* enum: Schedule error. */ +#define MCDI_EVENT_CODE_SCHEDERR 0x6 +/* enum: Reboot. */ +#define MCDI_EVENT_CODE_REBOOT 0x7 +/* enum: Mac stats DMA. */ +#define MCDI_EVENT_CODE_MAC_STATS_DMA 0x8 +/* enum: Firmware alert. */ +#define MCDI_EVENT_CODE_FWALERT 0x9 +/* enum: Function level reset. */ +#define MCDI_EVENT_CODE_FLR 0xa +/* enum: Transmit error */ +#define MCDI_EVENT_CODE_TX_ERR 0xb +/* enum: Tx flush has completed */ +#define MCDI_EVENT_CODE_TX_FLUSH 0xc +/* enum: PTP packet received timestamp */ +#define MCDI_EVENT_CODE_PTP_RX 0xd +/* enum: PTP NIC failure */ +#define MCDI_EVENT_CODE_PTP_FAULT 0xe +/* enum: PTP PPS event */ +#define MCDI_EVENT_CODE_PTP_PPS 0xf +/* enum: Rx flush has completed */ +#define MCDI_EVENT_CODE_RX_FLUSH 0x10 +/* enum: Receive error */ +#define MCDI_EVENT_CODE_RX_ERR 0x11 +/* enum: AOE fault */ +#define MCDI_EVENT_CODE_AOE 0x12 +/* enum: Network port calibration failed (VCAL). 
*/ +#define MCDI_EVENT_CODE_VCAL_FAIL 0x13 +/* enum: HW PPS event */ +#define MCDI_EVENT_CODE_HW_PPS 0x14 +/* enum: The MC has rebooted (huntington and later, siena uses CODE_REBOOT and + * a different format) + */ +#define MCDI_EVENT_CODE_MC_REBOOT 0x15 +/* enum: the MC has detected a parity error */ +#define MCDI_EVENT_CODE_PAR_ERR 0x16 +/* enum: the MC has detected a correctable error */ +#define MCDI_EVENT_CODE_ECC_CORR_ERR 0x17 +/* enum: the MC has detected an uncorrectable error */ +#define MCDI_EVENT_CODE_ECC_FATAL_ERR 0x18 +/* enum: Artificial event generated by host and posted via MC for test + * purposes. + */ +#define MCDI_EVENT_CODE_TESTGEN 0xfa #define MCDI_EVENT_CMDDONE_DATA_OFST 0 #define MCDI_EVENT_CMDDONE_DATA_LBN 0 #define MCDI_EVENT_CMDDONE_DATA_WIDTH 32 @@ -307,15 +481,114 @@ #define MCDI_EVENT_TX_ERR_DATA_OFST 0 #define MCDI_EVENT_TX_ERR_DATA_LBN 0 #define MCDI_EVENT_TX_ERR_DATA_WIDTH 32 +/* Seconds field of timestamp */ #define MCDI_EVENT_PTP_SECONDS_OFST 0 #define MCDI_EVENT_PTP_SECONDS_LBN 0 #define MCDI_EVENT_PTP_SECONDS_WIDTH 32 +/* Nanoseconds field of timestamp */ #define MCDI_EVENT_PTP_NANOSECONDS_OFST 0 #define MCDI_EVENT_PTP_NANOSECONDS_LBN 0 #define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32 +/* Lowest four bytes of sourceUUID from PTP packet */ #define MCDI_EVENT_PTP_UUID_OFST 0 #define MCDI_EVENT_PTP_UUID_LBN 0 #define MCDI_EVENT_PTP_UUID_WIDTH 32 +#define MCDI_EVENT_RX_ERR_DATA_OFST 0 +#define MCDI_EVENT_RX_ERR_DATA_LBN 0 +#define MCDI_EVENT_RX_ERR_DATA_WIDTH 32 +#define MCDI_EVENT_PAR_ERR_DATA_OFST 0 +#define MCDI_EVENT_PAR_ERR_DATA_LBN 0 +#define MCDI_EVENT_PAR_ERR_DATA_WIDTH 32 +#define MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0 +#define MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0 +#define MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32 +#define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0 +#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0 +#define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32 + +/* FCDI_EVENT structuredef */ +#define FCDI_EVENT_LEN 8 +#define FCDI_EVENT_CONT_LBN 32 +#define FCDI_EVENT_CONT_WIDTH 1 +#define FCDI_EVENT_LEVEL_LBN 33 +#define FCDI_EVENT_LEVEL_WIDTH 3 +/* enum: Info. */ +#define FCDI_EVENT_LEVEL_INFO 0x0 +/* enum: Warning. */ +#define FCDI_EVENT_LEVEL_WARN 0x1 +/* enum: Error. */ +#define FCDI_EVENT_LEVEL_ERR 0x2 +/* enum: Fatal. */ +#define FCDI_EVENT_LEVEL_FATAL 0x3 +#define FCDI_EVENT_DATA_OFST 0 +#define FCDI_EVENT_LINK_STATE_STATUS_LBN 0 +#define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1 +#define FCDI_EVENT_LINK_DOWN 0x0 /* enum */ +#define FCDI_EVENT_LINK_UP 0x1 /* enum */ +#define FCDI_EVENT_DATA_LBN 0 +#define FCDI_EVENT_DATA_WIDTH 32 +#define FCDI_EVENT_SRC_LBN 36 +#define FCDI_EVENT_SRC_WIDTH 8 +#define FCDI_EVENT_EV_CODE_LBN 60 +#define FCDI_EVENT_EV_CODE_WIDTH 4 +#define FCDI_EVENT_CODE_LBN 44 +#define FCDI_EVENT_CODE_WIDTH 8 +/* enum: The FC was rebooted. */ +#define FCDI_EVENT_CODE_REBOOT 0x1 +/* enum: Bad assert. */ +#define FCDI_EVENT_CODE_ASSERT 0x2 +/* enum: DDR3 test result. */ +#define FCDI_EVENT_CODE_DDR_TEST_RESULT 0x3 +/* enum: Link status. */ +#define FCDI_EVENT_CODE_LINK_STATE 0x4 +/* enum: A timed read is ready to be serviced. 
*/ +#define FCDI_EVENT_CODE_TIMED_READ 0x5 +/* enum: One or more PPS IN events */ +#define FCDI_EVENT_CODE_PPS_IN 0x6 +/* enum: One or more PPS OUT events */ +#define FCDI_EVENT_CODE_PPS_OUT 0x7 +#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0 +#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0 +#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32 +#define FCDI_EVENT_ASSERT_TYPE_LBN 36 +#define FCDI_EVENT_ASSERT_TYPE_WIDTH 8 +#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36 +#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8 +#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0 +#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0 +#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32 +#define FCDI_EVENT_LINK_STATE_DATA_OFST 0 +#define FCDI_EVENT_LINK_STATE_DATA_LBN 0 +#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32 +#define FCDI_EVENT_PPS_COUNT_OFST 0 +#define FCDI_EVENT_PPS_COUNT_LBN 0 +#define FCDI_EVENT_PPS_COUNT_WIDTH 32 + +/* FCDI_EXTENDED_EVENT structuredef */ +#define FCDI_EXTENDED_EVENT_LENMIN 16 +#define FCDI_EXTENDED_EVENT_LENMAX 248 +#define FCDI_EXTENDED_EVENT_LEN(num) (8+8*(num)) +/* Number of timestamps following */ +#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0 +#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0 +#define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32 +/* Seconds field of a timestamp record */ +#define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8 +#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64 +#define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32 +/* Nanoseconds field of a timestamp record */ +#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12 +#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96 +#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32 +/* Timestamp records comprising the event */ +#define FCDI_EXTENDED_EVENT_PPS_TIME_OFST 8 +#define FCDI_EXTENDED_EVENT_PPS_TIME_LEN 8 +#define FCDI_EXTENDED_EVENT_PPS_TIME_LO_OFST 8 +#define FCDI_EXTENDED_EVENT_PPS_TIME_HI_OFST 12 +#define FCDI_EXTENDED_EVENT_PPS_TIME_MINNUM 1 +#define FCDI_EXTENDED_EVENT_PPS_TIME_MAXNUM 30 +#define FCDI_EXTENDED_EVENT_PPS_TIME_LBN 64 +#define FCDI_EXTENDED_EVENT_PPS_TIME_WIDTH 64 /***********************************/ @@ -367,11 +640,27 @@ /* MC_CMD_COPYCODE_IN msgrequest */ #define MC_CMD_COPYCODE_IN_LEN 16 +/* Source address */ #define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0 +/* enum: Entering the main image via a copy of a single word from and to this + * address indicates that it should not attempt to start the datapath CPUs. + * This is useful for certain soft rebooting scenarios. (Huntington only) + */ +#define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0 +/* enum: Entering the main image via a copy of a single word from and to this + * address indicates that it should not attempt to parse any configuration from + * flash. (In addition, the datapath CPUs will not be started, as for + * MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR above.) This is useful for + * certain soft rebooting scenarios. (Huntington only) + */ +#define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc +/* Destination address */ #define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4 #define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8 +/* Address of where to jump after copy. 
*/ #define MC_CMD_COPYCODE_IN_JUMP_OFST 12 -#define MC_CMD_COPYCODE_JUMP_NONE 0x1 /* enum */ +/* enum: Control should return to the caller rather than jumping */ +#define MC_CMD_COPYCODE_JUMP_NONE 0x1 /* MC_CMD_COPYCODE_OUT msgresponse */ #define MC_CMD_COPYCODE_OUT_LEN 0 @@ -379,11 +668,13 @@ /***********************************/ /* MC_CMD_SET_FUNC + * Select function for function-specific commands. */ #define MC_CMD_SET_FUNC 0x4 /* MC_CMD_SET_FUNC_IN msgrequest */ #define MC_CMD_SET_FUNC_IN_LEN 4 +/* Set function */ #define MC_CMD_SET_FUNC_IN_FUNC_OFST 0 /* MC_CMD_SET_FUNC_OUT msgresponse */ @@ -392,6 +683,7 @@ /***********************************/ /* MC_CMD_GET_BOOT_STATUS + * Get the instruction address from which the MC booted. */ #define MC_CMD_GET_BOOT_STATUS 0x5 @@ -400,7 +692,10 @@ /* MC_CMD_GET_BOOT_STATUS_OUT msgresponse */ #define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8 +/* ?? */ #define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0 +/* enum: indicates that the MC wasn't flash booted */ +#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef #define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4 #define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0 #define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1 @@ -412,25 +707,38 @@ /***********************************/ /* MC_CMD_GET_ASSERTS - * Get and clear any assertion status. + * Get (and optionally clear) the current assertion status. Only + * OUT.GLOBAL_FLAGS is guaranteed to exist in the completion payload. The other + * fields will only be present if OUT.GLOBAL_FLAGS != NO_FAILS */ #define MC_CMD_GET_ASSERTS 0x6 /* MC_CMD_GET_ASSERTS_IN msgrequest */ #define MC_CMD_GET_ASSERTS_IN_LEN 4 +/* Set to clear assertion */ #define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0 /* MC_CMD_GET_ASSERTS_OUT msgresponse */ #define MC_CMD_GET_ASSERTS_OUT_LEN 140 +/* Assertion status flag. */ #define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0 -#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 /* enum */ -#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 /* enum */ -#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 /* enum */ -#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 /* enum */ +/* enum: No assertions have failed. */ +#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 +/* enum: A system-level assertion has failed. */ +#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 +/* enum: A thread-level assertion has failed. */ +#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 +/* enum: The system was reset by the watchdog. */ +#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 +/* enum: An illegal address trap stopped the system (huntington and later) */ +#define MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5 +/* Failing PC value */ #define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4 +/* Saved GP regs */ #define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8 #define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4 #define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31 +/* Failing thread address */ #define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132 #define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136 @@ -443,9 +751,12 @@ /* MC_CMD_LOG_CTRL_IN msgrequest */ #define MC_CMD_LOG_CTRL_IN_LEN 8 +/* Log destination */ #define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0 -#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1 /* enum */ -#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2 /* enum */ +/* enum: UART. */ +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1 +/* enum: Event queue. 
*/ +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2 #define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4 /* MC_CMD_LOG_CTRL_OUT msgresponse */ @@ -461,11 +772,20 @@ /* MC_CMD_GET_VERSION_IN msgrequest */ #define MC_CMD_GET_VERSION_IN_LEN 0 -/* MC_CMD_GET_VERSION_V0_OUT msgresponse */ +/* MC_CMD_GET_VERSION_EXT_IN msgrequest: Asks for the extended version */ +#define MC_CMD_GET_VERSION_EXT_IN_LEN 4 +/* placeholder, set to 0 */ +#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0 + +/* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */ #define MC_CMD_GET_VERSION_V0_OUT_LEN 4 #define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 -#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff /* enum */ -#define MC_CMD_GET_VERSION_OUT_FIRMWARE_BOOTROM 0xb0070000 /* enum */ +/* enum: Reserved version number to indicate "any" version. */ +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff +/* enum: Bootrom version value for Siena. */ +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000 +/* enum: Bootrom version value for Huntington. */ +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001 /* MC_CMD_GET_VERSION_OUT msgresponse */ #define MC_CMD_GET_VERSION_OUT_LEN 32 @@ -473,6 +793,7 @@ /* Enum values, see field(s): */ /* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */ #define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4 +/* 128bit mask of functions supported by the current firmware */ #define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8 #define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16 #define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24 @@ -480,46 +801,22 @@ #define MC_CMD_GET_VERSION_OUT_VERSION_LO_OFST 24 #define MC_CMD_GET_VERSION_OUT_VERSION_HI_OFST 28 - -/***********************************/ -/* MC_CMD_GET_FPGAREG - * Read multiple bytes from PTP FPGA. - */ -#define MC_CMD_GET_FPGAREG 0x9 - -/* MC_CMD_GET_FPGAREG_IN msgrequest */ -#define MC_CMD_GET_FPGAREG_IN_LEN 8 -#define MC_CMD_GET_FPGAREG_IN_ADDR_OFST 0 -#define MC_CMD_GET_FPGAREG_IN_NUMBYTES_OFST 4 - -/* MC_CMD_GET_FPGAREG_OUT msgresponse */ -#define MC_CMD_GET_FPGAREG_OUT_LENMIN 1 -#define MC_CMD_GET_FPGAREG_OUT_LENMAX 252 -#define MC_CMD_GET_FPGAREG_OUT_LEN(num) (0+1*(num)) -#define MC_CMD_GET_FPGAREG_OUT_BUFFER_OFST 0 -#define MC_CMD_GET_FPGAREG_OUT_BUFFER_LEN 1 -#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MINNUM 1 -#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MAXNUM 252 - - -/***********************************/ -/* MC_CMD_PUT_FPGAREG - * Write multiple bytes to PTP FPGA. 
- */ -#define MC_CMD_PUT_FPGAREG 0xa - -/* MC_CMD_PUT_FPGAREG_IN msgrequest */ -#define MC_CMD_PUT_FPGAREG_IN_LENMIN 5 -#define MC_CMD_PUT_FPGAREG_IN_LENMAX 252 -#define MC_CMD_PUT_FPGAREG_IN_LEN(num) (4+1*(num)) -#define MC_CMD_PUT_FPGAREG_IN_ADDR_OFST 0 -#define MC_CMD_PUT_FPGAREG_IN_BUFFER_OFST 4 -#define MC_CMD_PUT_FPGAREG_IN_BUFFER_LEN 1 -#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MINNUM 1 -#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MAXNUM 248 - -/* MC_CMD_PUT_FPGAREG_OUT msgresponse */ -#define MC_CMD_PUT_FPGAREG_OUT_LEN 0 +/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */ +#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48 +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */ +/* Enum values, see field(s): */ +/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */ +#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4 +/* 128bit mask of functions supported by the current firmware */ +#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8 +#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16 +#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_OFST 24 +#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LEN 8 +#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_OFST 24 +#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_OFST 28 +/* extra info */ +#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_OFST 32 +#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_LEN 16 /***********************************/ @@ -530,32 +827,74 @@ /* MC_CMD_PTP_IN msgrequest */ #define MC_CMD_PTP_IN_LEN 1 +/* PTP operation code */ #define MC_CMD_PTP_IN_OP_OFST 0 #define MC_CMD_PTP_IN_OP_LEN 1 -#define MC_CMD_PTP_OP_ENABLE 0x1 /* enum */ -#define MC_CMD_PTP_OP_DISABLE 0x2 /* enum */ -#define MC_CMD_PTP_OP_TRANSMIT 0x3 /* enum */ -#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4 /* enum */ -#define MC_CMD_PTP_OP_STATUS 0x5 /* enum */ -#define MC_CMD_PTP_OP_ADJUST 0x6 /* enum */ -#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7 /* enum */ -#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8 /* enum */ -#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9 /* enum */ -#define MC_CMD_PTP_OP_RESET_STATS 0xa /* enum */ -#define MC_CMD_PTP_OP_DEBUG 0xb /* enum */ -#define MC_CMD_PTP_OP_MAX 0xc /* enum */ +/* enum: Enable PTP packet timestamping operation. */ +#define MC_CMD_PTP_OP_ENABLE 0x1 +/* enum: Disable PTP packet timestamping operation. */ +#define MC_CMD_PTP_OP_DISABLE 0x2 +/* enum: Send a PTP packet. */ +#define MC_CMD_PTP_OP_TRANSMIT 0x3 +/* enum: Read the current NIC time. */ +#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4 +/* enum: Get the current PTP status. */ +#define MC_CMD_PTP_OP_STATUS 0x5 +/* enum: Adjust the PTP NIC's time. */ +#define MC_CMD_PTP_OP_ADJUST 0x6 +/* enum: Synchronize host and NIC time. */ +#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7 +/* enum: Basic manufacturing tests. */ +#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8 +/* enum: Packet based manufacturing tests. */ +#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9 +/* enum: Reset some of the PTP related statistics */ +#define MC_CMD_PTP_OP_RESET_STATS 0xa +/* enum: Debug operations to MC. 
*/ +#define MC_CMD_PTP_OP_DEBUG 0xb +/* enum: Read an FPGA register */ +#define MC_CMD_PTP_OP_FPGAREAD 0xc +/* enum: Write an FPGA register */ +#define MC_CMD_PTP_OP_FPGAWRITE 0xd +/* enum: Apply an offset to the NIC clock */ +#define MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe +/* enum: Change Apply an offset to the NIC clock */ +#define MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf +/* enum: Set the MC packet filter VLAN tags for received PTP packets */ +#define MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10 +/* enum: Set the MC packet filter UUID for received PTP packets */ +#define MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11 +/* enum: Set the MC packet filter Domain for received PTP packets */ +#define MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12 +/* enum: Set the clock source */ +#define MC_CMD_PTP_OP_SET_CLK_SRC 0x13 +/* enum: Reset value of Timer Reg. */ +#define MC_CMD_PTP_OP_RST_CLK 0x14 +/* enum: Enable the forwarding of PPS events to the host */ +#define MC_CMD_PTP_OP_PPS_ENABLE 0x15 +/* enum: Above this for future use. */ +#define MC_CMD_PTP_OP_MAX 0x16 /* MC_CMD_PTP_IN_ENABLE msgrequest */ #define MC_CMD_PTP_IN_ENABLE_LEN 16 #define MC_CMD_PTP_IN_CMD_OFST 0 #define MC_CMD_PTP_IN_PERIPH_ID_OFST 4 +/* Event queue for PTP events */ #define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8 +/* PTP timestamping mode */ #define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12 -#define MC_CMD_PTP_MODE_V1 0x0 /* enum */ -#define MC_CMD_PTP_MODE_V1_VLAN 0x1 /* enum */ -#define MC_CMD_PTP_MODE_V2 0x2 /* enum */ -#define MC_CMD_PTP_MODE_V2_VLAN 0x3 /* enum */ -#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4 /* enum */ +/* enum: PTP, version 1 */ +#define MC_CMD_PTP_MODE_V1 0x0 +/* enum: PTP, version 1, with VLAN headers - deprecated */ +#define MC_CMD_PTP_MODE_V1_VLAN 0x1 +/* enum: PTP, version 2 */ +#define MC_CMD_PTP_MODE_V2 0x2 +/* enum: PTP, version 2, with VLAN headers - deprecated */ +#define MC_CMD_PTP_MODE_V2_VLAN 0x3 +/* enum: PTP, version 2, with improved UUID filtering */ +#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4 +/* enum: FCoE (seconds and microseconds) */ +#define MC_CMD_PTP_MODE_FCOE 0x5 /* MC_CMD_PTP_IN_DISABLE msgrequest */ #define MC_CMD_PTP_IN_DISABLE_LEN 8 @@ -568,7 +907,9 @@ #define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num)) /* MC_CMD_PTP_IN_CMD_OFST 0 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* Transmit packet length */ #define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8 +/* Transmit packet data */ #define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12 #define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1 #define MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1 @@ -588,19 +929,27 @@ #define MC_CMD_PTP_IN_ADJUST_LEN 24 /* MC_CMD_PTP_IN_CMD_OFST 0 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* Frequency adjustment 40 bit fixed point ns */ #define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8 #define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8 #define MC_CMD_PTP_IN_ADJUST_FREQ_LO_OFST 8 #define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12 -#define MC_CMD_PTP_IN_ADJUST_BITS 0x28 /* enum */ +/* enum: Number of fractional bits in frequency adjustment */ +#define MC_CMD_PTP_IN_ADJUST_BITS 0x28 +/* Time adjustment in seconds */ #define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16 +/* Time adjustment in nanoseconds */ #define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20 /* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */ #define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20 /* MC_CMD_PTP_IN_CMD_OFST 0 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* Number of time readings to capture */ #define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8 +/* Host address in which to write "synchronization started" indication (64 + * bits) + */ #define 
MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_OFST 12 #define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LEN 8 #define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_OFST 12 @@ -615,86 +964,240 @@ #define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12 /* MC_CMD_PTP_IN_CMD_OFST 0 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* Enable or disable packet testing */ #define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8 /* MC_CMD_PTP_IN_RESET_STATS msgrequest */ #define MC_CMD_PTP_IN_RESET_STATS_LEN 8 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* Reset PTP statistics */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ /* MC_CMD_PTP_IN_DEBUG msgrequest */ #define MC_CMD_PTP_IN_DEBUG_LEN 12 /* MC_CMD_PTP_IN_CMD_OFST 0 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* Debug operations */ #define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8 +/* MC_CMD_PTP_IN_FPGAREAD msgrequest */ +#define MC_CMD_PTP_IN_FPGAREAD_LEN 16 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +#define MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8 +#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12 + +/* MC_CMD_PTP_IN_FPGAWRITE msgrequest */ +#define MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13 +#define MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252 +#define MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num)) +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8 +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12 +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1 +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1 +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MAXNUM 240 + +/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* Time adjustment in seconds */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8 +/* Time adjustment in nanoseconds */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12 + +/* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */ +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* Frequency adjustment 40 bit fixed point ns */ +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8 +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12 +/* enum: Number of fractional bits in frequency adjustment */ +/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */ + +/* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */ +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* Number of VLAN tags, 0 if not VLAN */ +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8 +/* Set of VLAN tags to filter against */ +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12 +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4 +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_NUM 3 + +/* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */ +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* 1 to enable UUID filtering, 0 to disable */ +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8 +/* UUID to filter against */ +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12 +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8 +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_OFST 12 +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_OFST 16 + +/* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */ +#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16 +/* 
MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* 1 to enable Domain filtering, 0 to disable */ +#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8 +/* Domain number to filter against */ +#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12 + +/* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */ +#define MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* Set the clock source. */ +#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8 +/* enum: Internal. */ +#define MC_CMD_PTP_CLK_SRC_INTERNAL 0x0 +/* enum: External. */ +#define MC_CMD_PTP_CLK_SRC_EXTERNAL 0x1 + +/* MC_CMD_PTP_IN_RST_CLK msgrequest */ +#define MC_CMD_PTP_IN_RST_CLK_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* Reset value of Timer Reg. */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ + +/* MC_CMD_PTP_IN_PPS_ENABLE msgrequest */ +#define MC_CMD_PTP_IN_PPS_ENABLE_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* Enable or disable */ +#define MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4 +/* enum: Enable */ +#define MC_CMD_PTP_ENABLE_PPS 0x0 +/* enum: Disable */ +#define MC_CMD_PTP_DISABLE_PPS 0x1 +/* Queueid to send events back */ +#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8 + /* MC_CMD_PTP_OUT msgresponse */ #define MC_CMD_PTP_OUT_LEN 0 /* MC_CMD_PTP_OUT_TRANSMIT msgresponse */ #define MC_CMD_PTP_OUT_TRANSMIT_LEN 8 +/* Value of seconds timestamp */ #define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0 +/* Value of nanoseconds timestamp */ #define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4 /* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */ #define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8 +/* Value of seconds timestamp */ #define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0 +/* Value of nanoseconds timestamp */ #define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4 /* MC_CMD_PTP_OUT_STATUS msgresponse */ #define MC_CMD_PTP_OUT_STATUS_LEN 64 +/* Frequency of NIC's hardware clock */ #define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0 +/* Number of packets transmitted and timestamped */ #define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4 +/* Number of packets received and timestamped */ #define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8 +/* Number of packets timestamped by the FPGA */ #define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12 +/* Number of packets filter matched */ #define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16 +/* Number of packets not filter matched */ #define MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20 +/* Number of PPS overflows (noise on input?) 
*/ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24 +/* Number of PPS bad periods */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28 +/* Minimum period of PPS pulse */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32 +/* Maximum period of PPS pulse */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36 +/* Last period of PPS pulse */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40 +/* Mean period of PPS pulse */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44 +/* Minimum offset of PPS pulse (signed) */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48 +/* Maximum offset of PPS pulse (signed) */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52 +/* Last offset of PPS pulse (signed) */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56 +/* Mean offset of PPS pulse (signed) */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60 /* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20 #define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX 240 #define MC_CMD_PTP_OUT_SYNCHRONIZE_LEN(num) (0+20*(num)) +/* A set of host and NIC times */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_OFST 0 #define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN 20 #define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MINNUM 1 #define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12 +/* Host time immediately before NIC's hardware clock read */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0 +/* Value of seconds timestamp */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4 +/* Value of nanoseconds timestamp */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8 +/* Host time immediately after NIC's hardware clock read */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12 +/* Number of nanoseconds waited after reading NIC's hardware clock */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16 /* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */ #define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8 +/* Results of testing */ #define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0 -#define MC_CMD_PTP_MANF_SUCCESS 0x0 /* enum */ -#define MC_CMD_PTP_MANF_FPGA_LOAD 0x1 /* enum */ -#define MC_CMD_PTP_MANF_FPGA_VERSION 0x2 /* enum */ -#define MC_CMD_PTP_MANF_FPGA_REGISTERS 0x3 /* enum */ -#define MC_CMD_PTP_MANF_OSCILLATOR 0x4 /* enum */ -#define MC_CMD_PTP_MANF_TIMESTAMPS 0x5 /* enum */ -#define MC_CMD_PTP_MANF_PACKET_COUNT 0x6 /* enum */ -#define MC_CMD_PTP_MANF_FILTER_COUNT 0x7 /* enum */ -#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8 /* enum */ -#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9 /* enum */ +/* enum: Successful test */ +#define MC_CMD_PTP_MANF_SUCCESS 0x0 +/* enum: FPGA load failed */ +#define MC_CMD_PTP_MANF_FPGA_LOAD 0x1 +/* enum: FPGA version invalid */ +#define MC_CMD_PTP_MANF_FPGA_VERSION 0x2 +/* enum: FPGA registers incorrect */ +#define MC_CMD_PTP_MANF_FPGA_REGISTERS 0x3 +/* enum: Oscillator possibly not working? 
*/ +#define MC_CMD_PTP_MANF_OSCILLATOR 0x4 +/* enum: Timestamps not increasing */ +#define MC_CMD_PTP_MANF_TIMESTAMPS 0x5 +/* enum: Mismatched packet count */ +#define MC_CMD_PTP_MANF_PACKET_COUNT 0x6 +/* enum: Mismatched packet count (Siena filter and FPGA) */ +#define MC_CMD_PTP_MANF_FILTER_COUNT 0x7 +/* enum: Not enough packets to perform timestamp check */ +#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8 +/* enum: Timestamp trigger GPIO not working */ +#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9 +/* Presence of external oscillator */ #define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4 /* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */ #define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12 +/* Results of testing */ #define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0 +/* Number of packets received by FPGA */ #define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4 +/* Number of packets received by Siena filters */ #define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8 +/* MC_CMD_PTP_OUT_FPGAREAD msgresponse */ +#define MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1 +#define MC_CMD_PTP_OUT_FPGAREAD_LENMAX 252 +#define MC_CMD_PTP_OUT_FPGAREAD_LEN(num) (0+1*(num)) +#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_OFST 0 +#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_LEN 1 +#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MINNUM 1 +#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM 252 + /***********************************/ /* MC_CMD_CSR_READ32 @@ -704,6 +1207,7 @@ /* MC_CMD_CSR_READ32_IN msgrequest */ #define MC_CMD_CSR_READ32_IN_LEN 12 +/* Address */ #define MC_CMD_CSR_READ32_IN_ADDR_OFST 0 #define MC_CMD_CSR_READ32_IN_STEP_OFST 4 #define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8 @@ -712,6 +1216,7 @@ #define MC_CMD_CSR_READ32_OUT_LENMIN 4 #define MC_CMD_CSR_READ32_OUT_LENMAX 252 #define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num)) +/* The last dword is the status, not a value read */ #define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0 #define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4 #define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1 @@ -728,6 +1233,7 @@ #define MC_CMD_CSR_WRITE32_IN_LENMIN 12 #define MC_CMD_CSR_WRITE32_IN_LENMAX 252 #define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num)) +/* Address */ #define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0 #define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4 #define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8 @@ -741,6 +1247,48 @@ /***********************************/ +/* MC_CMD_HP + * These commands are used for HP related features. They are grouped under one + * MCDI command to avoid creating too many MCDI commands. + */ +#define MC_CMD_HP 0x54 + +/* MC_CMD_HP_IN msgrequest */ +#define MC_CMD_HP_IN_LEN 16 +/* HP OCSD sub-command. When address is not NULL, request activation of OCSD at + * the specified address with the specified interval.When address is NULL, + * INTERVAL is interpreted as a command: 0: stop OCSD / 1: Report OCSD current + * state / 2: (debug) Show temperature reported by one of the supported + * sensors. + */ +#define MC_CMD_HP_IN_SUBCMD_OFST 0 +/* enum: OCSD (Option Card Sensor Data) sub-command. */ +#define MC_CMD_HP_IN_OCSD_SUBCMD 0x0 +/* enum: Last known valid HP sub-command. */ +#define MC_CMD_HP_IN_LAST_SUBCMD 0x0 +/* The address to the array of sensor fields. (Or NULL to use a sub-command.) + */ +#define MC_CMD_HP_IN_OCSD_ADDR_OFST 4 +#define MC_CMD_HP_IN_OCSD_ADDR_LEN 8 +#define MC_CMD_HP_IN_OCSD_ADDR_LO_OFST 4 +#define MC_CMD_HP_IN_OCSD_ADDR_HI_OFST 8 +/* The requested update interval, in seconds. (Or the sub-command if ADDR is + * NULL.) 
+ */ +#define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12 + +/* MC_CMD_HP_OUT msgresponse */ +#define MC_CMD_HP_OUT_LEN 4 +#define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0 +/* enum: OCSD stopped for this card. */ +#define MC_CMD_HP_OUT_OCSD_STOPPED 0x1 +/* enum: OCSD was successfully started with the address provided. */ +#define MC_CMD_HP_OUT_OCSD_STARTED 0x2 +/* enum: OCSD was already started for this card. */ +#define MC_CMD_HP_OUT_OCSD_ALREADY_STARTED 0x3 + + +/***********************************/ /* MC_CMD_STACKINFO * Get stack information. */ @@ -753,6 +1301,7 @@ #define MC_CMD_STACKINFO_OUT_LENMIN 12 #define MC_CMD_STACKINFO_OUT_LENMAX 252 #define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num)) +/* (thread ptr, stack size, free space) for each thread in system */ #define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0 #define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12 #define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1 @@ -767,19 +1316,35 @@ /* MC_CMD_MDIO_READ_IN msgrequest */ #define MC_CMD_MDIO_READ_IN_LEN 16 +/* Bus number; there are two MDIO buses: one for the internal PHY, and one for + * external devices. + */ #define MC_CMD_MDIO_READ_IN_BUS_OFST 0 -#define MC_CMD_MDIO_BUS_INTERNAL 0x0 /* enum */ -#define MC_CMD_MDIO_BUS_EXTERNAL 0x1 /* enum */ +/* enum: Internal. */ +#define MC_CMD_MDIO_BUS_INTERNAL 0x0 +/* enum: External. */ +#define MC_CMD_MDIO_BUS_EXTERNAL 0x1 +/* Port address */ #define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4 +/* Device Address or clause 22. */ #define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8 -#define MC_CMD_MDIO_CLAUSE22 0x20 /* enum */ +/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you + * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22. + */ +#define MC_CMD_MDIO_CLAUSE22 0x20 +/* Address */ #define MC_CMD_MDIO_READ_IN_ADDR_OFST 12 /* MC_CMD_MDIO_READ_OUT msgresponse */ #define MC_CMD_MDIO_READ_OUT_LEN 8 +/* Value */ #define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0 +/* Status the MDIO commands return the raw status bits from the MDIO block. A + * "good" transaction should have the DONE bit set and all other bits clear. + */ #define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4 -#define MC_CMD_MDIO_STATUS_GOOD 0x8 /* enum */ +/* enum: Good. */ +#define MC_CMD_MDIO_STATUS_GOOD 0x8 /***********************************/ @@ -790,18 +1355,34 @@ /* MC_CMD_MDIO_WRITE_IN msgrequest */ #define MC_CMD_MDIO_WRITE_IN_LEN 20 +/* Bus number; there are two MDIO buses: one for the internal PHY, and one for + * external devices. + */ #define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0 +/* enum: Internal. */ /* MC_CMD_MDIO_BUS_INTERNAL 0x0 */ +/* enum: External. */ /* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */ +/* Port address */ #define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4 +/* Device Address or clause 22. */ #define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8 +/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you + * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22. + */ /* MC_CMD_MDIO_CLAUSE22 0x20 */ +/* Address */ #define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12 +/* Value */ #define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16 /* MC_CMD_MDIO_WRITE_OUT msgresponse */ #define MC_CMD_MDIO_WRITE_OUT_LEN 4 +/* Status; the MDIO commands return the raw status bits from the MDIO block. A + * "good" transaction should have the DONE bit set and all other bits clear. + */ #define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0 +/* enum: Good. 
*/ /* MC_CMD_MDIO_STATUS_GOOD 0x8 */ @@ -815,6 +1396,9 @@ #define MC_CMD_DBI_WRITE_IN_LENMIN 12 #define MC_CMD_DBI_WRITE_IN_LENMAX 252 #define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num)) +/* Each write op consists of an address (offset 0), byte enable/VF/CS2 (offset + * 32) and value (offset 64). See MC_CMD_DBIWROP_TYPEDEF. + */ #define MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0 #define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12 #define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1 @@ -828,9 +1412,15 @@ #define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0 #define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0 #define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32 -#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_OFST 4 -#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_LBN 32 -#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_WIDTH 32 +#define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4 +#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16 +#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16 +#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15 +#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_WIDTH 1 +#define MC_CMD_DBIWROP_TYPEDEF_CS2_LBN 14 +#define MC_CMD_DBIWROP_TYPEDEF_CS2_WIDTH 1 +#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32 +#define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32 #define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8 #define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64 #define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32 @@ -838,69 +1428,111 @@ /***********************************/ /* MC_CMD_PORT_READ32 - * Read a 32-bit register from the indirect port register map. + * Read a 32-bit register from the indirect port register map. The port to + * access is implied by the Shared memory channel used. */ #define MC_CMD_PORT_READ32 0x14 /* MC_CMD_PORT_READ32_IN msgrequest */ #define MC_CMD_PORT_READ32_IN_LEN 4 +/* Address */ #define MC_CMD_PORT_READ32_IN_ADDR_OFST 0 /* MC_CMD_PORT_READ32_OUT msgresponse */ #define MC_CMD_PORT_READ32_OUT_LEN 8 +/* Value */ #define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0 +/* Status */ #define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4 /***********************************/ /* MC_CMD_PORT_WRITE32 - * Write a 32-bit register to the indirect port register map. + * Write a 32-bit register to the indirect port register map. The port to + * access is implied by the Shared memory channel used. */ #define MC_CMD_PORT_WRITE32 0x15 /* MC_CMD_PORT_WRITE32_IN msgrequest */ #define MC_CMD_PORT_WRITE32_IN_LEN 8 +/* Address */ #define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0 +/* Value */ #define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4 /* MC_CMD_PORT_WRITE32_OUT msgresponse */ #define MC_CMD_PORT_WRITE32_OUT_LEN 4 +/* Status */ #define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0 /***********************************/ /* MC_CMD_PORT_READ128 - * Read a 128-bit register from the indirect port register map. + * Read a 128-bit register from the indirect port register map. The port to + * access is implied by the Shared memory channel used. */ #define MC_CMD_PORT_READ128 0x16 /* MC_CMD_PORT_READ128_IN msgrequest */ #define MC_CMD_PORT_READ128_IN_LEN 4 +/* Address */ #define MC_CMD_PORT_READ128_IN_ADDR_OFST 0 /* MC_CMD_PORT_READ128_OUT msgresponse */ #define MC_CMD_PORT_READ128_OUT_LEN 20 +/* Value */ #define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0 #define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16 +/* Status */ #define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16 /***********************************/ /* MC_CMD_PORT_WRITE128 - * Write a 128-bit register to the indirect port register map. + * Write a 128-bit register to the indirect port register map. The port to + * access is implied by the Shared memory channel used. 
*/ #define MC_CMD_PORT_WRITE128 0x17 /* MC_CMD_PORT_WRITE128_IN msgrequest */ #define MC_CMD_PORT_WRITE128_IN_LEN 20 +/* Address */ #define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0 +/* Value */ #define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4 #define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16 /* MC_CMD_PORT_WRITE128_OUT msgresponse */ #define MC_CMD_PORT_WRITE128_OUT_LEN 4 +/* Status */ #define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0 +/* MC_CMD_CAPABILITIES structuredef */ +#define MC_CMD_CAPABILITIES_LEN 4 +/* Small buf table. */ +#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0 +#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 1 +/* Turbo mode (for Maranello). */ +#define MC_CMD_CAPABILITIES_TURBO_LBN 1 +#define MC_CMD_CAPABILITIES_TURBO_WIDTH 1 +/* Turbo mode active (for Maranello). */ +#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 2 +#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 1 +/* PTP offload. */ +#define MC_CMD_CAPABILITIES_PTP_LBN 3 +#define MC_CMD_CAPABILITIES_PTP_WIDTH 1 +/* AOE mode. */ +#define MC_CMD_CAPABILITIES_AOE_LBN 4 +#define MC_CMD_CAPABILITIES_AOE_WIDTH 1 +/* AOE mode active. */ +#define MC_CMD_CAPABILITIES_AOE_ACTIVE_LBN 5 +#define MC_CMD_CAPABILITIES_AOE_ACTIVE_WIDTH 1 +/* AOE mode active. */ +#define MC_CMD_CAPABILITIES_FC_ACTIVE_LBN 6 +#define MC_CMD_CAPABILITIES_FC_ACTIVE_WIDTH 1 +#define MC_CMD_CAPABILITIES_RESERVED_LBN 7 +#define MC_CMD_CAPABILITIES_RESERVED_WIDTH 25 + /***********************************/ /* MC_CMD_GET_BOARD_CFG @@ -918,18 +1550,10 @@ #define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0 #define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4 #define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32 +/* See MC_CMD_CAPABILITIES */ #define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36 -#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0x0 /* enum */ -#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 0x1 /* enum */ -#define MC_CMD_CAPABILITIES_TURBO_LBN 0x1 /* enum */ -#define MC_CMD_CAPABILITIES_TURBO_WIDTH 0x1 /* enum */ -#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 0x2 /* enum */ -#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 0x1 /* enum */ -#define MC_CMD_CAPABILITIES_PTP_LBN 0x3 /* enum */ -#define MC_CMD_CAPABILITIES_PTP_WIDTH 0x1 /* enum */ +/* See MC_CMD_CAPABILITIES */ #define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40 -/* Enum values, see field(s): */ -/* CAPABILITIES_PORT0 */ #define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44 #define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6 #define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50 @@ -938,6 +1562,11 @@ #define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60 #define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64 #define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68 +/* This field contains a 16-bit value for each of the types of NVRAM area. The + * values are defined in the firmware/mc/platform/.c file for a specific board + * type, but otherwise have no meaning to the MC; they are used by the driver + * to manage selection of appropriate firmware updates. + */ #define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72 #define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2 #define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM 12 @@ -946,7 +1575,7 @@ /***********************************/ /* MC_CMD_DBI_READX - * Read DBI register(s). 
+ * Read DBI register(s) -- extended functionality */ #define MC_CMD_DBI_READX 0x19 @@ -954,6 +1583,7 @@ #define MC_CMD_DBI_READX_IN_LENMIN 8 #define MC_CMD_DBI_READX_IN_LENMAX 248 #define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num)) +/* Each Read op consists of an address (offset 0), VF/CS2) */ #define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0 #define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8 #define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0 @@ -965,11 +1595,27 @@ #define MC_CMD_DBI_READX_OUT_LENMIN 4 #define MC_CMD_DBI_READX_OUT_LENMAX 252 #define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num)) +/* Value */ #define MC_CMD_DBI_READX_OUT_VALUE_OFST 0 #define MC_CMD_DBI_READX_OUT_VALUE_LEN 4 #define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1 #define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63 +/* MC_CMD_DBIRDOP_TYPEDEF structuredef */ +#define MC_CMD_DBIRDOP_TYPEDEF_LEN 8 +#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0 +#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0 +#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32 +#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4 +#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16 +#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16 +#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15 +#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_WIDTH 1 +#define MC_CMD_DBIRDOP_TYPEDEF_CS2_LBN 14 +#define MC_CMD_DBIRDOP_TYPEDEF_CS2_WIDTH 1 +#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LBN 32 +#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_WIDTH 32 + /***********************************/ /* MC_CMD_SET_RAND_SEED @@ -979,6 +1625,7 @@ /* MC_CMD_SET_RAND_SEED_IN msgrequest */ #define MC_CMD_SET_RAND_SEED_IN_LEN 16 +/* Seed value. */ #define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0 #define MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16 @@ -988,7 +1635,7 @@ /***********************************/ /* MC_CMD_LTSSM_HIST - * Retrieve the history of the PCIE LTSSM. + * Retrieve the history of the LTSSM, if the build supports it. */ #define MC_CMD_LTSSM_HIST 0x1b @@ -999,6 +1646,7 @@ #define MC_CMD_LTSSM_HIST_OUT_LENMIN 0 #define MC_CMD_LTSSM_HIST_OUT_LENMAX 252 #define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num)) +/* variable number of LTSSM values, as bytes. The history is read-to-clear. */ #define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0 #define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4 #define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0 @@ -1007,41 +1655,47 @@ /***********************************/ /* MC_CMD_DRV_ATTACH - * Inform MCPU that this port is managed on the host. + * Inform MCPU that this port is managed on the host (i.e. driver active). For + * Huntington, also request the preferred datapath firmware to use if possible + * (it may not be possible for this request to be fulfilled; the driver must + * issue a subsequent MC_CMD_GET_CAPABILITIES command to determine which + * features are actually available). The FIRMWARE_ID field is ignored by older + * platforms. 
*/ #define MC_CMD_DRV_ATTACH 0x1c /* MC_CMD_DRV_ATTACH_IN msgrequest */ -#define MC_CMD_DRV_ATTACH_IN_LEN 8 +#define MC_CMD_DRV_ATTACH_IN_LEN 12 +/* new state (0=detached, 1=attached) to set if UPDATE=1 */ #define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0 +/* 1 to set new state, or 0 to just report the existing state */ #define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4 +/* preferred datapath firmware (for Huntington; ignored for Siena) */ +#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8 +/* enum: Prefer to use full featured firmware */ +#define MC_CMD_FW_FULL_FEATURED 0x0 +/* enum: Prefer to use firmware with fewer features but lower latency */ +#define MC_CMD_FW_LOW_LATENCY 0x1 /* MC_CMD_DRV_ATTACH_OUT msgresponse */ #define MC_CMD_DRV_ATTACH_OUT_LEN 4 +/* previous or existing state (0=detached, 1=attached) */ #define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0 - -/***********************************/ -/* MC_CMD_NCSI_PROD - * Trigger an NC-SI event. +/* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8 +/* previous or existing state (0=detached, 1=attached) */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0 +/* Flags associated with this function */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4 +/* enum: Labels the lowest-numbered function visible to the OS */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0 +/* enum: The function can control the link state of the physical port it is + * bound to. */ -#define MC_CMD_NCSI_PROD 0x1d - -/* MC_CMD_NCSI_PROD_IN msgrequest */ -#define MC_CMD_NCSI_PROD_IN_LEN 4 -#define MC_CMD_NCSI_PROD_IN_EVENTS_OFST 0 -#define MC_CMD_NCSI_PROD_LINKCHANGE 0x0 /* enum */ -#define MC_CMD_NCSI_PROD_RESET 0x1 /* enum */ -#define MC_CMD_NCSI_PROD_DRVATTACH 0x2 /* enum */ -#define MC_CMD_NCSI_PROD_IN_LINKCHANGE_LBN 0 -#define MC_CMD_NCSI_PROD_IN_LINKCHANGE_WIDTH 1 -#define MC_CMD_NCSI_PROD_IN_RESET_LBN 1 -#define MC_CMD_NCSI_PROD_IN_RESET_WIDTH 1 -#define MC_CMD_NCSI_PROD_IN_DRVATTACH_LBN 2 -#define MC_CMD_NCSI_PROD_IN_DRVATTACH_WIDTH 1 - -/* MC_CMD_NCSI_PROD_OUT msgresponse */ -#define MC_CMD_NCSI_PROD_OUT_LEN 0 +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL 0x1 +/* enum: The function can perform privileged operations */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED 0x2 /***********************************/ @@ -1052,6 +1706,7 @@ /* MC_CMD_SHMUART_IN msgrequest */ #define MC_CMD_SHMUART_IN_LEN 4 +/* ??? */ #define MC_CMD_SHMUART_IN_FLAG_OFST 0 /* MC_CMD_SHMUART_OUT msgresponse */ @@ -1059,13 +1714,33 @@ /***********************************/ +/* MC_CMD_PORT_RESET + * Generic per-port reset. There is no equivalent for per-board reset. Locks + * required: None; Return code: 0, ETIME. NOTE: This command is deprecated - + * use MC_CMD_ENTITY_RESET instead. + */ +#define MC_CMD_PORT_RESET 0x20 + +/* MC_CMD_PORT_RESET_IN msgrequest */ +#define MC_CMD_PORT_RESET_IN_LEN 0 + +/* MC_CMD_PORT_RESET_OUT msgresponse */ +#define MC_CMD_PORT_RESET_OUT_LEN 0 + + +/***********************************/ /* MC_CMD_ENTITY_RESET - * Generic per-port reset. + * Generic per-resource reset. There is no equivalent for per-board reset. + * Locks required: None; Return code: 0, ETIME. NOTE: This command is an + * extended version of the deprecated MC_CMD_PORT_RESET with added fields. */ #define MC_CMD_ENTITY_RESET 0x20 /* MC_CMD_ENTITY_RESET_IN msgrequest */ #define MC_CMD_ENTITY_RESET_IN_LEN 4 +/* Optional flags field. Omitting this will perform a "legacy" reset action + * (TBD). 
+ */ #define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0 #define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0 #define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1 @@ -1082,7 +1757,9 @@ /* MC_CMD_PCIE_CREDITS_IN msgrequest */ #define MC_CMD_PCIE_CREDITS_IN_LEN 8 +/* poll period. 0 is disabled */ #define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0 +/* wipe statistics */ #define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4 /* MC_CMD_PCIE_CREDITS_OUT msgresponse */ @@ -1143,7 +1820,7 @@ /***********************************/ /* MC_CMD_PUTS - * puts(3) implementation over MCDI + * Copy the given ASCII string out onto UART and/or out of the network port. */ #define MC_CMD_PUTS 0x23 @@ -1169,7 +1846,8 @@ /***********************************/ /* MC_CMD_GET_PHY_CFG - * Report PHY configuration. + * Report PHY configuration. This guarantees to succeed even if the PHY is in a + * 'zombie' state. Locks required: None */ #define MC_CMD_GET_PHY_CFG 0x24 @@ -1178,6 +1856,7 @@ /* MC_CMD_GET_PHY_CFG_OUT msgresponse */ #define MC_CMD_GET_PHY_CFG_OUT_LEN 72 +/* flags */ #define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0 #define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0 #define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1 @@ -1193,7 +1872,9 @@ #define MC_CMD_GET_PHY_CFG_OUT_TXDIS_WIDTH 1 #define MC_CMD_GET_PHY_CFG_OUT_BIST_LBN 6 #define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1 +/* ?? */ #define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4 +/* Bitmask of supported capabilities */ #define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8 #define MC_CMD_PHY_CAP_10HDX_LBN 1 #define MC_CMD_PHY_CAP_10HDX_WIDTH 1 @@ -1215,20 +1896,36 @@ #define MC_CMD_PHY_CAP_ASYM_WIDTH 1 #define MC_CMD_PHY_CAP_AN_LBN 10 #define MC_CMD_PHY_CAP_AN_WIDTH 1 +#define MC_CMD_PHY_CAP_40000FDX_LBN 11 +#define MC_CMD_PHY_CAP_40000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_DDM_LBN 12 +#define MC_CMD_PHY_CAP_DDM_WIDTH 1 +/* ?? */ #define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12 +/* ?? */ #define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16 +/* ?? */ #define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20 +/* ?? */ #define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24 #define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20 +/* ?? */ #define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44 -#define MC_CMD_MEDIA_XAUI 0x1 /* enum */ -#define MC_CMD_MEDIA_CX4 0x2 /* enum */ -#define MC_CMD_MEDIA_KX4 0x3 /* enum */ -#define MC_CMD_MEDIA_XFP 0x4 /* enum */ -#define MC_CMD_MEDIA_SFP_PLUS 0x5 /* enum */ -#define MC_CMD_MEDIA_BASE_T 0x6 /* enum */ +/* enum: Xaui. */ +#define MC_CMD_MEDIA_XAUI 0x1 +/* enum: CX4. */ +#define MC_CMD_MEDIA_CX4 0x2 +/* enum: KX4. */ +#define MC_CMD_MEDIA_KX4 0x3 +/* enum: XFP Far. */ +#define MC_CMD_MEDIA_XFP 0x4 +/* enum: SFP+. */ +#define MC_CMD_MEDIA_SFP_PLUS 0x5 +/* enum: 10GBaseT. */ +#define MC_CMD_MEDIA_BASE_T 0x6 #define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48 -#define MC_CMD_MMD_CLAUSE22 0x0 /* enum */ +/* enum: Native clause 22 */ +#define MC_CMD_MMD_CLAUSE22 0x0 #define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */ #define MC_CMD_MMD_CLAUSE45_WIS 0x2 /* enum */ #define MC_CMD_MMD_CLAUSE45_PCS 0x3 /* enum */ @@ -1236,7 +1933,8 @@ #define MC_CMD_MMD_CLAUSE45_DTEXS 0x5 /* enum */ #define MC_CMD_MMD_CLAUSE45_TC 0x6 /* enum */ #define MC_CMD_MMD_CLAUSE45_AN 0x7 /* enum */ -#define MC_CMD_MMD_CLAUSE45_C22EXT 0x1d /* enum */ +/* enum: Clause22 proxied over clause45 by PHY. 
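One illustrative way to consume the SUPPORTED_CAP bitmask above: the MC_CMD_PHY_CAP_*_LBN values are plain bit numbers within that dword. A small sketch (an assumption of this edit, not driver code from the patch), with a hypothetical little-endian mcdi_get_dword() helper:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper: load a dword from an MCDI buffer, little-endian. */
static uint32_t mcdi_get_dword(const uint8_t *buf, unsigned int ofst)
{
        return (uint32_t)buf[ofst] |
               ((uint32_t)buf[ofst + 1] << 8) |
               ((uint32_t)buf[ofst + 2] << 16) |
               ((uint32_t)buf[ofst + 3] << 24);
}

/* Does this PHY advertise 40G full duplex?  resp points at a raw
 * MC_CMD_GET_PHY_CFG_OUT response. */
static bool phy_supports_40000fdx(const uint8_t *resp)
{
        uint32_t cap = mcdi_get_dword(resp, MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST);

        return cap & (1u << MC_CMD_PHY_CAP_40000FDX_LBN);
}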
*/ +#define MC_CMD_MMD_CLAUSE45_C22EXT 0x1d #define MC_CMD_MMD_CLAUSE45_VEND1 0x1e /* enum */ #define MC_CMD_MMD_CLAUSE45_VEND2 0x1f /* enum */ #define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52 @@ -1245,18 +1943,31 @@ /***********************************/ /* MC_CMD_START_BIST - * Start a BIST test on the PHY. + * Start a BIST test on the PHY. Locks required: PHY_LOCK if doing a PHY BIST + * Return code: 0, EINVAL, EACCES (if PHY_LOCK is not held) */ #define MC_CMD_START_BIST 0x25 /* MC_CMD_START_BIST_IN msgrequest */ #define MC_CMD_START_BIST_IN_LEN 4 +/* Type of test. */ #define MC_CMD_START_BIST_IN_TYPE_OFST 0 -#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1 /* enum */ -#define MC_CMD_PHY_BIST_CABLE_LONG 0x2 /* enum */ -#define MC_CMD_BPX_SERDES_BIST 0x3 /* enum */ -#define MC_CMD_MC_LOOPBACK_BIST 0x4 /* enum */ -#define MC_CMD_PHY_BIST 0x5 /* enum */ +/* enum: Run the PHY's short cable BIST. */ +#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1 +/* enum: Run the PHY's long cable BIST. */ +#define MC_CMD_PHY_BIST_CABLE_LONG 0x2 +/* enum: Run BIST on the currently selected BPX Serdes (XAUI or XFI) . */ +#define MC_CMD_BPX_SERDES_BIST 0x3 +/* enum: Run the MC loopback tests. */ +#define MC_CMD_MC_LOOPBACK_BIST 0x4 +/* enum: Run the PHY's standard BIST. */ +#define MC_CMD_PHY_BIST 0x5 +/* enum: Run MC RAM test. */ +#define MC_CMD_MC_MEM_BIST 0x6 +/* enum: Run Port RAM test. */ +#define MC_CMD_PORT_MEM_BIST 0x7 +/* enum: Run register test. */ +#define MC_CMD_REG_BIST 0x8 /* MC_CMD_START_BIST_OUT msgresponse */ #define MC_CMD_START_BIST_OUT_LEN 0 @@ -1264,7 +1975,12 @@ /***********************************/ /* MC_CMD_POLL_BIST - * Poll for BIST completion. + * Poll for BIST completion. Returns a single status code, and optionally some + * PHY specific bist output. The driver should only consume the BIST output + * after validating OUTLEN and MC_CMD_GET_PHY_CFG.TYPE. If a driver can't + * successfully parse the BIST output, it should still respect the pass/Fail in + * OUT.RESULT. Locks required: PHY_LOCK if doing a PHY BIST. Return code: 0, + * EACCES (if PHY_LOCK is not held). */ #define MC_CMD_POLL_BIST 0x26 @@ -1273,15 +1989,21 @@ /* MC_CMD_POLL_BIST_OUT msgresponse */ #define MC_CMD_POLL_BIST_OUT_LEN 8 +/* result */ #define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 -#define MC_CMD_POLL_BIST_RUNNING 0x1 /* enum */ -#define MC_CMD_POLL_BIST_PASSED 0x2 /* enum */ -#define MC_CMD_POLL_BIST_FAILED 0x3 /* enum */ -#define MC_CMD_POLL_BIST_TIMEOUT 0x4 /* enum */ +/* enum: Running. */ +#define MC_CMD_POLL_BIST_RUNNING 0x1 +/* enum: Passed. */ +#define MC_CMD_POLL_BIST_PASSED 0x2 +/* enum: Failed. */ +#define MC_CMD_POLL_BIST_FAILED 0x3 +/* enum: Timed-out. 
*/ +#define MC_CMD_POLL_BIST_TIMEOUT 0x4 #define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4 /* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */ #define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36 +/* result */ /* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */ /* Enum values, see field(s): */ /* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */ @@ -1289,42 +2011,116 @@ #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8 #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12 #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16 +/* Status of each channel A */ #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20 -#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1 /* enum */ -#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 0x2 /* enum */ -#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 0x3 /* enum */ -#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 0x4 /* enum */ -#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9 /* enum */ +/* enum: Ok. */ +#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1 +/* enum: Open. */ +#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 0x2 +/* enum: Intra-pair short. */ +#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 0x3 +/* enum: Inter-pair short. */ +#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 0x4 +/* enum: Busy. */ +#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9 +/* Status of each channel B */ #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24 /* Enum values, see field(s): */ /* CABLE_STATUS_A */ +/* Status of each channel C */ #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28 /* Enum values, see field(s): */ /* CABLE_STATUS_A */ +/* Status of each channel D */ #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32 /* Enum values, see field(s): */ /* CABLE_STATUS_A */ /* MC_CMD_POLL_BIST_OUT_MRSFP msgresponse */ #define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8 +/* result */ /* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */ /* Enum values, see field(s): */ /* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */ #define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4 -#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0 /* enum */ -#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 0x1 /* enum */ -#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 0x2 /* enum */ -#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 0x3 /* enum */ -#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 0x4 /* enum */ -#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 0x5 /* enum */ -#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 0x6 /* enum */ -#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 0x7 /* enum */ -#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 0x8 /* enum */ +/* enum: Complete. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0 +/* enum: Bus switch off I2C write. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 0x1 +/* enum: Bus switch off I2C no access IO exp. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 0x2 +/* enum: Bus switch off I2C no access module. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 0x3 +/* enum: IO exp I2C configure. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 0x4 +/* enum: Bus switch I2C no cross talk. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 0x5 +/* enum: Module presence. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 0x6 +/* enum: Module ID I2C access. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 0x7 +/* enum: Module ID sane value. 
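A sketch of the driver-side handling suggested by the MC_CMD_POLL_BIST comment above: honour OUT.RESULT even when the type-specific tail of the response cannot be parsed, and only touch that tail after checking the response length. This is illustrative only; the error codes and layout assumptions are this edit's, not the patch's.

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

/* resp/resp_len describe a raw MC_CMD_POLL_BIST response. */
static int poll_bist_result(const uint8_t *resp, size_t resp_len)
{
        uint32_t result;

        if (resp_len < MC_CMD_POLL_BIST_OUT_LEN)
                return -EIO;                    /* too short even for RESULT */

        /* RESULT is the little-endian dword at offset 0 */
        result = (uint32_t)resp[MC_CMD_POLL_BIST_OUT_RESULT_OFST] |
                 ((uint32_t)resp[MC_CMD_POLL_BIST_OUT_RESULT_OFST + 1] << 8) |
                 ((uint32_t)resp[MC_CMD_POLL_BIST_OUT_RESULT_OFST + 2] << 16) |
                 ((uint32_t)resp[MC_CMD_POLL_BIST_OUT_RESULT_OFST + 3] << 24);

        switch (result) {
        case MC_CMD_POLL_BIST_RUNNING:
                return -EAGAIN;                 /* not finished; poll again */
        case MC_CMD_POLL_BIST_PASSED:
                return 0;                       /* type-specific data is optional */
        default:
                return -EIO;                    /* FAILED, TIMEOUT or unknown */
        }
}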
*/ +#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 0x8 + +/* MC_CMD_POLL_BIST_OUT_MEM msgresponse */ +#define MC_CMD_POLL_BIST_OUT_MEM_LEN 36 +/* result */ +/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */ +/* Enum values, see field(s): */ +/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */ +#define MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4 +/* enum: Test has completed. */ +#define MC_CMD_POLL_BIST_MEM_COMPLETE 0x0 +/* enum: RAM test - walk ones. */ +#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ONES 0x1 +/* enum: RAM test - walk zeros. */ +#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ZEROS 0x2 +/* enum: RAM test - walking inversions zeros/ones. */ +#define MC_CMD_POLL_BIST_MEM_MEM_INV_ZERO_ONE 0x3 +/* enum: RAM test - walking inversions checkerboard. */ +#define MC_CMD_POLL_BIST_MEM_MEM_INV_CHKBOARD 0x4 +/* enum: Register test - set / clear individual bits. */ +#define MC_CMD_POLL_BIST_MEM_REG 0x5 +/* enum: ECC error detected. */ +#define MC_CMD_POLL_BIST_MEM_ECC 0x6 +/* Failure address, only valid if result is POLL_BIST_FAILED */ +#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8 +/* Bus or address space to which the failure address corresponds */ +#define MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12 +/* enum: MC MIPS bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_MC 0x0 +/* enum: CSR IREG bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_CSR 0x1 +/* enum: RX DPCPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX 0x2 +/* enum: TX0 DPCPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX0 0x3 +/* enum: TX1 DPCPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX1 0x4 +/* enum: RX DICPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX 0x5 +/* enum: TX DICPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_TX 0x6 +/* Pattern written to RAM / register */ +#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16 +/* Actual value read from RAM / register */ +#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20 +/* ECC error mask */ +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24 +/* ECC parity error mask */ +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28 +/* ECC fatal error mask */ +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32 /***********************************/ /* MC_CMD_FLUSH_RX_QUEUES - * Flush receive queue(s). + * Flush receive queue(s). If SRIOV is enabled (via MC_CMD_SRIOV), then RXQ + * flushes should be initiated via this MCDI operation, rather than via + * directly writing FLUSH_CMD. + * + * The flush is completed (either done/fail) asynchronously (after this command + * returns). The driver must still wait for flush done/failure events as usual. */ #define MC_CMD_FLUSH_RX_QUEUES 0x27 @@ -1343,7 +2139,7 @@ /***********************************/ /* MC_CMD_GET_LOOPBACK_MODES - * Get port's loopback modes. + * Returns a bitmask of loopback modes available at each speed. */ #define MC_CMD_GET_LOOPBACK_MODES 0x28 @@ -1351,61 +2147,116 @@ #define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0 /* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */ -#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 40 +/* Supported loopbacks. 
*/ #define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_OFST 0 #define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LEN 8 #define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0 #define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4 -#define MC_CMD_LOOPBACK_NONE 0x0 /* enum */ -#define MC_CMD_LOOPBACK_DATA 0x1 /* enum */ -#define MC_CMD_LOOPBACK_GMAC 0x2 /* enum */ -#define MC_CMD_LOOPBACK_XGMII 0x3 /* enum */ -#define MC_CMD_LOOPBACK_XGXS 0x4 /* enum */ -#define MC_CMD_LOOPBACK_XAUI 0x5 /* enum */ -#define MC_CMD_LOOPBACK_GMII 0x6 /* enum */ -#define MC_CMD_LOOPBACK_SGMII 0x7 /* enum */ -#define MC_CMD_LOOPBACK_XGBR 0x8 /* enum */ -#define MC_CMD_LOOPBACK_XFI 0x9 /* enum */ -#define MC_CMD_LOOPBACK_XAUI_FAR 0xa /* enum */ -#define MC_CMD_LOOPBACK_GMII_FAR 0xb /* enum */ -#define MC_CMD_LOOPBACK_SGMII_FAR 0xc /* enum */ -#define MC_CMD_LOOPBACK_XFI_FAR 0xd /* enum */ -#define MC_CMD_LOOPBACK_GPHY 0xe /* enum */ -#define MC_CMD_LOOPBACK_PHYXS 0xf /* enum */ -#define MC_CMD_LOOPBACK_PCS 0x10 /* enum */ -#define MC_CMD_LOOPBACK_PMAPMD 0x11 /* enum */ -#define MC_CMD_LOOPBACK_XPORT 0x12 /* enum */ -#define MC_CMD_LOOPBACK_XGMII_WS 0x13 /* enum */ -#define MC_CMD_LOOPBACK_XAUI_WS 0x14 /* enum */ -#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 /* enum */ -#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 /* enum */ -#define MC_CMD_LOOPBACK_GMII_WS 0x17 /* enum */ -#define MC_CMD_LOOPBACK_XFI_WS 0x18 /* enum */ -#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 /* enum */ -#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a /* enum */ +/* enum: None. */ +#define MC_CMD_LOOPBACK_NONE 0x0 +/* enum: Data. */ +#define MC_CMD_LOOPBACK_DATA 0x1 +/* enum: GMAC. */ +#define MC_CMD_LOOPBACK_GMAC 0x2 +/* enum: XGMII. */ +#define MC_CMD_LOOPBACK_XGMII 0x3 +/* enum: XGXS. */ +#define MC_CMD_LOOPBACK_XGXS 0x4 +/* enum: XAUI. */ +#define MC_CMD_LOOPBACK_XAUI 0x5 +/* enum: GMII. */ +#define MC_CMD_LOOPBACK_GMII 0x6 +/* enum: SGMII. */ +#define MC_CMD_LOOPBACK_SGMII 0x7 +/* enum: XGBR. */ +#define MC_CMD_LOOPBACK_XGBR 0x8 +/* enum: XFI. */ +#define MC_CMD_LOOPBACK_XFI 0x9 +/* enum: XAUI Far. */ +#define MC_CMD_LOOPBACK_XAUI_FAR 0xa +/* enum: GMII Far. */ +#define MC_CMD_LOOPBACK_GMII_FAR 0xb +/* enum: SGMII Far. */ +#define MC_CMD_LOOPBACK_SGMII_FAR 0xc +/* enum: XFI Far. */ +#define MC_CMD_LOOPBACK_XFI_FAR 0xd +/* enum: GPhy. */ +#define MC_CMD_LOOPBACK_GPHY 0xe +/* enum: PhyXS. */ +#define MC_CMD_LOOPBACK_PHYXS 0xf +/* enum: PCS. */ +#define MC_CMD_LOOPBACK_PCS 0x10 +/* enum: PMA-PMD. */ +#define MC_CMD_LOOPBACK_PMAPMD 0x11 +/* enum: Cross-Port. */ +#define MC_CMD_LOOPBACK_XPORT 0x12 +/* enum: XGMII-Wireside. */ +#define MC_CMD_LOOPBACK_XGMII_WS 0x13 +/* enum: XAUI Wireside. */ +#define MC_CMD_LOOPBACK_XAUI_WS 0x14 +/* enum: XAUI Wireside Far. */ +#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 +/* enum: XAUI Wireside near. */ +#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 +/* enum: GMII Wireside. */ +#define MC_CMD_LOOPBACK_GMII_WS 0x17 +/* enum: XFI Wireside. */ +#define MC_CMD_LOOPBACK_XFI_WS 0x18 +/* enum: XFI Wireside Far. */ +#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 +/* enum: PhyXS Wireside. */ +#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a +/* enum: PMA lanes MAC-Serdes. */ +#define MC_CMD_LOOPBACK_PMA_INT 0x1b +/* enum: KR Serdes Parallel (Encoder). */ +#define MC_CMD_LOOPBACK_SD_NEAR 0x1c +/* enum: KR Serdes Serial. */ +#define MC_CMD_LOOPBACK_SD_FAR 0x1d +/* enum: PMA lanes MAC-Serdes Wireside. */ +#define MC_CMD_LOOPBACK_PMA_INT_WS 0x1e +/* enum: KR Serdes Parallel Wireside (Full PCS). 
*/ +#define MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f +/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */ +#define MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 +/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */ +#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21 +/* enum: KR Serdes Serial Wireside. */ +#define MC_CMD_LOOPBACK_SD_FES_WS 0x22 +/* Supported loopbacks. */ #define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8 #define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8 #define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_OFST 8 #define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_OFST 12 /* Enum values, see field(s): */ /* 100M */ +/* Supported loopbacks. */ #define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_OFST 16 #define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LEN 8 #define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST 16 #define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_OFST 20 /* Enum values, see field(s): */ /* 100M */ +/* Supported loopbacks. */ #define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST 24 #define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN 8 #define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_OFST 24 #define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_OFST 28 /* Enum values, see field(s): */ /* 100M */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_OFST 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_OFST 36 +/* Enum values, see field(s): */ +/* 100M */ /***********************************/ /* MC_CMD_GET_LINK - * Read the unified MAC/PHY link state. + * Read the unified MAC/PHY link state. Locks required: None Return code: 0, + * ETIME. */ #define MC_CMD_GET_LINK 0x29 @@ -1414,9 +2265,15 @@ /* MC_CMD_GET_LINK_OUT msgresponse */ #define MC_CMD_GET_LINK_OUT_LEN 28 +/* near-side advertised capabilities */ #define MC_CMD_GET_LINK_OUT_CAP_OFST 0 +/* link-partner advertised capabilities */ #define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4 +/* Autonegotiated speed in mbit/s. The link may still be down even if this + * reads non-zero. + */ #define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8 +/* Current loopback setting. */ #define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12 /* Enum values, see field(s): */ /* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ @@ -1429,10 +2286,14 @@ #define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1 #define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3 #define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1 +/* This returns the negotiated flow control value. */ #define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20 -#define MC_CMD_FCNTL_OFF 0x0 /* enum */ -#define MC_CMD_FCNTL_RESPOND 0x1 /* enum */ -#define MC_CMD_FCNTL_BIDIR 0x2 /* enum */ +/* enum: Flow control is off. */ +#define MC_CMD_FCNTL_OFF 0x0 +/* enum: Respond to flow control. */ +#define MC_CMD_FCNTL_RESPOND 0x1 +/* enum: Respond to and Issue flow control. */ +#define MC_CMD_FCNTL_BIDIR 0x2 #define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24 #define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 #define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 @@ -1446,13 +2307,16 @@ /***********************************/ /* MC_CMD_SET_LINK - * Write the unified MAC/PHY link configuration. + * Write the unified MAC/PHY link configuration. Locks required: None. Return + * code: 0, EINVAL, ETIME */ #define MC_CMD_SET_LINK 0x2a /* MC_CMD_SET_LINK_IN msgrequest */ #define MC_CMD_SET_LINK_IN_LEN 16 +/* ??? 
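For reference, each *_LO/*_HI pair in MC_CMD_GET_LOOPBACK_MODES_OUT above forms one 64-bit mask indexed by the MC_CMD_LOOPBACK_* values. A hedged sketch of checking whether a mode is offered at 10G, assuming the little-endian layout used throughout MCDI:

#include <stdbool.h>
#include <stdint.h>

static uint64_t mcdi_get_qword(const uint8_t *buf, unsigned int ofst)
{
        uint64_t v = 0;
        unsigned int i;

        for (i = 0; i < 8; i++)                 /* little-endian 64-bit load */
                v |= (uint64_t)buf[ofst + i] << (8 * i);
        return v;
}

/* resp points at a raw MC_CMD_GET_LOOPBACK_MODES_OUT response. */
static bool loopback_supported_at_10g(const uint8_t *resp, unsigned int mode)
{
        uint64_t mask = mcdi_get_qword(resp, MC_CMD_GET_LOOPBACK_MODES_OUT_10G_OFST);

        return mask & (1ull << mode);           /* e.g. mode = MC_CMD_LOOPBACK_XAUI */
}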
*/ #define MC_CMD_SET_LINK_IN_CAP_OFST 0 +/* Flags */ #define MC_CMD_SET_LINK_IN_FLAGS_OFST 4 #define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0 #define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1 @@ -1460,9 +2324,13 @@ #define MC_CMD_SET_LINK_IN_POWEROFF_WIDTH 1 #define MC_CMD_SET_LINK_IN_TXDIS_LBN 2 #define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1 +/* Loopback mode. */ #define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8 /* Enum values, see field(s): */ /* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ +/* A loopback speed of "0" is supported, and means (choose any available + * speed). + */ #define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12 /* MC_CMD_SET_LINK_OUT msgresponse */ @@ -1471,12 +2339,13 @@ /***********************************/ /* MC_CMD_SET_ID_LED - * Set indentification LED state. + * Set identification LED state. Locks required: None. Return code: 0, EINVAL */ #define MC_CMD_SET_ID_LED 0x2b /* MC_CMD_SET_ID_LED_IN msgrequest */ #define MC_CMD_SET_ID_LED_IN_LEN 4 +/* Set LED state. */ #define MC_CMD_SET_ID_LED_IN_STATE_OFST 0 #define MC_CMD_LED_OFF 0x0 /* enum */ #define MC_CMD_LED_ON 0x1 /* enum */ @@ -1488,12 +2357,15 @@ /***********************************/ /* MC_CMD_SET_MAC - * Set MAC configuration. + * Set MAC configuration. Locks required: None. Return code: 0, EINVAL */ #define MC_CMD_SET_MAC 0x2c /* MC_CMD_SET_MAC_IN msgrequest */ #define MC_CMD_SET_MAC_IN_LEN 24 +/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of + * EtherII, VLAN, bug16011 padding). + */ #define MC_CMD_SET_MAC_IN_MTU_OFST 0 #define MC_CMD_SET_MAC_IN_DRAIN_OFST 4 #define MC_CMD_SET_MAC_IN_ADDR_OFST 8 @@ -1506,10 +2378,14 @@ #define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1 #define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1 #define MC_CMD_SET_MAC_IN_FCNTL_OFST 20 +/* enum: Flow control is off. */ /* MC_CMD_FCNTL_OFF 0x0 */ +/* enum: Respond to flow control. */ /* MC_CMD_FCNTL_RESPOND 0x1 */ +/* enum: Respond to and Issue flow control. */ /* MC_CMD_FCNTL_BIDIR 0x2 */ -#define MC_CMD_FCNTL_AUTO 0x3 /* enum */ +/* enum: Auto neg flow control. */ +#define MC_CMD_FCNTL_AUTO 0x3 /* MC_CMD_SET_MAC_OUT msgresponse */ #define MC_CMD_SET_MAC_OUT_LEN 0 @@ -1517,12 +2393,18 @@ /***********************************/ /* MC_CMD_PHY_STATS - * Get generic PHY statistics. + * Get generic PHY statistics. This call returns the statistics for a generic + * PHY in a sparse array (indexed by the enumerate). Each value is represented + * by a 32bit number. If the DMA_ADDR is 0, then no DMA is performed, and the + * statistics may be read from the message response. If DMA_ADDR != 0, then the + * statistics are dmad to that (page-aligned location). Locks required: None. + * Returns: 0, ETIME */ #define MC_CMD_PHY_STATS 0x2d /* MC_CMD_PHY_STATS_IN msgrequest */ #define MC_CMD_PHY_STATS_IN_LEN 8 +/* ??? 
*/ #define MC_CMD_PHY_STATS_IN_DMA_ADDR_OFST 0 #define MC_CMD_PHY_STATS_IN_DMA_ADDR_LEN 8 #define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0 @@ -1536,40 +2418,71 @@ #define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_OFST 0 #define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN 4 #define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_PHY_NSTATS -#define MC_CMD_OUI 0x0 /* enum */ -#define MC_CMD_PMA_PMD_LINK_UP 0x1 /* enum */ -#define MC_CMD_PMA_PMD_RX_FAULT 0x2 /* enum */ -#define MC_CMD_PMA_PMD_TX_FAULT 0x3 /* enum */ -#define MC_CMD_PMA_PMD_SIGNAL 0x4 /* enum */ -#define MC_CMD_PMA_PMD_SNR_A 0x5 /* enum */ -#define MC_CMD_PMA_PMD_SNR_B 0x6 /* enum */ -#define MC_CMD_PMA_PMD_SNR_C 0x7 /* enum */ -#define MC_CMD_PMA_PMD_SNR_D 0x8 /* enum */ -#define MC_CMD_PCS_LINK_UP 0x9 /* enum */ -#define MC_CMD_PCS_RX_FAULT 0xa /* enum */ -#define MC_CMD_PCS_TX_FAULT 0xb /* enum */ -#define MC_CMD_PCS_BER 0xc /* enum */ -#define MC_CMD_PCS_BLOCK_ERRORS 0xd /* enum */ -#define MC_CMD_PHYXS_LINK_UP 0xe /* enum */ -#define MC_CMD_PHYXS_RX_FAULT 0xf /* enum */ -#define MC_CMD_PHYXS_TX_FAULT 0x10 /* enum */ -#define MC_CMD_PHYXS_ALIGN 0x11 /* enum */ -#define MC_CMD_PHYXS_SYNC 0x12 /* enum */ -#define MC_CMD_AN_LINK_UP 0x13 /* enum */ -#define MC_CMD_AN_COMPLETE 0x14 /* enum */ -#define MC_CMD_AN_10GBT_STATUS 0x15 /* enum */ -#define MC_CMD_CL22_LINK_UP 0x16 /* enum */ -#define MC_CMD_PHY_NSTATS 0x17 /* enum */ +/* enum: OUI. */ +#define MC_CMD_OUI 0x0 +/* enum: PMA-PMD Link Up. */ +#define MC_CMD_PMA_PMD_LINK_UP 0x1 +/* enum: PMA-PMD RX Fault. */ +#define MC_CMD_PMA_PMD_RX_FAULT 0x2 +/* enum: PMA-PMD TX Fault. */ +#define MC_CMD_PMA_PMD_TX_FAULT 0x3 +/* enum: PMA-PMD Signal */ +#define MC_CMD_PMA_PMD_SIGNAL 0x4 +/* enum: PMA-PMD SNR A. */ +#define MC_CMD_PMA_PMD_SNR_A 0x5 +/* enum: PMA-PMD SNR B. */ +#define MC_CMD_PMA_PMD_SNR_B 0x6 +/* enum: PMA-PMD SNR C. */ +#define MC_CMD_PMA_PMD_SNR_C 0x7 +/* enum: PMA-PMD SNR D. */ +#define MC_CMD_PMA_PMD_SNR_D 0x8 +/* enum: PCS Link Up. */ +#define MC_CMD_PCS_LINK_UP 0x9 +/* enum: PCS RX Fault. */ +#define MC_CMD_PCS_RX_FAULT 0xa +/* enum: PCS TX Fault. */ +#define MC_CMD_PCS_TX_FAULT 0xb +/* enum: PCS BER. */ +#define MC_CMD_PCS_BER 0xc +/* enum: PCS Block Errors. */ +#define MC_CMD_PCS_BLOCK_ERRORS 0xd +/* enum: PhyXS Link Up. */ +#define MC_CMD_PHYXS_LINK_UP 0xe +/* enum: PhyXS RX Fault. */ +#define MC_CMD_PHYXS_RX_FAULT 0xf +/* enum: PhyXS TX Fault. */ +#define MC_CMD_PHYXS_TX_FAULT 0x10 +/* enum: PhyXS Align. */ +#define MC_CMD_PHYXS_ALIGN 0x11 +/* enum: PhyXS Sync. */ +#define MC_CMD_PHYXS_SYNC 0x12 +/* enum: AN link-up. */ +#define MC_CMD_AN_LINK_UP 0x13 +/* enum: AN Complete. */ +#define MC_CMD_AN_COMPLETE 0x14 +/* enum: AN 10GBaseT Status. */ +#define MC_CMD_AN_10GBT_STATUS 0x15 +/* enum: Clause 22 Link-Up. */ +#define MC_CMD_CL22_LINK_UP 0x16 +/* enum: (Last entry) */ +#define MC_CMD_PHY_NSTATS 0x17 /***********************************/ /* MC_CMD_MAC_STATS - * Get generic MAC statistics. + * Get generic MAC statistics. This call returns unified statistics maintained + * by the MC as it switches between the GMAC and XMAC. The MC will write out + * all supported stats. The driver should zero initialise the buffer to + * guarantee consistent results. If the DMA_ADDR is 0, then no DMA is + * performed, and the statistics may be read from the message response. If + * DMA_ADDR != 0, then the statistics are dmad to that (page-aligned location). + * Locks required: None. 
Returns: 0, ETIME */ #define MC_CMD_MAC_STATS 0x2e /* MC_CMD_MAC_STATS_IN msgrequest */ #define MC_CMD_MAC_STATS_IN_LEN 16 +/* ??? */ #define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0 #define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8 #define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0 @@ -1686,6 +2599,7 @@ /* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */ #define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32 +/* this is only used for the first record */ #define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32 @@ -1715,7 +2629,23 @@ /***********************************/ /* MC_CMD_MEMCPY - * Perform memory copy operation. + * DMA write data into (Rid,Addr), either by dma reading (Rid,Addr), or by data + * embedded directly in the command. + * + * A common pattern is for a client to use generation counts to signal a dma + * update of a datastructure. To facilitate this, this MCDI operation can + * contain multiple requests which are executed in strict order. Requests take + * the form of duplicating the entire MCDI request continuously (including the + * requests record, which is ignored in all but the first structure) + * + * The source data can either come from a DMA from the host, or it can be + * embedded within the request directly, thereby eliminating a DMA read. To + * indicate this, the client sets FROM_RID=%RID_INLINE, ADDR_HI=0, and + * ADDR_LO=offset, and inserts the data at %offset from the start of the + * payload. It's the callers responsibility to ensure that the embedded data + * doesn't overlap the records. + * + * Returns: 0, EINVAL (invalid RID) */ #define MC_CMD_MEMCPY 0x31 @@ -1723,6 +2653,7 @@ #define MC_CMD_MEMCPY_IN_LENMIN 32 #define MC_CMD_MEMCPY_IN_LENMAX 224 #define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num)) +/* see MC_CMD_MEMCPY_RECORD_TYPEDEF */ #define MC_CMD_MEMCPY_IN_RECORD_OFST 0 #define MC_CMD_MEMCPY_IN_RECORD_LEN 32 #define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1 @@ -1743,14 +2674,22 @@ #define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 #define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */ #define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */ +/* A type value of 1 is unused. */ #define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 -#define MC_CMD_WOL_TYPE_MAGIC 0x0 /* enum */ -#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2 /* enum */ -#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3 /* enum */ -#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4 /* enum */ -#define MC_CMD_WOL_TYPE_BITMAP 0x5 /* enum */ -#define MC_CMD_WOL_TYPE_LINK 0x6 /* enum */ -#define MC_CMD_WOL_TYPE_MAX 0x7 /* enum */ +/* enum: Magic */ +#define MC_CMD_WOL_TYPE_MAGIC 0x0 +/* enum: MS Windows Magic */ +#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2 +/* enum: IPv4 Syn */ +#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3 +/* enum: IPv6 Syn */ +#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4 +/* enum: Bitmap */ +#define MC_CMD_WOL_TYPE_BITMAP 0x5 +/* enum: Link */ +#define MC_CMD_WOL_TYPE_LINK 0x6 +/* enum: (Above this for future use) */ +#define MC_CMD_WOL_TYPE_MAX 0x7 #define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8 #define MC_CMD_WOL_FILTER_SET_IN_DATA_LEN 4 #define MC_CMD_WOL_FILTER_SET_IN_DATA_NUM 46 @@ -1820,7 +2759,7 @@ /***********************************/ /* MC_CMD_WOL_FILTER_REMOVE - * Remove a WoL filter. + * Remove a WoL filter. Locks required: None. Returns: 0, EINVAL, ENOSYS */ #define MC_CMD_WOL_FILTER_REMOVE 0x33 @@ -1834,7 +2773,8 @@ /***********************************/ /* MC_CMD_WOL_FILTER_RESET - * Reset (i.e. remove all) WoL filters. + * Reset (i.e. remove all) WoL filters. 
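Referring back to the MC_CMD_MEMCPY description above: when data is embedded in the request, the caller chooses the ADDR_LO offset itself and must keep the embedded bytes clear of the record array. A small sizing sketch (illustrative only; the per-record field offsets are not reproduced in this hunk, so just the overall layout is shown):

#include <stddef.h>

/* Earliest safe offset for inline payload, and total request size, for a
 * request carrying n_records 32-byte records followed by inline_len bytes
 * of embedded data.
 */
static size_t memcpy_req_size(unsigned int n_records, size_t inline_len,
                              size_t *inline_ofst)
{
        /* records are packed from offset 0, so inline data may start
         * immediately after the last one without overlapping them */
        *inline_ofst = MC_CMD_MEMCPY_IN_LEN(n_records);
        return *inline_ofst + inline_len;
}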
Locks required: None. Returns: 0, + * ENOSYS */ #define MC_CMD_WOL_FILTER_RESET 0x34 @@ -1850,7 +2790,7 @@ /***********************************/ /* MC_CMD_SET_MCAST_HASH - * Set the MCASH hash value. + * Set the MCAST hash value without otherwise reconfiguring the MAC */ #define MC_CMD_SET_MCAST_HASH 0x35 @@ -1867,7 +2807,8 @@ /***********************************/ /* MC_CMD_NVRAM_TYPES - * Get virtual NVRAM partitions information. + * Return bitfield indicating available types of virtual NVRAM partitions. + * Locks required: none. Returns: 0 */ #define MC_CMD_NVRAM_TYPES 0x36 @@ -1876,26 +2817,54 @@ /* MC_CMD_NVRAM_TYPES_OUT msgresponse */ #define MC_CMD_NVRAM_TYPES_OUT_LEN 4 +/* Bit mask of supported types. */ #define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0 -#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0 /* enum */ -#define MC_CMD_NVRAM_TYPE_MC_FW 0x1 /* enum */ -#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 0x2 /* enum */ -#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 0x3 /* enum */ -#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 0x4 /* enum */ -#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 0x5 /* enum */ -#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 0x6 /* enum */ -#define MC_CMD_NVRAM_TYPE_EXP_ROM 0x7 /* enum */ -#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 0x8 /* enum */ -#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 0x9 /* enum */ -#define MC_CMD_NVRAM_TYPE_PHY_PORT0 0xa /* enum */ -#define MC_CMD_NVRAM_TYPE_PHY_PORT1 0xb /* enum */ -#define MC_CMD_NVRAM_TYPE_LOG 0xc /* enum */ -#define MC_CMD_NVRAM_TYPE_FPGA 0xd /* enum */ +/* enum: Disabled callisto. */ +#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0 +/* enum: MC firmware. */ +#define MC_CMD_NVRAM_TYPE_MC_FW 0x1 +/* enum: MC backup firmware. */ +#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 0x2 +/* enum: Static configuration Port0. */ +#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 0x3 +/* enum: Static configuration Port1. */ +#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 0x4 +/* enum: Dynamic configuration Port0. */ +#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 0x5 +/* enum: Dynamic configuration Port1. */ +#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 0x6 +/* enum: Expansion Rom. */ +#define MC_CMD_NVRAM_TYPE_EXP_ROM 0x7 +/* enum: Expansion Rom Configuration Port0. */ +#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 0x8 +/* enum: Expansion Rom Configuration Port1. */ +#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 0x9 +/* enum: Phy Configuration Port0. */ +#define MC_CMD_NVRAM_TYPE_PHY_PORT0 0xa +/* enum: Phy Configuration Port1. */ +#define MC_CMD_NVRAM_TYPE_PHY_PORT1 0xb +/* enum: Log. */ +#define MC_CMD_NVRAM_TYPE_LOG 0xc +/* enum: FPGA image. */ +#define MC_CMD_NVRAM_TYPE_FPGA 0xd +/* enum: FPGA backup image */ +#define MC_CMD_NVRAM_TYPE_FPGA_BACKUP 0xe +/* enum: FC firmware. */ +#define MC_CMD_NVRAM_TYPE_FC_FW 0xf +/* enum: FC backup firmware. */ +#define MC_CMD_NVRAM_TYPE_FC_FW_BACKUP 0x10 +/* enum: CPLD image. */ +#define MC_CMD_NVRAM_TYPE_CPLD 0x11 +/* enum: Licensing information. */ +#define MC_CMD_NVRAM_TYPE_LICENSE 0x12 +/* enum: FC Log. */ +#define MC_CMD_NVRAM_TYPE_FC_LOG 0x13 /***********************************/ /* MC_CMD_NVRAM_INFO - * Read info about a virtual NVRAM partition. + * Read info about a virtual NVRAM partition. Locks required: none. Returns: 0, + * EINVAL (bad type). 
*/ #define MC_CMD_NVRAM_INFO 0x37 @@ -1915,13 +2884,19 @@ #define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12 #define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0 #define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1 +#define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7 +#define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1 #define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16 #define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20 /***********************************/ /* MC_CMD_NVRAM_UPDATE_START - * Start a group of update operations on a virtual NVRAM partition. + * Start a group of update operations on a virtual NVRAM partition. Locks + * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type), EACCES (if + * PHY_LOCK required and not held). */ #define MC_CMD_NVRAM_UPDATE_START 0x38 @@ -1937,7 +2912,9 @@ /***********************************/ /* MC_CMD_NVRAM_READ - * Read data from a virtual NVRAM partition. + * Read data from a virtual NVRAM partition. Locks required: PHY_LOCK if + * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if + * PHY_LOCK required and not held) */ #define MC_CMD_NVRAM_READ 0x39 @@ -1947,6 +2924,7 @@ /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ #define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4 +/* amount to read in bytes */ #define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8 /* MC_CMD_NVRAM_READ_OUT msgresponse */ @@ -1961,7 +2939,9 @@ /***********************************/ /* MC_CMD_NVRAM_WRITE - * Write data to a virtual NVRAM partition. + * Write data to a virtual NVRAM partition. Locks required: PHY_LOCK if + * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if + * PHY_LOCK required and not held) */ #define MC_CMD_NVRAM_WRITE 0x3a @@ -1985,7 +2965,9 @@ /***********************************/ /* MC_CMD_NVRAM_ERASE - * Erase sector(s) from a virtual NVRAM partition. + * Erase sector(s) from a virtual NVRAM partition. Locks required: PHY_LOCK if + * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if + * PHY_LOCK required and not held) */ #define MC_CMD_NVRAM_ERASE 0x3b @@ -2003,7 +2985,9 @@ /***********************************/ /* MC_CMD_NVRAM_UPDATE_FINISH - * Finish a group of update operations on a virtual NVRAM partition. + * Finish a group of update operations on a virtual NVRAM partition. Locks + * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad + * type/offset/length), EACCES (if PHY_LOCK required and not held) */ #define MC_CMD_NVRAM_UPDATE_FINISH 0x3c @@ -2021,6 +3005,20 @@ /***********************************/ /* MC_CMD_REBOOT * Reboot the MC. + * + * The AFTER_ASSERTION flag is intended to be used when the driver notices an + * assertion failure (at which point it is expected to perform a complete tear + * down and reinitialise), to allow both ports to reset the MC once in an + * atomic fashion. + * + * Production mc firmwares are generally compiled with REBOOT_ON_ASSERT=1, + * which means that they will automatically reboot out of the assertion + * handler, so this is in practise an optional operation. It is still + * recommended that drivers execute this to support custom firmwares with + * REBOOT_ON_ASSERT=0. + * + * Locks required: NONE Returns: Nothing. You get back a response with ERR=1, + * DATALEN=0 */ #define MC_CMD_REBOOT 0x3d @@ -2035,7 +3033,9 @@ /***********************************/ /* MC_CMD_SCHEDINFO - * Request scheduler info. + * Request scheduler info. Locks required: NONE. 
Returns: An array of + * (timeslice,maximum overrun), one for each thread, in ascending order of + * thread address. */ #define MC_CMD_SCHEDINFO 0x3e @@ -2054,14 +3054,24 @@ /***********************************/ /* MC_CMD_REBOOT_MODE + * Set the mode for the next MC reboot. Locks required: NONE. Sets the reboot + * mode to the specified value. Returns the old mode. */ #define MC_CMD_REBOOT_MODE 0x3f /* MC_CMD_REBOOT_MODE_IN msgrequest */ #define MC_CMD_REBOOT_MODE_IN_LEN 4 #define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0 -#define MC_CMD_REBOOT_MODE_NORMAL 0x0 /* enum */ -#define MC_CMD_REBOOT_MODE_SNAPPER 0x3 /* enum */ +/* enum: Normal. */ +#define MC_CMD_REBOOT_MODE_NORMAL 0x0 +/* enum: Power-on Reset. */ +#define MC_CMD_REBOOT_MODE_POR 0x2 +/* enum: Snapper. */ +#define MC_CMD_REBOOT_MODE_SNAPPER 0x3 +/* enum: snapper fake POR */ +#define MC_CMD_REBOOT_MODE_SNAPPER_POR 0x4 +#define MC_CMD_REBOOT_MODE_IN_FAKE_LBN 7 +#define MC_CMD_REBOOT_MODE_IN_FAKE_WIDTH 1 /* MC_CMD_REBOOT_MODE_OUT msgresponse */ #define MC_CMD_REBOOT_MODE_OUT_LEN 4 @@ -2071,32 +3081,145 @@ /***********************************/ /* MC_CMD_SENSOR_INFO * Returns information about every available sensor. + * + * Each sensor has a single (16bit) value, and a corresponding state. The + * mapping between value and state is nominally determined by the MC, but may + * be implemented using up to 2 ranges per sensor. + * + * This call returns a mask (32bit) of the sensors that are supported by this + * platform, then an array of sensor information structures, in order of sensor + * type (but without gaps for unimplemented sensors). Each structure defines + * the ranges for the corresponding sensor. An unused range is indicated by + * equal limit values. If one range is used, a value outside that range results + * in STATE_FATAL. If two ranges are used, a value outside the second range + * results in STATE_FATAL while a value outside the first and inside the second + * range results in STATE_WARNING. + * + * Sensor masks and sensor information arrays are organised into pages. For + * backward compatibility, older host software can only use sensors in page 0. + * Bit 32 in the sensor mask was previously unused, and is no reserved for use + * as the next page flag. + * + * If the request does not contain a PAGE value then firmware will only return + * page 0 of sensor information, with bit 31 in the sensor mask cleared. + * + * If the request contains a PAGE value then firmware responds with the sensor + * mask and sensor information array for that page of sensors. In this case bit + * 31 in the mask is set if another page exists. + * + * Locks required: None Returns: 0 */ #define MC_CMD_SENSOR_INFO 0x41 /* MC_CMD_SENSOR_INFO_IN msgrequest */ #define MC_CMD_SENSOR_INFO_IN_LEN 0 +/* MC_CMD_SENSOR_INFO_EXT_IN msgrequest */ +#define MC_CMD_SENSOR_INFO_EXT_IN_LEN 4 +/* Which page of sensors to report. + * + * Page 0 contains sensors 0 to 30 (sensor 31 is the next page bit). + * + * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit). etc. 
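A sketch of walking the paged sensor mask just described. It relies on a hypothetical helper, sensor_info_ext(), standing in for an MC_CMD_SENSOR_INFO exchange with the PAGE field set, and on the MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN definition that appears further below.

#include <stdint.h>

/* Hypothetical: issue MC_CMD_SENSOR_INFO with PAGE=page and return the
 * MASK dword from the response. */
extern uint32_t sensor_info_ext(unsigned int page);

static void for_each_supported_sensor(void (*cb)(unsigned int sensor_type))
{
        unsigned int page = 0;
        uint32_t mask;

        do {
                unsigned int bit;

                mask = sensor_info_ext(page);
                /* bit 31 is the next-page flag, not a sensor */
                for (bit = 0; bit < MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN; bit++)
                        if (mask & (1u << bit))
                                cb(page * 32 + bit);    /* e.g. MC_CMD_SENSOR_CONTROLLER_TEMP */
                page++;
        } while (mask & (1u << MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN));
}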
+ */ +#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0 + /* MC_CMD_SENSOR_INFO_OUT msgresponse */ #define MC_CMD_SENSOR_INFO_OUT_LENMIN 12 #define MC_CMD_SENSOR_INFO_OUT_LENMAX 252 #define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num)) #define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0 -#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0 /* enum */ -#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1 /* enum */ -#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2 /* enum */ -#define MC_CMD_SENSOR_PHY0_TEMP 0x3 /* enum */ -#define MC_CMD_SENSOR_PHY0_COOLING 0x4 /* enum */ -#define MC_CMD_SENSOR_PHY1_TEMP 0x5 /* enum */ -#define MC_CMD_SENSOR_PHY1_COOLING 0x6 /* enum */ -#define MC_CMD_SENSOR_IN_1V0 0x7 /* enum */ -#define MC_CMD_SENSOR_IN_1V2 0x8 /* enum */ -#define MC_CMD_SENSOR_IN_1V8 0x9 /* enum */ -#define MC_CMD_SENSOR_IN_2V5 0xa /* enum */ -#define MC_CMD_SENSOR_IN_3V3 0xb /* enum */ -#define MC_CMD_SENSOR_IN_12V0 0xc /* enum */ -#define MC_CMD_SENSOR_IN_1V2A 0xd /* enum */ -#define MC_CMD_SENSOR_IN_VREF 0xe /* enum */ +/* enum: Controller temperature: degC */ +#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0 +/* enum: Phy common temperature: degC */ +#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1 +/* enum: Controller cooling: bool */ +#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2 +/* enum: Phy 0 temperature: degC */ +#define MC_CMD_SENSOR_PHY0_TEMP 0x3 +/* enum: Phy 0 cooling: bool */ +#define MC_CMD_SENSOR_PHY0_COOLING 0x4 +/* enum: Phy 1 temperature: degC */ +#define MC_CMD_SENSOR_PHY1_TEMP 0x5 +/* enum: Phy 1 cooling: bool */ +#define MC_CMD_SENSOR_PHY1_COOLING 0x6 +/* enum: 1.0v power: mV */ +#define MC_CMD_SENSOR_IN_1V0 0x7 +/* enum: 1.2v power: mV */ +#define MC_CMD_SENSOR_IN_1V2 0x8 +/* enum: 1.8v power: mV */ +#define MC_CMD_SENSOR_IN_1V8 0x9 +/* enum: 2.5v power: mV */ +#define MC_CMD_SENSOR_IN_2V5 0xa +/* enum: 3.3v power: mV */ +#define MC_CMD_SENSOR_IN_3V3 0xb +/* enum: 12v power: mV */ +#define MC_CMD_SENSOR_IN_12V0 0xc +/* enum: 1.2v analogue power: mV */ +#define MC_CMD_SENSOR_IN_1V2A 0xd +/* enum: reference voltage: mV */ +#define MC_CMD_SENSOR_IN_VREF 0xe +/* enum: AOE FPGA power: mV */ +#define MC_CMD_SENSOR_OUT_VAOE 0xf +/* enum: AOE FPGA temperature: degC */ +#define MC_CMD_SENSOR_AOE_TEMP 0x10 +/* enum: AOE FPGA PSU temperature: degC */ +#define MC_CMD_SENSOR_PSU_AOE_TEMP 0x11 +/* enum: AOE PSU temperature: degC */ +#define MC_CMD_SENSOR_PSU_TEMP 0x12 +/* enum: Fan 0 speed: RPM */ +#define MC_CMD_SENSOR_FAN_0 0x13 +/* enum: Fan 1 speed: RPM */ +#define MC_CMD_SENSOR_FAN_1 0x14 +/* enum: Fan 2 speed: RPM */ +#define MC_CMD_SENSOR_FAN_2 0x15 +/* enum: Fan 3 speed: RPM */ +#define MC_CMD_SENSOR_FAN_3 0x16 +/* enum: Fan 4 speed: RPM */ +#define MC_CMD_SENSOR_FAN_4 0x17 +/* enum: AOE FPGA input power: mV */ +#define MC_CMD_SENSOR_IN_VAOE 0x18 +/* enum: AOE FPGA current: mA */ +#define MC_CMD_SENSOR_OUT_IAOE 0x19 +/* enum: AOE FPGA input current: mA */ +#define MC_CMD_SENSOR_IN_IAOE 0x1a +/* enum: NIC power consumption: W */ +#define MC_CMD_SENSOR_NIC_POWER 0x1b +/* enum: 0.9v power voltage: mV */ +#define MC_CMD_SENSOR_IN_0V9 0x1c +/* enum: 0.9v power current: mA */ +#define MC_CMD_SENSOR_IN_I0V9 0x1d +/* enum: 1.2v power current: mA */ +#define MC_CMD_SENSOR_IN_I1V2 0x1e +/* enum: Not a sensor: reserved for the next page flag */ +#define MC_CMD_SENSOR_PAGE0_NEXT 0x1f +/* enum: 0.9v power voltage (at ADC): mV */ +#define MC_CMD_SENSOR_IN_0V9_ADC 0x20 +/* enum: Controller temperature 2: degC */ +#define MC_CMD_SENSOR_CONTROLLER_2_TEMP 0x21 +/* enum: Voltage regulator internal temperature: degC */ +#define 
MC_CMD_SENSOR_VREG_INTERNAL_TEMP 0x22 +/* enum: 0.9V voltage regulator temperature: degC */ +#define MC_CMD_SENSOR_VREG_0V9_TEMP 0x23 +/* enum: 1.2V voltage regulator temperature: degC */ +#define MC_CMD_SENSOR_VREG_1V2_TEMP 0x24 +/* enum: controller internal temperature sensor voltage (internal ADC): mV */ +#define MC_CMD_SENSOR_CONTROLLER_VPTAT 0x25 +/* enum: controller internal temperature (internal ADC): degC */ +#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP 0x26 +/* enum: controller internal temperature sensor voltage (external ADC): mV */ +#define MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC 0x27 +/* enum: controller internal temperature (external ADC): degC */ +#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC 0x28 +/* enum: ambient temperature: degC */ +#define MC_CMD_SENSOR_AMBIENT_TEMP 0x29 +/* enum: air flow: bool */ +#define MC_CMD_SENSOR_AIRFLOW 0x2a +/* enum: voltage between VSS08D and VSS08D at CSR: mV */ +#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b +/* enum: voltage between VSS08D and VSS08D at CSR (external ADC): mV */ +#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c +/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */ #define MC_CMD_SENSOR_ENTRY_OFST 4 #define MC_CMD_SENSOR_ENTRY_LEN 8 #define MC_CMD_SENSOR_ENTRY_LO_OFST 4 @@ -2104,6 +3227,23 @@ #define MC_CMD_SENSOR_ENTRY_MINNUM 1 #define MC_CMD_SENSOR_ENTRY_MAXNUM 31 +/* MC_CMD_SENSOR_INFO_EXT_OUT msgresponse */ +#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 12 +#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252 +#define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num)) +#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_SENSOR_INFO_OUT */ +#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31 +#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_WIDTH 1 +/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */ +/* MC_CMD_SENSOR_ENTRY_OFST 4 */ +/* MC_CMD_SENSOR_ENTRY_LEN 8 */ +/* MC_CMD_SENSOR_ENTRY_LO_OFST 4 */ +/* MC_CMD_SENSOR_ENTRY_HI_OFST 8 */ +/* MC_CMD_SENSOR_ENTRY_MINNUM 1 */ +/* MC_CMD_SENSOR_ENTRY_MAXNUM 31 */ + /* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */ #define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_LEN 8 #define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_OFST 0 @@ -2126,39 +3266,80 @@ /***********************************/ /* MC_CMD_READ_SENSORS - * Returns the current reading from each sensor. + * Returns the current reading from each sensor. DMAs an array of sensor + * readings, in order of sensor type (but without gaps for unimplemented + * sensors), into host memory. Each array element is a + * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF dword. + * + * If the request does not contain the LENGTH field then only sensors 0 to 30 + * are reported, to avoid DMA buffer overflow in older host software. If the + * sensor reading require more space than the LENGTH allows, then return + * EINVAL. + * + * The MC will send a SENSOREVT event every time any sensor changes state. The + * driver is responsible for ensuring that it doesn't miss any events. The + * board will function normally if all sensors are in STATE_OK or + * STATE_WARNING. Otherwise the board should not be expected to function. */ #define MC_CMD_READ_SENSORS 0x42 /* MC_CMD_READ_SENSORS_IN msgrequest */ #define MC_CMD_READ_SENSORS_IN_LEN 8 +/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). 
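Each reading DMA'd by MC_CMD_READ_SENSORS is one 4-byte MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF, laid out as defined a little further below (16-bit VALUE, then STATE and TYPE bytes). A decoding sketch, assuming a little-endian host buffer; the struct and helper are this edit's, not the driver's:

#include <stdint.h>

struct sensor_reading {
        uint16_t value;                         /* units depend on sensor type */
        uint8_t state;                          /* MC_CMD_SENSOR_STATE_* */
        uint8_t type;                           /* MC_CMD_SENSOR_* */
};

static struct sensor_reading read_sensor_entry(const uint8_t *dma_buf,
                                               unsigned int idx)
{
        const uint8_t *e = dma_buf + idx * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN;
        struct sensor_reading r;

        r.value = (uint16_t)(e[MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_OFST] |
                             (e[MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_OFST + 1] << 8));
        r.state = e[MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST];
        r.type = e[MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST];
        return r;
}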
*/ #define MC_CMD_READ_SENSORS_IN_DMA_ADDR_OFST 0 #define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LEN 8 #define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0 #define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4 +/* MC_CMD_READ_SENSORS_EXT_IN msgrequest */ +#define MC_CMD_READ_SENSORS_EXT_IN_LEN 12 +/* DMA address of host buffer for sensor readings */ +#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0 +#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8 +#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0 +#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4 +/* Size in bytes of host buffer. */ +#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8 + /* MC_CMD_READ_SENSORS_OUT msgresponse */ #define MC_CMD_READ_SENSORS_OUT_LEN 0 +/* MC_CMD_READ_SENSORS_EXT_OUT msgresponse */ +#define MC_CMD_READ_SENSORS_EXT_OUT_LEN 0 + /* MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF structuredef */ -#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN 3 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN 4 #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_OFST 0 #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LEN 2 #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN 0 #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_WIDTH 16 #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST 2 #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LEN 1 -#define MC_CMD_SENSOR_STATE_OK 0x0 /* enum */ -#define MC_CMD_SENSOR_STATE_WARNING 0x1 /* enum */ -#define MC_CMD_SENSOR_STATE_FATAL 0x2 /* enum */ -#define MC_CMD_SENSOR_STATE_BROKEN 0x3 /* enum */ +/* enum: Ok. */ +#define MC_CMD_SENSOR_STATE_OK 0x0 +/* enum: Breached warning threshold. */ +#define MC_CMD_SENSOR_STATE_WARNING 0x1 +/* enum: Breached fatal threshold. */ +#define MC_CMD_SENSOR_STATE_FATAL 0x2 +/* enum: Fault with sensor. */ +#define MC_CMD_SENSOR_STATE_BROKEN 0x3 +/* enum: Sensor is working but does not currently have a reading. */ +#define MC_CMD_SENSOR_STATE_NO_READING 0x4 #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16 #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LEN 1 +/* Enum values, see field(s): */ +/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */ +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LBN 24 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_WIDTH 8 /***********************************/ /* MC_CMD_GET_PHY_STATE - * Report current state of PHY. + * Report current state of PHY. A 'zombie' PHY is a PHY that has failed to boot + * (e.g. due to missing or corrupted firmware). Locks required: None. Return + * code: 0 */ #define MC_CMD_GET_PHY_STATE 0x43 @@ -2168,13 +3349,16 @@ /* MC_CMD_GET_PHY_STATE_OUT msgresponse */ #define MC_CMD_GET_PHY_STATE_OUT_LEN 4 #define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0 -#define MC_CMD_PHY_STATE_OK 0x1 /* enum */ -#define MC_CMD_PHY_STATE_ZOMBIE 0x2 /* enum */ +/* enum: Ok. */ +#define MC_CMD_PHY_STATE_OK 0x1 +/* enum: Faulty. */ +#define MC_CMD_PHY_STATE_ZOMBIE 0x2 /***********************************/ /* MC_CMD_SETUP_8021QBB - * 802.1Qbb control. + * 802.1Qbb control. 8 Tx queues that map to priorities 0 - 7. Use all 1s to + * disable 802.Qbb for a given priority. */ #define MC_CMD_SETUP_8021QBB 0x44 @@ -2189,7 +3373,7 @@ /***********************************/ /* MC_CMD_WOL_FILTER_GET - * Retrieve ID of any WoL filters. + * Retrieve ID of any WoL filters. Locks required: None. 
Returns: 0, ENOSYS */ #define MC_CMD_WOL_FILTER_GET 0x45 @@ -2203,7 +3387,8 @@ /***********************************/ /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD - * Add a protocol offload to NIC for lights-out state. + * Add a protocol offload to NIC for lights-out state. Locks required: None. + * Returns: 0, ENOSYS */ #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46 @@ -2243,7 +3428,8 @@ /***********************************/ /* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD - * Remove a protocol offload from NIC for lights-out state. + * Remove a protocol offload from NIC for lights-out state. Locks required: + * None. Returns: 0, ENOSYS */ #define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47 @@ -2258,7 +3444,7 @@ /***********************************/ /* MC_CMD_MAC_RESET_RESTORE - * Restore MAC after block reset. + * Restore MAC after block reset. Locks required: None. Returns: 0. */ #define MC_CMD_MAC_RESET_RESTORE 0x48 @@ -2271,6 +3457,9 @@ /***********************************/ /* MC_CMD_TESTASSERT + * Deliberately trigger an assert-detonation in the firmware for testing + * purposes (i.e. to allow tests that the driver copes gracefully). Locks + * required: None Returns: 0 */ #define MC_CMD_TESTASSERT 0x49 @@ -2283,14 +3472,23 @@ /***********************************/ /* MC_CMD_WORKAROUND - * Enable/Disable a given workaround. + * Enable/Disable a given workaround. The mcfw will return EINVAL if it doesn't + * understand the given workaround number - which should not be treated as a + * hard error by client code. This op does not imply any semantics about each + * workaround, that's between the driver and the mcfw on a per-workaround + * basis. Locks required: None. Returns: 0, EINVAL . */ #define MC_CMD_WORKAROUND 0x4a /* MC_CMD_WORKAROUND_IN msgrequest */ #define MC_CMD_WORKAROUND_IN_LEN 8 #define MC_CMD_WORKAROUND_IN_TYPE_OFST 0 -#define MC_CMD_WORKAROUND_BUG17230 0x1 /* enum */ +/* enum: Bug 17230 work around. */ +#define MC_CMD_WORKAROUND_BUG17230 0x1 +/* enum: Bug 35388 work around (unsafe EVQ writes). */ +#define MC_CMD_WORKAROUND_BUG35388 0x2 +/* enum: Bug35017 workaround (A64 tables must be identity map) */ +#define MC_CMD_WORKAROUND_BUG35017 0x3 #define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4 /* MC_CMD_WORKAROUND_OUT msgresponse */ @@ -2299,7 +3497,12 @@ /***********************************/ /* MC_CMD_GET_PHY_MEDIA_INFO - * Read media-specific data from PHY. + * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for + * SFP+ PHYs). The 'media type' can be found via GET_PHY_CFG + * (GET_PHY_CFG_OUT_MEDIA_TYPE); the valid 'page number' input values, and the + * output data, are interpreted on a per-type basis. For SFP+: PAGE=0 or 1 + * returns a 128-byte block read from module I2C address 0xA0 offset 0 or 0x80. + * Anything else: currently undefined. Locks required: None. Return code: 0. */ #define MC_CMD_GET_PHY_MEDIA_INFO 0x4b @@ -2311,6 +3514,7 @@ #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5 #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252 #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num)) +/* in bytes */ #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0 #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4 #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1 @@ -2320,7 +3524,8 @@ /***********************************/ /* MC_CMD_NVRAM_TEST - * Test a particular NVRAM partition. + * Test a particular NVRAM partition for valid contents (where "valid" depends + * on the type of partition). 
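For the SFP+ behaviour of MC_CMD_GET_PHY_MEDIA_INFO spelled out above (PAGE=0 or 1 returns the 128-byte block at module I2C address 0xA0, offset 0 or 0x80), the mapping from a byte offset in that address space to a request is simple arithmetic; a sketch:

/* Map a byte offset within the SFP+ module's 0xA0 address space onto the
 * (page, offset-within-returned-block) pair used with this command. */
static void sfp_a0_offset_to_page(unsigned int byte_ofst,
                                  unsigned int *page,
                                  unsigned int *ofst_in_block)
{
        *page = byte_ofst / 128;                /* 0 => bytes 0..127, 1 => 0x80..0xff */
        *ofst_in_block = byte_ofst % 128;
}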
*/ #define MC_CMD_NVRAM_TEST 0x4c @@ -2333,22 +3538,31 @@ /* MC_CMD_NVRAM_TEST_OUT msgresponse */ #define MC_CMD_NVRAM_TEST_OUT_LEN 4 #define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0 -#define MC_CMD_NVRAM_TEST_PASS 0x0 /* enum */ -#define MC_CMD_NVRAM_TEST_FAIL 0x1 /* enum */ -#define MC_CMD_NVRAM_TEST_NOTSUPP 0x2 /* enum */ +/* enum: Passed. */ +#define MC_CMD_NVRAM_TEST_PASS 0x0 +/* enum: Failed. */ +#define MC_CMD_NVRAM_TEST_FAIL 0x1 +/* enum: Not supported. */ +#define MC_CMD_NVRAM_TEST_NOTSUPP 0x2 /***********************************/ /* MC_CMD_MRSFP_TWEAK - * Read status and/or set parameters for the 'mrsfp' driver. + * Read status and/or set parameters for the 'mrsfp' driver in mr_rusty builds. + * I2C I/O expander bits are always read; if equaliser parameters are supplied, + * they are configured first. Locks required: None. Return code: 0, EINVAL. */ #define MC_CMD_MRSFP_TWEAK 0x4d /* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */ #define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16 +/* 0-6 low->high de-emph. */ #define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0 +/* 0-8 low->high ref.V */ #define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4 +/* 0-8 0-8 low->high boost */ #define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8 +/* 0-8 low->high ref.V */ #define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12 /* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */ @@ -2356,16 +3570,23 @@ /* MC_CMD_MRSFP_TWEAK_OUT msgresponse */ #define MC_CMD_MRSFP_TWEAK_OUT_LEN 12 +/* input bits */ #define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0 +/* output bits */ #define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4 +/* direction */ #define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8 -#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0 /* enum */ -#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1 /* enum */ +/* enum: Out. */ +#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0 +/* enum: In. */ +#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1 /***********************************/ /* MC_CMD_SENSOR_SET_LIMS - * Adjusts the sensor limits. + * Adjusts the sensor limits. This is a warranty-voiding operation. Returns: + * ENOENT if the sensor specified does not exist, EINVAL if the limits are out + * of range. */ #define MC_CMD_SENSOR_SET_LIMS 0x4e @@ -2374,9 +3595,13 @@ #define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0 /* Enum values, see field(s): */ /* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */ +/* interpretation is is sensor-specific. */ #define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4 +/* interpretation is is sensor-specific. */ #define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8 +/* interpretation is is sensor-specific. */ #define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12 +/* interpretation is is sensor-specific. */ #define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16 /* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */ @@ -2398,9 +3623,3636 @@ #define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8 #define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12 + +/***********************************/ +/* MC_CMD_NVRAM_PARTITIONS + * Reads the list of available virtual NVRAM partition types. Locks required: + * none. Returns: 0, EINVAL (bad type). 
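A sketch of walking the MC_CMD_NVRAM_PARTITIONS response defined just below: a NUM_PARTITIONS count followed by an array of 4-byte type-ID codes. The length check against the actual response size is shown explicitly; as with the other sketches in this section, this is illustrative rather than driver code from the patch.

#include <stddef.h>
#include <stdint.h>

static uint32_t mcdi_get_dword(const uint8_t *buf, unsigned int ofst)
{
        return (uint32_t)buf[ofst] | ((uint32_t)buf[ofst + 1] << 8) |
               ((uint32_t)buf[ofst + 2] << 16) | ((uint32_t)buf[ofst + 3] << 24);
}

static void list_nvram_partitions(const uint8_t *resp, size_t resp_len,
                                  void (*cb)(uint32_t type_id))
{
        uint32_t i, n;

        n = mcdi_get_dword(resp, MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST);
        for (i = 0; i < n; i++) {
                unsigned int ofst = MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST +
                                    i * MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN;

                if (ofst + MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN > resp_len)
                        break;                  /* truncated response */
                cb(mcdi_get_dword(resp, ofst));
        }
}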
+ */
+#define MC_CMD_NVRAM_PARTITIONS 0x51
+
+/* MC_CMD_NVRAM_PARTITIONS_IN msgrequest */
+#define MC_CMD_NVRAM_PARTITIONS_IN_LEN 0
+
+/* MC_CMD_NVRAM_PARTITIONS_OUT msgresponse */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX 252
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num))
+/* total number of partitions */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0
+/* type ID code for each of NUM_PARTITIONS partitions */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MINNUM 0
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM 62
+
+
+/***********************************/
+/* MC_CMD_NVRAM_METADATA
+ * Reads soft metadata for a virtual NVRAM partition type. Locks required:
+ * none. Returns: 0, EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_METADATA 0x52
+
+/* MC_CMD_NVRAM_METADATA_IN msgrequest */
+#define MC_CMD_NVRAM_METADATA_IN_LEN 4
+/* Partition type ID code */
+#define MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0
+
+/* MC_CMD_NVRAM_METADATA_OUT msgresponse */
+#define MC_CMD_NVRAM_METADATA_OUT_LENMIN 20
+#define MC_CMD_NVRAM_METADATA_OUT_LENMAX 252
+#define MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num))
+/* Partition type ID code */
+#define MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_WIDTH 1
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_LBN 2
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1
+/* Subtype ID code for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8
+/* 1st component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2
+/* 2nd component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_OFST 14
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_LEN 2
+/* 3rd component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_OFST 16
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_LEN 2
+/* 4th component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_OFST 18
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_LEN 2
+/* Zero-terminated string describing the content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_OFST 20
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_LEN 1
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MINNUM 0
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM 232
+
+
+/***********************************/
+/* MC_CMD_GET_MAC_ADDRESSES
+ * Returns the base MAC, count and stride for the requesting function
+ */
+#define MC_CMD_GET_MAC_ADDRESSES 0x55
+
+/* MC_CMD_GET_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_GET_MAC_ADDRESSES_IN_LEN 0
+
+/* MC_CMD_GET_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_LEN 16
+/* Base MAC address */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_OFST 0
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_LEN 6
+/* Padding */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_OFST 6
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2
+/* Number of allocated MAC addresses */
+#define 
MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8 +/* Spacing of allocated MAC addresses */ +#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12 + /* MC_CMD_RESOURCE_SPECIFIER enum */ -#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff /* enum */ -#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe /* enum */ +/* enum: Any */ +#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff +/* enum: None */ +#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe + +/* EVB_PORT_ID structuredef */ +#define EVB_PORT_ID_LEN 4 +#define EVB_PORT_ID_PORT_ID_OFST 0 +/* enum: An invalid port handle. */ +#define EVB_PORT_ID_NULL 0x0 +/* enum: The port assigned to this function.. */ +#define EVB_PORT_ID_ASSIGNED 0x1000000 +/* enum: External network port 0 */ +#define EVB_PORT_ID_MAC0 0x2000000 +/* enum: External network port 1 */ +#define EVB_PORT_ID_MAC1 0x2000001 +/* enum: External network port 2 */ +#define EVB_PORT_ID_MAC2 0x2000002 +/* enum: External network port 3 */ +#define EVB_PORT_ID_MAC3 0x2000003 +#define EVB_PORT_ID_PORT_ID_LBN 0 +#define EVB_PORT_ID_PORT_ID_WIDTH 32 + +/* EVB_VLAN_TAG structuredef */ +#define EVB_VLAN_TAG_LEN 2 +/* The VLAN tag value */ +#define EVB_VLAN_TAG_VLAN_ID_LBN 0 +#define EVB_VLAN_TAG_VLAN_ID_WIDTH 12 +#define EVB_VLAN_TAG_MODE_LBN 12 +#define EVB_VLAN_TAG_MODE_WIDTH 4 +/* enum: Insert the VLAN. */ +#define EVB_VLAN_TAG_INSERT 0x0 +/* enum: Replace the VLAN if already present. */ +#define EVB_VLAN_TAG_REPLACE 0x1 + +/* BUFTBL_ENTRY structuredef */ +#define BUFTBL_ENTRY_LEN 12 +/* the owner ID */ +#define BUFTBL_ENTRY_OID_OFST 0 +#define BUFTBL_ENTRY_OID_LEN 2 +#define BUFTBL_ENTRY_OID_LBN 0 +#define BUFTBL_ENTRY_OID_WIDTH 16 +/* the page parameter as one of ESE_DZ_SMC_PAGE_SIZE_ */ +#define BUFTBL_ENTRY_PGSZ_OFST 2 +#define BUFTBL_ENTRY_PGSZ_LEN 2 +#define BUFTBL_ENTRY_PGSZ_LBN 16 +#define BUFTBL_ENTRY_PGSZ_WIDTH 16 +/* the raw 64-bit address field from the SMC, not adjusted for page size */ +#define BUFTBL_ENTRY_RAWADDR_OFST 4 +#define BUFTBL_ENTRY_RAWADDR_LEN 8 +#define BUFTBL_ENTRY_RAWADDR_LO_OFST 4 +#define BUFTBL_ENTRY_RAWADDR_HI_OFST 8 +#define BUFTBL_ENTRY_RAWADDR_LBN 32 +#define BUFTBL_ENTRY_RAWADDR_WIDTH 64 + +/* NVRAM_PARTITION_TYPE structuredef */ +#define NVRAM_PARTITION_TYPE_LEN 2 +#define NVRAM_PARTITION_TYPE_ID_OFST 0 +#define NVRAM_PARTITION_TYPE_ID_LEN 2 +/* enum: Primary MC firmware partition */ +#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100 +/* enum: Secondary MC firmware partition */ +#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200 +/* enum: Expansion ROM partition */ +#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300 +/* enum: Static configuration TLV partition */ +#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400 +/* enum: Dynamic configuration TLV partition */ +#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500 +/* enum: Expansion ROM configuration data for port 0 */ +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600 +/* enum: Expansion ROM configuration data for port 1 */ +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601 +/* enum: Expansion ROM configuration data for port 2 */ +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2 0x602 +/* enum: Expansion ROM configuration data for port 3 */ +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603 +/* enum: Non-volatile log output partition */ +#define NVRAM_PARTITION_TYPE_LOG 0x700 +/* enum: Device state dump output partition */ +#define NVRAM_PARTITION_TYPE_DUMP 0x800 +/* enum: Application license key storage partition */ +#define NVRAM_PARTITION_TYPE_LICENSE 0x900 +/* enum: Start of reserved value 
range (firmware may use for any purpose) */ +#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00 +/* enum: End of reserved value range (firmware may use for any purpose) */ +#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd +/* enum: Recovery partition map (provided if real map is missing or corrupt) */ +#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe +/* enum: Partition map (real map as stored in flash) */ +#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff +#define NVRAM_PARTITION_TYPE_ID_LBN 0 +#define NVRAM_PARTITION_TYPE_ID_WIDTH 16 + + +/***********************************/ +/* MC_CMD_READ_REGS + * Get a dump of the MCPU registers + */ +#define MC_CMD_READ_REGS 0x50 + +/* MC_CMD_READ_REGS_IN msgrequest */ +#define MC_CMD_READ_REGS_IN_LEN 0 + +/* MC_CMD_READ_REGS_OUT msgresponse */ +#define MC_CMD_READ_REGS_OUT_LEN 308 +/* Whether the corresponding register entry contains a valid value */ +#define MC_CMD_READ_REGS_OUT_MASK_OFST 0 +#define MC_CMD_READ_REGS_OUT_MASK_LEN 16 +/* Same order as MIPS GDB (r0-r31, sr, lo, hi, bad, cause, 32 x float, fsr, + * fir, fp) + */ +#define MC_CMD_READ_REGS_OUT_REGS_OFST 16 +#define MC_CMD_READ_REGS_OUT_REGS_LEN 4 +#define MC_CMD_READ_REGS_OUT_REGS_NUM 73 + + +/***********************************/ +/* MC_CMD_INIT_EVQ + * Set up an event queue according to the supplied parameters. The IN arguments + * end with an address for each 4k of host memory required to back the EVQ. + */ +#define MC_CMD_INIT_EVQ 0x80 + +/* MC_CMD_INIT_EVQ_IN msgrequest */ +#define MC_CMD_INIT_EVQ_IN_LENMIN 44 +#define MC_CMD_INIT_EVQ_IN_LENMAX 548 +#define MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num)) +/* Size, in entries */ +#define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4 +/* The initial timer value. The load value is ignored if the timer mode is DIS. + */ +#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8 +/* The reload value is ignored in one-shot modes */ +#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12 +/* tbd */ +#define MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0 +#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_LBN 2 +#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_LBN 3 +#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_LBN 4 +#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_LBN 5 +#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0 +/* enum: Immediate */ +#define MC_CMD_INIT_EVQ_IN_TMR_IMMED_START 0x1 +/* enum: Triggered */ +#define MC_CMD_INIT_EVQ_IN_TMR_TRIG_START 0x2 +/* enum: Hold-off */ +#define MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3 +/* Target EVQ for wakeups if in wakeup mode. */ +#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24 +/* Target interrupt if in interrupting mode (note union with target EVQ). Use + * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test + * purposes. + */ +#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24 +/* Event Counter Mode. 
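+ *
+ * Usage illustration for this request as a whole (editor's sketch, not part of
+ * the generated definitions; the helper name is hypothetical): an EVQ backed by
+ * 'npages' 4KB host pages uses a request of MC_CMD_INIT_EVQ_IN_LEN(npages)
+ * bytes, with the address of page 'i' stored little-endian in the
+ * DMA_ADDR_LO/HI pair at offset 36 + 8*i, e.g.:
+ *
+ *   static void set_evq_page_addr(uint8_t *inbuf, unsigned int i, uint64_t addr)
+ *   {
+ *           unsigned int k;
+ *
+ *           for (k = 0; k < MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN; k++)
+ *                   inbuf[MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_OFST + 8 * i + k] =
+ *                           (uint8_t)(addr >> (8 * k));
+ *   }
+ *
+ * The SIZE, INSTANCE, timer and flag dwords at offsets 0-32 are filled in the
+ * same little-endian fashion before the command is issued.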
+ */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0
+/* enum: Count only RX events */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RX 0x1
+/* enum: Count only TX events */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_TX 0x2
+/* enum: Count both RX and TX events */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3
+/* Event queue packet count threshold. */
+#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_OFST 36
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_OFST 40
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM 64
+
+/* MC_CMD_INIT_EVQ_OUT msgresponse */
+#define MC_CMD_INIT_EVQ_OUT_LEN 4
+/* Only valid if INTRFLAG was true */
+#define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0
+
+/* QUEUE_CRC_MODE structuredef */
+#define QUEUE_CRC_MODE_LEN 1
+#define QUEUE_CRC_MODE_MODE_LBN 0
+#define QUEUE_CRC_MODE_MODE_WIDTH 4
+/* enum: No CRC. */
+#define QUEUE_CRC_MODE_NONE 0x0
+/* enum: CRC Fiber channel over ethernet. */
+#define QUEUE_CRC_MODE_FCOE 0x1
+/* enum: CRC (digest) iSCSI header only. */
+#define QUEUE_CRC_MODE_ISCSI_HDR 0x2
+/* enum: CRC (digest) iSCSI header and payload. */
+#define QUEUE_CRC_MODE_ISCSI 0x3
+/* enum: CRC Fiber channel over IP over ethernet. */
+#define QUEUE_CRC_MODE_FCOIPOE 0x4
+/* enum: CRC MPA. */
+#define QUEUE_CRC_MODE_MPA 0x5
+#define QUEUE_CRC_MODE_SPARE_LBN 4
+#define QUEUE_CRC_MODE_SPARE_WIDTH 4
+
+
+/***********************************/
+/* MC_CMD_INIT_RXQ
+ * Set up a receive queue according to the supplied parameters. The IN
+ * arguments end with an address for each 4k of host memory required to back
+ * the RXQ.
+ */
+#define MC_CMD_INIT_RXQ 0x81
+
+/* MC_CMD_INIT_RXQ_IN msgrequest */
+#define MC_CMD_INIT_RXQ_IN_LENMIN 36
+#define MC_CMD_INIT_RXQ_IN_LENMAX 252
+#define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
+ */
+#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ.
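+ *
+ * Aside on the FLAGS encoding above (editor's sketch, not part of the
+ * generated definitions; helper name is hypothetical): each _LBN/_WIDTH pair
+ * describes a bit field within the FLAGS dword at offset 16, so the dword can
+ * be assembled as, e.g.:
+ *
+ *   static uint32_t init_rxq_flags(unsigned int crc_mode, int hdr_split)
+ *   {
+ *           uint32_t flags = 0;
+ *
+ *           if (hdr_split)
+ *                   flags |= 1u << MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN;
+ *           flags |= (crc_mode &
+ *                     ((1u << MC_CMD_INIT_RXQ_IN_CRC_MODE_WIDTH) - 1))
+ *                    << MC_CMD_INIT_RXQ_IN_CRC_MODE_LBN;
+ *           return flags;
+ *   }
+ *
+ * where 'crc_mode' would be one of the QUEUE_CRC_MODE_ values defined above.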
*/ +#define MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28 + +/* MC_CMD_INIT_RXQ_OUT msgresponse */ +#define MC_CMD_INIT_RXQ_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_INIT_TXQ + */ +#define MC_CMD_INIT_TXQ 0x82 + +/* MC_CMD_INIT_TXQ_IN msgrequest */ +#define MC_CMD_INIT_TXQ_IN_LENMIN 36 +#define MC_CMD_INIT_TXQ_IN_LENMAX 252 +#define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num)) +/* Size, in entries */ +#define MC_CMD_INIT_TXQ_IN_SIZE_OFST 0 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. + */ +#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4 +/* The value to put in the event data. Check hardware spec. for valid range. */ +#define MC_CMD_INIT_TXQ_IN_LABEL_OFST 8 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12 +/* There will be more flags here. */ +#define MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_LBN 2 +#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_LBN 3 +#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_CRC_MODE_LBN 4 +#define MC_CMD_INIT_TXQ_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_LBN 8 +#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9 +#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20 +/* The port ID associated with the v-adaptor which should contain this DMAQ. */ +#define MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28 + +/* MC_CMD_INIT_TXQ_OUT msgresponse */ +#define MC_CMD_INIT_TXQ_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_FINI_EVQ + * Teardown an EVQ. + * + * All DMAQs or EVQs that point to the EVQ to tear down must be torn down first + * or the operation will fail with EBUSY + */ +#define MC_CMD_FINI_EVQ 0x83 + +/* MC_CMD_FINI_EVQ_IN msgrequest */ +#define MC_CMD_FINI_EVQ_IN_LEN 4 +/* Instance of EVQ to destroy. Should be the same instance as that previously + * passed to INIT_EVQ + */ +#define MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0 + +/* MC_CMD_FINI_EVQ_OUT msgresponse */ +#define MC_CMD_FINI_EVQ_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_FINI_RXQ + * Teardown a RXQ. 
+ */
+#define MC_CMD_FINI_RXQ 0x84
+
+/* MC_CMD_FINI_RXQ_IN msgrequest */
+#define MC_CMD_FINI_RXQ_IN_LEN 4
+/* Instance of RXQ to destroy */
+#define MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_RXQ_OUT msgresponse */
+#define MC_CMD_FINI_RXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_TXQ
+ * Teardown a TXQ.
+ */
+#define MC_CMD_FINI_TXQ 0x85
+
+/* MC_CMD_FINI_TXQ_IN msgrequest */
+#define MC_CMD_FINI_TXQ_IN_LEN 4
+/* Instance of TXQ to destroy */
+#define MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_TXQ_OUT msgresponse */
+#define MC_CMD_FINI_TXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DRIVER_EVENT
+ * Generate an event on an EVQ belonging to the function issuing the command.
+ */
+#define MC_CMD_DRIVER_EVENT 0x86
+
+/* MC_CMD_DRIVER_EVENT_IN msgrequest */
+#define MC_CMD_DRIVER_EVENT_IN_LEN 12
+/* Handle of target EVQ */
+#define MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0
+/* Bits 0 - 63 of event */
+#define MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_OFST 8
+
+/* MC_CMD_DRIVER_EVENT_OUT msgresponse */
+#define MC_CMD_DRIVER_EVENT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PROXY_CMD
+ * Execute an arbitrary MCDI command on behalf of a different function, subject
+ * to security restrictions. The command to be proxied follows immediately
+ * afterward in the host buffer (or on the UART). This command supersedes
+ * MC_CMD_SET_FUNC, which remains available for Siena but is now deprecated.
+ */
+#define MC_CMD_PROXY_CMD 0x5b
+
+/* MC_CMD_PROXY_CMD_IN msgrequest */
+#define MC_CMD_PROXY_CMD_IN_LEN 4
+/* The handle of the target function. */
+#define MC_CMD_PROXY_CMD_IN_TARGET_OFST 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH 16
+#define MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN 16
+#define MC_CMD_PROXY_CMD_IN_TARGET_VF_WIDTH 16
+#define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */
+
+
+/***********************************/
+/* MC_CMD_ALLOC_BUFTBL_CHUNK
+ * Allocate a set of buffer table entries using the specified owner ID. This
+ * operation allocates the required buffer table entries (and fails if it
+ * cannot do so). The buffer table entries will initially be zeroed.
+ */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
+/* Owner ID to use */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
+/* Size of buffer table pages to use, in bytes (note that only a few values are
+ * legal on any specific hardware).
+ */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
+/* Buffer table IDs for use in DMA descriptors. */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8
+
+
+/***********************************/
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES
+ * Reprogram a set of buffer table entries in the specified chunk.
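+ *
+ * Usage illustration (editor's sketch, not part of the generated definitions;
+ * helper name is hypothetical): to program 'n' consecutive entries the request
+ * is MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(n) bytes, carrying the chunk HANDLE,
+ * the FIRSTID within the chunk and NUMENTRIES as little-endian dwords at
+ * offsets 0, 4 and 8, followed by one 64-bit buffer address per entry:
+ *
+ *   static void put_buftbl_addr(uint8_t *inbuf, unsigned int i, uint64_t addr)
+ *   {
+ *           unsigned int k;
+ *
+ *           for (k = 0; k < MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN; k++)
+ *                   inbuf[MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST +
+ *                         8 * i + k] = (uint8_t)(addr >> (8 * k));
+ *   }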
+ */ +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88 + +/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */ +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 252 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num)) +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0 +/* ID */ +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4 +/* Num entries */ +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8 +/* Buffer table entry address */ +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 30 + +/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */ +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_FREE_BUFTBL_CHUNK + */ +#define MC_CMD_FREE_BUFTBL_CHUNK 0x89 + +/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */ +#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4 +#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0 + +/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */ +#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_FILTER_OP + * Multiplexed MCDI call for filter operations + */ +#define MC_CMD_FILTER_OP 0x8a + +/* MC_CMD_FILTER_OP_IN msgrequest */ +#define MC_CMD_FILTER_OP_IN_LEN 108 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_IN_OP_OFST 0 +/* enum: single-recipient filter insert */ +#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0 +/* enum: single-recipient filter remove */ +#define MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1 +/* enum: multi-recipient filter subscribe */ +#define MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2 +/* enum: multi-recipient filter unsubscribe */ +#define MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3 +/* enum: replace one recipient with another (warning - the filter handle may + * change) + */ +#define MC_CMD_FILTER_OP_IN_OP_REPLACE 0x4 +/* filter handle (for remove / unsubscribe operations) */ +#define MC_CMD_FILTER_OP_IN_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_IN_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_IN_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_IN_HANDLE_HI_OFST 8 +/* The port ID associated with the v-adaptor which should contain this filter. 
+ */ +#define MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12 +/* fields to include in match criteria */ +#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_LBN 2 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_LBN 3 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN 4 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN 5 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_LBN 6 +#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_LBN 7 +#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_LBN 8 +#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_LBN 9 +#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_LBN 10 +#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_LBN 11 +#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30 +#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31 +#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1 +/* receive destination */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20 +/* enum: drop packets */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0 +/* enum: receive to host */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1 +/* enum: receive to MC */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2 +/* enum: loop back to port 0 TX MAC */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3 +/* enum: loop back to port 1 TX MAC */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4 +/* receive queue handle (for multiple queue modes, this is the base queue) */ +#define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24 +/* receive mode */ +#define MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28 +/* enum: receive to just the specified queue */ +#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0 +/* enum: receive to multiple queues using RSS context */ +#define MC_CMD_FILTER_OP_IN_RX_MODE_RSS 0x1 +/* enum: receive to multiple queues using .1p mapping */ +#define MC_CMD_FILTER_OP_IN_RX_MODE_DOT1P_MAPPING 0x2 +/* enum: install a filter entry that will never match; for test purposes only + */ +#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 +/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for + * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or + * MC_CMD_DOT1P_MAPPING_ALLOC. Note that these handles should be considered + * opaque to the host, although a value of 0xFFFFFFFF is guaranteed never to be + * a valid handle. 
+ */ +#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32 +/* transmit domain (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36 +/* transmit destination (either set the MAC and/or PM bits for explicit + * control, or set this field to TX_DEST_DEFAULT for sensible default + * behaviour) + */ +#define MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40 +/* enum: request default behaviour (based on filter type) */ +#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff +#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0 +#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_LBN 1 +#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_WIDTH 1 +/* source MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_SRC_MAC_OFST 44 +#define MC_CMD_FILTER_OP_IN_SRC_MAC_LEN 6 +/* source port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_SRC_PORT_OFST 50 +#define MC_CMD_FILTER_OP_IN_SRC_PORT_LEN 2 +/* destination MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_DST_MAC_OFST 52 +#define MC_CMD_FILTER_OP_IN_DST_MAC_LEN 6 +/* destination port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_DST_PORT_OFST 58 +#define MC_CMD_FILTER_OP_IN_DST_PORT_LEN 2 +/* Ethernet type to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_OFST 60 +#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_LEN 2 +/* Inner VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_INNER_VLAN_OFST 62 +#define MC_CMD_FILTER_OP_IN_INNER_VLAN_LEN 2 +/* Outer VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_OFST 64 +#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_LEN 2 +/* IP protocol to match (in low byte; set high byte to 0) */ +#define MC_CMD_FILTER_OP_IN_IP_PROTO_OFST 66 +#define MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2 +/* Firmware defined register 0 to match (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68 +/* Firmware defined register 1 to match (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72 +/* source IP address to match (as bytes in network order; set last 12 bytes to + * 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_IN_SRC_IP_OFST 76 +#define MC_CMD_FILTER_OP_IN_SRC_IP_LEN 16 +/* destination IP address to match (as bytes in network order; set last 12 + * bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_IN_DST_IP_OFST 92 +#define MC_CMD_FILTER_OP_IN_DST_IP_LEN 16 + +/* MC_CMD_FILTER_OP_OUT msgresponse */ +#define MC_CMD_FILTER_OP_OUT_LEN 12 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_OUT_OP_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_IN/OP */ +/* Returned filter handle (for insert / subscribe operations). Note that these + * handles should be considered opaque to the host, although a value of + * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle. 
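+ *
+ * Usage illustration (editor's sketch, not part of the generated definitions;
+ * helper name is hypothetical): after a successful INSERT or SUBSCRIBE the
+ * 64-bit handle is reassembled from the two little-endian dwords below and
+ * kept for a later REMOVE/UNSUBSCRIBE:
+ *
+ *   static uint64_t filter_op_handle(const uint8_t *outbuf)
+ *   {
+ *           uint32_t lo, hi;
+ *
+ *           memcpy(&lo, outbuf + MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST, 4);
+ *           memcpy(&hi, outbuf + MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST, 4);
+ *           return ((uint64_t)le32_to_cpu(hi) << 32) | le32_to_cpu(lo);
+ *   }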
+ */ +#define MC_CMD_FILTER_OP_OUT_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8 + + +/***********************************/ +/* MC_CMD_GET_PARSER_DISP_INFO + * Get information related to the parser-dispatcher subsystem + */ +#define MC_CMD_GET_PARSER_DISP_INFO 0xe4 + +/* MC_CMD_GET_PARSER_DISP_INFO_IN msgrequest */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4 +/* identifies the type of operation requested */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0 +/* enum: read the list of supported RX filter matches */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1 + +/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */ +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX 252 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num)) +/* identifies the type of operation requested */ +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */ +/* number of supported match types */ +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4 +/* array of supported match types (valid MATCH_FIELDS values for + * MC_CMD_FILTER_OP) sorted in decreasing priority order + */ +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_OFST 8 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN 4 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61 + + +/***********************************/ +/* MC_CMD_PARSER_DISP_RW + * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging + */ +#define MC_CMD_PARSER_DISP_RW 0xe5 + +/* MC_CMD_PARSER_DISP_RW_IN msgrequest */ +#define MC_CMD_PARSER_DISP_RW_IN_LEN 32 +/* identifies the target of the operation */ +#define MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0 +/* enum: RX dispatcher CPU */ +#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0 +/* enum: TX dispatcher CPU */ +#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1 +/* enum: Lookup engine */ +#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2 +/* identifies the type of operation requested */ +#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4 +/* enum: read a word of DICPU DMEM or a LUE entry */ +#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0 +/* enum: write a word of DICPU DMEM or a LUE entry */ +#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1 +/* enum: read-modify-write a word of DICPU DMEM (not valid for LUE) */ +#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2 +/* data memory address or LUE index */ +#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8 +/* value to write (for DMEM writes) */ +#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12 +/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */ +#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12 +/* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */ +#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16 +/* value to write (for LUE writes) */ +#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12 +#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20 + +/* MC_CMD_PARSER_DISP_RW_OUT msgresponse */ +#define MC_CMD_PARSER_DISP_RW_OUT_LEN 52 +/* value read (for DMEM reads) */ +#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0 +/* value read (for LUE reads) */ +#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0 +#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20 +/* up to 8 
32-bit words of additional soft state from the LUE manager (the + * exact content is firmware-dependent and intended only for debug use) + */ +#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_OFST 20 +#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_LEN 32 + + +/***********************************/ +/* MC_CMD_GET_PF_COUNT + * Get number of PFs on the device. + */ +#define MC_CMD_GET_PF_COUNT 0xb6 + +/* MC_CMD_GET_PF_COUNT_IN msgrequest */ +#define MC_CMD_GET_PF_COUNT_IN_LEN 0 + +/* MC_CMD_GET_PF_COUNT_OUT msgresponse */ +#define MC_CMD_GET_PF_COUNT_OUT_LEN 1 +/* Identifies the number of PFs on the device. */ +#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST 0 +#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_LEN 1 + + +/***********************************/ +/* MC_CMD_SET_PF_COUNT + * Set number of PFs on the device. + */ +#define MC_CMD_SET_PF_COUNT 0xb7 + +/* MC_CMD_SET_PF_COUNT_IN msgrequest */ +#define MC_CMD_SET_PF_COUNT_IN_LEN 4 +/* New number of PFs on the device. */ +#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0 + +/* MC_CMD_SET_PF_COUNT_OUT msgresponse */ +#define MC_CMD_SET_PF_COUNT_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_PORT_ASSIGNMENT + * Get port assignment for current PCI function. + */ +#define MC_CMD_GET_PORT_ASSIGNMENT 0xb8 + +/* MC_CMD_GET_PORT_ASSIGNMENT_IN msgrequest */ +#define MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN 0 + +/* MC_CMD_GET_PORT_ASSIGNMENT_OUT msgresponse */ +#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4 +/* Identifies the port assignment for this function. */ +#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0 + + +/***********************************/ +/* MC_CMD_SET_PORT_ASSIGNMENT + * Set port assignment for current PCI function. + */ +#define MC_CMD_SET_PORT_ASSIGNMENT 0xb9 + +/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */ +#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4 +/* Identifies the port assignment for this function. */ +#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0 + +/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */ +#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_ALLOC_VIS + * Allocate VIs for current PCI function. + */ +#define MC_CMD_ALLOC_VIS 0x8b + +/* MC_CMD_ALLOC_VIS_IN msgrequest */ +#define MC_CMD_ALLOC_VIS_IN_LEN 8 +/* The minimum number of VIs that is acceptable */ +#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0 +/* The maximum number of VIs that would be useful */ +#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4 + +/* MC_CMD_ALLOC_VIS_OUT msgresponse */ +#define MC_CMD_ALLOC_VIS_OUT_LEN 8 +/* The number of VIs allocated on this function */ +#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0 +/* The base absolute VI number allocated to this function. Required to + * correctly interpret wakeup events. + */ +#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4 + + +/***********************************/ +/* MC_CMD_FREE_VIS + * Free VIs for current PCI function. Any linked PIO buffers will be unlinked, + * but not freed. + */ +#define MC_CMD_FREE_VIS 0x8c + +/* MC_CMD_FREE_VIS_IN msgrequest */ +#define MC_CMD_FREE_VIS_IN_LEN 0 + +/* MC_CMD_FREE_VIS_OUT msgresponse */ +#define MC_CMD_FREE_VIS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_SRIOV_CFG + * Get SRIOV config for this PF. + */ +#define MC_CMD_GET_SRIOV_CFG 0xba + +/* MC_CMD_GET_SRIOV_CFG_IN msgrequest */ +#define MC_CMD_GET_SRIOV_CFG_IN_LEN 0 + +/* MC_CMD_GET_SRIOV_CFG_OUT msgresponse */ +#define MC_CMD_GET_SRIOV_CFG_OUT_LEN 20 +/* Number of VFs currently enabled. 
*/ +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0 +/* Max number of VFs before sriov stride and offset may need to be changed. */ +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4 +#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1 +/* RID offset of first VF from PF. */ +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12 +/* RID offset of each subsequent VF from the previous. */ +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16 + + +/***********************************/ +/* MC_CMD_SET_SRIOV_CFG + * Set SRIOV config for this PF. + */ +#define MC_CMD_SET_SRIOV_CFG 0xbb + +/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */ +#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20 +/* Number of VFs currently enabled. */ +#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0 +/* Max number of VFs before sriov stride and offset may need to be changed. */ +#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4 +#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8 +#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0 +#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1 +/* RID offset of first VF from PF, or 0 for no change, or + * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset. + */ +#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12 +/* RID offset of each subsequent VF from the previous, 0 for no change, or + * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride. + */ +#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16 + +/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */ +#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_VI_ALLOC_INFO + * Get information about number of VI's and base VI number allocated to this + * function. + */ +#define MC_CMD_GET_VI_ALLOC_INFO 0x8d + +/* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */ +#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0 + +/* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */ +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 8 +/* The number of VIs allocated on this function */ +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0 +/* The base absolute VI number allocated to this function. Required to + * correctly interpret wakeup events. + */ +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4 + + +/***********************************/ +/* MC_CMD_DUMP_VI_STATE + * For CmdClient use. Dump pertinent information on a specific absolute VI. + */ +#define MC_CMD_DUMP_VI_STATE 0x8e + +/* MC_CMD_DUMP_VI_STATE_IN msgrequest */ +#define MC_CMD_DUMP_VI_STATE_IN_LEN 4 +/* The VI number to query. */ +#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0 + +/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */ +#define MC_CMD_DUMP_VI_STATE_OUT_LEN 96 +/* The PF part of the function owning this VI. */ +#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_OFST 0 +#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_LEN 2 +/* The VF part of the function owning this VI. */ +#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_OFST 2 +#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_LEN 2 +/* Base of VIs allocated to this function. */ +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_OFST 4 +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_LEN 2 +/* Count of VIs allocated to the owner function. */ +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_OFST 6 +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_LEN 2 +/* Base interrupt vector allocated to this function. 
*/ +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_OFST 8 +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_LEN 2 +/* Number of interrupt vectors allocated to this function. */ +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_OFST 10 +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_LEN 2 +/* Raw evq ptr table data. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_OFST 12 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_OFST 12 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_OFST 16 +/* Raw evq timer table data. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_OFST 20 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_OFST 20 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24 +/* Combined metadata field. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_LBN 24 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_WIDTH 8 +/* TXDPCPU raw table data for queue. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_OFST 32 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_OFST 32 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_OFST 36 +/* TXDPCPU raw table data for queue. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_OFST 40 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_OFST 40 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_OFST 44 +/* TXDPCPU raw table data for queue. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_OFST 48 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_OFST 48 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_OFST 52 +/* Combined metadata field. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_OFST 56 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_LBN 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_LBN 24 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_LBN 32 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_LBN 40 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_WIDTH 24 +/* RXDPCPU raw table data for queue. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_OFST 64 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_OFST 64 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_OFST 68 +/* RXDPCPU raw table data for queue. 
*/ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_OFST 72 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_OFST 72 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_OFST 76 +/* Reserved, currently 0. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_OFST 80 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_OFST 80 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_OFST 84 +/* Combined metadata field. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_OFST 88 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_LBN 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_LBN 24 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8 + + +/***********************************/ +/* MC_CMD_ALLOC_PIOBUF + * Allocate a push I/O buffer for later use with a tx queue. + */ +#define MC_CMD_ALLOC_PIOBUF 0x8f + +/* MC_CMD_ALLOC_PIOBUF_IN msgrequest */ +#define MC_CMD_ALLOC_PIOBUF_IN_LEN 0 + +/* MC_CMD_ALLOC_PIOBUF_OUT msgresponse */ +#define MC_CMD_ALLOC_PIOBUF_OUT_LEN 4 +/* Handle for allocated push I/O buffer. */ +#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0 + + +/***********************************/ +/* MC_CMD_FREE_PIOBUF + * Free a push I/O buffer. + */ +#define MC_CMD_FREE_PIOBUF 0x90 + +/* MC_CMD_FREE_PIOBUF_IN msgrequest */ +#define MC_CMD_FREE_PIOBUF_IN_LEN 4 +/* Handle for allocated push I/O buffer. */ +#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0 + +/* MC_CMD_FREE_PIOBUF_OUT msgresponse */ +#define MC_CMD_FREE_PIOBUF_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_VI_TLP_PROCESSING + * Get TLP steering and ordering information for a VI. + */ +#define MC_CMD_GET_VI_TLP_PROCESSING 0xb0 + +/* MC_CMD_GET_VI_TLP_PROCESSING_IN msgrequest */ +#define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4 +/* VI number to get information for. */ +#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0 + +/* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */ +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4 +/* Transaction processing steering hint 1 for use with the Rx Queue. */ +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_OFST 0 +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_LEN 1 +/* Transaction processing steering hint 2 for use with the Ev Queue. */ +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_OFST 1 +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_LEN 1 +/* Use Relaxed ordering model for TLPs on this VI. */ +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_LBN 16 +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_WIDTH 1 +/* Use ID based ordering for TLPs on this VI. */ +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_LBN 17 +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_WIDTH 1 +/* Set no snoop bit for TLPs on this VI. */ +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_LBN 18 +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_WIDTH 1 +/* Enable TPH for TLPs on this VI. 
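+ *
+ * Note (editor's sketch, not part of the generated definitions; helper name is
+ * hypothetical): the _LBN values above (16-19) appear to be bit positions
+ * counted from the start of the response, i.e. they fall within the DATA dword
+ * at offset 0 alongside the two tag bytes; on that assumption a flag can be
+ * read back as:
+ *
+ *   static int vi_tlp_relaxed_ordering(const uint8_t *outbuf)
+ *   {
+ *           uint32_t data;
+ *
+ *           memcpy(&data, outbuf + MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST, 4);
+ *           data = le32_to_cpu(data);
+ *           return (data >>
+ *                   MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_LBN) & 1;
+ *   }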
*/ +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19 +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1 +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0 + + +/***********************************/ +/* MC_CMD_SET_VI_TLP_PROCESSING + * Set TLP steering and ordering information for a VI. + */ +#define MC_CMD_SET_VI_TLP_PROCESSING 0xb1 + +/* MC_CMD_SET_VI_TLP_PROCESSING_IN msgrequest */ +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8 +/* VI number to set information for. */ +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0 +/* Transaction processing steering hint 1 for use with the Rx Queue. */ +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4 +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1 +/* Transaction processing steering hint 2 for use with the Ev Queue. */ +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_OFST 5 +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_LEN 1 +/* Use Relaxed ordering model for TLPs on this VI. */ +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_LBN 48 +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_WIDTH 1 +/* Use ID based ordering for TLPs on this VI. */ +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_LBN 49 +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_WIDTH 1 +/* Set the no snoop bit for TLPs on this VI. */ +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_LBN 50 +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_WIDTH 1 +/* Enable TPH for TLPs on this VI. */ +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51 +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1 +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4 + +/* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */ +#define MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_TLP_PROCESSING_GLOBALS + * Get global PCIe steering and transaction processing configuration. + */ +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc + +/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */ +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0 +/* enum: MISC. */ +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0 +/* enum: IDO. */ +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1 +/* enum: RO. */ +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2 +/* enum: TPH Type. */ +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3 + +/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */ +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */ +/* Amalgamated TLP info word. 
*/ +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_WIDTH 31 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_LBN 0 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_WIDTH 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_LBN 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_WIDTH 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_LBN 2 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_WIDTH 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_LBN 3 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_WIDTH 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_LBN 4 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_WIDTH 28 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_LBN 0 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_WIDTH 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_LBN 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_WIDTH 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_LBN 2 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_WIDTH 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_LBN 3 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_WIDTH 29 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_LBN 0 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_LBN 2 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_WIDTH 2 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_LBN 4 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_WIDTH 2 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_LBN 6 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_WIDTH 2 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_LBN 8 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_WIDTH 2 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_LBN 9 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_WIDTH 23 + + +/***********************************/ +/* MC_CMD_SET_TLP_PROCESSING_GLOBALS + * Set global PCIe steering and transaction processing configuration. + */ +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd + +/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */ +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */ +/* Amalgamated TLP info word. 
*/ +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_WIDTH 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_LBN 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_WIDTH 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_LBN 2 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_WIDTH 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_LBN 3 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_WIDTH 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_LBN 0 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_WIDTH 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_LBN 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_WIDTH 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_LBN 2 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_WIDTH 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_LBN 0 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_LBN 2 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_WIDTH 2 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_LBN 4 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_WIDTH 2 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_LBN 6 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_WIDTH 2 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_LBN 8 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_WIDTH 2 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_LBN 10 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_WIDTH 22 + +/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT msgresponse */ +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SATELLITE_DOWNLOAD + * Download a new set of images to the satellite CPUs from the host. + */ +#define MC_CMD_SATELLITE_DOWNLOAD 0x91 + +/* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs + * are subtle, and so downloads must proceed in a number of phases. + * + * 1) PHASE_RESET with a target of TARGET_ALL and chunk ID/length of 0. + * + * 2) PHASE_IMEMS for each of the IMEM targets (target IDs 0-11). Each download + * may consist of multiple chunks. The final chunk (with CHUNK_ID_LAST) should + * be a checksum (a simple 32-bit sum) of the transferred data. An individual + * download may be aborted using CHUNK_ID_ABORT. + * + * 3) PHASE_VECTORS for each of the vector table targets (target IDs 12-15), + * similar to PHASE_IMEMS. + * + * 4) PHASE_READY with a target of TARGET_ALL and chunk ID/length of 0. + * + * After any error (a requested abort is not considered to be an error) the + * sequence must be restarted from PHASE_RESET. + */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMIN 20 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX 252 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_LEN(num) (16+4*(num)) +/* Download phase. (Note: the IDLE phase is used internally and is never valid + * in a command from the host.) 
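+ *
+ * Usage illustration (editor's sketch, not part of the generated definitions;
+ * helper name is hypothetical): the final chunk of each download carries
+ * CHUNK_ID_LAST and, in place of payload, the "simple 32-bit sum" of the data
+ * already transferred; assuming that sum is taken over the 32-bit data words,
+ * a host could accumulate it as it sends each chunk:
+ *
+ *   static uint32_t satellite_checksum(uint32_t sum, const uint32_t *words,
+ *                                      size_t count)
+ *   {
+ *           size_t i;
+ *
+ *           for (i = 0; i < count; i++)
+ *                   sum += words[i];
+ *           return sum;
+ *   }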
+ */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */ +/* Target for download. (These match the blob numbers defined in + * mc_flash_layout.h.) + */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb +/* enum: Valid in phase 3 (PHASE_VECTORS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc +/* enum: Valid in phase 3 (PHASE_VECTORS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd +/* enum: Valid in phase 3 (PHASE_VECTORS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe +/* enum: Valid in phase 3 (PHASE_VECTORS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf +/* enum: Valid in phases 1 (PHASE_RESET) and 4 (PHASE_READY) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff +/* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8 +/* enum: Last chunk, containing checksum rather than data */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff +/* enum: Abort download of this item */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe +/* Length of this chunk in bytes */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12 +/* Data for this chunk */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MINNUM 1 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM 59 + +/* MC_CMD_SATELLITE_DOWNLOAD_OUT msgresponse */ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8 +/* Same as MC_CMD_ERR field, but included as 0 in success cases */ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0 +/* Extra status information */ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4 +/* enum: Code download OK, completed. 
*/ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0 +/* enum: Code download aborted as requested. */ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1 +/* enum: Code download OK so far, send next chunk. */ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2 +/* enum: Download phases out of sequence */ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100 +/* enum: Bad target for this phase */ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101 +/* enum: Chunk ID out of sequence */ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200 +/* enum: Chunk length zero or too large */ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201 +/* enum: Checksum was incorrect */ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300 + + +/***********************************/ +/* MC_CMD_GET_CAPABILITIES + * Get device capabilities. + * + * This is supplementary to the MC_CMD_GET_BOARD_CFG command, and intended to + * reference inherent device capabilities as opposed to current NVRAM config. + */ +#define MC_CMD_GET_CAPABILITIES 0xbe + +/* MC_CMD_GET_CAPABILITIES_IN msgrequest */ +#define MC_CMD_GET_CAPABILITIES_IN_LEN 0 + +/* MC_CMD_GET_CAPABILITIES_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20 +/* First word of flags. */ +#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +/* RxDPCPU firmware id. */ +#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16 + + +/***********************************/ +/* MC_CMD_V2_EXTN + * Encapsulation for a v2 extended command + */ +#define MC_CMD_V2_EXTN 0x7f + +/* MC_CMD_V2_EXTN_IN msgrequest */ +#define MC_CMD_V2_EXTN_IN_LEN 4 +/* the extended command number */ +#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN 0 +#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_WIDTH 15 +#define MC_CMD_V2_EXTN_IN_UNUSED_LBN 15 +#define MC_CMD_V2_EXTN_IN_UNUSED_WIDTH 1 +/* the actual length of the encapsulated command (which is not in the v1 + * header) + */ +#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN 16 +#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH 10 +#define MC_CMD_V2_EXTN_IN_UNUSED2_LBN 26 +#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 6 + + +/***********************************/ +/* MC_CMD_TCM_BUCKET_ALLOC + * Allocate a pacer bucket (for qau rp or a snapper test) + */ +#define MC_CMD_TCM_BUCKET_ALLOC 0xb2 + +/* MC_CMD_TCM_BUCKET_ALLOC_IN msgrequest */ +#define MC_CMD_TCM_BUCKET_ALLOC_IN_LEN 0 + +/* MC_CMD_TCM_BUCKET_ALLOC_OUT msgresponse */ +#define MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4 +/* the bucket id */ +#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0 + + +/***********************************/ +/* MC_CMD_TCM_BUCKET_FREE + * Free a pacer bucket + */ +#define MC_CMD_TCM_BUCKET_FREE 0xb3 + +/* MC_CMD_TCM_BUCKET_FREE_IN msgrequest */ +#define 
MC_CMD_TCM_BUCKET_FREE_IN_LEN 4 +/* the bucket id */ +#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0 + +/* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */ +#define MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_TCM_BUCKET_INIT + * Initialise pacer bucket with a given rate + */ +#define MC_CMD_TCM_BUCKET_INIT 0xb4 + +/* MC_CMD_TCM_BUCKET_INIT_IN msgrequest */ +#define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8 +/* the bucket id */ +#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0 +/* the rate in mbps */ +#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4 + +/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */ +#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_TCM_TXQ_INIT + * Initialise txq in pacer with given options or set options + */ +#define MC_CMD_TCM_TXQ_INIT 0xb5 + +/* MC_CMD_TCM_TXQ_INIT_IN msgrequest */ +#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28 +/* the txq id */ +#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0 +/* the static priority associated with the txq */ +#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4 +/* bitmask of the priority queues this txq is inserted into */ +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8 +/* the reaction point (RP) bucket */ +#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12 +/* an already reserved bucket (typically set to bucket associated with outer + * vswitch) + */ +#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16 +/* an already reserved bucket (typically set to bucket associated with inner + * vswitch) + */ +#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20 +/* the min bucket (typically for ETS/minimum bandwidth) */ +#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24 + +/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */ +#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_LINK_PIOBUF + * Link a push I/O buffer to a TxQ + */ +#define MC_CMD_LINK_PIOBUF 0x92 + +/* MC_CMD_LINK_PIOBUF_IN msgrequest */ +#define MC_CMD_LINK_PIOBUF_IN_LEN 8 +/* Handle for allocated push I/O buffer. */ +#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0 +/* Function Local Instance (VI) number. */ +#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4 + +/* MC_CMD_LINK_PIOBUF_OUT msgresponse */ +#define MC_CMD_LINK_PIOBUF_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_UNLINK_PIOBUF + * Unlink a push I/O buffer from a TxQ + */ +#define MC_CMD_UNLINK_PIOBUF 0x93 + +/* MC_CMD_UNLINK_PIOBUF_IN msgrequest */ +#define MC_CMD_UNLINK_PIOBUF_IN_LEN 4 +/* Function Local Instance (VI) number. */ +#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0 + +/* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */ +#define MC_CMD_UNLINK_PIOBUF_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_VSWITCH_ALLOC + * allocate and initialise a v-switch. + */ +#define MC_CMD_VSWITCH_ALLOC 0x94 + +/* MC_CMD_VSWITCH_ALLOC_IN msgrequest */ +#define MC_CMD_VSWITCH_ALLOC_IN_LEN 16 +/* The port to connect to the v-switch's upstream port. */ +#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +/* The type of v-switch to create. */ +#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4 +/* enum: VLAN */ +#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1 +/* enum: VEB */ +#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2 +/* enum: VEPA */ +#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3 +/* Flags controlling v-port creation */ +#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8 +#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0 +#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1 +/* The number of VLAN tags to support. 
 */
+#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+
+/* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
+#define MC_CMD_VSWITCH_ALLOC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_FREE
+ * de-allocate a v-switch.
+ */
+#define MC_CMD_VSWITCH_FREE 0x95
+
+/* MC_CMD_VSWITCH_FREE_IN msgrequest */
+#define MC_CMD_VSWITCH_FREE_IN_LEN 4
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VSWITCH_FREE_OUT msgresponse */
+#define MC_CMD_VSWITCH_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_ALLOC
+ * allocate a v-port.
+ */
+#define MC_CMD_VPORT_ALLOC 0x96
+
+/* MC_CMD_VPORT_ALLOC_IN msgrequest */
+#define MC_CMD_VPORT_ALLOC_IN_LEN 20
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The type of the new v-port. */
+#define MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4
+/* enum: VLAN (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1
+/* enum: VEB (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEB 0x2
+/* enum: VEPA (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEPA 0x3
+/* enum: A normal v-port receives packets which match a specified MAC and/or
+ * VLAN.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL 0x4
+/* enum: An expansion v-port receives packets which don't match any other
+ * v-port.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_EXPANSION 0x5
+/* enum: A test v-port receives packets which match any filters installed by
+ * its downstream components.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6
+/* Flags controlling v-port creation */
+#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+/* The number of VLAN tags to insert/remove. */
+#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_WIDTH 16
+
+/* MC_CMD_VPORT_ALLOC_OUT msgresponse */
+#define MC_CMD_VPORT_ALLOC_OUT_LEN 4
+/* The handle of the new v-port */
+#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_FREE
+ * de-allocate a v-port.
+ */
+#define MC_CMD_VPORT_FREE 0x97
+
+/* MC_CMD_VPORT_FREE_IN msgrequest */
+#define MC_CMD_VPORT_FREE_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0
+
+/* MC_CMD_VPORT_FREE_OUT msgresponse */
+#define MC_CMD_VPORT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_ALLOC
+ * allocate a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_ALLOC 0x98
+
+/* MC_CMD_VADAPTOR_ALLOC_IN msgrequest */
+#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 16
+/* The port to connect to the v-adaptor's port.
+ */
+#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* Flags controlling v-adaptor creation */
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
+/* The number of VLAN tags to strip on receive */
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
+
+/* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_FREE
+ * de-allocate a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_FREE 0x99
+
+/* MC_CMD_VADAPTOR_FREE_IN msgrequest */
+#define MC_CMD_VADAPTOR_FREE_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VADAPTOR_FREE_OUT msgresponse */
+#define MC_CMD_VADAPTOR_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_EVB_PORT_ASSIGN
+ * assign a port to a PCI function.
+ */
+#define MC_CMD_EVB_PORT_ASSIGN 0x9a
+
+/* MC_CMD_EVB_PORT_ASSIGN_IN msgrequest */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8
+/* The port to assign. */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0
+/* The target function to modify. */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16
+#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16
+#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_WIDTH 16
+
+/* MC_CMD_EVB_PORT_ASSIGN_OUT msgresponse */
+#define MC_CMD_EVB_PORT_ASSIGN_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RDWR_A64_REGIONS
+ * Assign the 64 bit region addresses.
+ */
+#define MC_CMD_RDWR_A64_REGIONS 0x9b
+
+/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
+#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12
+/* Write enable bits 0-3, set to write, clear to read. */
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST 16
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_LEN 1
+
+/* MC_CMD_RDWR_A64_REGIONS_OUT msgresponse: This data is always included
+ * regardless of the state of the write bits in the request.
+ */
+#define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_ALLOC
+ * Allocate an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_ALLOC 0x9c
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
+/* The handle of the owning upstream port */
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
+/* The handle of the new Onload stack */
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_FREE
+ * Free an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_FREE 0x9d
+
+/* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */
+#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
+/* The handle of the Onload stack */
+#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0
+
+/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
+#define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_ALLOC
+ * Allocate an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC 0x9e
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
+/* The handle of the owning upstream port */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The type of context to allocate */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4
+/* enum: Allocate a context for exclusive use. The key and indirection table
+ * must be explicitly configured.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE 0x0
+/* enum: Allocate a context for shared use; this will spread across a range of
+ * queues, but the key and indirection table are pre-configured and may not be
+ * changed. For this mode, NUM_QUEUES must be 2, 4, 8, 16, 32 or 64.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1
+/* Number of queues spanned by this context, in the range 1-64; valid offsets
+ * in the indirection table will be in the range 0 to NUM_QUEUES-1.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
+/* The handle of the new RSS context */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_FREE
+ * Free an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_FREE 0x9f
+
+/* MC_CMD_RSS_CONTEXT_FREE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_KEY
+ * Set the Toeplitz hash key for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_KEY 0xa0
+
+/* MC_CMD_RSS_CONTEXT_SET_KEY_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40
+
+/* MC_CMD_RSS_CONTEXT_SET_KEY_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_KEY
+ * Get the Toeplitz hash key for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_KEY 0xa1
+
+/* MC_CMD_RSS_CONTEXT_GET_KEY_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44
+/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_LEN 40
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_TABLE
+ * Set the indirection table for an RSS context.
+ */ +#define MC_CMD_RSS_CONTEXT_SET_TABLE 0xa2 + +/* MC_CMD_RSS_CONTEXT_SET_TABLE_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0 +/* The 128-byte indirection table (1 byte per entry) */ +#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128 + +/* MC_CMD_RSS_CONTEXT_SET_TABLE_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_GET_TABLE + * Get the indirection table for an RSS context. + */ +#define MC_CMD_RSS_CONTEXT_GET_TABLE 0xa3 + +/* MC_CMD_RSS_CONTEXT_GET_TABLE_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0 + +/* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132 +/* The 128-byte indirection table (1 byte per entry) */ +#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN 128 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_SET_FLAGS + * Set various control flags for an RSS context. + */ +#define MC_CMD_RSS_CONTEXT_SET_FLAGS 0xe1 + +/* MC_CMD_RSS_CONTEXT_SET_FLAGS_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0 +/* Hash control flags */ +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_LBN 2 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1 + +/* MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_GET_FLAGS + * Get various control flags for an RSS context. + */ +#define MC_CMD_RSS_CONTEXT_GET_FLAGS 0xe2 + +/* MC_CMD_RSS_CONTEXT_GET_FLAGS_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0 + +/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8 +/* Hash control flags */ +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN 2 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1 + + +/***********************************/ +/* MC_CMD_DOT1P_MAPPING_ALLOC + * Allocate a .1p mapping. 
+ */
+#define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4
+
+/* MC_CMD_DOT1P_MAPPING_ALLOC_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
+/* The handle of the owning upstream port */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* Number of queues spanned by this mapping, in the range 1-64; valid fixed
+ * offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and
+ * referenced RSS contexts must span no more than this number.
+ */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4
+
+/* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
+/* The handle of the new .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_FREE
+ * Free a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_FREE 0xa5
+
+/* MC_CMD_DOT1P_MAPPING_FREE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0
+
+/* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE
+ * Set the mapping table for a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6
+
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
+ * handle)
+ */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_OFST 4
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_LEN 32
+
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE
+ * Get the mapping table for a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7
+
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36
+/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
+ * handle)
+ */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_OFST 4
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_LEN 32
+
+
+/***********************************/
+/* MC_CMD_GET_VECTOR_CFG
+ * Get Interrupt Vector config for this PF.
+ */
+#define MC_CMD_GET_VECTOR_CFG 0xbf
+
+/* MC_CMD_GET_VECTOR_CFG_IN msgrequest */
+#define MC_CMD_GET_VECTOR_CFG_IN_LEN 0
+
+/* MC_CMD_GET_VECTOR_CFG_OUT msgresponse */
+#define MC_CMD_GET_VECTOR_CFG_OUT_LEN 12
+/* Base absolute interrupt vector number. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0
+/* Number of interrupt vectors allocated to this PF. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4
+/* Number of interrupt vectors to allocate per VF. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8
+
+
+/***********************************/
+/* MC_CMD_SET_VECTOR_CFG
+ * Set Interrupt Vector config for this PF.
+ */
+#define MC_CMD_SET_VECTOR_CFG 0xc0
+
+/* MC_CMD_SET_VECTOR_CFG_IN msgrequest */
+#define MC_CMD_SET_VECTOR_CFG_IN_LEN 12
+/* Base absolute interrupt vector number, or MC_CMD_RESOURCE_INSTANCE_ANY to
+ * let the system find a suitable base.
+ */
+#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0
+/* Number of interrupt vectors allocated to this PF. */
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4
+/* Number of interrupt vectors to allocate per VF. */
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8
+
+/* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */
+#define MC_CMD_SET_VECTOR_CFG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_CLASS_STATS
+ * Retrieve rmon rx class statistics
+ */
+#define MC_CMD_RMON_RX_CLASS_STATS 0xc3
+
+/* MC_CMD_RMON_RX_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_CLASS_STATS
+ * Retrieve rmon tx class statistics
+ */
+#define MC_CMD_RMON_TX_CLASS_STATS 0xc4
+
+/* MC_CMD_RMON_TX_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_SUPER_CLASS_STATS
+ * Retrieve rmon rx super_class statistics
+ */
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS 0xc5
+
+/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_LBN 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4
+#define 
MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1 +#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_RMON_TX_SUPER_CLASS_STATS + * Retrieve rmon tx super_class statistics + */ +#define MC_CMD_RMON_TX_SUPER_CLASS_STATS 0xc6 + +/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN msgrequest */ +#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0 +#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0 +#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4 +#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_LBN 4 +#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT msgresponse */ +#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMIN 4 +#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMAX 252 +#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num)) +/* Array of stats */ +#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0 +#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4 +#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1 +#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS + * Add qid to class for statistics collection + */ +#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS 0xc7 + +/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN msgrequest */ +#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_LEN 12 +/* class */ +#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0 +/* qid */ +#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_QID_OFST 4 +/* flags */ +#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8 +#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0 +#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4 +#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4 +#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4 +#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_LBN 8 +#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14 + +/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT msgresponse */ +#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS + * Add qid to class for statistics collection + */ +#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS 0xc8 + +/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN msgrequest */ +#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_LEN 12 +/* class */ +#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0 +/* qid */ +#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_QID_OFST 4 +/* flags */ +#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8 +#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0 +#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4 +#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4 +#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4 +#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_LBN 8 +#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14 + +/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT msgresponse */ +#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS + * Add qid to class for statistics collection + */ +#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS 0xc9 + +/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN msgrequest */ +#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_LEN 12 +/* class */ +#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_CLASS_OFST 0 +/* qid */ +#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_QID_OFST 4 +/* flags */ 
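
The RMON statistics commands above all follow the same two MCDI conventions: _LBN/_WIDTH pairs describe bit fields packed into a 32-bit FLAGS word, and _LENMIN/_LENMAX/_LEN(num) describe a variable-length response carrying an array of 32-bit counters. A minimal host-side sketch of both conventions, using only the MC_CMD_RMON_RX_CLASS_STATS macros defined above (the mcdi_field() helper here is hypothetical and not part of the MCDI headers or of this patch):

/* Illustrative sketch only -- not part of mcdi_pcol.h or of this patch. */
#include <stddef.h>
#include <stdint.h>

/* Hypothetical helper: place 'value' into a dword at bit 'lbn', 'width' bits wide. */
static inline uint32_t mcdi_field(unsigned int lbn, unsigned int width,
				  uint32_t value)
{
	return (value & ((1u << width) - 1u)) << lbn;
}

static void rmon_rx_class_stats_example(uint32_t *flags_dword,
					size_t *max_response_len,
					unsigned int rx_class)
{
	/* Build the FLAGS word at offset 0: CLASS in bits [7:0], RST in bit 8. */
	*flags_dword =
		mcdi_field(MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_LBN,
			   MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_WIDTH, rx_class) |
		mcdi_field(MC_CMD_RMON_RX_CLASS_STATS_IN_RST_LBN,
			   MC_CMD_RMON_RX_CLASS_STATS_IN_RST_WIDTH, 1);

	/* Worst-case response size: up to BUFFER_MAXNUM 32-bit stats, i.e.
	 * MC_CMD_RMON_RX_CLASS_STATS_OUT_LEN(63) == 252 bytes (the LENMAX value).
	 */
	*max_response_len = MC_CMD_RMON_RX_CLASS_STATS_OUT_LEN(
		MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MAXNUM);
}
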
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8 +#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0 +#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4 +#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4 +#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4 +#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_LBN 8 +#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14 + +/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT msgresponse */ +#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RMON_ALLOC_CLASS + * Allocate an rmon class + */ +#define MC_CMD_RMON_ALLOC_CLASS 0xca + +/* MC_CMD_RMON_ALLOC_CLASS_IN msgrequest */ +#define MC_CMD_RMON_ALLOC_CLASS_IN_LEN 0 + +/* MC_CMD_RMON_ALLOC_CLASS_OUT msgresponse */ +#define MC_CMD_RMON_ALLOC_CLASS_OUT_LEN 4 +/* class */ +#define MC_CMD_RMON_ALLOC_CLASS_OUT_CLASS_OFST 0 + + +/***********************************/ +/* MC_CMD_RMON_DEALLOC_CLASS + * Deallocate an rmon class + */ +#define MC_CMD_RMON_DEALLOC_CLASS 0xcb + +/* MC_CMD_RMON_DEALLOC_CLASS_IN msgrequest */ +#define MC_CMD_RMON_DEALLOC_CLASS_IN_LEN 4 +/* class */ +#define MC_CMD_RMON_DEALLOC_CLASS_IN_CLASS_OFST 0 + +/* MC_CMD_RMON_DEALLOC_CLASS_OUT msgresponse */ +#define MC_CMD_RMON_DEALLOC_CLASS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RMON_ALLOC_SUPER_CLASS + * Allocate an rmon super_class + */ +#define MC_CMD_RMON_ALLOC_SUPER_CLASS 0xcc + +/* MC_CMD_RMON_ALLOC_SUPER_CLASS_IN msgrequest */ +#define MC_CMD_RMON_ALLOC_SUPER_CLASS_IN_LEN 0 + +/* MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT msgresponse */ +#define MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_LEN 4 +/* super_class */ +#define MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_SUPER_CLASS_OFST 0 + + +/***********************************/ +/* MC_CMD_RMON_DEALLOC_SUPER_CLASS + * Deallocate an rmon tx super_class + */ +#define MC_CMD_RMON_DEALLOC_SUPER_CLASS 0xcd + +/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN msgrequest */ +#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_LEN 4 +/* super_class */ +#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_SUPER_CLASS_OFST 0 + +/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT msgresponse */ +#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RMON_RX_UP_CONV_STATS + * Retrieve up converter statistics + */ +#define MC_CMD_RMON_RX_UP_CONV_STATS 0xce + +/* MC_CMD_RMON_RX_UP_CONV_STATS_IN msgrequest */ +#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_FLAGS_OFST 0 +#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_LBN 0 +#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_WIDTH 2 +#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_LBN 2 +#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_RX_UP_CONV_STATS_OUT msgresponse */ +#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMIN 4 +#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMAX 252 +#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LEN(num) (0+4*(num)) +/* Array of stats */ +#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_OFST 0 +#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_LEN 4 +#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MINNUM 1 +#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_RMON_RX_IPI_STATS + * Retrieve rx ipi stats + */ +#define MC_CMD_RMON_RX_IPI_STATS 0xcf + +/* MC_CMD_RMON_RX_IPI_STATS_IN msgrequest */ +#define MC_CMD_RMON_RX_IPI_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_RX_IPI_STATS_IN_FLAGS_OFST 0 +#define 
MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_LBN 0
+#define MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_WIDTH 5
+#define MC_CMD_RMON_RX_IPI_STATS_IN_RST_LBN 5
+#define MC_CMD_RMON_RX_IPI_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_IPI_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS
+ * Retrieve rx ipsec cntxt_ptr indexed stats
+ */
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS 0xd0
+
+/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_IPSEC_PORT_STATS
+ * Retrieve rx ipsec port indexed stats
+ */
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS 0xd1
+
+/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_LBN 0
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_LBN 2
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS
+ * Retrieve rx ipsec overflow
+ */
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS 0xd2
+
+/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_LBN 2
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS
+ * Add a MAC address to a v-port
+ */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+/* MAC address to add */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS
+ * Delete a MAC address from a v-port
+ */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+/* MAC address to delete */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES
+ * Get the MAC addresses assigned to a v-port
+ */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES 0xaa
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX 250
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num))
+/* The number of MAC addresses returned */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0
+/* Array of MAC addresses */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MINNUM 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM 41
+
+
+/***********************************/
+/* MC_CMD_DUMP_BUFTBL_ENTRIES
+ * Dump buffer table entries, mainly for command client debug use. Dumps
+ * absolute entries, and does not use chunk handles. All entries must be in
+ * range, and used for q page mapping, although the latter restriction may be
+ * lifted in future.
+ */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
+
+/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
+/* Index of the first buffer table entry. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0
+/* Number of buffer table entries to dump. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4
+
+/* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
+/* Raw buffer table entries, laid out as BUFTBL_ENTRY.
*/ +#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0 +#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12 +#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1 +#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM 21 + + +/***********************************/ +/* MC_CMD_SET_RXDP_CONFIG + * Set global RXDP configuration settings + */ +#define MC_CMD_SET_RXDP_CONFIG 0xc1 + +/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */ +#define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4 +#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0 +#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0 +#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1 + +/* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */ +#define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_RXDP_CONFIG + * Get global RXDP configuration settings + */ +#define MC_CMD_GET_RXDP_CONFIG 0xc2 + +/* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */ +#define MC_CMD_GET_RXDP_CONFIG_IN_LEN 0 + +/* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */ +#define MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4 +#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0 +#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0 +#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1 + + +/***********************************/ +/* MC_CMD_RMON_RX_CLASS_DROPS_STATS + * Retrieve rx class drop stats + */ +#define MC_CMD_RMON_RX_CLASS_DROPS_STATS 0xd3 + +/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN msgrequest */ +#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_FLAGS_OFST 0 +#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_LBN 0 +#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_WIDTH 8 +#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_LBN 8 +#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT msgresponse */ +#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMIN 4 +#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMAX 252 +#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num)) +/* Array of stats */ +#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0 +#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4 +#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1 +#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS + * Retrieve rx super class drop stats + */ +#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS 0xd4 + +/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN msgrequest */ +#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_FLAGS_OFST 0 +#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_LBN 0 +#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_WIDTH 4 +#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_LBN 4 +#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT msgresponse */ +#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMIN 4 +#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMAX 252 +#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num)) +/* Array of stats */ +#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0 +#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4 +#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1 +#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_RMON_RX_ERRORS_STATS + * 
Retrieve rxdp errors + */ +#define MC_CMD_RMON_RX_ERRORS_STATS 0xd5 + +/* MC_CMD_RMON_RX_ERRORS_STATS_IN msgrequest */ +#define MC_CMD_RMON_RX_ERRORS_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_RX_ERRORS_STATS_IN_FLAGS_OFST 0 +#define MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_LBN 0 +#define MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_WIDTH 11 +#define MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_LBN 11 +#define MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_RX_ERRORS_STATS_OUT msgresponse */ +#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMIN 4 +#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMAX 252 +#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LEN(num) (0+4*(num)) +/* Array of stats */ +#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_OFST 0 +#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_LEN 4 +#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MINNUM 1 +#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_RMON_RX_OVERFLOW_STATS + * Retrieve rxdp overflow + */ +#define MC_CMD_RMON_RX_OVERFLOW_STATS 0xd6 + +/* MC_CMD_RMON_RX_OVERFLOW_STATS_IN msgrequest */ +#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_FLAGS_OFST 0 +#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_LBN 0 +#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_WIDTH 8 +#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_LBN 8 +#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_RX_OVERFLOW_STATS_OUT msgresponse */ +#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMIN 4 +#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMAX 252 +#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num)) +/* Array of stats */ +#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_OFST 0 +#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_LEN 4 +#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1 +#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_RMON_TX_IPI_STATS + * Retrieve tx ipi stats + */ +#define MC_CMD_RMON_TX_IPI_STATS 0xd7 + +/* MC_CMD_RMON_TX_IPI_STATS_IN msgrequest */ +#define MC_CMD_RMON_TX_IPI_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_TX_IPI_STATS_IN_FLAGS_OFST 0 +#define MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_LBN 0 +#define MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_WIDTH 5 +#define MC_CMD_RMON_TX_IPI_STATS_IN_RST_LBN 5 +#define MC_CMD_RMON_TX_IPI_STATS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_TX_IPI_STATS_OUT msgresponse */ +#define MC_CMD_RMON_TX_IPI_STATS_OUT_LENMIN 4 +#define MC_CMD_RMON_TX_IPI_STATS_OUT_LENMAX 252 +#define MC_CMD_RMON_TX_IPI_STATS_OUT_LEN(num) (0+4*(num)) +/* Array of stats */ +#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_OFST 0 +#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_LEN 4 +#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MINNUM 1 +#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS + * Retrieve tx ipsec counters by cntxt_ptr + */ +#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS 0xd8 + +/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */ +#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0 +#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0 +#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9 +#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9 +#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */ +#define 
MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4 +#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252 +#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num)) +/* Array of stats */ +#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0 +#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4 +#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1 +#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_RMON_TX_IPSEC_PORT_STATS + * Retrieve tx ipsec counters by port + */ +#define MC_CMD_RMON_TX_IPSEC_PORT_STATS 0xd9 + +/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN msgrequest */ +#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0 +#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_LBN 0 +#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2 +#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_LBN 2 +#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT msgresponse */ +#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMIN 4 +#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMAX 252 +#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num)) +/* Array of stats */ +#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0 +#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4 +#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1 +#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS + * Retrieve tx ipsec overflow + */ +#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS 0xda + +/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN msgrequest */ +#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0 +#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0 +#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2 +#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_LBN 2 +#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT msgresponse */ +#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMIN 4 +#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMAX 252 +#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num)) +/* Array of stats */ +#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0 +#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4 +#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1 +#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_RMON_TX_NOWHERE_STATS + * Retrieve tx nowhere stats + */ +#define MC_CMD_RMON_TX_NOWHERE_STATS 0xdb + +/* MC_CMD_RMON_TX_NOWHERE_STATS_IN msgrequest */ +#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_FLAGS_OFST 0 +#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_LBN 0 +#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_WIDTH 8 +#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_LBN 8 +#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_TX_NOWHERE_STATS_OUT msgresponse */ +#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMIN 4 +#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMAX 252 +#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LEN(num) (0+4*(num)) +/* Array of stats */ +#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_OFST 0 +#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_LEN 4 +#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MINNUM 1 +#define 
MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS
+ * Retrieve tx nowhere qbb stats
+ */
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS 0xdc
+
+/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_LBN 0
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_WIDTH 3
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_LBN 3
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_ERRORS_STATS
+ * Retrieve txdp errors
+ */
+#define MC_CMD_RMON_TX_ERRORS_STATS 0xdd
+
+/* MC_CMD_RMON_TX_ERRORS_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_LBN 0
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_WIDTH 11
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_LBN 11
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_ERRORS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_OVERFLOW_STATS
+ * Retrieve txdp overflow
+ */
+#define MC_CMD_RMON_TX_OVERFLOW_STATS 0xde
+
+/* MC_CMD_RMON_TX_OVERFLOW_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_OVERFLOW_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_COLLECT_CLASS_STATS
+ * Explicitly collect class stats at the specified evb port
+ */
+#define MC_CMD_RMON_COLLECT_CLASS_STATS 0xdf
+
+/* MC_CMD_RMON_COLLECT_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_COLLECT_CLASS_STATS_IN_LEN 4
+/* The port id associated with the vport/pport at which to collect class stats
+ */
+#define MC_CMD_RMON_COLLECT_CLASS_STATS_IN_PORT_ID_OFST 0
+
+/* MC_CMD_RMON_COLLECT_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_LEN 4
+/* class */
+#define MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_CLASS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS
+ * Explicitly collect super class stats at the specified evb port
+ */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS 0xe0
+
+/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_LEN 4
+/* The port id associated with the vport/pport at which to collect super class
+ * stats
+ */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_PORT_ID_OFST 0
+
+/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_LEN 4
+/* super_class */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_SUPER_CLASS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_GET_CLOCK
+ * Return the system and DPCPU clock frequencies.
+ */
+#define MC_CMD_GET_CLOCK 0xac
+
+/* MC_CMD_GET_CLOCK_IN msgrequest */
+#define MC_CMD_GET_CLOCK_IN_LEN 0
+
+/* MC_CMD_GET_CLOCK_OUT msgresponse */
+#define MC_CMD_GET_CLOCK_OUT_LEN 8
+/* System frequency, MHz */
+#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0
+/* DPCPU frequency, MHz */
+#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4
+
+
+/***********************************/
+/* MC_CMD_SET_CLOCK
+ * Control the system and DPCPU clock frequencies. Changes are lost on reboot.
+ */
+#define MC_CMD_SET_CLOCK 0xad
+
+/* MC_CMD_SET_CLOCK_IN msgrequest */
+#define MC_CMD_SET_CLOCK_IN_LEN 12
+/* Requested system frequency in MHz; 0 leaves unchanged. */
+#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
+/* Requested inter-core frequency in MHz; 0 leaves unchanged. */
+#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
+/* Requested DPCPU frequency in MHz; 0 leaves unchanged. */
+#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
+
+/* MC_CMD_SET_CLOCK_OUT msgresponse */
+#define MC_CMD_SET_CLOCK_OUT_LEN 12
+/* Resulting system frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
+/* Resulting inter-core frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
+/* Resulting DPCPU frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
+
+
+/***********************************/
+/* MC_CMD_DPCPU_RPC
+ * Send an arbitrary DPCPU message.
+ */ +#define MC_CMD_DPCPU_RPC 0xae + +/* MC_CMD_DPCPU_RPC_IN msgrequest */ +#define MC_CMD_DPCPU_RPC_IN_LEN 36 +#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0 +/* enum: RxDPCPU */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x0 +/* enum: TxDPCPU0 */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1 +/* enum: TxDPCPU1 */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2 +/* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be + * initialised to zero + */ +#define MC_CMD_DPCPU_RPC_IN_DATA_OFST 4 +#define MC_CMD_DPCPU_RPC_IN_DATA_LEN 32 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN 8 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH 8 +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */ +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_LBN 16 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_LBN 16 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_LBN 48 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_LBN 16 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_WIDTH 240 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN 16 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_LBN 48 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_LBN 64 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_LBN 80 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_LBN 16 +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_LBN 64 +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_WDATA_OFST 12 +#define MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24 +/* Register data to write. Only valid in write/write-read. */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16 +/* Register address. 
*/ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20 + +/* MC_CMD_DPCPU_RPC_OUT msgresponse */ +#define MC_CMD_DPCPU_RPC_OUT_LEN 36 +#define MC_CMD_DPCPU_RPC_OUT_RC_OFST 0 +/* DATA */ +#define MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4 +#define MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32 +#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_LBN 32 +#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_WIDTH 16 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_LBN 48 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_WIDTH 16 +#define MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12 +#define MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24 + + +/***********************************/ +/* MC_CMD_TRIGGER_INTERRUPT + * Trigger an interrupt by prodding the BIU. + */ +#define MC_CMD_TRIGGER_INTERRUPT 0xe3 + +/* MC_CMD_TRIGGER_INTERRUPT_IN msgrequest */ +#define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4 +/* Interrupt level relative to base for function. */ +#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0 + +/* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */ +#define MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_DUMP_DO + * Take a dump of the DUT state + */ +#define MC_CMD_DUMP_DO 0xe8 + +/* MC_CMD_DUMP_DO_IN msgrequest */ +#define MC_CMD_DUMP_DO_IN_LEN 52 +#define MC_CMD_DUMP_DO_IN_PADDING_OFST 0 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_OFST 4 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM 0x0 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT 0x1 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8 +#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM 0x1 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY 0x2 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI 0x3 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART 0x4 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12 +#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE 0x1000 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20 +#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION 0x1 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32 +/* Enum values, see field(s): */ +/* MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */ +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36 
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48 + +/* MC_CMD_DUMP_DO_OUT msgresponse */ +#define MC_CMD_DUMP_DO_OUT_LEN 4 +#define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_OFST 0 + + +/***********************************/ +/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED + * Configure unsolicited dumps + */ +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9 + +/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */ +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_OFST 4 +/* Enum values, see field(s): */ +/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC */ +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8 +/* Enum values, see field(s): */ +/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */ +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_OFST 28 +/* Enum values, see field(s): */ +/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPFILE_DST */ +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32 +/* Enum values, see field(s): */ +/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */ +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48 + + +/***********************************/ +/* MC_CMD_SET_PSU + * Adjusts power supply parameters. This is a warranty-voiding operation. + * Returns: ENOENT if the parameter or rail specified does not exist, EINVAL if + * the parameter is out of range. 
+ */ +#define MC_CMD_SET_PSU 0xea + +/* MC_CMD_SET_PSU_IN msgrequest */ +#define MC_CMD_SET_PSU_IN_LEN 12 +#define MC_CMD_SET_PSU_IN_PARAM_OFST 0 +#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */ +#define MC_CMD_SET_PSU_IN_RAIL_OFST 4 +#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */ +#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */ +/* desired value, eg voltage in mV */ +#define MC_CMD_SET_PSU_IN_VALUE_OFST 8 + +/* MC_CMD_SET_PSU_OUT msgresponse */ +#define MC_CMD_SET_PSU_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_FUNCTION_INFO + * Get function information. PF and VF number. + */ +#define MC_CMD_GET_FUNCTION_INFO 0xec + +/* MC_CMD_GET_FUNCTION_INFO_IN msgrequest */ +#define MC_CMD_GET_FUNCTION_INFO_IN_LEN 0 + +/* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */ +#define MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8 +#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0 +#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4 + + +/***********************************/ +/* MC_CMD_ENABLE_OFFLINE_BIST + * Enters offline BIST mode. All queues are torn down, chip enters quiescent + * mode, calling function gets exclusive MCDI ownership. The only way out is + * reboot. + */ +#define MC_CMD_ENABLE_OFFLINE_BIST 0xed + +/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */ +#define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0 + +/* MC_CMD_ENABLE_OFFLINE_BIST_OUT msgresponse */ +#define MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_START_KR_EYE_PLOT + * Start KR Serdes Eye diagram plot on a given lane. Lane must have valid + * signal. + */ +#define MC_CMD_START_KR_EYE_PLOT 0xee + +/* MC_CMD_START_KR_EYE_PLOT_IN msgrequest */ +#define MC_CMD_START_KR_EYE_PLOT_IN_LEN 4 +#define MC_CMD_START_KR_EYE_PLOT_IN_LANE_OFST 0 + +/* MC_CMD_START_KR_EYE_PLOT_OUT msgresponse */ +#define MC_CMD_START_KR_EYE_PLOT_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_POLL_KR_EYE_PLOT + * Poll KR Serdes Eye diagram plot. Returns one row of BER data. The caller + * should call this command repeatedly after starting eye plot, until no more + * data is returned. 
+ */ +#define MC_CMD_POLL_KR_EYE_PLOT 0xef + +/* MC_CMD_POLL_KR_EYE_PLOT_IN msgrequest */ +#define MC_CMD_POLL_KR_EYE_PLOT_IN_LEN 0 + +/* MC_CMD_POLL_KR_EYE_PLOT_OUT msgresponse */ +#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LENMIN 0 +#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LENMAX 252 +#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LEN(num) (0+2*(num)) +#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_OFST 0 +#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_LEN 2 +#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_MINNUM 0 +#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_MAXNUM 126 + + +/***********************************/ +/* MC_CMD_READ_FUSES + * Read data programmed into the device One-Time-Programmable (OTP) Fuses + */ +#define MC_CMD_READ_FUSES 0xf0 + +/* MC_CMD_READ_FUSES_IN msgrequest */ +#define MC_CMD_READ_FUSES_IN_LEN 8 +/* Offset in OTP to read */ +#define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0 +/* Length of data to read in bytes */ +#define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4 + +/* MC_CMD_READ_FUSES_OUT msgresponse */ +#define MC_CMD_READ_FUSES_OUT_LENMIN 4 +#define MC_CMD_READ_FUSES_OUT_LENMAX 252 +#define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num)) +/* Length of returned OTP data in bytes */ +#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0 +/* Returned data */ +#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4 +#define MC_CMD_READ_FUSES_OUT_DATA_LEN 1 +#define MC_CMD_READ_FUSES_OUT_DATA_MINNUM 0 +#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM 248 + + +/***********************************/ +/* MC_CMD_KR_TUNE + * Get or set KR Serdes RXEQ and TX Driver settings + */ +#define MC_CMD_KR_TUNE 0xf1 + +/* MC_CMD_KR_TUNE_IN msgrequest */ +#define MC_CMD_KR_TUNE_IN_LENMIN 4 +#define MC_CMD_KR_TUNE_IN_LENMAX 252 +#define MC_CMD_KR_TUNE_IN_LEN(num) (4+4*(num)) +/* Requested operation */ +#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_LEN 1 +/* enum: Get current RXEQ settings */ +#define MC_CMD_KR_TUNE_IN_RXEQ_GET 0x0 +/* enum: Override RXEQ settings */ +#define MC_CMD_KR_TUNE_IN_RXEQ_SET 0x1 +/* enum: Get current TX Driver settings */ +#define MC_CMD_KR_TUNE_IN_TXEQ_GET 0x2 +/* enum: Override TX Driver settings */ +#define MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3 +/* enum: Force KR Serdes reset / recalibration */ +#define MC_CMD_KR_TUNE_IN_RECAL 0x4 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3 +/* Arguments specific to the operation */ +#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_OFST 4 +#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_LEN 4 +#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MINNUM 0 +#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MAXNUM 62 + +/* MC_CMD_KR_TUNE_OUT msgresponse */ +#define MC_CMD_KR_TUNE_OUT_LEN 0 + +/* MC_CMD_KR_TUNE_RXEQ_GET_IN msgrequest */ +#define MC_CMD_KR_TUNE_RXEQ_GET_IN_LEN 4 +/* Requested operation */ +#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_LEN 3 + +/* MC_CMD_KR_TUNE_RXEQ_GET_OUT msgresponse */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMIN 4 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMAX 252 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num)) +/* RXEQ Parameter */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_OFST 0 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LEN 4 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0 +#define 
MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8 +/* enum: Attenuation (0-15) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0 +/* enum: CTLE Boost (0-15) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1 +/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2 +/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3 +/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4 +/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5 +/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_ALL 0x4 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 11 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 4 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_LBN 16 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8 + +/* MC_CMD_KR_TUNE_RXEQ_SET_IN msgrequest */ +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMIN 8 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMAX 252 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num)) +/* Requested operation */ +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_LEN 3 +/* RXEQ Parameter */ +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_OFST 4 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LEN 4 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_ID */ +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 3 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_LANE */ +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 11 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_LBN 12 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 4 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8 + +/* MC_CMD_KR_TUNE_RXEQ_SET_OUT msgresponse */ +#define MC_CMD_KR_TUNE_RXEQ_SET_OUT_LEN 0 + +/* MC_CMD_KR_TUNE_RECAL_IN msgrequest */ +#define MC_CMD_KR_TUNE_RECAL_IN_LEN 4 +/* Requested operation */ +#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ 
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_LEN 3 + +/* MC_CMD_KR_TUNE_RECAL_OUT msgresponse */ +#define MC_CMD_KR_TUNE_RECAL_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_PCIE_TUNE + * Get or set PCIE Serdes RXEQ and TX Driver settings + */ +#define MC_CMD_PCIE_TUNE 0xf2 + +/* MC_CMD_PCIE_TUNE_IN msgrequest */ +#define MC_CMD_PCIE_TUNE_IN_LENMIN 4 +#define MC_CMD_PCIE_TUNE_IN_LENMAX 252 +#define MC_CMD_PCIE_TUNE_IN_LEN(num) (4+4*(num)) +/* Requested operation */ +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST 0 +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_LEN 1 +/* enum: Get current RXEQ settings */ +#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0 +/* enum: Override RXEQ settings */ +#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1 +/* enum: Get current TX Driver settings */ +#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2 +/* enum: Override TX Driver settings */ +#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3 +/* Align the arguments to 32 bits */ +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1 +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3 +/* Arguments specific to the operation */ +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_OFST 4 +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_LEN 4 +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MINNUM 0 +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM 62 + +/* MC_CMD_PCIE_TUNE_OUT msgresponse */ +#define MC_CMD_PCIE_TUNE_OUT_LEN 0 + +/* MC_CMD_PCIE_TUNE_RXEQ_GET_IN msgrequest */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_LEN 4 +/* Requested operation */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_OFST 0 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3 + +/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT msgresponse */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMIN 4 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX 252 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num)) +/* RXEQ Parameter */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_OFST 0 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LEN 4 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8 +/* enum: Attenuation (0-15) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0 +/* enum: CTLE Boost (0-15) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1 +/* enum: DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2 +/* enum: DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3 +/* enum: DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4 +/* enum: DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5 +/* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 4 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */ +#define 
MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x8 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 12 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8 + +/* MC_CMD_PCIE_TUNE_TXEQ_GET_IN msgrequest */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_LEN 4 +/* Requested operation */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_OFST 0 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3 + +/* MC_CMD_PCIE_TUNE_TXEQ_GET_OUT msgresponse */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMIN 4 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX 252 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num)) +/* RXEQ Parameter */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_OFST 0 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LEN 4 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8 +/* enum: TxMargin (PIPE) */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0 +/* enum: TxSwing (PIPE) */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1 +/* enum: De-emphasis coefficient C(-1) (PIPE) */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2 +/* enum: De-emphasis coefficient C(0) (PIPE) */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3 +/* enum: De-emphasis coefficient C(+1) (PIPE) */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 4 +/* Enum values, see field(s): */ +/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_LBN 12 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 12 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8 + + +/***********************************/ +/* MC_CMD_LICENSING + * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition + */ +#define MC_CMD_LICENSING 0xf3 + +/* MC_CMD_LICENSING_IN msgrequest */ +#define MC_CMD_LICENSING_IN_LEN 4 +/* identifies the type of operation requested */ +#define MC_CMD_LICENSING_IN_OP_OFST 0 +/* enum: re-read and apply licenses after a license key partition update; note + * that this operation returns a zero-length response + */ +#define MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0 +/* enum: report counts of installed licenses */ +#define MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1 + +/* MC_CMD_LICENSING_OUT msgresponse */ +#define MC_CMD_LICENSING_OUT_LEN 28 +/* count of application keys which are valid */ +#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0 +/* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with + * MC_CMD_FC_OP_LICENSE) + */ +#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4 +/* count of application keys which are invalid due to being blacklisted */ +#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8 +/* count of application keys which are invalid due to being unverifiable */ +#define 
MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12 +/* count of application keys which are invalid due to being for the wrong node + */ +#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16 +/* licensing state (for diagnostics; the exact meaning of the bits in this + * field are private to the firmware) + */ +#define MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20 +/* licensing subsystem self-test report (for manftest) */ +#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24 +/* enum: licensing subsystem self-test failed */ +#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0 +/* enum: licensing subsystem self-test passed */ +#define MC_CMD_LICENSING_OUT_SELF_TEST_PASS 0x1 + + +/***********************************/ +/* MC_CMD_MC2MC_PROXY + * Execute an arbitrary MCDI command on the slave MC of a dual-core device. + * This will fail on a single-core system. + */ +#define MC_CMD_MC2MC_PROXY 0xf4 #endif /* MCDI_PCOL_H */ diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c index 91df43f..e359227 100644 --- a/drivers/net/ethernet/sfc/mcdi_port.c +++ b/drivers/net/ethernet/sfc/mcdi_port.c @@ -111,7 +111,8 @@ static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes) if (rc) goto fail; - if (outlen < MC_CMD_GET_LOOPBACK_MODES_OUT_LEN) { + if (outlen < (MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST + + MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN)) { rc = -EIO; goto fail; } -- cgit v0.10.2 From df2cd8af097850bb3440817fdb6b08922ff4b327 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Wed, 19 Sep 2012 00:56:18 +0100 Subject: sfc: Add support for MCDI v2 MCDI v2 adds a second header dword with wider command and length fields. It also defines extra error codes. Change the fallback error number for unknown MCDI error codes from EIO to EPROTO. EIO is treated as indicating the MCDI transport has failed and we need to reset the function, which is rather drastic. v2 error codes and lengths don't fit into completion events, so for a v2-capable transport, always read the response header rather then using the event fields. 
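For reference, the shape of the header change described above can be sketched as follows. Macro and field names follow the driver, but this is a condensed illustration of the mcdi.c hunks further down, not the patch itself: a v1 request packs the command code and payload length into the single header dword, while a v2 request puts the MC_CMD_V2_EXTN escape value in the v1 code field and carries the real command and length in a second dword with wider fields.

	efx_dword_t hdr[2];
	size_t hdr_len;

	if (efx->type->mcdi_max_ver == 1) {
		/* MCDI v1: command and length fit in the single header dword */
		EFX_POPULATE_DWORD_6(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, cmd,
				     MCDI_HEADER_DATALEN, inlen,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags);
		hdr_len = 4;
	} else {
		/* MCDI v2: escape code in dword 0, wide command/length in dword 1 */
		EFX_POPULATE_DWORD_6(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
				     MCDI_HEADER_DATALEN, 0,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags);
		EFX_POPULATE_DWORD_2(hdr[1],
				     MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
				     MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
		hdr_len = 8;
	}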
Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index fe83c26..e556a5d 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -2404,6 +2404,7 @@ const struct efx_nic_type falcon_a1_nic_type = { .phys_addr_channels = 4, .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH, .offload_features = NETIF_F_IP_CSUM, + .mcdi_max_ver = -1, }; const struct efx_nic_type falcon_b0_nic_type = { @@ -2481,5 +2482,6 @@ const struct efx_nic_type falcon_b0_nic_type = { * channels */ .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH, .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE, + .mcdi_max_ver = -1, }; diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index c6c5830..2c5ee89 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -72,26 +72,44 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf, size_t inlen) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); - efx_dword_t hdr; + efx_dword_t hdr[2]; + size_t hdr_len; u32 xflags, seqno; BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); - BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V1); seqno = mcdi->seqno & SEQ_MASK; xflags = 0; if (mcdi->mode == MCDI_MODE_EVENTS) xflags |= MCDI_HEADER_XFLAGS_EVREQ; - EFX_POPULATE_DWORD_6(hdr, - MCDI_HEADER_RESPONSE, 0, - MCDI_HEADER_RESYNC, 1, - MCDI_HEADER_CODE, cmd, - MCDI_HEADER_DATALEN, inlen, - MCDI_HEADER_SEQ, seqno, - MCDI_HEADER_XFLAGS, xflags); + if (efx->type->mcdi_max_ver == 1) { + /* MCDI v1 */ + EFX_POPULATE_DWORD_6(hdr[0], + MCDI_HEADER_RESPONSE, 0, + MCDI_HEADER_RESYNC, 1, + MCDI_HEADER_CODE, cmd, + MCDI_HEADER_DATALEN, inlen, + MCDI_HEADER_SEQ, seqno, + MCDI_HEADER_XFLAGS, xflags); + hdr_len = 4; + } else { + /* MCDI v2 */ + BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2); + EFX_POPULATE_DWORD_6(hdr[0], + MCDI_HEADER_RESPONSE, 0, + MCDI_HEADER_RESYNC, 1, + MCDI_HEADER_CODE, MC_CMD_V2_EXTN, + MCDI_HEADER_DATALEN, 0, + MCDI_HEADER_SEQ, seqno, + MCDI_HEADER_XFLAGS, xflags); + EFX_POPULATE_DWORD_2(hdr[1], + MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd, + MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen); + hdr_len = 8; + } - efx->type->mcdi_request(efx, &hdr, 4, inbuf, inlen); + efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen); } static void @@ -100,9 +118,8 @@ efx_mcdi_copyout(struct efx_nic *efx, efx_dword_t *outbuf, size_t outlen) struct efx_mcdi_iface *mcdi = efx_mcdi(efx); BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); - BUG_ON(outlen > MCDI_CTL_SDU_LEN_MAX_V1); - efx->type->mcdi_read_response(efx, outbuf, 4, outlen); + efx->type->mcdi_read_response(efx, outbuf, mcdi->resp_hdr_len, outlen); } static int efx_mcdi_errno(unsigned int mcdi_err) @@ -113,17 +130,63 @@ static int efx_mcdi_errno(unsigned int mcdi_err) #define TRANSLATE_ERROR(name) \ case MC_CMD_ERR_ ## name: \ return -name; + TRANSLATE_ERROR(EPERM); TRANSLATE_ERROR(ENOENT); TRANSLATE_ERROR(EINTR); + TRANSLATE_ERROR(EAGAIN); TRANSLATE_ERROR(EACCES); TRANSLATE_ERROR(EBUSY); TRANSLATE_ERROR(EINVAL); TRANSLATE_ERROR(EDEADLK); TRANSLATE_ERROR(ENOSYS); TRANSLATE_ERROR(ETIME); + TRANSLATE_ERROR(EALREADY); + TRANSLATE_ERROR(ENOSPC); #undef TRANSLATE_ERROR + case MC_CMD_ERR_ALLOC_FAIL: + return -ENOBUFS; + case MC_CMD_ERR_MAC_EXIST: + return -EADDRINUSE; default: - return -EIO; + return -EPROTO; + } +} + +static void efx_mcdi_read_response_header(struct efx_nic *efx) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + unsigned int respseq, respcmd, error; 
+ efx_dword_t hdr; + + efx->type->mcdi_read_response(efx, &hdr, 0, 4); + respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ); + respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE); + error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR); + + if (respcmd != MC_CMD_V2_EXTN) { + mcdi->resp_hdr_len = 4; + mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN); + } else { + efx->type->mcdi_read_response(efx, &hdr, 4, 4); + mcdi->resp_hdr_len = 8; + mcdi->resp_data_len = + EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN); + } + + if (error && mcdi->resp_data_len == 0) { + netif_err(efx, hw, efx->net_dev, "MC rebooted\n"); + mcdi->resprc = -EIO; + } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) { + netif_err(efx, hw, efx->net_dev, + "MC response mismatch tx seq 0x%x rx seq 0x%x\n", + respseq, mcdi->seqno); + mcdi->resprc = -EIO; + } else if (error) { + efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4); + mcdi->resprc = + efx_mcdi_errno(EFX_DWORD_FIELD(hdr, EFX_DWORD_0)); + } else { + mcdi->resprc = 0; } } @@ -131,15 +194,17 @@ static int efx_mcdi_poll(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); unsigned long time, finish; - unsigned int respseq, respcmd, error; unsigned int spins; - efx_dword_t reg; int rc; /* Check for a reboot atomically with respect to efx_mcdi_copyout() */ rc = efx_mcdi_poll_reboot(efx); - if (rc) - goto out; + if (rc) { + mcdi->resprc = rc; + mcdi->resp_hdr_len = 0; + mcdi->resp_data_len = 0; + return 0; + } /* Poll for completion. Poll quickly (once a us) for the 1st jiffy, * because generally mcdi responses are fast. After that, back off @@ -166,30 +231,7 @@ static int efx_mcdi_poll(struct efx_nic *efx) return -ETIMEDOUT; } - efx->type->mcdi_read_response(efx, ®, 0, 4); - mcdi->resplen = EFX_DWORD_FIELD(reg, MCDI_HEADER_DATALEN); - respseq = EFX_DWORD_FIELD(reg, MCDI_HEADER_SEQ); - respcmd = EFX_DWORD_FIELD(reg, MCDI_HEADER_CODE); - error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR); - - if (error && mcdi->resplen == 0) { - netif_err(efx, hw, efx->net_dev, "MC rebooted\n"); - rc = -EIO; - } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) { - netif_err(efx, hw, efx->net_dev, - "MC response mismatch tx seq 0x%x rx seq 0x%x\n", - respseq, mcdi->seqno); - rc = -EIO; - } else if (error) { - efx->type->mcdi_read_response(efx, ®, 4, 4); - rc = efx_mcdi_errno(EFX_DWORD_FIELD(reg, EFX_DWORD_0)); - } else - rc = 0; - -out: - mcdi->resprc = rc; - if (rc) - mcdi->resplen = 0; + efx_mcdi_read_response_header(efx); /* Return rc=0 like wait_event_timeout() */ return 0; @@ -293,8 +335,14 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno, "MC response mismatch tx seq 0x%x rx " "seq 0x%x\n", seqno, mcdi->seqno); } else { - mcdi->resprc = efx_mcdi_errno(mcdi_err); - mcdi->resplen = datalen; + if (efx->type->mcdi_max_ver >= 2) { + /* MCDI v2 responses don't fit in an event */ + efx_mcdi_read_response_header(efx); + } else { + mcdi->resprc = efx_mcdi_errno(mcdi_err); + mcdi->resp_hdr_len = 4; + mcdi->resp_data_len = datalen; + } wake = true; } @@ -310,16 +358,30 @@ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, efx_dword_t *outbuf, size_t outlen, size_t *outlen_actual) { - efx_mcdi_rpc_start(efx, cmd, inbuf, inlen); + int rc; + + rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen); + if (rc) + return rc; return efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen, outlen_actual); } -void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, - const efx_dword_t *inbuf, size_t inlen) +int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, + 
const efx_dword_t *inbuf, size_t inlen) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + if (efx->type->mcdi_max_ver < 0 || + (efx->type->mcdi_max_ver < 2 && + cmd > MC_CMD_CMD_SPACE_ESCAPE_7)) + return -EINVAL; + + if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 || + (efx->type->mcdi_max_ver < 2 && + inlen > MCDI_CTL_SDU_LEN_MAX_V1)) + return -EMSGSIZE; + efx_mcdi_acquire(mcdi); /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */ @@ -328,6 +390,7 @@ void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, spin_unlock_bh(&mcdi->iface_lock); efx_mcdi_copyin(efx, cmd, inbuf, inlen); + return 0; } int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, @@ -364,14 +427,14 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, * acquiring the iface_lock. */ spin_lock_bh(&mcdi->iface_lock); rc = mcdi->resprc; - resplen = mcdi->resplen; + resplen = mcdi->resp_data_len; spin_unlock_bh(&mcdi->iface_lock); BUG_ON(rc > 0); if (rc == 0) { efx_mcdi_copyout(efx, outbuf, - min(outlen, mcdi->resplen)); + min(outlen, mcdi->resp_data_len)); if (outlen_actual != NULL) *outlen_actual = resplen; } else if (cmd == MC_CMD_REBOOT && rc == -EIO) @@ -467,7 +530,8 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc) if (efx_mcdi_complete(mcdi)) { if (mcdi->mode == MCDI_MODE_EVENTS) { mcdi->resprc = rc; - mcdi->resplen = 0; + mcdi->resp_hdr_len = 0; + mcdi->resp_data_len = 0; ++mcdi->credits; } } else { diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 9b536d0..0cb0d5a 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -43,7 +43,8 @@ enum efx_mcdi_mode { * @credits: Number of spurious MCDI completion events allowed before we * trigger a fatal error. Protected by @lock * @resprc: Response error/success code (Linux numbering) - * @resplen: Returned payload length + * @resp_hdr_len: Response header length + * @resp_data_len: Response data (SDU or error) length */ struct efx_mcdi_iface { atomic_t state; @@ -53,7 +54,8 @@ struct efx_mcdi_iface { unsigned int credits; unsigned int seqno; int resprc; - size_t resplen; + size_t resp_hdr_len; + size_t resp_data_len; }; struct efx_mcdi_mon { @@ -93,8 +95,8 @@ extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, efx_dword_t *outbuf, size_t outlen, size_t *outlen_actual); -extern void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, - const efx_dword_t *inbuf, size_t inlen); +extern int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, + const efx_dword_t *inbuf, size_t inlen); extern int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, efx_dword_t *outbuf, size_t outlen, size_t *outlen_actual); diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 7283cc1..e1deec4 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -1029,6 +1029,7 @@ static inline unsigned int efx_port_num(struct efx_nic *efx) * @timer_period_max: Maximum period of interrupt timer (in ticks) * @offload_features: net_device feature flags for protocol offload * features implemented in hardware + * @mcdi_max_ver: Maximum MCDI version supported */ struct efx_nic_type { int (*probe)(struct efx_nic *efx); @@ -1105,6 +1106,7 @@ struct efx_nic_type { unsigned int phys_addr_channels; unsigned int timer_period_max; netdev_features_t offload_features; + int mcdi_max_ver; }; /************************************************************************** diff --git 
a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 59e09e1..b386901 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -538,8 +538,9 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings) /* Clear flag that signals MC ready */ ACCESS_ONCE(*start) = 0; - efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf, - MC_CMD_PTP_IN_SYNCHRONIZE_LEN); + rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf, + MC_CMD_PTP_IN_SYNCHRONIZE_LEN); + EFX_BUG_ON_PARANOID(rc); /* Wait for start from MCDI (or timeout) */ timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS); diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 6a833d5..fee0d2d 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -747,4 +747,5 @@ const struct efx_nic_type siena_a0_nic_type = { .timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH, .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE), + .mcdi_max_ver = 1, }; -- cgit v0.10.2 From 369327fa65f20118571643d673b90d3700166e2d Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Fri, 26 Oct 2012 17:53:12 +0100 Subject: sfc: Fix race in completion handling When we poll for MCDI request completion, we don't hold the interface lock while setting the response fields in struct efx_mcdi_iface. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 2c5ee89..1c8bf81 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -112,16 +112,6 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen); } -static void -efx_mcdi_copyout(struct efx_nic *efx, efx_dword_t *outbuf, size_t outlen) -{ - struct efx_mcdi_iface *mcdi = efx_mcdi(efx); - - BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); - - efx->type->mcdi_read_response(efx, outbuf, mcdi->resp_hdr_len, outlen); -} - static int efx_mcdi_errno(unsigned int mcdi_err) { switch (mcdi_err) { @@ -200,9 +190,11 @@ static int efx_mcdi_poll(struct efx_nic *efx) /* Check for a reboot atomically with respect to efx_mcdi_copyout() */ rc = efx_mcdi_poll_reboot(efx); if (rc) { + spin_lock_bh(&mcdi->iface_lock); mcdi->resprc = rc; mcdi->resp_hdr_len = 0; mcdi->resp_data_len = 0; + spin_unlock_bh(&mcdi->iface_lock); return 0; } @@ -231,7 +223,9 @@ static int efx_mcdi_poll(struct efx_nic *efx) return -ETIMEDOUT; } + spin_lock_bh(&mcdi->iface_lock); efx_mcdi_read_response_header(efx); + spin_unlock_bh(&mcdi->iface_lock); /* Return rc=0 like wait_event_timeout() */ return 0; @@ -419,7 +413,7 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, "MC command 0x%x inlen %d mode %d timed out\n", cmd, (int)inlen, mcdi->mode); } else { - size_t resplen; + size_t hdr_len, data_len; /* At the very least we need a memory barrier here to ensure * we pick up changes from efx_mcdi_ev_cpl(). Protect against @@ -427,16 +421,17 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, * acquiring the iface_lock. 
*/ spin_lock_bh(&mcdi->iface_lock); rc = mcdi->resprc; - resplen = mcdi->resp_data_len; + hdr_len = mcdi->resp_hdr_len; + data_len = mcdi->resp_data_len; spin_unlock_bh(&mcdi->iface_lock); BUG_ON(rc > 0); if (rc == 0) { - efx_mcdi_copyout(efx, outbuf, - min(outlen, mcdi->resp_data_len)); + efx->type->mcdi_read_response(efx, outbuf, hdr_len, + min(outlen, data_len)); if (outlen_actual != NULL) - *outlen_actual = resplen; + *outlen_actual = data_len; } else if (cmd == MC_CMD_REBOOT && rc == -EIO) ; /* Don't reset if MC_CMD_REBOOT returns EIO */ else if (rc == -EIO || rc == -EINTR) { -- cgit v0.10.2 From f76fe120d81c96fa2a17ae41f0647c963dbb43cd Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Fri, 26 Oct 2012 17:53:11 +0100 Subject: sfc: Update and improve kernel-doc for efx_mcdi_state & efx_mcdi_iface Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 0cb0d5a..69586e0 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -11,14 +11,14 @@ #define EFX_MCDI_H /** - * enum efx_mcdi_state + * enum efx_mcdi_state - MCDI request handling state * @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the - * mcdi_lock then they are able to move to MCDI_STATE_RUNNING + * mcdi @iface_lock then they are able to move to %MCDI_STATE_RUNNING * @MCDI_STATE_RUNNING: There is an MCDI request pending. Only the thread that * moved into this state is allowed to move out of it. * @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread * has not yet consumed the result. For all other threads, equivalent to - * MCDI_STATE_RUNNING. + * %MCDI_STATE_RUNNING. */ enum efx_mcdi_state { MCDI_STATE_QUIESCENT, @@ -32,25 +32,23 @@ enum efx_mcdi_mode { }; /** - * struct efx_mcdi_iface - * @state: Interface state. Waited for by mcdi_wq. - * @wq: Wait queue for threads waiting for state != STATE_RUNNING - * @iface_lock: Protects @credits, @seqno, @resprc, @resplen + * struct efx_mcdi_iface - MCDI protocol context + * @state: Request handling state. Waited for by @wq. * @mode: Poll for mcdi completion, or wait for an mcdi_event. - * Serialised by @lock + * @wq: Wait queue for threads waiting for @state != %MCDI_STATE_RUNNING + * @iface_lock: Serialises access to all the following fields * @seqno: The next sequence number to use for mcdi requests. - * Serialised by @lock * @credits: Number of spurious MCDI completion events allowed before we - * trigger a fatal error. Protected by @lock + * trigger a fatal error * @resprc: Response error/success code (Linux numbering) * @resp_hdr_len: Response header length * @resp_data_len: Response data (SDU or error) length */ struct efx_mcdi_iface { atomic_t state; + enum efx_mcdi_mode mode; wait_queue_head_t wq; spinlock_t iface_lock; - enum efx_mcdi_mode mode; unsigned int credits; unsigned int seqno; int resprc; -- cgit v0.10.2 From b105798fa5597f248256fa03ec25c2fbef922f92 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Wed, 19 Sep 2012 00:56:47 +0100 Subject: sfc: Get rid of per-NIC-type phys_addr_channels and mem_map_size EF10 functions don't have a fixed BAR size, and the minimum is not large enough for all the queues we might want to allocate. We have to find out the BAR size at run-time, and therefore phys_addr_channels and mem_map_size cannot be defined per-NIC-type. Change efx_nic_type::mem_map_size to a function pointer which is called to find the wanted memory map size (before probe). 
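Condensed from the hunks below, the new hook looks like this. The Falcon B0 body is taken from the patch; the idea is that an EF10 implementation (not shown here) would compute the size from the device at run-time instead of returning a constant:

	struct efx_nic_type {
		unsigned int (*mem_map_size)(struct efx_nic *efx);
		/* ... */
	};

	static unsigned int falcon_b0_mem_map_size(struct efx_nic *efx)
	{
		/* Map everything up to and including the RSS indirection table.
		 * The PCI core takes care of mapping the MSI-X tables.
		 */
		return FR_BZ_RX_INDIRECTION_TBL +
		       FR_BZ_RX_INDIRECTION_TBL_STEP * FR_BZ_RX_INDIRECTION_TBL_ROWS;
	}

	/* efx_init_io() then asks the NIC type rather than using a constant: */
	unsigned int mem_map_size = efx->type->mem_map_size(efx);

	efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);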
Replace efx_nic_type::phys_addr_channels with efx_nic::max_channels, to be initialised by the probe function. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 872b9f5..3977926 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1084,6 +1084,7 @@ static int efx_init_io(struct efx_nic *efx) { struct pci_dev *pci_dev = efx->pci_dev; dma_addr_t dma_mask = efx->type->max_dma_mask; + unsigned int mem_map_size = efx->type->mem_map_size(efx); int rc; netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); @@ -1136,20 +1137,18 @@ static int efx_init_io(struct efx_nic *efx) rc = -EIO; goto fail3; } - efx->membase = ioremap_nocache(efx->membase_phys, - efx->type->mem_map_size); + efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size); if (!efx->membase) { netif_err(efx, probe, efx->net_dev, "could not map memory BAR at %llx+%x\n", - (unsigned long long)efx->membase_phys, - efx->type->mem_map_size); + (unsigned long long)efx->membase_phys, mem_map_size); rc = -ENOMEM; goto fail4; } netif_dbg(efx, probe, efx->net_dev, "memory BAR at %llx+%x (virtual %p)\n", - (unsigned long long)efx->membase_phys, - efx->type->mem_map_size, efx->membase); + (unsigned long long)efx->membase_phys, mem_map_size, + efx->membase); return 0; @@ -1228,8 +1227,6 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx) */ static int efx_probe_interrupts(struct efx_nic *efx) { - unsigned int max_channels = - min(efx->type->phys_addr_channels, EFX_MAX_CHANNELS); unsigned int extra_channels = 0; unsigned int i, j; int rc; @@ -1246,7 +1243,7 @@ static int efx_probe_interrupts(struct efx_nic *efx) if (separate_tx_channels) n_channels *= 2; n_channels += extra_channels; - n_channels = min(n_channels, max_channels); + n_channels = min(n_channels, efx->max_channels); for (i = 0; i < n_channels; i++) xentries[i].entry = i; @@ -2489,8 +2486,6 @@ static int efx_init_struct(struct efx_nic *efx, efx->msi_context[i].index = i; } - EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS); - /* Higher numbered interrupt modes are less capable! */ efx->interrupt_mode = max(efx->type->max_interrupt_mode, interrupt_mode); diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index e556a5d..17bd7dc 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -1970,6 +1970,20 @@ static void falcon_probe_spi_devices(struct efx_nic *efx) large_eeprom_type); } +static unsigned int falcon_a1_mem_map_size(struct efx_nic *efx) +{ + return 0x20000; +} + +static unsigned int falcon_b0_mem_map_size(struct efx_nic *efx) +{ + /* Map everything up to and including the RSS indirection table. + * The PCI core takes care of mapping the MSI-X tables. + */ + return FR_BZ_RX_INDIRECTION_TBL + + FR_BZ_RX_INDIRECTION_TBL_STEP * FR_BZ_RX_INDIRECTION_TBL_ROWS; +} + static int falcon_probe_nic(struct efx_nic *efx) { struct falcon_nic_data *nic_data; @@ -2060,6 +2074,8 @@ static int falcon_probe_nic(struct efx_nic *efx) goto fail5; } + efx->max_channels = (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ? 
4 : + EFX_MAX_CHANNELS); efx->timer_quantum_ns = 4968; /* 621 cycles */ /* Initialise I2C adapter */ @@ -2339,6 +2355,7 @@ static int falcon_set_wol(struct efx_nic *efx, u32 type) */ const struct efx_nic_type falcon_a1_nic_type = { + .mem_map_size = falcon_a1_mem_map_size, .probe = falcon_probe_nic, .remove = falcon_remove_nic, .init = falcon_init_nic, @@ -2391,7 +2408,6 @@ const struct efx_nic_type falcon_a1_nic_type = { .ev_test_generate = efx_farch_ev_test_generate, .revision = EFX_REV_FALCON_A1, - .mem_map_size = 0x20000, .txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER, .rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER, .buf_tbl_base = FR_AA_BUF_FULL_TBL_KER, @@ -2401,13 +2417,13 @@ const struct efx_nic_type falcon_a1_nic_type = { .rx_buffer_padding = 0x24, .can_rx_scatter = false, .max_interrupt_mode = EFX_INT_MODE_MSI, - .phys_addr_channels = 4, .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH, .offload_features = NETIF_F_IP_CSUM, .mcdi_max_ver = -1, }; const struct efx_nic_type falcon_b0_nic_type = { + .mem_map_size = falcon_b0_mem_map_size, .probe = falcon_probe_nic, .remove = falcon_remove_nic, .init = falcon_init_nic, @@ -2461,12 +2477,6 @@ const struct efx_nic_type falcon_b0_nic_type = { .ev_test_generate = efx_farch_ev_test_generate, .revision = EFX_REV_FALCON_B0, - /* Map everything up to and including the RSS indirection - * table. Don't map MSI-X table, MSI-X PBA since Linux - * requires that they not be mapped. */ - .mem_map_size = (FR_BZ_RX_INDIRECTION_TBL + - FR_BZ_RX_INDIRECTION_TBL_STEP * - FR_BZ_RX_INDIRECTION_TBL_ROWS), .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, .buf_tbl_base = FR_BZ_BUF_FULL_TBL, @@ -2477,9 +2487,6 @@ const struct efx_nic_type falcon_b0_nic_type = { .rx_buffer_padding = 0, .can_rx_scatter = true, .max_interrupt_mode = EFX_INT_MODE_MSIX, - .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy - * interrupt handler only supports 32 - * channels */ .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH, .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE, .mcdi_max_ver = -1, diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index e1deec4..7aa0e0f 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -832,6 +832,8 @@ struct efx_nic { unsigned rx_dc_base; unsigned sram_lim_qw; unsigned next_buffer_table; + + unsigned int max_channels; unsigned n_channels; unsigned n_rx_channels; unsigned rss_spread; @@ -939,6 +941,7 @@ static inline unsigned int efx_port_num(struct efx_nic *efx) /** * struct efx_nic_type - Efx device type definition + * @mem_map_size: Get memory BAR mapped size * @probe: Probe the controller * @remove: Free resources allocated by probe() * @init: Initialise the controller @@ -1012,7 +1015,6 @@ static inline unsigned int efx_port_num(struct efx_nic *efx) * @ev_read_ack: Acknowledge read events on a queue, rearming its IRQ * @ev_test_generate: Generate a test event * @revision: Hardware architecture revision - * @mem_map_size: Memory BAR mapped size * @txd_ptr_tbl_base: TX descriptor ring base address * @rxd_ptr_tbl_base: RX descriptor ring base address * @buf_tbl_base: Buffer table base address @@ -1024,14 +1026,13 @@ static inline unsigned int efx_port_num(struct efx_nic *efx) * @can_rx_scatter: NIC is able to scatter packet to multiple buffers * @max_interrupt_mode: Highest capability interrupt mode supported * from &enum efx_init_mode. 
- * @phys_addr_channels: Number of channels with physically addressed - * descriptors * @timer_period_max: Maximum period of interrupt timer (in ticks) * @offload_features: net_device feature flags for protocol offload * features implemented in hardware * @mcdi_max_ver: Maximum MCDI version supported */ struct efx_nic_type { + unsigned int (*mem_map_size)(struct efx_nic *efx); int (*probe)(struct efx_nic *efx); void (*remove)(struct efx_nic *efx); int (*init)(struct efx_nic *efx); @@ -1092,7 +1093,6 @@ struct efx_nic_type { void (*ev_test_generate)(struct efx_channel *channel); int revision; - unsigned int mem_map_size; unsigned int txd_ptr_tbl_base; unsigned int rxd_ptr_tbl_base; unsigned int buf_tbl_base; @@ -1103,7 +1103,6 @@ struct efx_nic_type { unsigned int rx_buffer_padding; bool can_rx_scatter; unsigned int max_interrupt_mode; - unsigned int phys_addr_channels; unsigned int timer_period_max; netdev_features_t offload_features; int mcdi_max_ver; diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index fee0d2d..23e5731 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -187,6 +187,12 @@ static void siena_dimension_resources(struct efx_nic *efx) efx_farch_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2); } +static unsigned int siena_mem_map_size(struct efx_nic *efx) +{ + return FR_CZ_MC_TREG_SMEM + + FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS; +} + static int siena_probe_nic(struct efx_nic *efx) { struct siena_nic_data *nic_data; @@ -207,6 +213,8 @@ static int siena_probe_nic(struct efx_nic *efx) goto fail1; } + efx->max_channels = EFX_MAX_CHANNELS; + efx_reado(efx, ®, FR_AZ_CS_DEBUG); efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; @@ -670,6 +678,7 @@ static int siena_mcdi_poll_reboot(struct efx_nic *efx) */ const struct efx_nic_type siena_a0_nic_type = { + .mem_map_size = siena_mem_map_size, .probe = siena_probe_nic, .remove = siena_remove_nic, .init = siena_init_nic, @@ -729,8 +738,6 @@ const struct efx_nic_type siena_a0_nic_type = { .ev_test_generate = efx_farch_ev_test_generate, .revision = EFX_REV_SIENA_A0, - .mem_map_size = (FR_CZ_MC_TREG_SMEM + - FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS), .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, .buf_tbl_base = FR_BZ_BUF_FULL_TBL, @@ -741,9 +748,6 @@ const struct efx_nic_type siena_a0_nic_type = { .rx_buffer_padding = 0, .can_rx_scatter = true, .max_interrupt_mode = EFX_INT_MODE_MSIX, - .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy - * interrupt handler only supports 32 - * channels */ .timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH, .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE), -- cgit v0.10.2 From ab3b82506046c0038e46bb793591b9a758835645 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Fri, 5 Oct 2012 19:31:02 +0100 Subject: sfc: EFX_WORKAROUND_ALWAYS is really specific to Falcon-architecture The workarounds that currently use EFX_WORKAROUND_ALWAYS are in Falcon-specific or Falcon-arch-specific code, so get rid of the conditions altogether. Add/move comments as appropriate. 
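The shape of the simplification, condensed from the falcon_ack_status_intr() hunk below; this is illustrative only, and the remaining hunks apply the same pattern elsewhere:

	/* Before: the test is gated on a macro that always evaluates to 1 */
	#define EFX_WORKAROUND_ALWAYS(efx) 1
	#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS

		if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up)
			return;

	/* After: the always-true condition is dropped */
		/* We expect xgmii faults if the wireside link is down */
		if (!efx->link_state.up)
			return;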
Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index 17bd7dc..0fd8a88 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -686,7 +686,7 @@ static void falcon_ack_status_intr(struct efx_nic *efx) return; /* We expect xgmii faults if the wireside link is down */ - if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up) + if (!efx->link_state.up) return; /* We can only use this interrupt to signal the negative edge of @@ -795,29 +795,22 @@ static void falcon_reconfigure_xgxs_core(struct efx_nic *efx) bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS); bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI); bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII); + bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback; /* XGXS block is flaky and will need to be reset if moving * into our out of XGMII, XGXS or XAUI loopbacks. */ - if (EFX_WORKAROUND_5147(efx)) { - bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback; - bool reset_xgxs; - - efx_reado(efx, ®, FR_AB_XX_CORE_STAT); - old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN); - old_xgmii_loopback = - EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN); - - efx_reado(efx, ®, FR_AB_XX_SD_CTL); - old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA); + efx_reado(efx, ®, FR_AB_XX_CORE_STAT); + old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN); + old_xgmii_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN); - /* The PHY driver may have turned XAUI off */ - reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) || - (xaui_loopback != old_xaui_loopback) || - (xgmii_loopback != old_xgmii_loopback)); + efx_reado(efx, ®, FR_AB_XX_SD_CTL); + old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA); - if (reset_xgxs) - falcon_reset_xaui(efx); - } + /* The PHY driver may have turned XAUI off */ + if ((xgxs_loopback != old_xgxs_loopback) || + (xaui_loopback != old_xaui_loopback) || + (xgmii_loopback != old_xgmii_loopback)) + falcon_reset_xaui(efx); efx_reado(efx, ®, FR_AB_XX_CORE_STAT); EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG, @@ -946,8 +939,8 @@ static void falcon_poll_xmac(struct efx_nic *efx) { struct falcon_nic_data *nic_data = efx->nic_data; - if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up || - !nic_data->xmac_poll_required) + /* We expect xgmii faults if the wireside link is down */ + if (!efx->link_state.up || !nic_data->xmac_poll_required) return; nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1); diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index c3d07c5..7f50882 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -830,8 +830,7 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) netif_tx_lock(efx->net_dev); efx_farch_notify_tx_desc(tx_queue); netif_tx_unlock(efx->net_dev); - } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) && - EFX_WORKAROUND_10727(efx)) { + } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) { efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); } else { netif_err(efx, tx_err, efx->net_dev, @@ -1531,8 +1530,7 @@ irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id) } if (queues != 0) { - if (EFX_WORKAROUND_15783(efx)) - efx->irq_zero_count = 0; + efx->irq_zero_count = 0; /* Schedule processing of any interrupting queues */ if (likely(soft_enabled)) { @@ -1544,9 +1542,11 @@ irqreturn_t efx_farch_legacy_interrupt(int irq, void 
*dev_id) } result = IRQ_HANDLED; - } else if (EFX_WORKAROUND_15783(efx)) { + } else { efx_qword_t *event; + /* Legacy ISR read can return zero once (SF bug 15783) */ + /* We can't return IRQ_HANDLED more than once on seeing ISR=0 * because this might be a shared interrupt. */ if (efx->irq_zero_count++ == 0) diff --git a/drivers/net/ethernet/sfc/workarounds.h b/drivers/net/ethernet/sfc/workarounds.h index dff565a..7e5be1d 100644 --- a/drivers/net/ethernet/sfc/workarounds.h +++ b/drivers/net/ethernet/sfc/workarounds.h @@ -15,25 +15,15 @@ * Bug numbers are from Solarflare's Bugzilla. */ -#define EFX_WORKAROUND_ALWAYS(efx) 1 #define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) #define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0) #define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0) #define EFX_WORKAROUND_10G(efx) 1 -/* XAUI resets if link not detected */ -#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS -/* RX PCIe double split performance issue */ -#define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS /* Bit-bashed I2C reads cause performance drop */ #define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G -/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor - * or a PCIe error (bug 11028) */ -#define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS /* Truncated IPv4 packets can confuse the TX packet parser */ #define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB -/* Legacy ISR read can return zero once */ -#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS /* Legacy interrupt storm when interrupt fifo fills */ #define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA -- cgit v0.10.2 From be3fc09cdd4aca438613b1d50d00afa77e9b2046 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Mon, 8 Oct 2012 18:21:51 +0100 Subject: sfc: Do not assume efx_nic_type::ev_fini is idempotent efx_fini_eventq() needs to be idempotent but EF10 firmware is picky about queue states. 
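The guard-flag approach used by this change can be sketched in isolation as follows; the example_* names are illustrative only, while the real patch keeps the flag as eventq_init in struct efx_channel.

#include <stdbool.h>

struct example_queue {
	bool initialised;
};

static void example_init_eventq(struct example_queue *q)
{
	/* ... program the event queue in hardware ... */
	q->initialised = true;
}

static void example_fini_eventq(struct example_queue *q)
{
	/* Teardown becomes idempotent: a second call, or a call before
	 * init ever ran, is a no-op instead of asking the firmware to
	 * finalise a queue that is not in the expected state. */
	if (!q->initialised)
		return;
	/* ... tell the hardware/firmware to tear the queue down ... */
	q->initialised = false;
}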
Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 3977926..49d06ca 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -339,6 +339,7 @@ static void efx_init_eventq(struct efx_channel *channel) channel->eventq_read_ptr = 0; efx_nic_init_eventq(channel); + channel->eventq_init = true; } /* Enable event queue processing and NAPI */ @@ -367,10 +368,14 @@ static void efx_stop_eventq(struct efx_channel *channel) static void efx_fini_eventq(struct efx_channel *channel) { + if (!channel->eventq_init) + return; + netif_dbg(channel->efx, drv, channel->efx->net_dev, "chan %d fini event queue\n", channel->channel); efx_nic_fini_eventq(channel); + channel->eventq_init = false; } static void efx_remove_eventq(struct efx_channel *channel) diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 7aa0e0f..694c572 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -356,6 +356,7 @@ enum efx_rx_alloc_method { * @efx: Associated Efx NIC * @channel: Channel instance number * @type: Channel type definition + * @eventq_init: Event queue initialised flag * @enabled: Channel enabled indicator * @irq: IRQ number (MSI and MSI-X only) * @irq_moderation: IRQ moderation value (in hardware ticks) @@ -387,6 +388,7 @@ struct efx_channel { struct efx_nic *efx; int channel; const struct efx_channel_type *type; + bool eventq_init; bool enabled; int irq; unsigned int irq_moderation; -- cgit v0.10.2 From fb7589a162162223e6bb6422dde3fb1ce07d9a78 Mon Sep 17 00:00:00 2001 From: Pavel Emelyanov Date: Wed, 21 Aug 2013 14:31:38 +0400 Subject: tun: Add ability to create tun device with given index Tun devices cannot be created with the ifindex the user wants, but this is required by the checkpoint-restore project. A long time ago such an ability was implemented for the rtnl_ops-based interface for creating links (9c7dafbf net: Allow to create links with given ifindex), but the only API for creating and managing tuntap devices is ioctl-based and keeps evolving by adding new ioctls (cde8b15f tuntap: add ioctl to attach or detach a file form tuntap device). Following that trend, this patch adds a new ioctl that sets the ifindex for the device that _will_ be created by the subsequent TUNSETIFF ioctl (see the example below). So those who want a tuntap device with ifindex N should open the tun device, call ioctl(fd, TUNSETIFINDEX, &N), then call TUNSETIFF. If index N is busy, register_netdev will detect this and the ioctl will fail with -EBUSY. If TUNSETIFINDEX is not called, the ifindex is generated as before. Signed-off-by: Pavel Emelyanov Signed-off-by: David S. Miller
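A minimal user-space sketch of the call sequence described above. The interface name "tun0" and index 42 are arbitrary, TUNSETIFINDEX is the uapi ioctl added by this patch, the program needs CAP_NET_ADMIN, and error handling is reduced to perror() for brevity.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

int main(void)
{
	unsigned int ifindex = 42;	/* hypothetical desired index */
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0) {
		perror("open(/dev/net/tun)");
		return 1;
	}
	/* Must be issued before TUNSETIFF, while no device is attached yet. */
	if (ioctl(fd, TUNSETIFINDEX, &ifindex) < 0) {
		perror("TUNSETIFINDEX");	/* e.g. kernel without this patch */
		return 1;
	}
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "tun0", IFNAMSIZ - 1);
	ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		perror("TUNSETIFF");		/* EBUSY if index 42 is taken */
		return 1;
	}
	/* The device exists with ifindex 42 for as long as fd stays open
	 * (or longer, if TUNSETPERSIST is set). */
	close(fd);
	return 0;
}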
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 7ed13cc..4b65fbc 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -138,7 +138,10 @@ struct tun_file { struct fasync_struct *fasync; /* only used for fasnyc */ unsigned int flags; - u16 queue_index; + union { + u16 queue_index; + unsigned int ifindex; + }; struct list_head next; struct tun_struct *detached; }; @@ -1601,6 +1604,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) dev_net_set(dev, net); dev->rtnl_link_ops = &tun_link_ops; + dev->ifindex = tfile->ifindex; tun = netdev_priv(dev); tun->dev = dev; @@ -1817,6 +1821,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, kgid_t group; int sndbuf; int vnet_hdr_sz; + unsigned int ifindex; int ret; if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) { @@ -1851,6 +1856,19 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, ret = -EFAULT; goto unlock; } + if (cmd == TUNSETIFINDEX) { + ret = -EPERM; + if (tun) + goto unlock; + + ret = -EFAULT; + if (copy_from_user(&ifindex, argp, sizeof(ifindex))) + goto unlock; + + ret = 0; + tfile->ifindex = ifindex; + goto unlock; + } ret = -EBADFD; if (!tun) @@ -2099,6 +2117,7 @@ static int tun_chr_open(struct inode *inode, struct file * file) rcu_assign_pointer(tfile->tun, NULL); tfile->net = get_net(current->nsproxy->net_ns); tfile->flags = 0; + tfile->ifindex = 0; rcu_assign_pointer(tfile->socket.wq, &tfile->wq); init_waitqueue_head(&tfile->wq.wait); diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h index 1870ee2..c58d023 100644 --- a/include/uapi/linux/if_tun.h +++ b/include/uapi/linux/if_tun.h @@ -56,6 +56,7 @@ #define TUNGETVNETHDRSZ _IOR('T', 215, int) #define TUNSETVNETHDRSZ _IOW('T', 216, int) #define TUNSETQUEUE _IOW('T', 217, int) +#define TUNSETIFINDEX _IOW('T', 218, unsigned int) /* TUNSETIFF ifr flags */ #define IFF_TUN 0x0001 -- cgit v0.10.2 From 3d407a80b62fc5891b41fe9045f23aba4437fc33 Mon Sep 17 00:00:00 2001 From: Pavel Emelyanov Date: Wed, 21 Aug 2013 14:32:00 +0400 Subject: tun: Report whether the queue is attached or not Multiqueue tun devices allow attaching to and detaching from their queues while keeping the interface itself set on the file. Knowing this is critical for the checkpoint part of the criu project. Signed-off-by: Pavel Emelyanov Signed-off-by: David S. Miller diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 4b65fbc..db43a24 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1881,6 +1881,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, case TUNGETIFF: tun_get_iff(current->nsproxy->net_ns, tun, &ifr); + if (tfile->detached) + ifr.ifr_flags |= IFF_DETACH_QUEUE; + if (copy_to_user(argp, &ifr, ifreq_len)) ret = -EFAULT; break; -- cgit v0.10.2 From 849c9b6f93cc4cb5eb59301b6380a7a81b43f414 Mon Sep 17 00:00:00 2001 From: Pavel Emelyanov Date: Wed, 21 Aug 2013 14:32:21 +0400 Subject: tun: Allow to skip filter on attach There's a small problem with sk-filters on tun devices. Consider an application doing this sequence of steps: fd = open("/dev/net/tun"); ioctl(fd, TUNSETIFF, { .ifr_name = "tun0" }); ioctl(fd, TUNATTACHFILTER, &my_filter); ioctl(fd, TUNSETPERSIST, 1); close(fd); At that point tun0 will remain in the system and will keep in mind that there should be a socket filter at address '&my_filter'.
If after that we do fd = open("/dev/net/tun"); ioctl(fd, TUNSETIFF, { .ifr_name = "tun0" }); we will most likely receive an -EFAULT error, since tun_attach() would try to connect the filter back. But (!) if we do provide a filter at address &my_filter, then tun0 will be created and the "new" filter will be attached, yet the application may not know about that. This may create certain problems for anyone using tun devices, but it is a critical problem for c/r: if we meet a persistent tun device with a filter in mind, we will not be able to attach to it to dump its state (flags, owner, address, vnethdr size, etc.). The proposal is to allow attaching to a tun device (with TUNSETIFF) without attaching the filter to the tun file's socket. After this attach, the app may e.g. clean the device by dropping the filter it doesn't want to have, or (in the case of c/r) get information about the device with tun ioctls. Signed-off-by: Pavel Emelyanov Signed-off-by: David S. Miller diff --git a/drivers/net/tun.c b/drivers/net/tun.c index db43a24..6acbdbc 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -501,7 +501,7 @@ static void tun_detach_all(struct net_device *dev) module_put(THIS_MODULE); } -static int tun_attach(struct tun_struct *tun, struct file *file) +static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filter) { struct tun_file *tfile = file->private_data; int err; @@ -526,7 +526,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file) err = 0; /* Re-attach the filter to presist device */ - if (tun->filter_attached == true) { + if (!skip_filter && (tun->filter_attached == true)) { err = sk_attach_filter(&tun->fprog, tfile->socket.sk); if (!err) goto out; @@ -1557,7 +1557,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) if (err < 0) return err; - err = tun_attach(tun, file); + err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER); if (err < 0) return err; @@ -1631,7 +1631,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) dev->vlan_features = dev->features; INIT_LIST_HEAD(&tun->disabled); - err = tun_attach(tun, file); + err = tun_attach(tun, file, false); if (err < 0) goto err_free_dev; @@ -1795,7 +1795,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr) ret = security_tun_dev_attach_queue(tun->security); if (ret < 0) goto unlock; - ret = tun_attach(tun, file); + ret = tun_attach(tun, file, false); } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { tun = rtnl_dereference(tfile->tun); if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached) @@ -1883,6 +1883,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, if (tfile->detached) ifr.ifr_flags |= IFF_DETACH_QUEUE; + if (!tfile->socket.sk->sk_filter) + ifr.ifr_flags |= IFF_NOFILTER; if (copy_to_user(argp, &ifr, ifreq_len)) ret = -EFAULT; diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h index c58d023..cc127b2 100644 --- a/include/uapi/linux/if_tun.h +++ b/include/uapi/linux/if_tun.h @@ -71,6 +71,7 @@ #define IFF_DETACH_QUEUE 0x0400 /* read-only flag */ #define IFF_PERSIST 0x0800 +#define IFF_NOFILTER 0x1000 /* Socket options */ #define TUN_TX_TIMESTAMP 1 -- cgit v0.10.2 From 76975e9cb4a7c6fe39478a3dc4dd292a5c6c8c74 Mon Sep 17 00:00:00 2001 From: Pavel Emelyanov Date: Wed, 21 Aug 2013 14:32:39 +0400 Subject: tun: Get skfilter layout The only thing we may have from a tun device is the fprog, which contains the number of filter elements and a pointer to the (user-space) memory where the elements are.
The program itself may not be available if the device is persistent and detached. Signed-off-by: Pavel Emelyanov Signed-off-by: David S. Miller diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 6acbdbc..60a1e93 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -2042,6 +2042,16 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, tun_detach_filter(tun, tun->numqueues); break; + case TUNGETFILTER: + ret = -EINVAL; + if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV) + break; + ret = -EFAULT; + if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog))) + break; + ret = 0; + break; + default: ret = -EINVAL; break; diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h index cc127b2..e9502dd 100644 --- a/include/uapi/linux/if_tun.h +++ b/include/uapi/linux/if_tun.h @@ -57,6 +57,7 @@ #define TUNSETVNETHDRSZ _IOW('T', 216, int) #define TUNSETQUEUE _IOW('T', 217, int) #define TUNSETIFINDEX _IOW('T', 218, unsigned int) +#define TUNGETFILTER _IOR('T', 219, struct sock_fprog) /* TUNSETIFF ifr flags */ #define IFF_TUN 0x0001 -- cgit v0.10.2 From 0b251835c65b0bf14befaac453dc978167d0d0eb Mon Sep 17 00:00:00 2001 From: Libo Chen Date: Wed, 21 Aug 2013 15:02:13 +0800 Subject: net: fsl_pq_mdio: remove unnecessary dev_set_drvdata() Unnecessary dev_set_drvdata() is removed, because the driver core clears the driver data to NULL after device_release or on probe failure. Signed-off-by: Libo Chen Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index b7dcd41..c4f6506 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -468,8 +468,6 @@ static int fsl_pq_mdio_remove(struct platform_device *pdev) mdiobus_unregister(bus); - dev_set_drvdata(device, NULL); - iounmap(priv->map); mdiobus_free(bus); -- cgit v0.10.2 From 01007f5cfb441ca5f705009ba51ea50be4bc7dc7 Mon Sep 17 00:00:00 2001 From: Libo Chen Date: Wed, 21 Aug 2013 15:02:18 +0800 Subject: net: ucc_geth: remove unnecessary dev_set_drvdata() Unnecessary dev_set_drvdata() is removed, because the driver core clears the driver data to NULL after device_release or on probe failure. Signed-off-by: Libo Chen Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 533885c..5930c39 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3917,7 +3917,6 @@ static int ucc_geth_remove(struct platform_device* ofdev) unregister_netdev(dev); free_netdev(dev); ucc_geth_memclean(ugeth); - dev_set_drvdata(device, NULL); return 0; } -- cgit v0.10.2 From dcbe8a1a42cbd9488eda68f5a8bd1d90fc7ad8fe Mon Sep 17 00:00:00 2001 From: Libo Chen Date: Wed, 21 Aug 2013 15:02:22 +0800 Subject: net: fec_mpc52xx_phy: remove unnecessary dev_set_drvdata() Unnecessary dev_set_drvdata() is removed, because the driver core clears the driver data to NULL after device_release or on probe failure. Signed-off-by: Libo Chen Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c index eb44797..e052890 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c @@ -127,7 +127,6 @@ static int mpc52xx_fec_mdio_remove(struct platform_device *of) struct mpc52xx_fec_mdio_priv *priv = bus->priv; mdiobus_unregister(bus); - dev_set_drvdata(dev, NULL); iounmap(priv->regs); kfree(priv); mdiobus_free(bus); -- cgit v0.10.2 From 63f076e96071eb6611e7537456299ca29feb63c0 Mon Sep 17 00:00:00 2001 From: Libo Chen Date: Wed, 21 Aug 2013 15:02:27 +0800 Subject: net: sunbmac: remove unnecessary dev_set_drvdata() Unnecessary dev_set_drvdata() is removed, because the driver core clears the driver data to NULL after device_release or on probe failure. Signed-off-by: Libo Chen Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c index 0a32ca5..7217ee5 100644 --- a/drivers/net/ethernet/sun/sunbmac.c +++ b/drivers/net/ethernet/sun/sunbmac.c @@ -1259,8 +1259,6 @@ static int bigmac_sbus_remove(struct platform_device *op) free_netdev(net_dev); - dev_set_drvdata(&op->dev, NULL); - return 0; } -- cgit v0.10.2 From 7d9a875a76755e2da8f64ca962ea3f5147f2467f Mon Sep 17 00:00:00 2001 From: Libo Chen Date: Wed, 21 Aug 2013 15:02:31 +0800 Subject: net: sunhme: remove unnecessary dev_set_drvdata() Unnecessary dev_set_drvdata() is removed, because the driver core clears the driver data to NULL after device_release or on probe failure. Signed-off-by: Libo Chen Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index b90d311..c67e683 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -3250,8 +3250,6 @@ static int hme_sbus_remove(struct platform_device *op) free_netdev(net_dev); - dev_set_drvdata(&op->dev, NULL); - return 0; } -- cgit v0.10.2 From 0c671dc0c4979e69e0b342c8f4e80690660929db Mon Sep 17 00:00:00 2001 From: Libo Chen Date: Wed, 21 Aug 2013 15:02:36 +0800 Subject: net: xilinx_emaclite: remove unnecessary dev_set_drvdata() Unnecessary dev_set_drvdata() is removed, because the driver core clears the driver data to NULL after device_release or on probe failure. Signed-off-by: Libo Chen Acked-by: Michal Simek Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 7c1ccbc..4c619ea 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1249,7 +1249,6 @@ static int xemaclite_of_remove(struct platform_device *of_dev) lp->phy_node = NULL; xemaclite_remove_ndev(ndev, of_dev); - dev_set_drvdata(dev, NULL); return 0; } -- cgit v0.10.2 From 110a183c8a1c61b9de7b5c19421beb6e585181d4 Mon Sep 17 00:00:00 2001 From: Libo Chen Date: Wed, 21 Aug 2013 15:02:41 +0800 Subject: net: davinci_mdio: remove unnecessary dev_set_drvdata() Unnecessary dev_set_drvdata() is removed, because the driver core clears the driver data to NULL after device_release or on probe failure. Signed-off-by: Libo Chen Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 01b0cc5..7f85143 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -433,8 +433,6 @@ static int davinci_mdio_remove(struct platform_device *pdev) pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); - dev_set_drvdata(dev, NULL); - kfree(data); return 0; -- cgit v0.10.2 From e6b0260c2a154948be825b34e68135c127123d5a Mon Sep 17 00:00:00 2001 From: Libo Chen Date: Wed, 21 Aug 2013 15:02:45 +0800 Subject: net: emac: remove unnecessary dev_set_drvdata() Unnecessary dev_set_drvdata() is removed, because the driver core clears the driver data to NULL after device_release or on probe failure. Signed-off-by: Libo Chen Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index d300a0c..2d3b064 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -2955,8 +2955,6 @@ static int emac_remove(struct platform_device *ofdev) DBG(dev, "remove" NL); - dev_set_drvdata(&ofdev->dev, NULL); - unregister_netdev(dev->ndev); cancel_work_sync(&dev->reset_work); -- cgit v0.10.2 From 327dfd889527ff2313cc4734a3874e1de2e2e656 Mon Sep 17 00:00:00 2001 From: Giuseppe CAVALLARO Date: Wed, 21 Aug 2013 09:11:14 +0200 Subject: stmmac: remove useless csum flag This patch removes the no_csum_insertion private parameter that is not used anymore and, also, the "likely" annotation from the condition that is not in a critical path. Signed-off-by: Giuseppe Cavallaro Cc: Sonic Zhang Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index c922fde..f16a9bd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -70,7 +70,6 @@ struct stmmac_priv { struct net_device *dev; struct device *device; struct mac_device_info *hw; - int no_csum_insertion; spinlock_t lock; struct phy_device *phydev ____cacheline_aligned_in_smp; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 0a9bb9d..be40691 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1224,8 +1224,7 @@ static void free_dma_desc_resources(struct stmmac_priv *priv) */ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) { - if (likely(priv->plat->force_sf_dma_mode || - ((priv->plat->tx_coe) && (!priv->no_csum_insertion)))) { + if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { /* * In case of GMAC, SF mode can be enabled * to perform the TX COE in HW. This depends on: -- cgit v0.10.2 From 9bcadaef9189cce6b712a40ec2a359629e06795f Mon Sep 17 00:00:00 2001 From: Libo Chen Date: Wed, 21 Aug 2013 18:15:11 +0800 Subject: net: irda: pxaficp_ir: use platform_set_drvdata() Use the wrapper functions for getting and setting the driver data using platform_device instead of using deva_set_drvdata() with &pdev->dev, so we can directly pass a struct platform_device. Signed-off-by: Libo Chen Signed-off-by: David S. 
Miller diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c index 964b116..3eeaaf8 100644 --- a/drivers/net/irda/pxaficp_ir.c +++ b/drivers/net/irda/pxaficp_ir.c @@ -915,7 +915,7 @@ static int pxa_irda_probe(struct platform_device *pdev) err = register_netdev(dev); if (err == 0) - dev_set_drvdata(&pdev->dev, dev); + platform_set_drvdata(pdev, dev); if (err) { if (si->pdata->shutdown) -- cgit v0.10.2 From f8825669b3165bbf4380eabfd641f693d02af866 Mon Sep 17 00:00:00 2001 From: Libo Chen Date: Wed, 21 Aug 2013 18:15:15 +0800 Subject: net: phy: mdio-octeon: use platform_set_drvdata() Use the wrapper functions for getting and setting the driver data using platform_device instead of using dev_set_drvdata() with &pdev->dev, so we can directly pass a struct platform_device. Signed-off-by: Libo Chen Signed-off-by: David S. Miller diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c index b51fa1f..7f18f80 100644 --- a/drivers/net/phy/mdio-octeon.c +++ b/drivers/net/phy/mdio-octeon.c @@ -222,7 +222,7 @@ static int octeon_mdiobus_probe(struct platform_device *pdev) bus->mii_bus->read = octeon_mdiobus_read; bus->mii_bus->write = octeon_mdiobus_write; - dev_set_drvdata(&pdev->dev, bus); + platform_set_drvdata(pdev, bus); err = of_mdiobus_register(bus->mii_bus, pdev->dev.of_node); if (err) -- cgit v0.10.2 From e133fae263090f5795b8024a4024b81e06770132 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 22 Aug 2013 08:36:41 +0200 Subject: mac80211: minstrel_ht: don't use control.flags in TX status path Sujith reports that my commit af61a165187bb94b1dc7628ef815c23d0eacf40b ("mac80211: add control port protocol TX control flag") broke ath9k (aggregation). The reason is that I made minstrel_ht use the flag in the TX status path, where it can have been overwritten by the driver. Since we have no more space in info->flags, revert that part of the change for now, until we can reshuffle the flags or so. Reported-by: Sujith Manoharan Signed-off-by: Johannes Berg diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 9eff382..c397ff5 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -439,13 +439,12 @@ minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct sta_info *sta = container_of(pubsta, struct sta_info, sta); - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); u16 tid; if (unlikely(!ieee80211_is_data_qos(hdr->frame_control))) return; - if (unlikely(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)) + if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))) return; tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; -- cgit v0.10.2 From 6439fbce1075dedfe23165c136f19f44c00ca132 Mon Sep 17 00:00:00 2001 From: Chen Gang Date: Wed, 21 Aug 2013 17:09:33 +0800 Subject: can: c_can: fix error checking of priv->instance in probe() This patch adds a type cast from 'unsigned int' to 'int'. 
'priv->instance' may be less than zero, so a type cast is needed. The related warning (allmodconfig, "EXTRA_CFLAGS=-W"): drivers/net/can/c_can/c_can_platform.c:198:3: warning: comparison of unsigned expression < 0 is always false [-Wtype-limits] Signed-off-by: Chen Gang Signed-off-by: Marc Kleine-Budde diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index c6f838d..294ced3 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c @@ -195,7 +195,7 @@ static int c_can_plat_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 1); priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0) + if (IS_ERR(priv->raminit_ctrlreg) || (int)priv->instance < 0) dev_info(&pdev->dev, "control memory is not used for raminit\n"); else priv->raminit = c_can_hw_raminit; -- cgit v0.10.2 From 40f7e0dd05f1902c05ae4e4e8cba4a24c3f47c3a Mon Sep 17 00:00:00 2001 From: Libo Chen Date: Wed, 21 Aug 2013 18:15:03 +0800 Subject: can: at91_can: use platform_set_drvdata() Use the wrapper functions for getting and setting the driver data using platform_device instead of using dev_set_drvdata() with &pdev->dev, so we can directly pass a struct platform_device. Signed-off-by: Libo Chen Signed-off-by: Marc Kleine-Budde diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index dbbe97a..3b1ff61 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -1355,7 +1355,7 @@ static int at91_can_probe(struct platform_device *pdev) if (at91_is_sam9263(priv)) dev->sysfs_groups[0] = &at91_sysfs_attr_group; - dev_set_drvdata(&pdev->dev, dev); + platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); err = register_candev(dev); -- cgit v0.10.2 From d75ea942b360690a380da8012a51eaf6a6ebb1b1 Mon Sep 17 00:00:00 2001 From: Libo Chen Date: Wed, 21 Aug 2013 18:15:08 +0800 Subject: can: flexcan: use platform_set_drvdata() Use the wrapper functions for getting and setting the driver data using platform_device instead of using dev_set_drvdata() with &pdev->dev, so we can directly pass a struct platform_device.
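For reference, a minimal probe/remove skeleton of the conversion these drvdata patches make. The example_* driver and its private struct are hypothetical; platform_set_drvdata()/platform_get_drvdata() and the driver core clearing the driver data after remove() or on probe failure are real kernel behaviour.

#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_priv {
	void __iomem *regs;
};

static int example_probe(struct platform_device *pdev)
{
	struct example_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Before: dev_set_drvdata(&pdev->dev, priv);
	 * The wrapper takes the platform_device directly. */
	platform_set_drvdata(pdev, priv);
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	struct example_priv *priv = platform_get_drvdata(pdev);

	/* ... release resources held by priv ...
	 * No dev_set_drvdata(dev, NULL) or platform_set_drvdata(pdev, NULL)
	 * is needed here: the driver core clears drvdata for us. */
	(void)priv;
	return 0;
}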
Signed-off-by: Libo Chen Signed-off-by: Marc Kleine-Budde diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index c48174e..71c677e 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -1083,7 +1083,7 @@ static int flexcan_probe(struct platform_device *pdev) netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT); - dev_set_drvdata(&pdev->dev, dev); + platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); err = register_flexcandev(dev); -- cgit v0.10.2 From 8a650aa258237f41ea6177a758a529a6e8d6f9e5 Mon Sep 17 00:00:00 2001 From: Akeem G Abodunrin Date: Fri, 24 May 2013 07:20:57 +0000 Subject: igb: Reset the link when EEE setting changed This patch resets the link, if link is up - whenever users enable or disable EEE Signed-off-by: Akeem G Abodunrin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 85fe7b5..6d861a5 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2672,7 +2672,9 @@ static int igb_set_eee(struct net_device *netdev, igb_set_eee_i350(hw); /* reset link */ - if (!netif_running(netdev)) + if (netif_running(netdev)) + igb_reinit_locked(adapter); + else igb_reset(adapter); } -- cgit v0.10.2 From e5c3370ffbe1336f7ee01233ba6b48a1ac06ec07 Mon Sep 17 00:00:00 2001 From: Akeem G Abodunrin Date: Thu, 6 Jun 2013 01:31:09 +0000 Subject: igb: Read register for latch_on without return value This patch changes register read to "just-read" without returning a value for hardware to accurately latch the register value. Signed-off-by: Akeem G Abodunrin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index f21a91a..9057d10 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -1320,7 +1320,7 @@ void igb_shutdown_serdes_link_82575(struct e1000_hw *hw) **/ static s32 igb_reset_hw_82575(struct e1000_hw *hw) { - u32 ctrl, icr; + u32 ctrl; s32 ret_val; /* Prevent the PCI-E bus from sticking if there is no TLP connection @@ -1365,7 +1365,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw) /* Clear any pending interrupt events. */ wr32(E1000_IMC, 0xffffffff); - icr = rd32(E1000_ICR); + rd32(E1000_ICR); /* Install any alternate MAC address into RAR0 */ ret_val = igb_check_alt_mac_addr(hw); @@ -2103,10 +2103,9 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw) s32 ret_val = 0; /* BH SW mailbox bit in SW_FW_SYNC */ u16 swmbsw_mask = E1000_SW_SYNCH_MB; - u32 ctrl, icr; + u32 ctrl; bool global_device_reset = hw->dev_spec._82575.global_device_reset; - hw->dev_spec._82575.global_device_reset = false; /* due to hw errata, global device reset doesn't always @@ -2165,7 +2164,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw) /* Clear any pending interrupt events. 
*/ wr32(E1000_IMC, 0xffffffff); - icr = rd32(E1000_ICR); + rd32(E1000_ICR); ret_val = igb_reset_mdicnfg_82580(hw); if (ret_val) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index c1d72c0..090ee56 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3844,7 +3844,6 @@ bool igb_has_link(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; bool link_active = false; - s32 ret_val = 0; /* get_link_status is set on LSC (link status) interrupt or * rx sequence error interrupt. get_link_status will stay @@ -3853,16 +3852,11 @@ bool igb_has_link(struct igb_adapter *adapter) */ switch (hw->phy.media_type) { case e1000_media_type_copper: - if (hw->mac.get_link_status) { - ret_val = hw->mac.ops.check_for_link(hw); - link_active = !hw->mac.get_link_status; - } else { - link_active = true; - } - break; + if (!hw->mac.get_link_status) + return true; case e1000_media_type_internal_serdes: - ret_val = hw->mac.ops.check_for_link(hw); - link_active = hw->mac.serdes_has_link; + hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status; break; default: case e1000_media_type_unknown: diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 7e8c477..5a54e3d 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -97,14 +97,14 @@ static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc) { struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); struct e1000_hw *hw = &igb->hw; + u32 lo, hi; u64 val; - u32 lo, hi, jk; /* The timestamp latches on lowest register read. For the 82580 * the lowest register is SYSTIMR instead of SYSTIML. However we only * need to provide nanosecond resolution, so we just ignore it. */ - jk = rd32(E1000_SYSTIMR); + rd32(E1000_SYSTIMR); lo = rd32(E1000_SYSTIML); hi = rd32(E1000_SYSTIMH); @@ -118,13 +118,13 @@ static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc) static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts) { struct e1000_hw *hw = &adapter->hw; - u32 sec, nsec, jk; + u32 sec, nsec; /* The timestamp latches on lowest register read. For I210/I211, the * lowest register is SYSTIMR. Since we only need to provide nanosecond * resolution, we can ignore it. */ - jk = rd32(E1000_SYSTIMR); + rd32(E1000_SYSTIMR); nsec = rd32(E1000_SYSTIML); sec = rd32(E1000_SYSTIMH); -- cgit v0.10.2 From 7f90128e246ea9827dd5ac532e49611aaf001892 Mon Sep 17 00:00:00 2001 From: Akeem G Abodunrin Date: Thu, 27 Jun 2013 09:10:23 +0000 Subject: igb: Added rcu_lock to avoid race This patch adds rcu_lock to avoid possible race condition with igb_update_stats function accessing the rings in free_ q_vector. CC: Eric Dumazet Signed-off-by: Akeem G Abodunrin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 090ee56..8e7c1b8 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1013,7 +1013,7 @@ static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx) adapter->q_vector[v_idx] = NULL; netif_napi_del(&q_vector->napi); - /* ixgbe_get_stats64() might access the rings on this vector, + /* igb_get_stats64() might access the rings on this vector, * we must wait a grace period before freeing it. 
*/ kfree_rcu(q_vector, rcu); @@ -4859,6 +4859,8 @@ void igb_update_stats(struct igb_adapter *adapter, bytes = 0; packets = 0; + + rcu_read_lock(); for (i = 0; i < adapter->num_rx_queues; i++) { u32 rqdpc = rd32(E1000_RQDPC(i)); struct igb_ring *ring = adapter->rx_ring[i]; @@ -4894,6 +4896,7 @@ void igb_update_stats(struct igb_adapter *adapter, } net_stats->tx_bytes = bytes; net_stats->tx_packets = packets; + rcu_read_unlock(); /* read stats registers */ adapter->stats.crcerrs += rd32(E1000_CRCERRS); -- cgit v0.10.2 From 502671967403c611b362836aef8cdee65e9a626a Mon Sep 17 00:00:00 2001 From: Mitch A Williams Date: Thu, 20 Jun 2013 06:03:36 +0000 Subject: igb: don't allow SR-IOV without MSI-X MSI-X interrupts are required for SR-IOV operation. Check to make sure they're enabled before allowing the user to turn on VFs. Signed-off-by: Mitch Williams Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 8e7c1b8..94295a0 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2436,6 +2436,11 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs) int err = 0; int i; + if (!adapter->msix_entries) { + err = -EPERM; + goto out; + } + if (!num_vfs) goto out; else if (old_vfs && old_vfs == num_vfs) -- cgit v0.10.2 From 2ccd994c4c85a787ba4873f1481edd42c4ff46bf Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 16 Jul 2013 00:20:34 +0000 Subject: igb: Update MTU so that it is always at least a standard frame size This change makes it so that we limit the lower bound for max_frame_size to the size of a standard Ethernet frame. This allows for feature parity with other Intel based drivers such as ixgbe. Signed-off-by: Alexander Duyck Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 94295a0..1a0137d 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -4813,6 +4813,10 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu) return -EINVAL; } + /* adjust max frame to be at least the size of a standard frame */ + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) + max_frame = ETH_FRAME_LEN + ETH_FCS_LEN; + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) msleep(1); -- cgit v0.10.2 From 5a823d8cdd0e16081bc09a03fa253b9750c4b034 Mon Sep 17 00:00:00 2001 From: Carolyn Wyborny Date: Tue, 16 Jul 2013 19:17:32 +0000 Subject: igb: Refactor of init_nvm_params This patch refactors the init_nvm_params functions for 82575 and adds a new function for the i210/i211 devices in order to configure separately the NVM functionality for the i210/i211 family. Signed-off-by: Carolyn Wyborny Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 9057d10..8d79fac 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -238,6 +238,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw) size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> E1000_EECD_SIZE_EX_SHIFT); + /* Added to a constant, "size" becomes the left-shift value * for setting word_size. 
*/ @@ -250,86 +251,52 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw) size = 15; nvm->word_size = 1 << size; - if (hw->mac.type < e1000_i210) { - nvm->opcode_bits = 8; - nvm->delay_usec = 1; - - switch (nvm->override) { - case e1000_nvm_override_spi_large: - nvm->page_size = 32; - nvm->address_bits = 16; - break; - case e1000_nvm_override_spi_small: - nvm->page_size = 8; - nvm->address_bits = 8; - break; - default: - nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; - nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? - 16 : 8; - break; - } - if (nvm->word_size == (1 << 15)) - nvm->page_size = 128; + nvm->opcode_bits = 8; + nvm->delay_usec = 1; - nvm->type = e1000_nvm_eeprom_spi; - } else { - nvm->type = e1000_nvm_flash_hw; + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? + 16 : 8; + break; } + if (nvm->word_size == (1 << 15)) + nvm->page_size = 128; + + nvm->type = e1000_nvm_eeprom_spi; /* NVM Function Pointers */ + nvm->ops.acquire = igb_acquire_nvm_82575; + nvm->ops.release = igb_release_nvm_82575; + nvm->ops.write = igb_write_nvm_spi; + nvm->ops.validate = igb_validate_nvm_checksum; + nvm->ops.update = igb_update_nvm_checksum; + if (nvm->word_size < (1 << 15)) + nvm->ops.read = igb_read_nvm_eerd; + else + nvm->ops.read = igb_read_nvm_spi; + + /* override generic family function pointers for specific descendants */ switch (hw->mac.type) { case e1000_82580: nvm->ops.validate = igb_validate_nvm_checksum_82580; nvm->ops.update = igb_update_nvm_checksum_82580; - nvm->ops.acquire = igb_acquire_nvm_82575; - nvm->ops.release = igb_release_nvm_82575; - if (nvm->word_size < (1 << 15)) - nvm->ops.read = igb_read_nvm_eerd; - else - nvm->ops.read = igb_read_nvm_spi; - nvm->ops.write = igb_write_nvm_spi; break; case e1000_i354: case e1000_i350: nvm->ops.validate = igb_validate_nvm_checksum_i350; nvm->ops.update = igb_update_nvm_checksum_i350; - nvm->ops.acquire = igb_acquire_nvm_82575; - nvm->ops.release = igb_release_nvm_82575; - if (nvm->word_size < (1 << 15)) - nvm->ops.read = igb_read_nvm_eerd; - else - nvm->ops.read = igb_read_nvm_spi; - nvm->ops.write = igb_write_nvm_spi; - break; - case e1000_i210: - nvm->ops.validate = igb_validate_nvm_checksum_i210; - nvm->ops.update = igb_update_nvm_checksum_i210; - nvm->ops.acquire = igb_acquire_nvm_i210; - nvm->ops.release = igb_release_nvm_i210; - nvm->ops.read = igb_read_nvm_srrd_i210; - nvm->ops.write = igb_write_nvm_srwr_i210; - nvm->ops.valid_led_default = igb_valid_led_default_i210; - break; - case e1000_i211: - nvm->ops.acquire = igb_acquire_nvm_i210; - nvm->ops.release = igb_release_nvm_i210; - nvm->ops.read = igb_read_nvm_i211; - nvm->ops.valid_led_default = igb_valid_led_default_i210; - nvm->ops.validate = NULL; - nvm->ops.update = NULL; - nvm->ops.write = NULL; break; default: - nvm->ops.validate = igb_validate_nvm_checksum; - nvm->ops.update = igb_update_nvm_checksum; - nvm->ops.acquire = igb_acquire_nvm_82575; - nvm->ops.release = igb_release_nvm_82575; - if (nvm->word_size < (1 << 15)) - nvm->ops.read = igb_read_nvm_eerd; - else - nvm->ops.read = igb_read_nvm_spi; - nvm->ops.write = igb_write_nvm_spi; break; } @@ -601,6 +568,15 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) /* NVM initialization */ ret_val = 
igb_init_nvm_params_82575(hw); + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + ret_val = igb_init_nvm_params_i210(hw); + break; + default: + break; + } + if (ret_val) goto out; diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index aa201ab..1799355 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -620,6 +620,7 @@ #define E1000_EECD_SIZE_EX_SHIFT 11 #define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */ #define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/ +#define E1000_EECD_FLASH_DETECTED_I210 0x00080000 /* FLASH detected */ #define E1000_FLUDONE_ATTEMPTS 20000 #define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ #define E1000_I210_FIFO_SEL_RX 0x00 diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index 94d7866..4329e8c 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -110,6 +110,7 @@ enum e1000_nvm_type { e1000_nvm_none, e1000_nvm_eeprom_spi, e1000_nvm_flash_hw, + e1000_nvm_invm, e1000_nvm_flash_sw }; diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c index ddb3cf5..4aebf35 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.c +++ b/drivers/net/ethernet/intel/igb/e1000_i210.c @@ -661,6 +661,23 @@ static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw) } /** + * igb_get_flash_presence_i210 - Check if flash device is detected. + * @hw: pointer to the HW structure + * + **/ +bool igb_get_flash_presence_i210(struct e1000_hw *hw) +{ + u32 eec = 0; + bool ret_val = false; + + eec = rd32(E1000_EECD); + if (eec & E1000_EECD_FLASH_DETECTED_I210) + ret_val = true; + + return ret_val; +} + +/** * igb_update_flash_i210 - Commit EEPROM to the flash * @hw: pointer to the HW structure * @@ -786,3 +803,33 @@ s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data) { return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false); } + +/** + * igb_init_nvm_params_i210 - Init NVM func ptrs. 
+ * @hw: pointer to the HW structure + **/ +s32 igb_init_nvm_params_i210(struct e1000_hw *hw) +{ + s32 ret_val = 0; + struct e1000_nvm_info *nvm = &hw->nvm; + + nvm->ops.acquire = igb_acquire_nvm_i210; + nvm->ops.release = igb_release_nvm_i210; + nvm->ops.valid_led_default = igb_valid_led_default_i210; + + /* NVM Function Pointers */ + if (igb_get_flash_presence_i210(hw)) { + hw->nvm.type = e1000_nvm_flash_hw; + nvm->ops.read = igb_read_nvm_srrd_i210; + nvm->ops.write = igb_write_nvm_srwr_i210; + nvm->ops.validate = igb_validate_nvm_checksum_i210; + nvm->ops.update = igb_update_nvm_checksum_i210; + } else { + hw->nvm.type = e1000_nvm_invm; + nvm->ops.read = igb_read_nvm_i211; + nvm->ops.write = NULL; + nvm->ops.validate = NULL; + nvm->ops.update = NULL; + } + return ret_val; +} diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h index 5caa332..a2a1f70 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.h +++ b/drivers/net/ethernet/intel/igb/e1000_i210.h @@ -49,6 +49,8 @@ extern s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data); extern s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data); +extern s32 igb_init_nvm_params_i210(struct e1000_hw *hw); +extern bool igb_get_flash_presence_i210(struct e1000_hw *hw); #define E1000_STM_OPCODE 0xDB00 #define E1000_EEPROM_FLASH_SIZE_WORD 0x11 -- cgit v0.10.2 From ef3a009297c50876980f21060aee61e8b516a990 Mon Sep 17 00:00:00 2001 From: Carolyn Wyborny Date: Wed, 17 Jul 2013 19:02:53 +0000 Subject: igb: Refactor NVM read functions to accommodate devices with no flash This patch refactors NVM read functions in order to accommodate i210 devices that do not have a flash. Previously, this was not supported on i210 devices. Signed-off-by: Carolyn Wyborny Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 1799355..4522198 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -628,6 +628,11 @@ #define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0) #define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06 #define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01 +#define E1000_I210_FLASH_SECTOR_SIZE 0x1000 /* 4KB FLASH sector unit size */ +/* Secure FLASH mode requires removing MSb */ +#define E1000_I210_FW_PTR_MASK 0x7FFF +/* Firmware code revision field word offset*/ +#define E1000_I210_FW_VER_OFFSET 328 #define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */ #define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/ #define E1000_FLUDONE_ATTEMPTS 20000 diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c index 4aebf35..0c03933 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.c +++ b/drivers/net/ethernet/intel/igb/e1000_i210.c @@ -335,57 +335,101 @@ s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, } /** - * igb_read_nvm_i211 - Read NVM wrapper function for I211 + * igb_read_invm_word_i210 - Reads OTP + * @hw: pointer to the HW structure + * @address: the word address (aka eeprom offset) to read + * @data: pointer to the data read + * + * Reads 16-bit words from the OTP. Return error when the word is not + * stored in OTP. 
+ **/ +static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data) +{ + s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; + u32 invm_dword; + u16 i; + u8 record_type, word_address; + + for (i = 0; i < E1000_INVM_SIZE; i++) { + invm_dword = rd32(E1000_INVM_DATA_REG(i)); + /* Get record type */ + record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword); + if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE) + break; + if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE) + i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS; + if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE) + i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS; + if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) { + word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword); + if (word_address == address) { + *data = INVM_DWORD_TO_WORD_DATA(invm_dword); + hw_dbg("Read INVM Word 0x%02x = %x", + address, *data); + status = E1000_SUCCESS; + break; + } + } + } + if (status != E1000_SUCCESS) + hw_dbg("Requested word 0x%02x not found in OTP\n", address); + return status; +} + +/** + * igb_read_invm_i210 - Read invm wrapper function for I210/I211 * @hw: pointer to the HW structure * @words: number of words to read * @data: pointer to the data read * * Wrapper function to return data formerly found in the NVM. **/ -s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words, - u16 *data) +static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset, + u16 words __always_unused, u16 *data) { s32 ret_val = E1000_SUCCESS; /* Only the MAC addr is required to be present in the iNVM */ switch (offset) { case NVM_MAC_ADDR: - ret_val = igb_read_invm_i211(hw, offset, &data[0]); - ret_val |= igb_read_invm_i211(hw, offset+1, &data[1]); - ret_val |= igb_read_invm_i211(hw, offset+2, &data[2]); + ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]); + ret_val |= igb_read_invm_word_i210(hw, (u8)offset+1, + &data[1]); + ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2, + &data[2]); if (ret_val != E1000_SUCCESS) hw_dbg("MAC Addr not found in iNVM\n"); break; case NVM_INIT_CTRL_2: - ret_val = igb_read_invm_i211(hw, (u8)offset, data); + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); if (ret_val != E1000_SUCCESS) { *data = NVM_INIT_CTRL_2_DEFAULT_I211; ret_val = E1000_SUCCESS; } break; case NVM_INIT_CTRL_4: - ret_val = igb_read_invm_i211(hw, (u8)offset, data); + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); if (ret_val != E1000_SUCCESS) { *data = NVM_INIT_CTRL_4_DEFAULT_I211; ret_val = E1000_SUCCESS; } break; case NVM_LED_1_CFG: - ret_val = igb_read_invm_i211(hw, (u8)offset, data); + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); if (ret_val != E1000_SUCCESS) { *data = NVM_LED_1_CFG_DEFAULT_I211; ret_val = E1000_SUCCESS; } break; case NVM_LED_0_2_CFG: - igb_read_invm_i211(hw, offset, data); + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); if (ret_val != E1000_SUCCESS) { *data = NVM_LED_0_2_CFG_DEFAULT_I211; ret_val = E1000_SUCCESS; } break; case NVM_ID_LED_SETTINGS: - ret_val = igb_read_invm_i211(hw, (u8)offset, data); + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); if (ret_val != E1000_SUCCESS) { *data = ID_LED_RESERVED_FFFF; ret_val = E1000_SUCCESS; @@ -411,48 +455,6 @@ s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words, } /** - * igb_read_invm_i211 - Reads OTP - * @hw: pointer to the HW structure - * @address: the word address (aka eeprom offset) to read - * @data: pointer to the data read - * - * Reads 16-bit words from the OTP. 
Return error when the word is not - * stored in OTP. - **/ -s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data) -{ - s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; - u32 invm_dword; - u16 i; - u8 record_type, word_address; - - for (i = 0; i < E1000_INVM_SIZE; i++) { - invm_dword = rd32(E1000_INVM_DATA_REG(i)); - /* Get record type */ - record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword); - if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE) - break; - if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE) - i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS; - if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE) - i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS; - if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) { - word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword); - if (word_address == (u8)address) { - *data = INVM_DWORD_TO_WORD_DATA(invm_dword); - hw_dbg("Read INVM Word 0x%02x = %x", - address, *data); - status = E1000_SUCCESS; - break; - } - } - } - if (status != E1000_SUCCESS) - hw_dbg("Requested word 0x%02x not found in OTP\n", address); - return status; -} - -/** * igb_read_invm_version - Reads iNVM version and image type * @hw: pointer to the HW structure * @invm_ver: version structure for the version read @@ -826,7 +828,7 @@ s32 igb_init_nvm_params_i210(struct e1000_hw *hw) nvm->ops.update = igb_update_nvm_checksum_i210; } else { hw->nvm.type = e1000_nvm_invm; - nvm->ops.read = igb_read_nvm_i211; + nvm->ops.read = igb_read_invm_i210; nvm->ops.write = NULL; nvm->ops.validate = NULL; nvm->ops.update = NULL; diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h index a2a1f70..dde3c4b 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.h +++ b/drivers/net/ethernet/intel/igb/e1000_i210.h @@ -35,14 +35,11 @@ extern s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); extern s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); -extern s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data); extern s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask); extern void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask); extern s32 igb_acquire_nvm_i210(struct e1000_hw *hw); extern void igb_release_nvm_i210(struct e1000_hw *hw); extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data); -extern s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words, - u16 *data); extern s32 igb_read_invm_version(struct e1000_hw *hw, struct e1000_fw_version *invm_ver); extern s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 1a0137d..387864d 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2166,15 +2166,28 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) */ hw->mac.ops.reset_hw(hw); - /* make sure the NVM is good , i211 parts have special NVM that - * doesn't contain a checksum + /* make sure the NVM is good , i211/i210 parts can have special NVM + * that doesn't contain a checksum */ - if (hw->mac.type != e1000_i211) { + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + if (igb_get_flash_presence_i210(hw)) { + if (hw->nvm.ops.validate(hw) < 0) { + dev_err(&pdev->dev, + "The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + } + break; + default: if (hw->nvm.ops.validate(hw) < 0) { 
dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); err = -EIO; goto err_eeprom; } + break; } /* copy the MAC address out of the NVM */ -- cgit v0.10.2 From 53b87ce37e19939c80aaeaa5e8702b521597ce3e Mon Sep 17 00:00:00 2001 From: Carolyn Wyborny Date: Tue, 16 Jul 2013 19:18:36 +0000 Subject: igb: Add device support for flashless SKU of i210 device This patch adds the specific device id support for versions of i210 that do not have flash installed. Signed-off-by: Carolyn Wyborny Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 8d79fac..d398fad 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -483,6 +483,8 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) case E1000_DEV_ID_I210_FIBER: case E1000_DEV_ID_I210_SERDES: case E1000_DEV_ID_I210_SGMII: + case E1000_DEV_ID_I210_COPPER_FLASHLESS: + case E1000_DEV_ID_I210_SERDES_FLASHLESS: mac->type = e1000_i210; break; case E1000_DEV_ID_I211_COPPER: diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index 4329e8c..37a9c06 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -67,6 +67,8 @@ struct e1000_hw; #define E1000_DEV_ID_I210_FIBER 0x1536 #define E1000_DEV_ID_I210_SERDES 0x1537 #define E1000_DEV_ID_I210_SGMII 0x1538 +#define E1000_DEV_ID_I210_COPPER_FLASHLESS 0x157B +#define E1000_DEV_ID_I210_SERDES_FLASHLESS 0x157C #define E1000_DEV_ID_I211_COPPER 0x1539 #define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40 #define E1000_DEV_ID_I354_SGMII 0x1F41 diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 6d861a5..03137e2 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -1335,12 +1335,23 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data) static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data) { + struct e1000_hw *hw = &adapter->hw; + *data = 0; - /* Validate eeprom on all parts but i211 */ - if (adapter->hw.mac.type != e1000_i211) { + /* Validate eeprom on all parts but flashless */ + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + if (igb_get_flash_presence_i210(hw)) { + if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0) + *data = 2; + } + break; + default: if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0) *data = 2; + break; } return *data; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 387864d..a53c77b 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -85,6 +85,8 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 }, -- cgit v0.10.2 From 7dc98a623392b39f8670755d4b65968b80f01716 Mon Sep 17 00:00:00 2001 From: Carolyn Wyborny Date: Tue, 16 Jul 2013 19:25:33 +0000 Subject: igb: Fix 
get_fw_version function for all parts This patch fixes issues found with older parts and older NVM tools in the display of the version in ethtool. Signed-off-by: Carolyn Wyborny Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 4522198..60559af 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -671,20 +671,26 @@ #define NVM_INIT_CTRL_4 0x0013 #define NVM_LED_1_CFG 0x001C #define NVM_LED_0_2_CFG 0x001F - -/* NVM version defines */ #define NVM_ETRACK_WORD 0x0042 +#define NVM_ETRACK_HIWORD 0x0043 #define NVM_COMB_VER_OFF 0x0083 #define NVM_COMB_VER_PTR 0x003d -#define NVM_MAJOR_MASK 0xF000 -#define NVM_MINOR_MASK 0x0FF0 -#define NVM_BUILD_MASK 0x000F -#define NVM_COMB_VER_MASK 0x00FF -#define NVM_MAJOR_SHIFT 12 -#define NVM_MINOR_SHIFT 4 -#define NVM_COMB_VER_SHFT 8 -#define NVM_VER_INVALID 0xFFFF -#define NVM_ETRACK_SHIFT 16 + +/* NVM version defines */ +#define NVM_MAJOR_MASK 0xF000 +#define NVM_MINOR_MASK 0x0FF0 +#define NVM_IMAGE_ID_MASK 0x000F +#define NVM_COMB_VER_MASK 0x00FF +#define NVM_MAJOR_SHIFT 12 +#define NVM_MINOR_SHIFT 4 +#define NVM_COMB_VER_SHFT 8 +#define NVM_VER_INVALID 0xFFFF +#define NVM_ETRACK_SHIFT 16 +#define NVM_ETRACK_VALID 0x8000 +#define NVM_NEW_DEC_MASK 0x0F00 +#define NVM_HEX_CONV 16 +#define NVM_HEX_TENS 10 + #define NVM_ETS_CFG 0x003E #define NVM_ETS_LTHRES_DELTA_MASK 0x07C0 #define NVM_ETS_LTHRES_DELTA_SHIFT 6 diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c index 7f9cd7c..a7db7f3 100644 --- a/drivers/net/ethernet/intel/igb/e1000_nvm.c +++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c @@ -709,11 +709,16 @@ out: **/ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers) { - u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset; - u16 fw_version; + u16 eeprom_verh, eeprom_verl, etrack_test, fw_version; + u8 q, hval, rem, result; + u16 comb_verh, comb_verl, comb_offset; memset(fw_vers, 0, sizeof(struct e1000_fw_version)); + /* basic eeprom version numbers and bits used vary by part and by tool + * used to create the nvm images. Check which data format we have. 
+ */ + hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); switch (hw->mac.type) { case e1000_i211: igb_read_invm_version(hw, fw_vers); @@ -721,30 +726,30 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers) case e1000_82575: case e1000_82576: case e1000_82580: - case e1000_i354: - case e1000_i350: - case e1000_i210: + /* Use this format, unless EETRACK ID exists, + * then use alternate format + */ + if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) { + hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); + fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) + >> NVM_MAJOR_SHIFT; + fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK) + >> NVM_MINOR_SHIFT; + fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK); + goto etrack_id; + } break; - default: - return; - } - /* basic eeprom version numbers */ - hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); - fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT; - fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK); - - /* etrack id */ - hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl); - hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh); - fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) | eeprom_verl; - - switch (hw->mac.type) { case e1000_i210: - case e1000_i354: + if (!(igb_get_flash_presence_i210(hw))) { + igb_read_invm_version(hw, fw_vers); + return; + } + /* fall through */ case e1000_i350: /* find combo image version */ hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset); - if ((comb_offset != 0x0) && (comb_offset != NVM_VER_INVALID)) { + if ((comb_offset != 0x0) && + (comb_offset != NVM_VER_INVALID)) { hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset + 1), 1, &comb_verh); @@ -760,15 +765,42 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers) fw_vers->or_major = comb_verl >> NVM_COMB_VER_SHFT; fw_vers->or_build = - ((comb_verl << NVM_COMB_VER_SHFT) - | (comb_verh >> NVM_COMB_VER_SHFT)); + (comb_verl << NVM_COMB_VER_SHFT) + | (comb_verh >> NVM_COMB_VER_SHFT); fw_vers->or_patch = comb_verh & NVM_COMB_VER_MASK; } } break; default: - break; + return; + } + hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); + fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) + >> NVM_MAJOR_SHIFT; + + /* check for old style version format in newer images*/ + if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) { + eeprom_verl = (fw_version & NVM_COMB_VER_MASK); + } else { + eeprom_verl = (fw_version & NVM_MINOR_MASK) + >> NVM_MINOR_SHIFT; + } + /* Convert minor value to hex before assigning to output struct + * Val to be converted will not be higher than 99, per tool output + */ + q = eeprom_verl / NVM_HEX_CONV; + hval = q * NVM_HEX_TENS; + rem = eeprom_verl % NVM_HEX_CONV; + result = hval + rem; + fw_vers->eep_minor = result; + +etrack_id: + if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) { + hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl); + hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh); + fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) + | eeprom_verl; } return; } diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h index 6bfc0c4..433b741 100644 --- a/drivers/net/ethernet/intel/igb/e1000_nvm.h +++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h @@ -44,6 +44,7 @@ struct e1000_fw_version { u32 etrack_id; u16 eep_major; u16 eep_minor; + u16 eep_build; u8 invm_major; u8 invm_minor; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c 
b/drivers/net/ethernet/intel/igb/igb_main.c index a53c77b..7f6cf65 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1931,12 +1931,17 @@ void igb_set_fw_version(struct igb_adapter *adapter) igb_get_fw_version(hw, &fw); switch (hw->mac.type) { + case e1000_i210: case e1000_i211: - snprintf(adapter->fw_version, sizeof(adapter->fw_version), - "%2d.%2d-%d", - fw.invm_major, fw.invm_minor, fw.invm_img_type); - break; - + if (!(igb_get_flash_presence_i210(hw))) { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%2d.%2d-%d", + fw.invm_major, fw.invm_minor, + fw.invm_img_type); + break; + } + /* fall through */ default: /* if option is rom valid, display its version too */ if (fw.or_valid) { @@ -1946,11 +1951,16 @@ void igb_set_fw_version(struct igb_adapter *adapter) fw.eep_major, fw.eep_minor, fw.etrack_id, fw.or_major, fw.or_build, fw.or_patch); /* no option rom */ - } else { + } else if (fw.etrack_id != 0X0000) { snprintf(adapter->fw_version, - sizeof(adapter->fw_version), - "%d.%d, 0x%08x", - fw.eep_major, fw.eep_minor, fw.etrack_id); + sizeof(adapter->fw_version), + "%d.%d, 0x%08x", + fw.eep_major, fw.eep_minor, fw.etrack_id); + } else { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d.%d", + fw.eep_major, fw.eep_minor, fw.eep_build); } break; } -- cgit v0.10.2 From c342b39ea7ca0e46e093cdb346bf52b2b4e71b01 Mon Sep 17 00:00:00 2001 From: Laura Mihaela Vasilescu Date: Wed, 31 Jul 2013 20:19:48 +0000 Subject: igb: Add macro for size of RETA indirection table RETA indirection table is used to assign the received data to a CPU in order to maintain an efficient distribution of network receive processing across multiple CPUs. This patch removes the hard-coded value for the size of the indirection table and defines a new macro. Signed-off-by: Laura Mihaela Vasilescu Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 15ea8dc..5a2659b 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -343,6 +343,8 @@ struct hwmon_buff { }; #endif +#define IGB_RETA_SIZE 128 + /* board specific private data structure */ struct igb_adapter { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 7f6cf65..1acd9c0 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3157,7 +3157,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter) * we are generating the results for n and n+2 and then interleaving * those with the results with n+1 and n+3. */ - for (j = 0; j < 32; j++) { + for (j = 0; j < IGB_RETA_SIZE / 4; j++) { /* first pass generates n and n+2 */ u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues; u32 reta = (base & 0x07800780) >> (7 - shift); -- cgit v0.10.2 From ed12cc9a145132f5e59919570adff84b318f6010 Mon Sep 17 00:00:00 2001 From: Laura Mihaela Vasilescu Date: Wed, 31 Jul 2013 20:19:54 +0000 Subject: igb: Expose RSS indirection table for ethtool This patch adds the ethtool callbacks necessary to change the RETA indirection table from userspace. In order to achieve this, we add the indirection table field (rss_indir_tbl) in the board specific data structure (struct igb_adapter) to preserve the values across hardware resets. 
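For illustration only, a self-contained sketch of the default table contents this change enables (the example_ names are hypothetical; the driver itself stores the table in rss_indir_tbl and fills it with the same (j * num_rx_queues) / IGB_RETA_SIZE mapping later in this series):

#include <stdint.h>

#define EXAMPLE_RETA_SIZE 128	/* mirrors IGB_RETA_SIZE in the patch */

/* Spread num_rx_queues evenly across a 128-entry indirection table,
 * using the (j * num_rx_queues) / table_size mapping adopted below. */
static void example_fill_default_reta(uint8_t *reta, unsigned int num_rx_queues)
{
	unsigned int j;

	for (j = 0; j < EXAMPLE_RETA_SIZE; j++)
		reta[j] = (j * num_rx_queues) / EXAMPLE_RETA_SIZE;
}
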
The indirection table must be initialized with default values in the following cases: * at module init time * when the number of RX queues changes. For this reason we add a new field (rss_indir_tbl_init) in igb_adapter that keeps track of the number of RX queues. Whenever the number of RX queues changes, the rss_indir_tbl is modified and initialized with default values. The rss_indir_tbl_init is updated accordingly. CC: Ben Hutchings Signed-off-by: Laura Mihaela Vasilescu Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 5a2659b..c1fae7a 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -446,6 +446,8 @@ struct igb_adapter { struct i2c_algo_bit_data i2c_algo; struct i2c_adapter i2c_adap; struct i2c_client *i2c_client; + u32 rss_indir_tbl_init; + u8 rss_indir_tbl[IGB_RETA_SIZE]; }; #define IGB_FLAG_HAS_MSI (1 << 0) @@ -482,6 +484,7 @@ extern int igb_up(struct igb_adapter *); extern void igb_down(struct igb_adapter *); extern void igb_reinit_locked(struct igb_adapter *); extern void igb_reset(struct igb_adapter *); +extern void igb_write_rss_indir_tbl(struct igb_adapter *); extern int igb_set_spd_dplx(struct igb_adapter *, u32, u8); extern int igb_setup_tx_resources(struct igb_ring *); extern int igb_setup_rx_resources(struct igb_ring *); diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 03137e2..ce9b5a9 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2784,6 +2784,90 @@ static void igb_ethtool_complete(struct net_device *netdev) pm_runtime_put(&adapter->pdev->dev); } +static u32 igb_get_rxfh_indir_size(struct net_device *netdev) +{ + return IGB_RETA_SIZE; +} + +static int igb_get_rxfh_indir(struct net_device *netdev, u32 *indir) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + int i; + + for (i = 0; i < IGB_RETA_SIZE; i++) + indir[i] = adapter->rss_indir_tbl[i]; + + return 0; +} + +void igb_write_rss_indir_tbl(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 reg = E1000_RETA(0); + u32 shift = 0; + int i = 0; + + switch (hw->mac.type) { + case e1000_82575: + shift = 6; + break; + case e1000_82576: + /* 82576 supports 2 RSS queues for SR-IOV */ + if (adapter->vfs_allocated_count) + shift = 3; + break; + default: + break; + } + + while (i < IGB_RETA_SIZE) { + u32 val = 0; + int j; + + for (j = 3; j >= 0; j--) { + val <<= 8; + val |= adapter->rss_indir_tbl[i + j]; + } + + wr32(reg, val << shift); + reg += 4; + i += 4; + } +} + +static int igb_set_rxfh_indir(struct net_device *netdev, const u32 *indir) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int i; + u32 num_queues; + + num_queues = adapter->rss_queues; + + switch (hw->mac.type) { + case e1000_82576: + /* 82576 supports 2 RSS queues for SR-IOV */ + if (adapter->vfs_allocated_count) + num_queues = 2; + break; + default: + break; + } + + /* Verify user input. 
*/ + for (i = 0; i < IGB_RETA_SIZE; i++) + if (indir[i] >= num_queues) + return -EINVAL; + + + for (i = 0; i < IGB_RETA_SIZE; i++) + adapter->rss_indir_tbl[i] = indir[i]; + + igb_write_rss_indir_tbl(adapter); + + return 0; +} + static const struct ethtool_ops igb_ethtool_ops = { .get_settings = igb_get_settings, .set_settings = igb_set_settings, @@ -2817,6 +2901,9 @@ static const struct ethtool_ops igb_ethtool_ops = { .set_eee = igb_set_eee, .get_module_info = igb_get_module_info, .get_module_eeprom = igb_get_module_eeprom, + .get_rxfh_indir_size = igb_get_rxfh_indir_size, + .get_rxfh_indir = igb_get_rxfh_indir, + .set_rxfh_indir = igb_set_rxfh_indir, .begin = igb_ethtool_begin, .complete = igb_ethtool_complete, }; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 1acd9c0..df33c4b 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3126,7 +3126,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 mrqc, rxcsum; - u32 j, num_rx_queues, shift = 0; + u32 j, num_rx_queues; static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741, 0xB08FA343, 0xCB2BCAD0, 0xB4307BAE, 0xA32DCB77, 0x0CF23080, 0x3BB7426A, @@ -3139,35 +3139,21 @@ static void igb_setup_mrqc(struct igb_adapter *adapter) num_rx_queues = adapter->rss_queues; switch (hw->mac.type) { - case e1000_82575: - shift = 6; - break; case e1000_82576: /* 82576 supports 2 RSS queues for SR-IOV */ - if (adapter->vfs_allocated_count) { - shift = 3; + if (adapter->vfs_allocated_count) num_rx_queues = 2; - } break; default: break; } - /* Populate the indirection table 4 entries at a time. To do this - * we are generating the results for n and n+2 and then interleaving - * those with the results with n+1 and n+3. - */ - for (j = 0; j < IGB_RETA_SIZE / 4; j++) { - /* first pass generates n and n+2 */ - u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues; - u32 reta = (base & 0x07800780) >> (7 - shift); - - /* second pass generates n+1 and n+3 */ - base += 0x00010001 * num_rx_queues; - reta |= (base & 0x07800780) << (1 + shift); - - wr32(E1000_RETA(j), reta); + if (adapter->rss_indir_tbl_init != num_rx_queues) { + for (j = 0; j < IGB_RETA_SIZE; j++) + adapter->rss_indir_tbl[j] = (j * num_rx_queues) / IGB_RETA_SIZE; + adapter->rss_indir_tbl_init = num_rx_queues; } + igb_write_rss_indir_tbl(adapter); /* Disable raw packet checksumming so that RSS hash is placed in * descriptor on writeback. No need to enable TCP/UDP/IP checksum -- cgit v0.10.2 From 0cf04597b46513d253178acabe451e230dd51ecd Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Fri, 2 Aug 2013 03:33:32 +0000 Subject: e1000e: cleanup whitespace in recent commit Commit (c96ddb0b e1000e: Use marco instead of digit for defining e1000_rx_desc_packet_split) moved a define from one file to another but missed using proper indentation/whitespace. 
CC: Sergei Shtylyov Signed-off-by: Bruce Allan Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index b799fd9..b7f3843 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -233,7 +233,8 @@ union e1000_rx_desc_extended { #define MAX_PS_BUFFERS 4 /* Number of packet split data buffers (not including the header buffer) */ -#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) +#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) + /* Receive Descriptor - Packet Split */ union e1000_rx_desc_packet_split { struct { -- cgit v0.10.2 From 13129d9b6183bf93b005ed9ad0eb46122794fe21 Mon Sep 17 00:00:00 2001 From: Carolyn Wyborny Date: Sat, 3 Aug 2013 01:53:54 +0000 Subject: e1000e: Add code to check for failure of pci_disable_link_state call This patch attempts to work around a problem found with some systems where the call to pci_diable_link_state_locked() fails. As a result, ASPM is not, in fact, disabled. Changing disable ASPM code to check if state actually is disabled after the call and, if not, try another way to disable it. Signed-off-by: Carolyn Wyborny Acked-by: Bruce W. Allan Tested-by: Pavel Machek Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index e6d2c0f..59c0c54 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -64,8 +64,6 @@ static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); -static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state); - static const struct e1000_info *e1000_info_tbl[] = { [board_82571] = &e1000_82571_info, [board_82572] = &e1000_82572_info, @@ -6019,38 +6017,73 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) return 0; } -#ifdef CONFIG_PCIEASPM -static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) +/** + * e1000e_disable_aspm - Disable ASPM states + * @pdev: pointer to PCI device struct + * @state: bit-mask of ASPM states to disable + * + * Some devices *must* have certain ASPM states disabled per hardware errata. + **/ +static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) { + struct pci_dev *parent = pdev->bus->self; + u16 aspm_dis_mask = 0; + u16 pdev_aspmc, parent_aspmc; + + switch (state) { + case PCIE_LINK_STATE_L0S: + case PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1: + aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L0S; + /* fall-through - can't have L1 without L0s */ + case PCIE_LINK_STATE_L1: + aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L1; + break; + default: + return; + } + + pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc); + pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC; + + if (parent) { + pcie_capability_read_word(parent, PCI_EXP_LNKCTL, + &parent_aspmc); + parent_aspmc &= PCI_EXP_LNKCTL_ASPMC; + } + + /* Nothing to do if the ASPM states to be disabled already are */ + if (!(pdev_aspmc & aspm_dis_mask) && + (!parent || !(parent_aspmc & aspm_dis_mask))) + return; + + dev_info(&pdev->dev, "Disabling ASPM %s %s\n", + (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L0S) ? + "L0s" : "", + (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L1) ? 
+ "L1" : ""); + +#ifdef CONFIG_PCIEASPM pci_disable_link_state_locked(pdev, state); -} -#else -static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) -{ - u16 aspm_ctl = 0; - if (state & PCIE_LINK_STATE_L0S) - aspm_ctl |= PCI_EXP_LNKCTL_ASPM_L0S; - if (state & PCIE_LINK_STATE_L1) - aspm_ctl |= PCI_EXP_LNKCTL_ASPM_L1; + /* Double-check ASPM control. If not disabled by the above, the + * BIOS is preventing that from happening (or CONFIG_PCIEASPM is + * not enabled); override by writing PCI config space directly. + */ + pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc); + pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC; + + if (!(aspm_dis_mask & pdev_aspmc)) + return; +#endif /* Both device and parent should have the same ASPM setting. * Disable ASPM in downstream component first and then upstream. */ - pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_ctl); - - if (pdev->bus->self) - pcie_capability_clear_word(pdev->bus->self, PCI_EXP_LNKCTL, - aspm_ctl); -} -#endif -static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) -{ - dev_info(&pdev->dev, "Disabling ASPM %s %s\n", - (state & PCIE_LINK_STATE_L0S) ? "L0s" : "", - (state & PCIE_LINK_STATE_L1) ? "L1" : ""); + pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_dis_mask); - __e1000e_disable_aspm(pdev, state); + if (parent) + pcie_capability_clear_word(parent, PCI_EXP_LNKCTL, + aspm_dis_mask); } #ifdef CONFIG_PM -- cgit v0.10.2 From e8c254c5feb92c5b9a6950bc7351e2b1b7b09b93 Mon Sep 17 00:00:00 2001 From: Li Zhang Date: Tue, 13 Aug 2013 18:42:58 +0000 Subject: e1000e: Avoid kernel crash during shutdown While doing shutdown on the PCI device, the corresponding callback function e1000e_shutdown() is trying to clear those correctable errors on the upstream P2P bridge. Unfortunately, we don't have the upstream P2P bridge under some cases (e.g. PCI-passthrou for KVM on Power). That leads to kernel crash eventually. The patch adds one more check on that to avoid kernel crash. Signed-off-by: Li Zhang Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 59c0c54..e87e9b0 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -5999,11 +5999,18 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) * correctable error when the MAC transitions from D0 to D3. To * prevent this we need to mask off the correctable errors on the * downstream port of the pci-e switch. + * + * We don't have the associated upstream bridge while assigning + * the PCI device into guest. For example, the KVM on power is + * one of the cases. 
*/ if (adapter->flags & FLAG_IS_QUAD_PORT) { struct pci_dev *us_dev = pdev->bus->self; u16 devctl; + if (!us_dev) + return 0; + pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl); pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, (devctl & ~PCI_EXP_DEVCTL_CERE)); -- cgit v0.10.2 From 22f8abaa8fa6e62c30eefc8cab99dcaea1f6a882 Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Thu, 15 Aug 2013 03:43:24 +0000 Subject: e1000e: resolve checkpatch JIFFIES_COMPARISON warning WARNING:JIFFIES_COMPARISON: Comparing jiffies is almost always wrong; prefer time_after, time_before and friends Signed-off-by: Bruce Allan Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index e4ebd7d..a8633b8 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -1665,7 +1665,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) ret_val = 13; /* ret_val is the same as mis-compare */ break; } - if (jiffies >= (time + 20)) { + if (time_after(jiffies, time + 20)) { ret_val = 14; /* error code for time out error */ break; } -- cgit v0.10.2 From 910c485dc16be51fd6f4ed13cc14a39d7feacf7e Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Mon, 19 Nov 2012 19:05:25 +0000 Subject: sfc: Remove unused filter_flags variables and efx_farch_filter_id_flags() Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c index fb01fdb..a588c95 100644 --- a/drivers/net/ethernet/sfc/filter.c +++ b/drivers/net/ethernet/sfc/filter.c @@ -609,16 +609,6 @@ static inline unsigned int efx_filter_id_index(u32 id) return id & EFX_FILTER_INDEX_MASK; } -static inline u8 efx_filter_id_flags(u32 id) -{ - unsigned int range = id >> EFX_FILTER_INDEX_WIDTH; - - if (range < EFX_FILTER_MATCH_PRI_COUNT) - return EFX_FILTER_FLAG_RX; - else - return EFX_FILTER_FLAG_TX; -} - u32 efx_filter_get_rx_id_limit(struct efx_nic *efx) { struct efx_filter_state *state = efx->filter_state; @@ -829,7 +819,6 @@ int efx_filter_remove_id_safe(struct efx_nic *efx, struct efx_filter_table *table; unsigned int filter_idx; struct efx_filter_spec *spec; - u8 filter_flags; int rc; table_id = efx_filter_id_table_id(filter_id); @@ -842,8 +831,6 @@ int efx_filter_remove_id_safe(struct efx_nic *efx, return -ENOENT; spec = &table->spec[filter_idx]; - filter_flags = efx_filter_id_flags(filter_id); - spin_lock_bh(&state->lock); if (test_bit(filter_idx, table->used_bitmap) && @@ -880,7 +867,6 @@ int efx_filter_get_filter_safe(struct efx_nic *efx, struct efx_filter_table *table; struct efx_filter_spec *spec; unsigned int filter_idx; - u8 filter_flags; int rc; table_id = efx_filter_id_table_id(filter_id); @@ -893,8 +879,6 @@ int efx_filter_get_filter_safe(struct efx_nic *efx, return -ENOENT; spec = &table->spec[filter_idx]; - filter_flags = efx_filter_id_flags(filter_id); - spin_lock_bh(&state->lock); if (test_bit(filter_idx, table->used_bitmap) && -- cgit v0.10.2 From 33a8985c4a414e468c33598c1d70b139bc97b909 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Fri, 26 Oct 2012 23:55:46 +0100 Subject: sfc: Rename Falcon-arch filter implementation types and functions The filter table(s) on EF10 are managed by firmware and will need almost entirely separate code. Rename the types and functions used within the existing implementation. The current definition of struct efx_filter_spec is really implementation-specific, so we need to keep it. 
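To make the intent concrete, a minimal standalone sketch of keeping the existing generic structure while adding a layout-identical internal one, with a plain copy at the boundary (the example_ types and their members are simplified stand-ins, not the driver's definitions):

#include <string.h>
#include <stdint.h>

/* Generic spec as seen by the rest of the driver. */
struct example_filter_spec {
	uint8_t  type;
	uint8_t  priority;
	uint8_t  flags;
	uint16_t dmaq_id;
	uint32_t data[3];
};

/* Architecture-internal spec; identical layout for now. */
struct example_farch_filter_spec {
	uint8_t  type;
	uint8_t  priority;
	uint8_t  flags;
	uint16_t dmaq_id;
	uint32_t data[3];
};

/* While the two layouts stay identical, the insert path can accept a
 * generic spec at the API boundary and convert it with a byte copy,
 * the same pattern the diff below uses with memcpy(). */
static void
example_spec_to_farch(struct example_farch_filter_spec *dst,
		      const struct example_filter_spec *src)
{
	memcpy(dst, src, sizeof(*dst));
}
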
For now, define a separate structure for the internal representation but leave them identical. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c index a588c95..c547630 100644 --- a/drivers/net/ethernet/sfc/filter.c +++ b/drivers/net/ethernet/sfc/filter.c @@ -19,60 +19,69 @@ * Due to pipelined implementation we need to program H/W with a value that * is larger than the hop limit we want. */ -#define FILTER_CTL_SRCH_FUDGE_WILD 3 -#define FILTER_CTL_SRCH_FUDGE_FULL 1 +#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3 +#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1 /* Hard maximum hop limit. Hardware will time-out beyond 200-something. - * We also need to avoid infinite loops in efx_filter_search() when the + * We also need to avoid infinite loops in efx_farch_filter_search() when the * table is full. */ -#define FILTER_CTL_SRCH_MAX 200 +#define EFX_FARCH_FILTER_CTL_SRCH_MAX 200 /* Don't try very hard to find space for performance hints, as this is * counter-productive. */ -#define FILTER_CTL_SRCH_HINT_MAX 5 - -enum efx_filter_table_id { - EFX_FILTER_TABLE_RX_IP = 0, - EFX_FILTER_TABLE_RX_MAC, - EFX_FILTER_TABLE_RX_DEF, - EFX_FILTER_TABLE_TX_MAC, - EFX_FILTER_TABLE_COUNT, +#define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5 + +enum efx_farch_filter_table_id { + EFX_FARCH_FILTER_TABLE_RX_IP = 0, + EFX_FARCH_FILTER_TABLE_RX_MAC, + EFX_FARCH_FILTER_TABLE_RX_DEF, + EFX_FARCH_FILTER_TABLE_TX_MAC, + EFX_FARCH_FILTER_TABLE_COUNT, }; -enum efx_filter_index { - EFX_FILTER_INDEX_UC_DEF, - EFX_FILTER_INDEX_MC_DEF, - EFX_FILTER_SIZE_RX_DEF, +enum efx_farch_filter_index { + EFX_FARCH_FILTER_INDEX_UC_DEF, + EFX_FARCH_FILTER_INDEX_MC_DEF, + EFX_FARCH_FILTER_SIZE_RX_DEF, }; -struct efx_filter_table { - enum efx_filter_table_id id; +struct efx_farch_filter_spec { + u8 type:4; + u8 priority:4; + u8 flags; + u16 dmaq_id; + u32 data[3]; +}; + +struct efx_farch_filter_table { + enum efx_farch_filter_table_id id; u32 offset; /* address of table relative to BAR */ unsigned size; /* number of entries */ unsigned step; /* step between entries */ unsigned used; /* number currently used */ unsigned long *used_bitmap; - struct efx_filter_spec *spec; + struct efx_farch_filter_spec *spec; unsigned search_depth[EFX_FILTER_TYPE_COUNT]; }; struct efx_filter_state { spinlock_t lock; - struct efx_filter_table table[EFX_FILTER_TABLE_COUNT]; + struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT]; #ifdef CONFIG_RFS_ACCEL u32 *rps_flow_id; unsigned rps_expire_index; #endif }; -static void efx_filter_table_clear_entry(struct efx_nic *efx, - struct efx_filter_table *table, - unsigned int filter_idx); +static void +efx_farch_filter_table_clear_entry(struct efx_nic *efx, + struct efx_farch_filter_table *table, + unsigned int filter_idx); /* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit * key derived from the n-tuple. The initial LFSR state is 0xffff. */ -static u16 efx_filter_hash(u32 key) +static u16 efx_farch_filter_hash(u32 key) { u16 tmp; @@ -88,89 +97,97 @@ static u16 efx_filter_hash(u32 key) /* To allow for hash collisions, filter search continues at these * increments from the first possible entry selected by the hash. 
*/ -static u16 efx_filter_increment(u32 key) +static u16 efx_farch_filter_increment(u32 key) { return key * 2 - 1; } -static enum efx_filter_table_id -efx_filter_spec_table_id(const struct efx_filter_spec *spec) +static enum efx_farch_filter_table_id +efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec) { - BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_FULL >> 2)); - BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_WILD >> 2)); - BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_FULL >> 2)); - BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_WILD >> 2)); - BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_FULL >> 2)); - BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_WILD >> 2)); - BUILD_BUG_ON(EFX_FILTER_TABLE_TX_MAC != EFX_FILTER_TABLE_RX_MAC + 2); + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != + (EFX_FILTER_TCP_FULL >> 2)); + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != + (EFX_FILTER_TCP_WILD >> 2)); + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != + (EFX_FILTER_UDP_FULL >> 2)); + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != + (EFX_FILTER_UDP_WILD >> 2)); + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC != + (EFX_FILTER_MAC_FULL >> 2)); + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC != + (EFX_FILTER_MAC_WILD >> 2)); + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC != + EFX_FARCH_FILTER_TABLE_RX_MAC + 2); EFX_BUG_ON_PARANOID(spec->type == EFX_FILTER_UNSPEC); return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0); } -static struct efx_filter_table * -efx_filter_spec_table(struct efx_filter_state *state, - const struct efx_filter_spec *spec) +static struct efx_farch_filter_table * +efx_farch_filter_spec_table(struct efx_filter_state *state, + const struct efx_farch_filter_spec *spec) { if (spec->type == EFX_FILTER_UNSPEC) return NULL; else - return &state->table[efx_filter_spec_table_id(spec)]; + return &state->table[efx_farch_filter_spec_table_id(spec)]; } -static void efx_filter_table_reset_search_depth(struct efx_filter_table *table) +static void +efx_farch_filter_table_reset_search_depth(struct efx_farch_filter_table *table) { memset(table->search_depth, 0, sizeof(table->search_depth)); } -static void efx_filter_push_rx_config(struct efx_nic *efx) +static void efx_farch_filter_push_rx_config(struct efx_nic *efx) { struct efx_filter_state *state = efx->filter_state; - struct efx_filter_table *table; + struct efx_farch_filter_table *table; efx_oword_t filter_ctl; efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); - table = &state->table[EFX_FILTER_TABLE_RX_IP]; + table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT, table->search_depth[EFX_FILTER_TCP_FULL] + - FILTER_CTL_SRCH_FUDGE_FULL); + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT, table->search_depth[EFX_FILTER_TCP_WILD] + - FILTER_CTL_SRCH_FUDGE_WILD); + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT, table->search_depth[EFX_FILTER_UDP_FULL] + - FILTER_CTL_SRCH_FUDGE_FULL); + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT, table->search_depth[EFX_FILTER_UDP_WILD] + - FILTER_CTL_SRCH_FUDGE_WILD); + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); - table = &state->table[EFX_FILTER_TABLE_RX_MAC]; + table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC]; if (table->size) { EFX_SET_OWORD_FIELD( filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT, 
table->search_depth[EFX_FILTER_MAC_FULL] + - FILTER_CTL_SRCH_FUDGE_FULL); + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); EFX_SET_OWORD_FIELD( filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT, table->search_depth[EFX_FILTER_MAC_WILD] + - FILTER_CTL_SRCH_FUDGE_WILD); + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); } - table = &state->table[EFX_FILTER_TABLE_RX_DEF]; + table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; if (table->size) { EFX_SET_OWORD_FIELD( filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID, - table->spec[EFX_FILTER_INDEX_UC_DEF].dmaq_id); + table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id); EFX_SET_OWORD_FIELD( filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED, - !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags & + !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags & EFX_FILTER_FLAG_RX_RSS)); EFX_SET_OWORD_FIELD( filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID, - table->spec[EFX_FILTER_INDEX_MC_DEF].dmaq_id); + table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id); EFX_SET_OWORD_FIELD( filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED, - !!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags & + !!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags & EFX_FILTER_FLAG_RX_RSS)); /* There is a single bit to enable RX scatter for all @@ -179,8 +196,8 @@ static void efx_filter_push_rx_config(struct efx_nic *efx) */ EFX_SET_OWORD_FIELD( filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, - !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags & - table->spec[EFX_FILTER_INDEX_MC_DEF].flags & + !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags & + table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags & EFX_FILTER_FLAG_RX_SCATTER)); } else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { /* We don't expose 'default' filters because unmatched @@ -196,24 +213,24 @@ static void efx_filter_push_rx_config(struct efx_nic *efx) efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); } -static void efx_filter_push_tx_limits(struct efx_nic *efx) +static void efx_farch_filter_push_tx_limits(struct efx_nic *efx) { struct efx_filter_state *state = efx->filter_state; - struct efx_filter_table *table; + struct efx_farch_filter_table *table; efx_oword_t tx_cfg; efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG); - table = &state->table[EFX_FILTER_TABLE_TX_MAC]; + table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC]; if (table->size) { EFX_SET_OWORD_FIELD( tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE, table->search_depth[EFX_FILTER_MAC_FULL] + - FILTER_CTL_SRCH_FUDGE_FULL); + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); EFX_SET_OWORD_FIELD( tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE, table->search_depth[EFX_FILTER_MAC_WILD] + - FILTER_CTL_SRCH_FUDGE_WILD); + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); } efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG); @@ -427,24 +444,23 @@ int efx_filter_set_mc_def(struct efx_filter_spec *spec) return 0; } -static void efx_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx) +static void +efx_farch_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx) { struct efx_filter_state *state = efx->filter_state; - struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_DEF]; - struct efx_filter_spec *spec = &table->spec[filter_idx]; - enum efx_filter_flags flags = 0; + struct efx_farch_filter_table *table = + &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; + struct efx_farch_filter_spec *spec = &table->spec[filter_idx]; /* If there's only one channel then disable RSS for non VF * traffic, thereby allowing VFs to use RSS when the PF can't. 
*/ - if (efx->n_rx_channels > 1) - flags |= EFX_FILTER_FLAG_RX_RSS; - - if (efx->rx_scatter) - flags |= EFX_FILTER_FLAG_RX_SCATTER; - - efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL, flags, 0); spec->type = EFX_FILTER_UC_DEF + filter_idx; + spec->priority = EFX_FILTER_PRI_MANUAL; + spec->flags = (EFX_FILTER_FLAG_RX | + (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) | + (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0)); + spec->dmaq_id = 0; table->used_bitmap[0] |= 1 << filter_idx; } @@ -472,12 +488,13 @@ int efx_filter_get_eth_local(const struct efx_filter_spec *spec, } /* Build a filter entry and return its n-tuple key. */ -static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec) +static u32 efx_farch_filter_build(efx_oword_t *filter, + struct efx_farch_filter_spec *spec) { u32 data3; - switch (efx_filter_spec_table_id(spec)) { - case EFX_FILTER_TABLE_RX_IP: { + switch (efx_farch_filter_spec_table_id(spec)) { + case EFX_FARCH_FILTER_TABLE_RX_IP: { bool is_udp = (spec->type == EFX_FILTER_UDP_FULL || spec->type == EFX_FILTER_UDP_WILD); EFX_POPULATE_OWORD_7( @@ -495,7 +512,7 @@ static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec) break; } - case EFX_FILTER_TABLE_RX_MAC: { + case EFX_FARCH_FILTER_TABLE_RX_MAC: { bool is_wild = spec->type == EFX_FILTER_MAC_WILD; EFX_POPULATE_OWORD_7( *filter, @@ -512,7 +529,7 @@ static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec) break; } - case EFX_FILTER_TABLE_TX_MAC: { + case EFX_FARCH_FILTER_TABLE_TX_MAC: { bool is_wild = spec->type == EFX_FILTER_MAC_WILD; EFX_POPULATE_OWORD_5(*filter, FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id, @@ -531,8 +548,8 @@ static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec) return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3; } -static bool efx_filter_equal(const struct efx_filter_spec *left, - const struct efx_filter_spec *right) +static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left, + const struct efx_farch_filter_spec *right) { if (left->type != right->type || memcmp(left->data, right->data, sizeof(left->data))) @@ -554,9 +571,9 @@ static bool efx_filter_equal(const struct efx_filter_spec *left, * accept user-provided IDs. 
*/ -#define EFX_FILTER_MATCH_PRI_COUNT 5 +#define EFX_FARCH_FILTER_MATCH_PRI_COUNT 5 -static const u8 efx_filter_type_match_pri[EFX_FILTER_TYPE_COUNT] = { +static const u8 efx_farch_filter_type_match_pri[EFX_FILTER_TYPE_COUNT] = { [EFX_FILTER_TCP_FULL] = 0, [EFX_FILTER_UDP_FULL] = 0, [EFX_FILTER_TCP_WILD] = 1, @@ -567,58 +584,58 @@ static const u8 efx_filter_type_match_pri[EFX_FILTER_TYPE_COUNT] = { [EFX_FILTER_MC_DEF] = 4, }; -static const enum efx_filter_table_id efx_filter_range_table[] = { - EFX_FILTER_TABLE_RX_IP, /* RX match pri 0 */ - EFX_FILTER_TABLE_RX_IP, - EFX_FILTER_TABLE_RX_MAC, - EFX_FILTER_TABLE_RX_MAC, - EFX_FILTER_TABLE_RX_DEF, /* RX match pri 4 */ - EFX_FILTER_TABLE_COUNT, /* TX match pri 0; invalid */ - EFX_FILTER_TABLE_COUNT, /* invalid */ - EFX_FILTER_TABLE_TX_MAC, - EFX_FILTER_TABLE_TX_MAC, /* TX match pri 3 */ +static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = { + EFX_FARCH_FILTER_TABLE_RX_IP, /* RX match pri 0 */ + EFX_FARCH_FILTER_TABLE_RX_IP, + EFX_FARCH_FILTER_TABLE_RX_MAC, + EFX_FARCH_FILTER_TABLE_RX_MAC, + EFX_FARCH_FILTER_TABLE_RX_DEF, /* RX match pri 4 */ + EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 0 */ + EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 1 */ }; -#define EFX_FILTER_INDEX_WIDTH 13 -#define EFX_FILTER_INDEX_MASK ((1 << EFX_FILTER_INDEX_WIDTH) - 1) +#define EFX_FARCH_FILTER_INDEX_WIDTH 13 +#define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1) static inline u32 -efx_filter_make_id(const struct efx_filter_spec *spec, unsigned int index) +efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec, + unsigned int index) { unsigned int range; - range = efx_filter_type_match_pri[spec->type]; + range = efx_farch_filter_type_match_pri[spec->type]; if (!(spec->flags & EFX_FILTER_FLAG_RX)) - range += EFX_FILTER_MATCH_PRI_COUNT; + range += EFX_FARCH_FILTER_MATCH_PRI_COUNT; - return range << EFX_FILTER_INDEX_WIDTH | index; + return range << EFX_FARCH_FILTER_INDEX_WIDTH | index; } -static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id) +static inline enum efx_farch_filter_table_id +efx_farch_filter_id_table_id(u32 id) { - unsigned int range = id >> EFX_FILTER_INDEX_WIDTH; + unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH; - if (range < ARRAY_SIZE(efx_filter_range_table)) - return efx_filter_range_table[range]; + if (range < ARRAY_SIZE(efx_farch_filter_range_table)) + return efx_farch_filter_range_table[range]; else - return EFX_FILTER_TABLE_COUNT; /* invalid */ + return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */ } -static inline unsigned int efx_filter_id_index(u32 id) +static inline unsigned int efx_farch_filter_id_index(u32 id) { - return id & EFX_FILTER_INDEX_MASK; + return id & EFX_FARCH_FILTER_INDEX_MASK; } u32 efx_filter_get_rx_id_limit(struct efx_nic *efx) { struct efx_filter_state *state = efx->filter_state; - unsigned int range = EFX_FILTER_MATCH_PRI_COUNT - 1; - enum efx_filter_table_id table_id; + unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1; + enum efx_farch_filter_table_id table_id; do { - table_id = efx_filter_range_table[range]; + table_id = efx_farch_filter_range_table[range]; if (state->table[table_id].size != 0) - return range << EFX_FILTER_INDEX_WIDTH | + return range << EFX_FARCH_FILTER_INDEX_WIDTH | state->table[table_id].size; } while (range--); @@ -643,29 +660,35 @@ u32 efx_filter_get_rx_id_limit(struct efx_nic *efx) * the existing filter has higher priority or -%EEXIST if it has * equal priority. 
*/ -s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, +s32 efx_filter_insert_filter(struct efx_nic *efx, + struct efx_filter_spec *gen_spec, bool replace_equal) { struct efx_filter_state *state = efx->filter_state; - struct efx_filter_table *table = efx_filter_spec_table(state, spec); + struct efx_farch_filter_table *table; + struct efx_farch_filter_spec spec; efx_oword_t filter; int rep_index, ins_index; unsigned int depth = 0; int rc; + /* XXX efx_farch_filter_spec and efx_filter_spec will diverge in future */ + memcpy(&spec, gen_spec, sizeof(*gen_spec)); + + table = efx_farch_filter_spec_table(state, &spec); if (!table || table->size == 0) return -EINVAL; netif_vdbg(efx, hw, efx->net_dev, - "%s: type %d search_depth=%d", __func__, spec->type, - table->search_depth[spec->type]); + "%s: type %d search_depth=%d", __func__, spec.type, + table->search_depth[spec.type]); - if (table->id == EFX_FILTER_TABLE_RX_DEF) { + if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) { /* One filter spec per type */ - BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0); - BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF != + BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0); + BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF != EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF); - rep_index = spec->type - EFX_FILTER_UC_DEF; + rep_index = spec.type - EFX_FILTER_UC_DEF; ins_index = rep_index; spin_lock_bh(&state->lock); @@ -685,13 +708,14 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, * (b) we have searched exhaustively for (1), and have * either found (2) or searched exhaustively for it */ - u32 key = efx_filter_build(&filter, spec); - unsigned int hash = efx_filter_hash(key); - unsigned int incr = efx_filter_increment(key); - unsigned int max_rep_depth = table->search_depth[spec->type]; + u32 key = efx_farch_filter_build(&filter, &spec); + unsigned int hash = efx_farch_filter_hash(key); + unsigned int incr = efx_farch_filter_increment(key); + unsigned int max_rep_depth = table->search_depth[spec.type]; unsigned int max_ins_depth = - spec->priority <= EFX_FILTER_PRI_HINT ? - FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX; + spec.priority <= EFX_FILTER_PRI_HINT ? 
+ EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX : + EFX_FARCH_FILTER_CTL_SRCH_MAX; unsigned int i = hash & (table->size - 1); ins_index = -1; @@ -703,7 +727,8 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, if (!test_bit(i, table->used_bitmap)) { if (ins_index < 0) ins_index = i; - } else if (efx_filter_equal(spec, &table->spec[i])) { + } else if (efx_farch_filter_equal(&spec, + &table->spec[i])) { /* Case (a) */ if (ins_index < 0) ins_index = i; @@ -731,13 +756,14 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, * should do so */ if (rep_index >= 0) { - struct efx_filter_spec *saved_spec = &table->spec[rep_index]; + struct efx_farch_filter_spec *saved_spec = + &table->spec[rep_index]; - if (spec->priority == saved_spec->priority && !replace_equal) { + if (spec.priority == saved_spec->priority && !replace_equal) { rc = -EEXIST; goto out; } - if (spec->priority < saved_spec->priority) { + if (spec.priority < saved_spec->priority) { rc = -EPERM; goto out; } @@ -748,17 +774,17 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, __set_bit(ins_index, table->used_bitmap); ++table->used; } - table->spec[ins_index] = *spec; + table->spec[ins_index] = spec; - if (table->id == EFX_FILTER_TABLE_RX_DEF) { - efx_filter_push_rx_config(efx); + if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) { + efx_farch_filter_push_rx_config(efx); } else { - if (table->search_depth[spec->type] < depth) { - table->search_depth[spec->type] = depth; - if (spec->flags & EFX_FILTER_FLAG_TX) - efx_filter_push_tx_limits(efx); + if (table->search_depth[spec.type] < depth) { + table->search_depth[spec.type] = depth; + if (spec.flags & EFX_FILTER_FLAG_TX) + efx_farch_filter_push_tx_limits(efx); else - efx_filter_push_rx_config(efx); + efx_farch_filter_push_rx_config(efx); } efx_writeo(efx, &filter, @@ -768,29 +794,31 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, * at a lower depth, clear the replaced filter */ if (ins_index != rep_index && rep_index >= 0) - efx_filter_table_clear_entry(efx, table, rep_index); + efx_farch_filter_table_clear_entry(efx, table, + rep_index); } netif_vdbg(efx, hw, efx->net_dev, "%s: filter type %d index %d rxq %u set", - __func__, spec->type, ins_index, spec->dmaq_id); - rc = efx_filter_make_id(spec, ins_index); + __func__, spec.type, ins_index, spec.dmaq_id); + rc = efx_farch_filter_make_id(&spec, ins_index); out: spin_unlock_bh(&state->lock); return rc; } -static void efx_filter_table_clear_entry(struct efx_nic *efx, - struct efx_filter_table *table, - unsigned int filter_idx) +static void +efx_farch_filter_table_clear_entry(struct efx_nic *efx, + struct efx_farch_filter_table *table, + unsigned int filter_idx) { static efx_oword_t filter; - if (table->id == EFX_FILTER_TABLE_RX_DEF) { + if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) { /* RX default filters must always exist */ - efx_filter_reset_rx_def(efx, filter_idx); - efx_filter_push_rx_config(efx); + efx_farch_filter_reset_rx_def(efx, filter_idx); + efx_farch_filter_push_rx_config(efx); } else if (test_bit(filter_idx, table->used_bitmap)) { __clear_bit(filter_idx, table->used_bitmap); --table->used; @@ -815,18 +843,18 @@ int efx_filter_remove_id_safe(struct efx_nic *efx, u32 filter_id) { struct efx_filter_state *state = efx->filter_state; - enum efx_filter_table_id table_id; - struct efx_filter_table *table; + enum efx_farch_filter_table_id table_id; + struct efx_farch_filter_table *table; unsigned int filter_idx; - 
struct efx_filter_spec *spec; + struct efx_farch_filter_spec *spec; int rc; - table_id = efx_filter_id_table_id(filter_id); - if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT) + table_id = efx_farch_filter_id_table_id(filter_id); + if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT) return -ENOENT; table = &state->table[table_id]; - filter_idx = efx_filter_id_index(filter_id); + filter_idx = efx_farch_filter_id_index(filter_id); if (filter_idx >= table->size) return -ENOENT; spec = &table->spec[filter_idx]; @@ -835,9 +863,9 @@ int efx_filter_remove_id_safe(struct efx_nic *efx, if (test_bit(filter_idx, table->used_bitmap) && spec->priority == priority) { - efx_filter_table_clear_entry(efx, table, filter_idx); + efx_farch_filter_table_clear_entry(efx, table, filter_idx); if (table->used == 0) - efx_filter_table_reset_search_depth(table); + efx_farch_filter_table_reset_search_depth(table); rc = 0; } else { rc = -ENOENT; @@ -863,18 +891,18 @@ int efx_filter_get_filter_safe(struct efx_nic *efx, u32 filter_id, struct efx_filter_spec *spec_buf) { struct efx_filter_state *state = efx->filter_state; - enum efx_filter_table_id table_id; - struct efx_filter_table *table; - struct efx_filter_spec *spec; + enum efx_farch_filter_table_id table_id; + struct efx_farch_filter_table *table; + struct efx_farch_filter_spec *spec; unsigned int filter_idx; int rc; - table_id = efx_filter_id_table_id(filter_id); - if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT) + table_id = efx_farch_filter_id_table_id(filter_id); + if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT) return -ENOENT; table = &state->table[table_id]; - filter_idx = efx_filter_id_index(filter_id); + filter_idx = efx_farch_filter_id_index(filter_id); if (filter_idx >= table->size) return -ENOENT; spec = &table->spec[filter_idx]; @@ -883,7 +911,8 @@ int efx_filter_get_filter_safe(struct efx_nic *efx, if (test_bit(filter_idx, table->used_bitmap) && spec->priority == priority) { - *spec_buf = *spec; + /* XXX efx_farch_filter_spec and efx_filter_spec will diverge */ + memcpy(spec_buf, spec, sizeof(*spec)); rc = 0; } else { rc = -ENOENT; @@ -894,21 +923,23 @@ int efx_filter_get_filter_safe(struct efx_nic *efx, return rc; } -static void efx_filter_table_clear(struct efx_nic *efx, - enum efx_filter_table_id table_id, - enum efx_filter_priority priority) +static void +efx_farch_filter_table_clear(struct efx_nic *efx, + enum efx_farch_filter_table_id table_id, + enum efx_filter_priority priority) { struct efx_filter_state *state = efx->filter_state; - struct efx_filter_table *table = &state->table[table_id]; + struct efx_farch_filter_table *table = &state->table[table_id]; unsigned int filter_idx; spin_lock_bh(&state->lock); for (filter_idx = 0; filter_idx < table->size; ++filter_idx) if (table->spec[filter_idx].priority <= priority) - efx_filter_table_clear_entry(efx, table, filter_idx); + efx_farch_filter_table_clear_entry(efx, table, + filter_idx); if (table->used == 0) - efx_filter_table_reset_search_depth(table); + efx_farch_filter_table_reset_search_depth(table); spin_unlock_bh(&state->lock); } @@ -920,23 +951,25 @@ static void efx_filter_table_clear(struct efx_nic *efx, */ void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority) { - efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP, priority); - efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, priority); + efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP, + priority); + efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC, 
+ priority); } u32 efx_filter_count_rx_used(struct efx_nic *efx, enum efx_filter_priority priority) { struct efx_filter_state *state = efx->filter_state; - enum efx_filter_table_id table_id; - struct efx_filter_table *table; + enum efx_farch_filter_table_id table_id; + struct efx_farch_filter_table *table; unsigned int filter_idx; u32 count = 0; spin_lock_bh(&state->lock); - for (table_id = EFX_FILTER_TABLE_RX_IP; - table_id <= EFX_FILTER_TABLE_RX_DEF; + for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; + table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; table_id++) { table = &state->table[table_id]; for (filter_idx = 0; filter_idx < table->size; filter_idx++) { @@ -956,15 +989,15 @@ s32 efx_filter_get_rx_ids(struct efx_nic *efx, u32 *buf, u32 size) { struct efx_filter_state *state = efx->filter_state; - enum efx_filter_table_id table_id; - struct efx_filter_table *table; + enum efx_farch_filter_table_id table_id; + struct efx_farch_filter_table *table; unsigned int filter_idx; s32 count = 0; spin_lock_bh(&state->lock); - for (table_id = EFX_FILTER_TABLE_RX_IP; - table_id <= EFX_FILTER_TABLE_RX_DEF; + for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; + table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; table_id++) { table = &state->table[table_id]; for (filter_idx = 0; filter_idx < table->size; filter_idx++) { @@ -974,7 +1007,7 @@ s32 efx_filter_get_rx_ids(struct efx_nic *efx, count = -EMSGSIZE; goto out; } - buf[count++] = efx_filter_make_id( + buf[count++] = efx_farch_filter_make_id( &table->spec[filter_idx], filter_idx); } } @@ -989,14 +1022,14 @@ out: void efx_restore_filters(struct efx_nic *efx) { struct efx_filter_state *state = efx->filter_state; - enum efx_filter_table_id table_id; - struct efx_filter_table *table; + enum efx_farch_filter_table_id table_id; + struct efx_farch_filter_table *table; efx_oword_t filter; unsigned int filter_idx; spin_lock_bh(&state->lock); - for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) { + for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) { table = &state->table[table_id]; /* Check whether this is a regular register table */ @@ -1006,14 +1039,14 @@ void efx_restore_filters(struct efx_nic *efx) for (filter_idx = 0; filter_idx < table->size; filter_idx++) { if (!test_bit(filter_idx, table->used_bitmap)) continue; - efx_filter_build(&filter, &table->spec[filter_idx]); + efx_farch_filter_build(&filter, &table->spec[filter_idx]); efx_writeo(efx, &filter, table->offset + table->step * filter_idx); } } - efx_filter_push_rx_config(efx); - efx_filter_push_tx_limits(efx); + efx_farch_filter_push_rx_config(efx); + efx_farch_filter_push_tx_limits(efx); spin_unlock_bh(&state->lock); } @@ -1021,7 +1054,7 @@ void efx_restore_filters(struct efx_nic *efx) int efx_probe_filters(struct efx_nic *efx) { struct efx_filter_state *state; - struct efx_filter_table *table; + struct efx_farch_filter_table *table; unsigned table_id; state = kzalloc(sizeof(*efx->filter_state), GFP_KERNEL); @@ -1039,32 +1072,32 @@ int efx_probe_filters(struct efx_nic *efx) if (!state->rps_flow_id) goto fail; #endif - table = &state->table[EFX_FILTER_TABLE_RX_IP]; - table->id = EFX_FILTER_TABLE_RX_IP; + table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; + table->id = EFX_FARCH_FILTER_TABLE_RX_IP; table->offset = FR_BZ_RX_FILTER_TBL0; table->size = FR_BZ_RX_FILTER_TBL0_ROWS; table->step = FR_BZ_RX_FILTER_TBL0_STEP; } if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { - table = &state->table[EFX_FILTER_TABLE_RX_MAC]; - table->id = EFX_FILTER_TABLE_RX_MAC; + table = 
&state->table[EFX_FARCH_FILTER_TABLE_RX_MAC]; + table->id = EFX_FARCH_FILTER_TABLE_RX_MAC; table->offset = FR_CZ_RX_MAC_FILTER_TBL0; table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS; table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP; - table = &state->table[EFX_FILTER_TABLE_RX_DEF]; - table->id = EFX_FILTER_TABLE_RX_DEF; - table->size = EFX_FILTER_SIZE_RX_DEF; + table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; + table->id = EFX_FARCH_FILTER_TABLE_RX_DEF; + table->size = EFX_FARCH_FILTER_SIZE_RX_DEF; - table = &state->table[EFX_FILTER_TABLE_TX_MAC]; - table->id = EFX_FILTER_TABLE_TX_MAC; + table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC]; + table->id = EFX_FARCH_FILTER_TABLE_TX_MAC; table->offset = FR_CZ_TX_MAC_FILTER_TBL0; table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS; table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP; } - for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) { + for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) { table = &state->table[table_id]; if (table->size == 0) continue; @@ -1078,14 +1111,14 @@ int efx_probe_filters(struct efx_nic *efx) goto fail; } - if (state->table[EFX_FILTER_TABLE_RX_DEF].size) { + if (state->table[EFX_FARCH_FILTER_TABLE_RX_DEF].size) { /* RX default filters must always exist */ unsigned i; - for (i = 0; i < EFX_FILTER_SIZE_RX_DEF; i++) - efx_filter_reset_rx_def(efx, i); + for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) + efx_farch_filter_reset_rx_def(efx, i); } - efx_filter_push_rx_config(efx); + efx_farch_filter_push_rx_config(efx); return 0; @@ -1097,9 +1130,9 @@ fail: void efx_remove_filters(struct efx_nic *efx) { struct efx_filter_state *state = efx->filter_state; - enum efx_filter_table_id table_id; + enum efx_farch_filter_table_id table_id; - for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) { + for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) { kfree(state->table[table_id].used_bitmap); vfree(state->table[table_id].spec); } @@ -1113,15 +1146,15 @@ void efx_remove_filters(struct efx_nic *efx) void efx_filter_update_rx_scatter(struct efx_nic *efx) { struct efx_filter_state *state = efx->filter_state; - enum efx_filter_table_id table_id; - struct efx_filter_table *table; + enum efx_farch_filter_table_id table_id; + struct efx_farch_filter_table *table; efx_oword_t filter; unsigned int filter_idx; spin_lock_bh(&state->lock); - for (table_id = EFX_FILTER_TABLE_RX_IP; - table_id <= EFX_FILTER_TABLE_RX_DEF; + for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; + table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; table_id++) { table = &state->table[table_id]; @@ -1138,17 +1171,17 @@ void efx_filter_update_rx_scatter(struct efx_nic *efx) table->spec[filter_idx].flags &= ~EFX_FILTER_FLAG_RX_SCATTER; - if (table_id == EFX_FILTER_TABLE_RX_DEF) - /* Pushed by efx_filter_push_rx_config() */ + if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF) + /* Pushed by efx_farch_filter_push_rx_config() */ continue; - efx_filter_build(&filter, &table->spec[filter_idx]); + efx_farch_filter_build(&filter, &table->spec[filter_idx]); efx_writeo(efx, &filter, table->offset + table->step * filter_idx); } } - efx_filter_push_rx_config(efx); + efx_farch_filter_push_rx_config(efx); spin_unlock_bh(&state->lock); } @@ -1222,7 +1255,8 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota) { struct efx_filter_state *state = efx->filter_state; - struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_IP]; + struct 
efx_farch_filter_table *table = + &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; unsigned mask = table->size - 1; unsigned index; unsigned stop; @@ -1242,14 +1276,14 @@ bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota) netif_info(efx, rx_status, efx->net_dev, "expiring filter %d [flow %u]\n", index, state->rps_flow_id[index]); - efx_filter_table_clear_entry(efx, table, index); + efx_farch_filter_table_clear_entry(efx, table, index); } index = (index + 1) & mask; } state->rps_expire_index = stop; if (table->used == 0) - efx_filter_table_reset_search_depth(table); + efx_farch_filter_table_reset_search_depth(table); spin_unlock_bh(&state->lock); return true; -- cgit v0.10.2 From f26e958cfce0522832a5d593943948e6b61c3126 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Tue, 30 Oct 2012 01:01:52 +0000 Subject: sfc: Name the RX drop queue ID Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 4db37f7..ec5cacd 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -822,7 +822,7 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx, if (rc) return rc; - if (spec.dmaq_id == 0xfff) + if (spec.dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP) rule->ring_cookie = RX_CLS_FLOW_DISC; else rule->ring_cookie = spec.dmaq_id; @@ -967,7 +967,7 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx, efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0, (rule->ring_cookie == RX_CLS_FLOW_DISC) ? - 0xfff : rule->ring_cookie); + EFX_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie); switch (rule->flow_type) { case TCP_V4_FLOW: diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h index 5cb5472..b1170d4 100644 --- a/drivers/net/ethernet/sfc/filter.h +++ b/drivers/net/ethernet/sfc/filter.h @@ -76,7 +76,8 @@ enum efx_filter_flags { * @type: Type of match to be performed, from &enum efx_filter_type * @priority: Priority of the filter, from &enum efx_filter_priority * @flags: Miscellaneous flags, from &enum efx_filter_flags - * @dmaq_id: Source/target queue index + * @dmaq_id: Source/target queue index, or %EFX_FILTER_RX_DMAQ_ID_DROP for + * an RX drop filter * @data: Match data (type-dependent) * * Use the efx_filter_set_*() functions to initialise the @type and @@ -94,6 +95,10 @@ struct efx_filter_spec { u32 data[3]; }; +enum { + EFX_FILTER_RX_DMAQ_ID_DROP = 0xfff +}; + static inline void efx_filter_init_rx(struct efx_filter_spec *spec, enum efx_filter_priority priority, enum efx_filter_flags flags, -- cgit v0.10.2 From 7c460d9be6109834da86052c4d4a9bb0be9cd407 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Sat, 27 Oct 2012 00:33:28 +0100 Subject: sfc: Extend and abstract efx_filter_spec to cover Huntington/EF10 Replace type field with match_flags. Add rss_context and match values covering of most of what is now in the MCDI protocol. Change some fields into bitfields so that the structure size doesn't grow beyond 64 bytes. Ditch the filter decoding functions as it is now easier to pick apart the abstract structure. Rewrite ethtool NFC rule functions to set/get filter match flags and values directly. 
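A minimal sketch of how a caller might build a filter in the new match-flags style, using only the efx_filter_init_rx() and efx_filter_set_ipv4_full() helpers added below; the priority, queue index, addresses and ports are illustrative values, not taken from the driver:

	/* Sketch: steer one TCP/IPv4 flow to RX queue 0.  Host addresses and
	 * ports are passed in network byte order, as the helpers expect.
	 */
	struct efx_filter_spec spec;
	s32 rc;

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, 0);
	efx_filter_set_ipv4_full(&spec, IPPROTO_TCP,
				 htonl(0xc0a80001), htons(80),     /* local 192.168.0.1:80 */
				 htonl(0x0a000002), htons(12345)); /* remote 10.0.0.2:12345 */
	rc = efx_filter_insert_filter(efx, &spec, true);

Previously an equivalent specification required picking an efx_filter_type up front; after this patch the hardware filter type is derived from which match fields the caller sets.
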
Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index ec5cacd..58ae28b 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -799,11 +799,12 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags) return efx_reset(efx, rc); } -/* MAC address mask including only MC flag */ -static const u8 mac_addr_mc_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 }; +/* MAC address mask including only I/G bit */ +static const u8 mac_addr_ig_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 }; #define IP4_ADDR_FULL_MASK ((__force __be32)~0) #define PORT_FULL_MASK ((__force __be16)~0) +#define ETHER_TYPE_FULL_MASK ((__force __be16)~0) static int efx_ethtool_get_class_rule(struct efx_nic *efx, struct ethtool_rx_flow_spec *rule) @@ -813,8 +814,6 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx, struct ethhdr *mac_entry = &rule->h_u.ether_spec; struct ethhdr *mac_mask = &rule->m_u.ether_spec; struct efx_filter_spec spec; - u16 vid; - u8 proto; int rc; rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL, @@ -827,39 +826,67 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx, else rule->ring_cookie = spec.dmaq_id; - if (spec.type == EFX_FILTER_MC_DEF || spec.type == EFX_FILTER_UC_DEF) { - rule->flow_type = ETHER_FLOW; - memcpy(mac_mask->h_dest, mac_addr_mc_mask, ETH_ALEN); - if (spec.type == EFX_FILTER_MC_DEF) - memcpy(mac_entry->h_dest, mac_addr_mc_mask, ETH_ALEN); - return 0; - } - - rc = efx_filter_get_eth_local(&spec, &vid, mac_entry->h_dest); - if (rc == 0) { + if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) && + spec.ether_type == htons(ETH_P_IP) && + (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) && + (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) && + !(spec.match_flags & + ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST | + EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) { + rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ? 
+ TCP_V4_FLOW : UDP_V4_FLOW); + if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) { + ip_entry->ip4dst = spec.loc_host[0]; + ip_mask->ip4dst = IP4_ADDR_FULL_MASK; + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) { + ip_entry->ip4src = spec.rem_host[0]; + ip_mask->ip4src = IP4_ADDR_FULL_MASK; + } + if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) { + ip_entry->pdst = spec.loc_port; + ip_mask->pdst = PORT_FULL_MASK; + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) { + ip_entry->psrc = spec.rem_port; + ip_mask->psrc = PORT_FULL_MASK; + } + } else if (!(spec.match_flags & + ~(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG | + EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_OUTER_VID))) { rule->flow_type = ETHER_FLOW; - memset(mac_mask->h_dest, ~0, ETH_ALEN); - if (vid != EFX_FILTER_VID_UNSPEC) { - rule->flow_type |= FLOW_EXT; - rule->h_ext.vlan_tci = htons(vid); - rule->m_ext.vlan_tci = htons(0xfff); + if (spec.match_flags & + (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) { + memcpy(mac_entry->h_dest, spec.loc_mac, ETH_ALEN); + if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC) + memset(mac_mask->h_dest, ~0, ETH_ALEN); + else + memcpy(mac_mask->h_dest, mac_addr_ig_mask, + ETH_ALEN); } - return 0; + if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) { + memcpy(mac_entry->h_source, spec.rem_mac, ETH_ALEN); + memset(mac_mask->h_source, ~0, ETH_ALEN); + } + if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) { + mac_entry->h_proto = spec.ether_type; + mac_mask->h_proto = ETHER_TYPE_FULL_MASK; + } + } else { + /* The above should handle all filters that we insert */ + WARN_ON(1); + return -EINVAL; } - rc = efx_filter_get_ipv4_local(&spec, &proto, - &ip_entry->ip4dst, &ip_entry->pdst); - if (rc != 0) { - rc = efx_filter_get_ipv4_full( - &spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst, - &ip_entry->ip4src, &ip_entry->psrc); - EFX_WARN_ON_PARANOID(rc); - ip_mask->ip4src = IP4_ADDR_FULL_MASK; - ip_mask->psrc = PORT_FULL_MASK; + if (spec.match_flags & EFX_FILTER_MATCH_OUTER_VID) { + rule->flow_type |= FLOW_EXT; + rule->h_ext.vlan_tci = spec.outer_vid; + rule->m_ext.vlan_tci = htons(0xfff); } - rule->flow_type = (proto == IPPROTO_TCP) ? TCP_V4_FLOW : UDP_V4_FLOW; - ip_mask->ip4dst = IP4_ADDR_FULL_MASK; - ip_mask->pdst = PORT_FULL_MASK; + return rc; } @@ -969,80 +996,78 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx, (rule->ring_cookie == RX_CLS_FLOW_DISC) ? EFX_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie); - switch (rule->flow_type) { + switch (rule->flow_type & ~FLOW_EXT) { case TCP_V4_FLOW: - case UDP_V4_FLOW: { - u8 proto = (rule->flow_type == TCP_V4_FLOW ? - IPPROTO_TCP : IPPROTO_UDP); - - /* Must match all of destination, */ - if (!(ip_mask->ip4dst == IP4_ADDR_FULL_MASK && - ip_mask->pdst == PORT_FULL_MASK)) - return -EINVAL; - /* all or none of source, */ - if ((ip_mask->ip4src || ip_mask->psrc) && - !(ip_mask->ip4src == IP4_ADDR_FULL_MASK && - ip_mask->psrc == PORT_FULL_MASK)) - return -EINVAL; - /* and nothing else */ - if (ip_mask->tos || rule->m_ext.vlan_tci) + case UDP_V4_FLOW: + spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_IP_PROTO); + spec.ether_type = htons(ETH_P_IP); + spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V4_FLOW ? 
+ IPPROTO_TCP : IPPROTO_UDP); + if (ip_mask->ip4dst) { + if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST; + spec.loc_host[0] = ip_entry->ip4dst; + } + if (ip_mask->ip4src) { + if (ip_mask->ip4src != IP4_ADDR_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_HOST; + spec.rem_host[0] = ip_entry->ip4src; + } + if (ip_mask->pdst) { + if (ip_mask->pdst != PORT_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT; + spec.loc_port = ip_entry->pdst; + } + if (ip_mask->psrc) { + if (ip_mask->psrc != PORT_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_PORT; + spec.rem_port = ip_entry->psrc; + } + if (ip_mask->tos) return -EINVAL; - - if (ip_mask->ip4src) - rc = efx_filter_set_ipv4_full(&spec, proto, - ip_entry->ip4dst, - ip_entry->pdst, - ip_entry->ip4src, - ip_entry->psrc); - else - rc = efx_filter_set_ipv4_local(&spec, proto, - ip_entry->ip4dst, - ip_entry->pdst); - if (rc) - return rc; break; - } - - case ETHER_FLOW | FLOW_EXT: - case ETHER_FLOW: { - u16 vlan_tag_mask = (rule->flow_type & FLOW_EXT ? - ntohs(rule->m_ext.vlan_tci) : 0); - /* Must not match on source address or Ethertype */ - if (!is_zero_ether_addr(mac_mask->h_source) || - mac_mask->h_proto) - return -EINVAL; - - /* Is it a default UC or MC filter? */ - if (ether_addr_equal(mac_mask->h_dest, mac_addr_mc_mask) && - vlan_tag_mask == 0) { - if (is_multicast_ether_addr(mac_entry->h_dest)) - rc = efx_filter_set_mc_def(&spec); + case ETHER_FLOW: + if (!is_zero_ether_addr(mac_mask->h_dest)) { + if (ether_addr_equal(mac_mask->h_dest, + mac_addr_ig_mask)) + spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG; + else if (is_broadcast_ether_addr(mac_mask->h_dest)) + spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC; else - rc = efx_filter_set_uc_def(&spec); + return -EINVAL; + memcpy(spec.loc_mac, mac_entry->h_dest, ETH_ALEN); } - /* Otherwise, it must match all of destination and all - * or none of VID. - */ - else if (is_broadcast_ether_addr(mac_mask->h_dest) && - (vlan_tag_mask == 0xfff || vlan_tag_mask == 0)) { - rc = efx_filter_set_eth_local( - &spec, - vlan_tag_mask ? - ntohs(rule->h_ext.vlan_tci) : EFX_FILTER_VID_UNSPEC, - mac_entry->h_dest); - } else { - rc = -EINVAL; + if (!is_zero_ether_addr(mac_mask->h_source)) { + if (!is_broadcast_ether_addr(mac_mask->h_source)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_MAC; + memcpy(spec.rem_mac, mac_entry->h_source, ETH_ALEN); + } + if (mac_mask->h_proto) { + if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; + spec.ether_type = mac_entry->h_proto; } - if (rc) - return rc; break; - } default: return -EINVAL; } + if ((rule->flow_type & FLOW_EXT) && rule->m_ext.vlan_tci) { + if (rule->m_ext.vlan_tci != htons(0xfff)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_OUTER_VID; + spec.outer_vid = rule->h_ext.vlan_tci; + } + rc = efx_filter_insert_filter(efx, &spec, true); if (rc < 0) return rc; diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c index c547630..96f8f2e 100644 --- a/drivers/net/ethernet/sfc/filter.c +++ b/drivers/net/ethernet/sfc/filter.c @@ -32,6 +32,18 @@ * counter-productive. 
*/ #define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5 +enum efx_farch_filter_type { + EFX_FARCH_FILTER_TCP_FULL = 0, + EFX_FARCH_FILTER_TCP_WILD, + EFX_FARCH_FILTER_UDP_FULL, + EFX_FARCH_FILTER_UDP_WILD, + EFX_FARCH_FILTER_MAC_FULL = 4, + EFX_FARCH_FILTER_MAC_WILD, + EFX_FARCH_FILTER_UC_DEF = 8, + EFX_FARCH_FILTER_MC_DEF, + EFX_FARCH_FILTER_TYPE_COUNT, /* number of specific types */ +}; + enum efx_farch_filter_table_id { EFX_FARCH_FILTER_TABLE_RX_IP = 0, EFX_FARCH_FILTER_TABLE_RX_MAC, @@ -62,7 +74,7 @@ struct efx_farch_filter_table { unsigned used; /* number currently used */ unsigned long *used_bitmap; struct efx_farch_filter_spec *spec; - unsigned search_depth[EFX_FILTER_TYPE_COUNT]; + unsigned search_depth[EFX_FARCH_FILTER_TYPE_COUNT]; }; struct efx_filter_state { @@ -106,33 +118,22 @@ static enum efx_farch_filter_table_id efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec) { BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != - (EFX_FILTER_TCP_FULL >> 2)); + (EFX_FARCH_FILTER_TCP_FULL >> 2)); BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != - (EFX_FILTER_TCP_WILD >> 2)); + (EFX_FARCH_FILTER_TCP_WILD >> 2)); BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != - (EFX_FILTER_UDP_FULL >> 2)); + (EFX_FARCH_FILTER_UDP_FULL >> 2)); BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != - (EFX_FILTER_UDP_WILD >> 2)); + (EFX_FARCH_FILTER_UDP_WILD >> 2)); BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC != - (EFX_FILTER_MAC_FULL >> 2)); + (EFX_FARCH_FILTER_MAC_FULL >> 2)); BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC != - (EFX_FILTER_MAC_WILD >> 2)); + (EFX_FARCH_FILTER_MAC_WILD >> 2)); BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC != EFX_FARCH_FILTER_TABLE_RX_MAC + 2); - EFX_BUG_ON_PARANOID(spec->type == EFX_FILTER_UNSPEC); return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 
2 : 0); } -static struct efx_farch_filter_table * -efx_farch_filter_spec_table(struct efx_filter_state *state, - const struct efx_farch_filter_spec *spec) -{ - if (spec->type == EFX_FILTER_UNSPEC) - return NULL; - else - return &state->table[efx_farch_filter_spec_table_id(spec)]; -} - static void efx_farch_filter_table_reset_search_depth(struct efx_farch_filter_table *table) { @@ -149,27 +150,27 @@ static void efx_farch_filter_push_rx_config(struct efx_nic *efx) table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT, - table->search_depth[EFX_FILTER_TCP_FULL] + + table->search_depth[EFX_FARCH_FILTER_TCP_FULL] + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT, - table->search_depth[EFX_FILTER_TCP_WILD] + + table->search_depth[EFX_FARCH_FILTER_TCP_WILD] + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT, - table->search_depth[EFX_FILTER_UDP_FULL] + + table->search_depth[EFX_FARCH_FILTER_UDP_FULL] + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT, - table->search_depth[EFX_FILTER_UDP_WILD] + + table->search_depth[EFX_FARCH_FILTER_UDP_WILD] + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC]; if (table->size) { EFX_SET_OWORD_FIELD( filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT, - table->search_depth[EFX_FILTER_MAC_FULL] + + table->search_depth[EFX_FARCH_FILTER_MAC_FULL] + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); EFX_SET_OWORD_FIELD( filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT, - table->search_depth[EFX_FILTER_MAC_WILD] + + table->search_depth[EFX_FARCH_FILTER_MAC_WILD] + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); } @@ -225,223 +226,202 @@ static void efx_farch_filter_push_tx_limits(struct efx_nic *efx) if (table->size) { EFX_SET_OWORD_FIELD( tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE, - table->search_depth[EFX_FILTER_MAC_FULL] + + table->search_depth[EFX_FARCH_FILTER_MAC_FULL] + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); EFX_SET_OWORD_FIELD( tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE, - table->search_depth[EFX_FILTER_MAC_WILD] + + table->search_depth[EFX_FARCH_FILTER_MAC_WILD] + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); } efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG); } -static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec, - __be32 host1, __be16 port1, - __be32 host2, __be16 port2) -{ - spec->data[0] = ntohl(host1) << 16 | ntohs(port1); - spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16; - spec->data[2] = ntohl(host2); -} - -static inline void __efx_filter_get_ipv4(const struct efx_filter_spec *spec, - __be32 *host1, __be16 *port1, - __be32 *host2, __be16 *port2) -{ - *host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16); - *port1 = htons(spec->data[0]); - *host2 = htonl(spec->data[2]); - *port2 = htons(spec->data[1] >> 16); -} - -/** - * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port - * @spec: Specification to initialise - * @proto: Transport layer protocol number - * @host: Local host address (network byte order) - * @port: Local port (network byte order) - */ -int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto, - __be32 host, __be16 port) +static int +efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec, + const struct efx_filter_spec *gen_spec) { - __be32 host1; - __be16 port1; - - EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX)); - - /* This cannot 
currently be combined with other filtering */ - if (spec->type != EFX_FILTER_UNSPEC) - return -EPROTONOSUPPORT; + bool is_full = false; - if (port == 0) + if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) && + gen_spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT) return -EINVAL; - switch (proto) { - case IPPROTO_TCP: - spec->type = EFX_FILTER_TCP_WILD; - break; - case IPPROTO_UDP: - spec->type = EFX_FILTER_UDP_WILD; - break; - default: - return -EPROTONOSUPPORT; - } - - /* Filter is constructed in terms of source and destination, - * with the odd wrinkle that the ports are swapped in a UDP - * wildcard filter. We need to convert from local and remote - * (= zero for wildcard) addresses. - */ - host1 = 0; - if (proto != IPPROTO_UDP) { - port1 = 0; - } else { - port1 = port; - port = 0; - } - - __efx_filter_set_ipv4(spec, host1, port1, host, port); - return 0; -} + spec->priority = gen_spec->priority; + spec->flags = gen_spec->flags; + spec->dmaq_id = gen_spec->dmaq_id; -int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec, - u8 *proto, __be32 *host, __be16 *port) -{ - __be32 host1; - __be16 port1; + switch (gen_spec->match_flags) { + case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | + EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT): + is_full = true; + /* fall through */ + case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): { + __be32 rhost, host1, host2; + __be16 rport, port1, port2; - switch (spec->type) { - case EFX_FILTER_TCP_WILD: - *proto = IPPROTO_TCP; - __efx_filter_get_ipv4(spec, &host1, &port1, host, port); - return 0; - case EFX_FILTER_UDP_WILD: - *proto = IPPROTO_UDP; - __efx_filter_get_ipv4(spec, &host1, port, host, &port1); - return 0; - default: - return -EINVAL; - } -} - -/** - * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports - * @spec: Specification to initialise - * @proto: Transport layer protocol number - * @host: Local host address (network byte order) - * @port: Local port (network byte order) - * @rhost: Remote host address (network byte order) - * @rport: Remote port (network byte order) - */ -int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto, - __be32 host, __be16 port, - __be32 rhost, __be16 rport) -{ - EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX)); + EFX_BUG_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX)); - /* This cannot currently be combined with other filtering */ - if (spec->type != EFX_FILTER_UNSPEC) - return -EPROTONOSUPPORT; + if (gen_spec->ether_type != htons(ETH_P_IP)) + return -EPROTONOSUPPORT; + if (gen_spec->loc_port == 0 || + (is_full && gen_spec->rem_port == 0)) + return -EADDRNOTAVAIL; + switch (gen_spec->ip_proto) { + case IPPROTO_TCP: + spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL : + EFX_FARCH_FILTER_TCP_WILD); + break; + case IPPROTO_UDP: + spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL : + EFX_FARCH_FILTER_UDP_WILD); + break; + default: + return -EPROTONOSUPPORT; + } - if (port == 0 || rport == 0) - return -EINVAL; + /* Filter is constructed in terms of source and destination, + * with the odd wrinkle that the ports are swapped in a UDP + * wildcard filter. We need to convert from local and remote + * (= zero for wildcard) addresses. + */ + rhost = is_full ? gen_spec->rem_host[0] : 0; + rport = is_full ? 
gen_spec->rem_port : 0; + host1 = rhost; + host2 = gen_spec->loc_host[0]; + if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) { + port1 = gen_spec->loc_port; + port2 = rport; + } else { + port1 = rport; + port2 = gen_spec->loc_port; + } + spec->data[0] = ntohl(host1) << 16 | ntohs(port1); + spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16; + spec->data[2] = ntohl(host2); - switch (proto) { - case IPPROTO_TCP: - spec->type = EFX_FILTER_TCP_FULL; break; - case IPPROTO_UDP: - spec->type = EFX_FILTER_UDP_FULL; - break; - default: - return -EPROTONOSUPPORT; } - __efx_filter_set_ipv4(spec, rhost, rport, host, port); - return 0; -} - -int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec, - u8 *proto, __be32 *host, __be16 *port, - __be32 *rhost, __be16 *rport) -{ - switch (spec->type) { - case EFX_FILTER_TCP_FULL: - *proto = IPPROTO_TCP; + case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID: + is_full = true; + /* fall through */ + case EFX_FILTER_MATCH_LOC_MAC: + spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL : + EFX_FARCH_FILTER_MAC_WILD); + spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0; + spec->data[1] = (gen_spec->loc_mac[2] << 24 | + gen_spec->loc_mac[3] << 16 | + gen_spec->loc_mac[4] << 8 | + gen_spec->loc_mac[5]); + spec->data[2] = (gen_spec->loc_mac[0] << 8 | + gen_spec->loc_mac[1]); break; - case EFX_FILTER_UDP_FULL: - *proto = IPPROTO_UDP; + + case EFX_FILTER_MATCH_LOC_MAC_IG: + spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ? + EFX_FARCH_FILTER_MC_DEF : + EFX_FARCH_FILTER_UC_DEF); + memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */ break; + default: - return -EINVAL; + return -EPROTONOSUPPORT; } - __efx_filter_get_ipv4(spec, rhost, rport, host, port); return 0; } -/** - * efx_filter_set_eth_local - specify local Ethernet address and optional VID - * @spec: Specification to initialise - * @vid: VLAN ID to match, or %EFX_FILTER_VID_UNSPEC - * @addr: Local Ethernet MAC address - */ -int efx_filter_set_eth_local(struct efx_filter_spec *spec, - u16 vid, const u8 *addr) +static void +efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec, + const struct efx_farch_filter_spec *spec) { - EFX_BUG_ON_PARANOID(!(spec->flags & - (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX))); - - /* This cannot currently be combined with other filtering */ - if (spec->type != EFX_FILTER_UNSPEC) - return -EPROTONOSUPPORT; - - if (vid == EFX_FILTER_VID_UNSPEC) { - spec->type = EFX_FILTER_MAC_WILD; - spec->data[0] = 0; - } else { - spec->type = EFX_FILTER_MAC_FULL; - spec->data[0] = vid; - } + bool is_full = false; - spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5]; - spec->data[2] = addr[0] << 8 | addr[1]; - return 0; -} + /* *gen_spec should be completely initialised, to be consistent + * with efx_filter_init_{rx,tx}() and in case we want to copy + * it back to userland. 
+ */ + memset(gen_spec, 0, sizeof(*gen_spec)); -/** - * efx_filter_set_uc_def - specify matching otherwise-unmatched unicast - * @spec: Specification to initialise - */ -int efx_filter_set_uc_def(struct efx_filter_spec *spec) -{ - EFX_BUG_ON_PARANOID(!(spec->flags & - (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX))); + gen_spec->priority = spec->priority; + gen_spec->flags = spec->flags; + gen_spec->dmaq_id = spec->dmaq_id; - if (spec->type != EFX_FILTER_UNSPEC) - return -EINVAL; + switch (spec->type) { + case EFX_FARCH_FILTER_TCP_FULL: + case EFX_FARCH_FILTER_UDP_FULL: + is_full = true; + /* fall through */ + case EFX_FARCH_FILTER_TCP_WILD: + case EFX_FARCH_FILTER_UDP_WILD: { + __be32 host1, host2; + __be16 port1, port2; + + gen_spec->match_flags = + EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT; + if (is_full) + gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST | + EFX_FILTER_MATCH_REM_PORT); + gen_spec->ether_type = htons(ETH_P_IP); + gen_spec->ip_proto = + (spec->type == EFX_FARCH_FILTER_TCP_FULL || + spec->type == EFX_FARCH_FILTER_TCP_WILD) ? + IPPROTO_TCP : IPPROTO_UDP; + + host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16); + port1 = htons(spec->data[0]); + host2 = htonl(spec->data[2]); + port2 = htons(spec->data[1] >> 16); + if (spec->flags & EFX_FILTER_FLAG_TX) { + gen_spec->loc_host[0] = host1; + gen_spec->rem_host[0] = host2; + } else { + gen_spec->loc_host[0] = host2; + gen_spec->rem_host[0] = host1; + } + if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^ + (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) { + gen_spec->loc_port = port1; + gen_spec->rem_port = port2; + } else { + gen_spec->loc_port = port2; + gen_spec->rem_port = port1; + } - spec->type = EFX_FILTER_UC_DEF; - memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */ - return 0; -} + break; + } -/** - * efx_filter_set_mc_def - specify matching otherwise-unmatched multicast - * @spec: Specification to initialise - */ -int efx_filter_set_mc_def(struct efx_filter_spec *spec) -{ - EFX_BUG_ON_PARANOID(!(spec->flags & - (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX))); + case EFX_FARCH_FILTER_MAC_FULL: + is_full = true; + /* fall through */ + case EFX_FARCH_FILTER_MAC_WILD: + gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC; + if (is_full) + gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID; + gen_spec->loc_mac[0] = spec->data[2] >> 8; + gen_spec->loc_mac[1] = spec->data[2]; + gen_spec->loc_mac[2] = spec->data[1] >> 24; + gen_spec->loc_mac[3] = spec->data[1] >> 16; + gen_spec->loc_mac[4] = spec->data[1] >> 8; + gen_spec->loc_mac[5] = spec->data[1]; + gen_spec->outer_vid = htons(spec->data[0]); + break; - if (spec->type != EFX_FILTER_UNSPEC) - return -EINVAL; + case EFX_FARCH_FILTER_UC_DEF: + case EFX_FARCH_FILTER_MC_DEF: + gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG; + gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF; + break; - spec->type = EFX_FILTER_MC_DEF; - memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */ - return 0; + default: + WARN_ON(1); + break; + } } static void @@ -455,7 +435,7 @@ efx_farch_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx) /* If there's only one channel then disable RSS for non VF * traffic, thereby allowing VFs to use RSS when the PF can't. */ - spec->type = EFX_FILTER_UC_DEF + filter_idx; + spec->type = EFX_FARCH_FILTER_UC_DEF + filter_idx; spec->priority = EFX_FILTER_PRI_MANUAL; spec->flags = (EFX_FILTER_FLAG_RX | (efx->n_rx_channels > 1 ? 
EFX_FILTER_FLAG_RX_RSS : 0) | @@ -464,29 +444,6 @@ efx_farch_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx) table->used_bitmap[0] |= 1 << filter_idx; } -int efx_filter_get_eth_local(const struct efx_filter_spec *spec, - u16 *vid, u8 *addr) -{ - switch (spec->type) { - case EFX_FILTER_MAC_WILD: - *vid = EFX_FILTER_VID_UNSPEC; - break; - case EFX_FILTER_MAC_FULL: - *vid = spec->data[0]; - break; - default: - return -EINVAL; - } - - addr[0] = spec->data[2] >> 8; - addr[1] = spec->data[2]; - addr[2] = spec->data[1] >> 24; - addr[3] = spec->data[1] >> 16; - addr[4] = spec->data[1] >> 8; - addr[5] = spec->data[1]; - return 0; -} - /* Build a filter entry and return its n-tuple key. */ static u32 efx_farch_filter_build(efx_oword_t *filter, struct efx_farch_filter_spec *spec) @@ -495,8 +452,8 @@ static u32 efx_farch_filter_build(efx_oword_t *filter, switch (efx_farch_filter_spec_table_id(spec)) { case EFX_FARCH_FILTER_TABLE_RX_IP: { - bool is_udp = (spec->type == EFX_FILTER_UDP_FULL || - spec->type == EFX_FILTER_UDP_WILD); + bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL || + spec->type == EFX_FARCH_FILTER_UDP_WILD); EFX_POPULATE_OWORD_7( *filter, FRF_BZ_RSS_EN, @@ -513,7 +470,7 @@ static u32 efx_farch_filter_build(efx_oword_t *filter, } case EFX_FARCH_FILTER_TABLE_RX_MAC: { - bool is_wild = spec->type == EFX_FILTER_MAC_WILD; + bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD; EFX_POPULATE_OWORD_7( *filter, FRF_CZ_RMFT_RSS_EN, @@ -530,7 +487,7 @@ static u32 efx_farch_filter_build(efx_oword_t *filter, } case EFX_FARCH_FILTER_TABLE_TX_MAC: { - bool is_wild = spec->type == EFX_FILTER_MAC_WILD; + bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD; EFX_POPULATE_OWORD_5(*filter, FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id, FRF_CZ_TMFT_WILDCARD_MATCH, is_wild, @@ -573,15 +530,15 @@ static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left, #define EFX_FARCH_FILTER_MATCH_PRI_COUNT 5 -static const u8 efx_farch_filter_type_match_pri[EFX_FILTER_TYPE_COUNT] = { - [EFX_FILTER_TCP_FULL] = 0, - [EFX_FILTER_UDP_FULL] = 0, - [EFX_FILTER_TCP_WILD] = 1, - [EFX_FILTER_UDP_WILD] = 1, - [EFX_FILTER_MAC_FULL] = 2, - [EFX_FILTER_MAC_WILD] = 3, - [EFX_FILTER_UC_DEF] = 4, - [EFX_FILTER_MC_DEF] = 4, +static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = { + [EFX_FARCH_FILTER_TCP_FULL] = 0, + [EFX_FARCH_FILTER_UDP_FULL] = 0, + [EFX_FARCH_FILTER_TCP_WILD] = 1, + [EFX_FARCH_FILTER_UDP_WILD] = 1, + [EFX_FARCH_FILTER_MAC_FULL] = 2, + [EFX_FARCH_FILTER_MAC_WILD] = 3, + [EFX_FARCH_FILTER_UC_DEF] = 4, + [EFX_FARCH_FILTER_MC_DEF] = 4, }; static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = { @@ -672,11 +629,12 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, unsigned int depth = 0; int rc; - /* XXX efx_farch_filter_spec and efx_filter_spec will diverge in future */ - memcpy(&spec, gen_spec, sizeof(*gen_spec)); + rc = efx_farch_filter_from_gen_spec(&spec, gen_spec); + if (rc) + return rc; - table = efx_farch_filter_spec_table(state, &spec); - if (!table || table->size == 0) + table = &state->table[efx_farch_filter_spec_table_id(&spec)]; + if (table->size == 0) return -EINVAL; netif_vdbg(efx, hw, efx->net_dev, @@ -687,8 +645,8 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, /* One filter spec per type */ BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0); BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF != - EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF); - rep_index = spec.type - EFX_FILTER_UC_DEF; + EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF); 
+ rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF; ins_index = rep_index; spin_lock_bh(&state->lock); @@ -911,8 +869,7 @@ int efx_filter_get_filter_safe(struct efx_nic *efx, if (test_bit(filter_idx, table->used_bitmap) && spec->priority == priority) { - /* XXX efx_farch_filter_spec and efx_filter_spec will diverge */ - memcpy(spec_buf, spec, sizeof(*spec)); + efx_farch_filter_to_gen_spec(spec_buf, spec); rc = 0; } else { rc = -ENOENT; diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h index b1170d4..1b410de 100644 --- a/drivers/net/ethernet/sfc/filter.h +++ b/drivers/net/ethernet/sfc/filter.h @@ -11,32 +11,49 @@ #define EFX_FILTER_H #include +#include +#include /** - * enum efx_filter_type - type of hardware filter - * @EFX_FILTER_TCP_FULL: Matching TCP/IPv4 4-tuple - * @EFX_FILTER_TCP_WILD: Matching TCP/IPv4 destination (host, port) - * @EFX_FILTER_UDP_FULL: Matching UDP/IPv4 4-tuple - * @EFX_FILTER_UDP_WILD: Matching UDP/IPv4 destination (host, port) - * @EFX_FILTER_MAC_FULL: Matching Ethernet destination MAC address, VID - * @EFX_FILTER_MAC_WILD: Matching Ethernet destination MAC address - * @EFX_FILTER_UC_DEF: Matching all otherwise unmatched unicast - * @EFX_FILTER_MC_DEF: Matching all otherwise unmatched multicast - * @EFX_FILTER_UNSPEC: Match type is unspecified + * enum efx_filter_match_flags - Flags for hardware filter match type + * @EFX_FILTER_MATCH_REM_HOST: Match by remote IP host address + * @EFX_FILTER_MATCH_LOC_HOST: Match by local IP host address + * @EFX_FILTER_MATCH_REM_MAC: Match by remote MAC address + * @EFX_FILTER_MATCH_REM_PORT: Match by remote TCP/UDP port + * @EFX_FILTER_MATCH_LOC_MAC: Match by local MAC address + * @EFX_FILTER_MATCH_LOC_PORT: Match by local TCP/UDP port + * @EFX_FILTER_MATCH_ETHER_TYPE: Match by Ether-type + * @EFX_FILTER_MATCH_INNER_VID: Match by inner VLAN ID + * @EFX_FILTER_MATCH_OUTER_VID: Match by outer VLAN ID + * @EFX_FILTER_MATCH_IP_PROTO: Match by IP transport protocol + * @EFX_FILTER_MATCH_LOC_MAC_IG: Match by local MAC address I/G bit. + * Used for RX default unicast and multicast/broadcast filters. * - * Falcon NICs only support the TCP/IPv4 and UDP/IPv4 filter types. 
+ * Only some combinations are supported, depending on NIC type: + * + * - Falcon supports RX filters matching by {TCP,UDP}/IPv4 4-tuple or + * local 2-tuple (only implemented for Falcon B0) + * + * - Siena supports RX and TX filters matching by {TCP,UDP}/IPv4 4-tuple + * or local 2-tuple, or local MAC with or without outer VID, and RX + * default filters + * + * - Huntington supports filter matching controlled by firmware, potentially + * using {TCP,UDP}/IPv{4,6} 4-tuple or local 2-tuple, local MAC or I/G bit, + * with or without outer and inner VID */ -enum efx_filter_type { - EFX_FILTER_TCP_FULL = 0, - EFX_FILTER_TCP_WILD, - EFX_FILTER_UDP_FULL, - EFX_FILTER_UDP_WILD, - EFX_FILTER_MAC_FULL = 4, - EFX_FILTER_MAC_WILD, - EFX_FILTER_UC_DEF = 8, - EFX_FILTER_MC_DEF, - EFX_FILTER_TYPE_COUNT, /* number of specific types */ - EFX_FILTER_UNSPEC = 0xf, +enum efx_filter_match_flags { + EFX_FILTER_MATCH_REM_HOST = 0x0001, + EFX_FILTER_MATCH_LOC_HOST = 0x0002, + EFX_FILTER_MATCH_REM_MAC = 0x0004, + EFX_FILTER_MATCH_REM_PORT = 0x0008, + EFX_FILTER_MATCH_LOC_MAC = 0x0010, + EFX_FILTER_MATCH_LOC_PORT = 0x0020, + EFX_FILTER_MATCH_ETHER_TYPE = 0x0040, + EFX_FILTER_MATCH_INNER_VID = 0x0080, + EFX_FILTER_MATCH_OUTER_VID = 0x0100, + EFX_FILTER_MATCH_IP_PROTO = 0x0200, + EFX_FILTER_MATCH_LOC_MAC_IG = 0x0400, }; /** @@ -73,29 +90,55 @@ enum efx_filter_flags { /** * struct efx_filter_spec - specification for a hardware filter - * @type: Type of match to be performed, from &enum efx_filter_type + * @match_flags: Match type flags, from &enum efx_filter_match_flags * @priority: Priority of the filter, from &enum efx_filter_priority * @flags: Miscellaneous flags, from &enum efx_filter_flags + * @rss_context: RSS context to use, if %EFX_FILTER_FLAG_RX_RSS is set * @dmaq_id: Source/target queue index, or %EFX_FILTER_RX_DMAQ_ID_DROP for * an RX drop filter - * @data: Match data (type-dependent) + * @outer_vid: Outer VLAN ID to match, if %EFX_FILTER_MATCH_OUTER_VID is set + * @inner_vid: Inner VLAN ID to match, if %EFX_FILTER_MATCH_INNER_VID is set + * @loc_mac: Local MAC address to match, if %EFX_FILTER_MATCH_LOC_MAC or + * %EFX_FILTER_MATCH_LOC_MAC_IG is set + * @rem_mac: Remote MAC address to match, if %EFX_FILTER_MATCH_REM_MAC is set + * @ether_type: Ether-type to match, if %EFX_FILTER_MATCH_ETHER_TYPE is set + * @ip_proto: IP transport protocol to match, if %EFX_FILTER_MATCH_IP_PROTO + * is set + * @loc_host: Local IP host to match, if %EFX_FILTER_MATCH_LOC_HOST is set + * @rem_host: Remote IP host to match, if %EFX_FILTER_MATCH_REM_HOST is set + * @loc_port: Local TCP/UDP port to match, if %EFX_FILTER_MATCH_LOC_PORT is set + * @rem_port: Remote TCP/UDP port to match, if %EFX_FILTER_MATCH_REM_PORT is set * - * Use the efx_filter_set_*() functions to initialise the @type and - * @data fields. + * The efx_filter_init_rx() or efx_filter_init_tx() function *must* be + * used to initialise the structure. The efx_filter_set_*() functions + * may then be used to set @rss_context, @match_flags and related + * fields. * * The @priority field is used by software to determine whether a new * filter may replace an old one. The hardware priority of a filter - * depends on the filter type. + * depends on which fields are matched. 
*/ struct efx_filter_spec { - u8 type:4; - u8 priority:4; - u8 flags; - u16 dmaq_id; - u32 data[3]; + u32 match_flags:12; + u32 priority:2; + u32 flags:6; + u32 dmaq_id:12; + u32 rss_context; + __be16 outer_vid __aligned(4); /* allow jhash2() of match values */ + __be16 inner_vid; + u8 loc_mac[ETH_ALEN]; + u8 rem_mac[ETH_ALEN]; + __be16 ether_type; + u8 ip_proto; + __be32 loc_host[4]; + __be32 rem_host[4]; + __be16 loc_port; + __be16 rem_port; + /* total 64 bytes */ }; enum { + EFX_FILTER_RSS_CONTEXT_DEFAULT = 0xffffffff, EFX_FILTER_RX_DMAQ_ID_DROP = 0xfff }; @@ -104,39 +147,116 @@ static inline void efx_filter_init_rx(struct efx_filter_spec *spec, enum efx_filter_flags flags, unsigned rxq_id) { - spec->type = EFX_FILTER_UNSPEC; + memset(spec, 0, sizeof(*spec)); spec->priority = priority; spec->flags = EFX_FILTER_FLAG_RX | flags; + spec->rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT; spec->dmaq_id = rxq_id; } static inline void efx_filter_init_tx(struct efx_filter_spec *spec, unsigned txq_id) { - spec->type = EFX_FILTER_UNSPEC; + memset(spec, 0, sizeof(*spec)); spec->priority = EFX_FILTER_PRI_REQUIRED; spec->flags = EFX_FILTER_FLAG_TX; spec->dmaq_id = txq_id; } -extern int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto, - __be32 host, __be16 port); -extern int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec, - u8 *proto, __be32 *host, __be16 *port); -extern int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto, - __be32 host, __be16 port, - __be32 rhost, __be16 rport); -extern int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec, - u8 *proto, __be32 *host, __be16 *port, - __be32 *rhost, __be16 *rport); -extern int efx_filter_set_eth_local(struct efx_filter_spec *spec, - u16 vid, const u8 *addr); -extern int efx_filter_get_eth_local(const struct efx_filter_spec *spec, - u16 *vid, u8 *addr); -extern int efx_filter_set_uc_def(struct efx_filter_spec *spec); -extern int efx_filter_set_mc_def(struct efx_filter_spec *spec); +/** + * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port + * @spec: Specification to initialise + * @proto: Transport layer protocol number + * @host: Local host address (network byte order) + * @port: Local port (network byte order) + */ +static inline int +efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto, + __be32 host, __be16 port) +{ + spec->match_flags |= + EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT; + spec->ether_type = htons(ETH_P_IP); + spec->ip_proto = proto; + spec->loc_host[0] = host; + spec->loc_port = port; + return 0; +} + +/** + * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports + * @spec: Specification to initialise + * @proto: Transport layer protocol number + * @lhost: Local host address (network byte order) + * @lport: Local port (network byte order) + * @rhost: Remote host address (network byte order) + * @rport: Remote port (network byte order) + */ +static inline int +efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto, + __be32 lhost, __be16 lport, + __be32 rhost, __be16 rport) +{ + spec->match_flags |= + EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | + EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT; + spec->ether_type = htons(ETH_P_IP); + spec->ip_proto = proto; + spec->loc_host[0] = lhost; + spec->loc_port = lport; + spec->rem_host[0] = rhost; + spec->rem_port = rport; + 
return 0; +} + enum { EFX_FILTER_VID_UNSPEC = 0xffff, }; +/** + * efx_filter_set_eth_local - specify local Ethernet address and/or VID + * @spec: Specification to initialise + * @vid: Outer VLAN ID to match, or %EFX_FILTER_VID_UNSPEC + * @addr: Local Ethernet MAC address, or %NULL + */ +static inline int efx_filter_set_eth_local(struct efx_filter_spec *spec, + u16 vid, const u8 *addr) +{ + if (vid == EFX_FILTER_VID_UNSPEC && addr == NULL) + return -EINVAL; + + if (vid != EFX_FILTER_VID_UNSPEC) { + spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID; + spec->outer_vid = htons(vid); + } + if (addr != NULL) { + spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC; + memcpy(spec->loc_mac, addr, ETH_ALEN); + } + return 0; +} + +/** + * efx_filter_set_uc_def - specify matching otherwise-unmatched unicast + * @spec: Specification to initialise + */ +static inline int efx_filter_set_uc_def(struct efx_filter_spec *spec) +{ + spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG; + return 0; +} + +/** + * efx_filter_set_mc_def - specify matching otherwise-unmatched multicast + * @spec: Specification to initialise + */ +static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec) +{ + spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG; + spec->loc_mac[0] = 1; + return 0; +} + #endif /* EFX_FILTER_H */ -- cgit v0.10.2 From 6d661cec7955e30c6f8813752ac5f95c736a6d49 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Sat, 27 Oct 2012 00:33:30 +0100 Subject: sfc: Split Falcon-arch-specific and common filter state Move the common state from struct efx_filter_state into struct efx_nic. Rename struct efx_filter_state to efx_farch_filter_state and change the type of efx_nic::filter_state to void *. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c index 96f8f2e..29986d3 100644 --- a/drivers/net/ethernet/sfc/filter.c +++ b/drivers/net/ethernet/sfc/filter.c @@ -77,13 +77,8 @@ struct efx_farch_filter_table { unsigned search_depth[EFX_FARCH_FILTER_TYPE_COUNT]; }; -struct efx_filter_state { - spinlock_t lock; +struct efx_farch_filter_state { struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT]; -#ifdef CONFIG_RFS_ACCEL - u32 *rps_flow_id; - unsigned rps_expire_index; -#endif }; static void @@ -142,7 +137,7 @@ efx_farch_filter_table_reset_search_depth(struct efx_farch_filter_table *table) static void efx_farch_filter_push_rx_config(struct efx_nic *efx) { - struct efx_filter_state *state = efx->filter_state; + struct efx_farch_filter_state *state = efx->filter_state; struct efx_farch_filter_table *table; efx_oword_t filter_ctl; @@ -216,7 +211,7 @@ static void efx_farch_filter_push_rx_config(struct efx_nic *efx) static void efx_farch_filter_push_tx_limits(struct efx_nic *efx) { - struct efx_filter_state *state = efx->filter_state; + struct efx_farch_filter_state *state = efx->filter_state; struct efx_farch_filter_table *table; efx_oword_t tx_cfg; @@ -427,7 +422,7 @@ efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec, static void efx_farch_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx) { - struct efx_filter_state *state = efx->filter_state; + struct efx_farch_filter_state *state = efx->filter_state; struct efx_farch_filter_table *table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; struct efx_farch_filter_spec *spec = &table->spec[filter_idx]; @@ -585,7 +580,7 @@ static inline unsigned int efx_farch_filter_id_index(u32 id) u32 efx_filter_get_rx_id_limit(struct efx_nic *efx) { - struct efx_filter_state *state = 
efx->filter_state; + struct efx_farch_filter_state *state = efx->filter_state; unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1; enum efx_farch_filter_table_id table_id; @@ -621,7 +616,7 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *gen_spec, bool replace_equal) { - struct efx_filter_state *state = efx->filter_state; + struct efx_farch_filter_state *state = efx->filter_state; struct efx_farch_filter_table *table; struct efx_farch_filter_spec spec; efx_oword_t filter; @@ -649,7 +644,7 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF; ins_index = rep_index; - spin_lock_bh(&state->lock); + spin_lock_bh(&efx->filter_lock); } else { /* Search concurrently for * (1) a filter to be replaced (rep_index): any filter @@ -679,7 +674,7 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, ins_index = -1; depth = 1; - spin_lock_bh(&state->lock); + spin_lock_bh(&efx->filter_lock); for (;;) { if (!test_bit(i, table->used_bitmap)) { @@ -762,7 +757,7 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, rc = efx_farch_filter_make_id(&spec, ins_index); out: - spin_unlock_bh(&state->lock); + spin_unlock_bh(&efx->filter_lock); return rc; } @@ -800,7 +795,7 @@ int efx_filter_remove_id_safe(struct efx_nic *efx, enum efx_filter_priority priority, u32 filter_id) { - struct efx_filter_state *state = efx->filter_state; + struct efx_farch_filter_state *state = efx->filter_state; enum efx_farch_filter_table_id table_id; struct efx_farch_filter_table *table; unsigned int filter_idx; @@ -817,7 +812,7 @@ int efx_filter_remove_id_safe(struct efx_nic *efx, return -ENOENT; spec = &table->spec[filter_idx]; - spin_lock_bh(&state->lock); + spin_lock_bh(&efx->filter_lock); if (test_bit(filter_idx, table->used_bitmap) && spec->priority == priority) { @@ -829,7 +824,7 @@ int efx_filter_remove_id_safe(struct efx_nic *efx, rc = -ENOENT; } - spin_unlock_bh(&state->lock); + spin_unlock_bh(&efx->filter_lock); return rc; } @@ -848,7 +843,7 @@ int efx_filter_get_filter_safe(struct efx_nic *efx, enum efx_filter_priority priority, u32 filter_id, struct efx_filter_spec *spec_buf) { - struct efx_filter_state *state = efx->filter_state; + struct efx_farch_filter_state *state = efx->filter_state; enum efx_farch_filter_table_id table_id; struct efx_farch_filter_table *table; struct efx_farch_filter_spec *spec; @@ -865,7 +860,7 @@ int efx_filter_get_filter_safe(struct efx_nic *efx, return -ENOENT; spec = &table->spec[filter_idx]; - spin_lock_bh(&state->lock); + spin_lock_bh(&efx->filter_lock); if (test_bit(filter_idx, table->used_bitmap) && spec->priority == priority) { @@ -875,7 +870,7 @@ int efx_filter_get_filter_safe(struct efx_nic *efx, rc = -ENOENT; } - spin_unlock_bh(&state->lock); + spin_unlock_bh(&efx->filter_lock); return rc; } @@ -885,11 +880,11 @@ efx_farch_filter_table_clear(struct efx_nic *efx, enum efx_farch_filter_table_id table_id, enum efx_filter_priority priority) { - struct efx_filter_state *state = efx->filter_state; + struct efx_farch_filter_state *state = efx->filter_state; struct efx_farch_filter_table *table = &state->table[table_id]; unsigned int filter_idx; - spin_lock_bh(&state->lock); + spin_lock_bh(&efx->filter_lock); for (filter_idx = 0; filter_idx < table->size; ++filter_idx) if (table->spec[filter_idx].priority <= priority) @@ -898,7 +893,7 @@ efx_farch_filter_table_clear(struct efx_nic *efx, if (table->used == 0) efx_farch_filter_table_reset_search_depth(table); - spin_unlock_bh(&state->lock); + 
spin_unlock_bh(&efx->filter_lock); } /** @@ -917,13 +912,13 @@ void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority) u32 efx_filter_count_rx_used(struct efx_nic *efx, enum efx_filter_priority priority) { - struct efx_filter_state *state = efx->filter_state; + struct efx_farch_filter_state *state = efx->filter_state; enum efx_farch_filter_table_id table_id; struct efx_farch_filter_table *table; unsigned int filter_idx; u32 count = 0; - spin_lock_bh(&state->lock); + spin_lock_bh(&efx->filter_lock); for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; @@ -936,7 +931,7 @@ u32 efx_filter_count_rx_used(struct efx_nic *efx, } } - spin_unlock_bh(&state->lock); + spin_unlock_bh(&efx->filter_lock); return count; } @@ -945,13 +940,13 @@ s32 efx_filter_get_rx_ids(struct efx_nic *efx, enum efx_filter_priority priority, u32 *buf, u32 size) { - struct efx_filter_state *state = efx->filter_state; + struct efx_farch_filter_state *state = efx->filter_state; enum efx_farch_filter_table_id table_id; struct efx_farch_filter_table *table; unsigned int filter_idx; s32 count = 0; - spin_lock_bh(&state->lock); + spin_lock_bh(&efx->filter_lock); for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; @@ -970,7 +965,7 @@ s32 efx_filter_get_rx_ids(struct efx_nic *efx, } } out: - spin_unlock_bh(&state->lock); + spin_unlock_bh(&efx->filter_lock); return count; } @@ -978,13 +973,13 @@ out: /* Restore filter stater after reset */ void efx_restore_filters(struct efx_nic *efx) { - struct efx_filter_state *state = efx->filter_state; + struct efx_farch_filter_state *state = efx->filter_state; enum efx_farch_filter_table_id table_id; struct efx_farch_filter_table *table; efx_oword_t filter; unsigned int filter_idx; - spin_lock_bh(&state->lock); + spin_lock_bh(&efx->filter_lock); for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) { table = &state->table[table_id]; @@ -1005,28 +1000,28 @@ void efx_restore_filters(struct efx_nic *efx) efx_farch_filter_push_rx_config(efx); efx_farch_filter_push_tx_limits(efx); - spin_unlock_bh(&state->lock); + spin_unlock_bh(&efx->filter_lock); } int efx_probe_filters(struct efx_nic *efx) { - struct efx_filter_state *state; + struct efx_farch_filter_state *state; struct efx_farch_filter_table *table; unsigned table_id; - state = kzalloc(sizeof(*efx->filter_state), GFP_KERNEL); + state = kzalloc(sizeof(struct efx_farch_filter_state), GFP_KERNEL); if (!state) return -ENOMEM; efx->filter_state = state; - spin_lock_init(&state->lock); + spin_lock_init(&efx->filter_lock); if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { #ifdef CONFIG_RFS_ACCEL - state->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS, - sizeof(*state->rps_flow_id), - GFP_KERNEL); - if (!state->rps_flow_id) + efx->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS, + sizeof(*efx->rps_flow_id), + GFP_KERNEL); + if (!efx->rps_flow_id) goto fail; #endif table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; @@ -1086,7 +1081,7 @@ fail: void efx_remove_filters(struct efx_nic *efx) { - struct efx_filter_state *state = efx->filter_state; + struct efx_farch_filter_state *state = efx->filter_state; enum efx_farch_filter_table_id table_id; for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) { @@ -1094,7 +1089,7 @@ void efx_remove_filters(struct efx_nic *efx) vfree(state->table[table_id].spec); } #ifdef CONFIG_RFS_ACCEL - kfree(state->rps_flow_id); + kfree(efx->rps_flow_id); #endif kfree(state); } @@ -1102,13 +1097,13 @@ 
void efx_remove_filters(struct efx_nic *efx) /* Update scatter enable flags for filters pointing to our own RX queues */ void efx_filter_update_rx_scatter(struct efx_nic *efx) { - struct efx_filter_state *state = efx->filter_state; + struct efx_farch_filter_state *state = efx->filter_state; enum efx_farch_filter_table_id table_id; struct efx_farch_filter_table *table; efx_oword_t filter; unsigned int filter_idx; - spin_lock_bh(&state->lock); + spin_lock_bh(&efx->filter_lock); for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; @@ -1140,7 +1135,7 @@ void efx_filter_update_rx_scatter(struct efx_nic *efx) efx_farch_filter_push_rx_config(efx); - spin_unlock_bh(&state->lock); + spin_unlock_bh(&efx->filter_lock); } #ifdef CONFIG_RFS_ACCEL @@ -1150,7 +1145,6 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, { struct efx_nic *efx = netdev_priv(net_dev); struct efx_channel *channel; - struct efx_filter_state *state = efx->filter_state; struct efx_filter_spec spec; const struct iphdr *ip; const __be16 *ports; @@ -1196,7 +1190,7 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, return rc; /* Remember this so we can check whether to expire the filter later */ - state->rps_flow_id[rc] = flow_id; + efx->rps_flow_id[rc] = flow_id; channel = efx_get_channel(efx, skb_get_rx_queue(skb)); ++channel->rfs_filters_added; @@ -1211,17 +1205,17 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota) { - struct efx_filter_state *state = efx->filter_state; + struct efx_farch_filter_state *state = efx->filter_state; struct efx_farch_filter_table *table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; unsigned mask = table->size - 1; unsigned index; unsigned stop; - if (!spin_trylock_bh(&state->lock)) + if (!spin_trylock_bh(&efx->filter_lock)) return false; - index = state->rps_expire_index; + index = efx->rps_expire_index; stop = (index + quota) & mask; while (index != stop) { @@ -1229,20 +1223,20 @@ bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota) table->spec[index].priority == EFX_FILTER_PRI_HINT && rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id, - state->rps_flow_id[index], index)) { + efx->rps_flow_id[index], index)) { netif_info(efx, rx_status, efx->net_dev, "expiring filter %d [flow %u]\n", - index, state->rps_flow_id[index]); + index, efx->rps_flow_id[index]); efx_farch_filter_table_clear_entry(efx, table, index); } index = (index + 1) & mask; } - state->rps_expire_index = stop; + efx->rps_expire_index = stop; if (table->used == 0) efx_farch_filter_table_reset_search_depth(table); - spin_unlock_bh(&state->lock); + spin_unlock_bh(&efx->filter_lock); return true; } diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 694c572..5287a3c 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -676,7 +676,6 @@ union efx_multicast_hash { efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8]; }; -struct efx_filter_state; struct efx_vf; struct vfdi_status; @@ -763,6 +762,11 @@ struct vfdi_status; * @loopback_mode: Loopback status * @loopback_modes: Supported loopback mode bitmask * @loopback_selftest: Offline self-test private state + * @filter_lock: Filter table lock + * @filter_state: Architecture-dependent filter table state + * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS, + * indexed by filter 
ID + * @rps_expire_index: Next index to check for expiry in @rps_flow_id * @drain_pending: Count of RX and TX queues that haven't been flushed and drained. * @rxq_flush_pending: Count of number of receive queues that need to be flushed. * Decremented when the efx_flush_rx_queue() is called. @@ -898,7 +902,12 @@ struct efx_nic { void *loopback_selftest; - struct efx_filter_state *filter_state; + spinlock_t filter_lock; + void *filter_state; +#ifdef CONFIG_RFS_ACCEL + u32 *rps_flow_id; + unsigned int rps_expire_index; +#endif atomic_t drain_pending; atomic_t rxq_flush_pending; -- cgit v0.10.2 From 9a0a943321cc89a9efc8726e28d8473eafa73e29 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Sat, 27 Oct 2012 00:33:48 +0100 Subject: sfc: Refactor Falcon-arch search limit reset Currently every call to efx_farch_filter_table_clear_entry() is shortly followed by a conditional reset of the table limits. The new limits (0) are not pushed to hardware until the next filter insertion. Move both the reset and the hardware reconfiguration into efx_farch_filter_table_clear_entry(), and add an explanatory comment. Also, make consistent use of the term 'search limit' for the maximum number of probes the NIC must make when searching for a filter of a particular type. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c index 29986d3..ad66376 100644 --- a/drivers/net/ethernet/sfc/filter.c +++ b/drivers/net/ethernet/sfc/filter.c @@ -22,7 +22,7 @@ #define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3 #define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1 -/* Hard maximum hop limit. Hardware will time-out beyond 200-something. +/* Hard maximum search limit. Hardware will time-out beyond 200-something. * We also need to avoid infinite loops in efx_farch_filter_search() when the * table is full. */ @@ -74,7 +74,7 @@ struct efx_farch_filter_table { unsigned used; /* number currently used */ unsigned long *used_bitmap; struct efx_farch_filter_spec *spec; - unsigned search_depth[EFX_FARCH_FILTER_TYPE_COUNT]; + unsigned search_limit[EFX_FARCH_FILTER_TYPE_COUNT]; }; struct efx_farch_filter_state { @@ -129,12 +129,6 @@ efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec) return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 
2 : 0); } -static void -efx_farch_filter_table_reset_search_depth(struct efx_farch_filter_table *table) -{ - memset(table->search_depth, 0, sizeof(table->search_depth)); -} - static void efx_farch_filter_push_rx_config(struct efx_nic *efx) { struct efx_farch_filter_state *state = efx->filter_state; @@ -145,27 +139,27 @@ static void efx_farch_filter_push_rx_config(struct efx_nic *efx) table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT, - table->search_depth[EFX_FARCH_FILTER_TCP_FULL] + + table->search_limit[EFX_FARCH_FILTER_TCP_FULL] + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT, - table->search_depth[EFX_FARCH_FILTER_TCP_WILD] + + table->search_limit[EFX_FARCH_FILTER_TCP_WILD] + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT, - table->search_depth[EFX_FARCH_FILTER_UDP_FULL] + + table->search_limit[EFX_FARCH_FILTER_UDP_FULL] + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT, - table->search_depth[EFX_FARCH_FILTER_UDP_WILD] + + table->search_limit[EFX_FARCH_FILTER_UDP_WILD] + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC]; if (table->size) { EFX_SET_OWORD_FIELD( filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT, - table->search_depth[EFX_FARCH_FILTER_MAC_FULL] + + table->search_limit[EFX_FARCH_FILTER_MAC_FULL] + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); EFX_SET_OWORD_FIELD( filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT, - table->search_depth[EFX_FARCH_FILTER_MAC_WILD] + + table->search_limit[EFX_FARCH_FILTER_MAC_WILD] + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); } @@ -221,11 +215,11 @@ static void efx_farch_filter_push_tx_limits(struct efx_nic *efx) if (table->size) { EFX_SET_OWORD_FIELD( tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE, - table->search_depth[EFX_FARCH_FILTER_MAC_FULL] + + table->search_limit[EFX_FARCH_FILTER_MAC_FULL] + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); EFX_SET_OWORD_FIELD( tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE, - table->search_depth[EFX_FARCH_FILTER_MAC_WILD] + + table->search_limit[EFX_FARCH_FILTER_MAC_WILD] + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); } @@ -633,8 +627,8 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, return -EINVAL; netif_vdbg(efx, hw, efx->net_dev, - "%s: type %d search_depth=%d", __func__, spec.type, - table->search_depth[spec.type]); + "%s: type %d search_limit=%d", __func__, spec.type, + table->search_limit[spec.type]); if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) { /* One filter spec per type */ @@ -664,7 +658,7 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, u32 key = efx_farch_filter_build(&filter, &spec); unsigned int hash = efx_farch_filter_hash(key); unsigned int incr = efx_farch_filter_increment(key); - unsigned int max_rep_depth = table->search_depth[spec.type]; + unsigned int max_rep_depth = table->search_limit[spec.type]; unsigned int max_ins_depth = spec.priority <= EFX_FILTER_PRI_HINT ? 
EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX : @@ -732,8 +726,8 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) { efx_farch_filter_push_rx_config(efx); } else { - if (table->search_depth[spec.type] < depth) { - table->search_depth[spec.type] = depth; + if (table->search_limit[spec.type] < depth) { + table->search_limit[spec.type] = depth; if (spec.flags & EFX_FILTER_FLAG_TX) efx_farch_filter_push_tx_limits(efx); else @@ -779,6 +773,21 @@ efx_farch_filter_table_clear_entry(struct efx_nic *efx, efx_writeo(efx, &filter, table->offset + table->step * filter_idx); + + /* If this filter required a greater search depth than + * any other, the search limit for its type can now be + * decreased. However, it is hard to determine that + * unless the table has become completely empty - in + * which case, all its search limits can be set to 0. + */ + if (unlikely(table->used == 0)) { + memset(table->search_limit, 0, + sizeof(table->search_limit)); + if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC) + efx_farch_filter_push_tx_limits(efx); + else + efx_farch_filter_push_rx_config(efx); + } } } @@ -817,8 +826,6 @@ int efx_filter_remove_id_safe(struct efx_nic *efx, if (test_bit(filter_idx, table->used_bitmap) && spec->priority == priority) { efx_farch_filter_table_clear_entry(efx, table, filter_idx); - if (table->used == 0) - efx_farch_filter_table_reset_search_depth(table); rc = 0; } else { rc = -ENOENT; @@ -885,14 +892,10 @@ efx_farch_filter_table_clear(struct efx_nic *efx, unsigned int filter_idx; spin_lock_bh(&efx->filter_lock); - for (filter_idx = 0; filter_idx < table->size; ++filter_idx) if (table->spec[filter_idx].priority <= priority) efx_farch_filter_table_clear_entry(efx, table, filter_idx); - if (table->used == 0) - efx_farch_filter_table_reset_search_depth(table); - spin_unlock_bh(&efx->filter_lock); } @@ -1233,8 +1236,6 @@ bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota) } efx->rps_expire_index = stop; - if (table->used == 0) - efx_farch_filter_table_reset_search_depth(table); spin_unlock_bh(&efx->filter_lock); return true; -- cgit v0.10.2 From add7247718c003c8f7c275954083f7db85405bd9 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Thu, 8 Nov 2012 01:46:53 +0000 Subject: sfc: Make most filter operations NIC-type-specific Aside from accelerated RFS, there is almost nothing that can be shared between the filter table implementations for the Falcon architecture and EF10. Move the few shared functions into efx.c and rx.c and the rest into farch.c. Introduce efx_nic_type operations for the implementation and inline wrapper functions that call these. 
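
In outline, the resulting split is an ops-table dispatch: each struct efx_nic_type carries function pointers such as filter_insert and filter_remove_safe, and generic code only ever calls thin inline wrappers that forward through efx->type. The following is a small, compilable sketch of that pattern using hypothetical stand-in names (struct nic, struct nic_type, farch_filter_insert stand in for the patch's struct efx_nic, struct efx_nic_type and efx_farch_filter_insert); it condenses the idea, not the actual driver code below.

#include <stdio.h>

struct nic;

/* Per-generation operations table (hypothetical, reduced to one op) */
struct nic_type {
	int (*filter_insert)(struct nic *nic, int spec);
};

struct nic {
	const struct nic_type *type;	/* selected at probe time */
};

/* Falcon-arch style implementation: architecture-specific work goes here */
static int farch_filter_insert(struct nic *nic, int spec)
{
	return spec;	/* pretend the spec value is the returned filter ID */
}

static const struct nic_type falcon_b0_type = {
	.filter_insert = farch_filter_insert,
};

/* Generic wrapper: callers never name the implementation directly */
static inline int filter_insert(struct nic *nic, int spec)
{
	return nic->type->filter_insert(nic, spec);
}

int main(void)
{
	struct nic nic = { .type = &falcon_b0_type };
	printf("filter id = %d\n", filter_insert(&nic, 7));
	return 0;
}

The indirection costs one pointer dereference per call, but it lets a later EF10 implementation plug in a completely different filter table without touching the generic callers.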
Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile index ef7410f..a612726 100644 --- a/drivers/net/ethernet/sfc/Makefile +++ b/drivers/net/ethernet/sfc/Makefile @@ -1,5 +1,4 @@ sfc-y += efx.o nic.o farch.o falcon.o siena.o tx.o rx.o \ - filter.o \ selftest.o ethtool.o qt202x_phy.o mdio_10g.o \ tenxpress.o txc43128_phy.o falcon_boards.o \ mcdi.o mcdi_port.o mcdi_mon.o ptp.o diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 49d06ca..a2daaae 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -611,7 +611,7 @@ static void efx_start_datapath(struct efx_nic *efx) /* RX filters also have scatter-enabled flags */ if (efx->rx_scatter != old_rx_scatter) - efx_filter_update_rx_scatter(efx); + efx->type->filter_update_rx_scatter(efx); /* We must keep at least one descriptor in a TX ring empty. * We could avoid this when the queue size does not exactly @@ -1499,6 +1499,44 @@ static void efx_remove_nic(struct efx_nic *efx) efx->type->remove(efx); } +static int efx_probe_filters(struct efx_nic *efx) +{ + int rc; + + spin_lock_init(&efx->filter_lock); + + rc = efx->type->filter_table_probe(efx); + if (rc) + return rc; + +#ifdef CONFIG_RFS_ACCEL + if (efx->type->offload_features & NETIF_F_NTUPLE) { + efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters, + sizeof(*efx->rps_flow_id), + GFP_KERNEL); + if (!efx->rps_flow_id) { + efx->type->filter_table_remove(efx); + return -ENOMEM; + } + } +#endif + + return 0; +} + +static void efx_remove_filters(struct efx_nic *efx) +{ +#ifdef CONFIG_RFS_ACCEL + kfree(efx->rps_flow_id); +#endif + efx->type->filter_table_remove(efx); +} + +static void efx_restore_filters(struct efx_nic *efx) +{ + efx->type->filter_table_restore(efx); +} + /************************************************************************** * * NIC startup/shutdown diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 45de5b9..9e35738 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -68,27 +68,92 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); #define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx)) /* Filters */ -extern int efx_probe_filters(struct efx_nic *efx); -extern void efx_restore_filters(struct efx_nic *efx); -extern void efx_remove_filters(struct efx_nic *efx); -extern void efx_filter_update_rx_scatter(struct efx_nic *efx); -extern s32 efx_filter_insert_filter(struct efx_nic *efx, - struct efx_filter_spec *spec, - bool replace); -extern int efx_filter_remove_id_safe(struct efx_nic *efx, - enum efx_filter_priority priority, - u32 filter_id); -extern int efx_filter_get_filter_safe(struct efx_nic *efx, - enum efx_filter_priority priority, - u32 filter_id, struct efx_filter_spec *); -extern void efx_filter_clear_rx(struct efx_nic *efx, - enum efx_filter_priority priority); -extern u32 efx_filter_count_rx_used(struct efx_nic *efx, - enum efx_filter_priority priority); -extern u32 efx_filter_get_rx_id_limit(struct efx_nic *efx); -extern s32 efx_filter_get_rx_ids(struct efx_nic *efx, - enum efx_filter_priority priority, - u32 *buf, u32 size); + +/** + * efx_filter_insert_filter - add or replace a filter + * @efx: NIC in which to insert the filter + * @spec: Specification for the filter + * @replace_equal: Flag for whether the specified filter may replace an + * existing filter with equal priority + * + * On success, return the filter ID. + * On failure, return a negative error code. 
+ * + * If an existing filter has equal match values to the new filter + * spec, then the new filter might replace it, depending on the + * relative priorities. If the existing filter has lower priority, or + * if @replace_equal is set and it has equal priority, then it is + * replaced. Otherwise the function fails, returning -%EPERM if + * the existing filter has higher priority or -%EEXIST if it has + * equal priority. + */ +static inline s32 efx_filter_insert_filter(struct efx_nic *efx, + struct efx_filter_spec *spec, + bool replace_equal) +{ + return efx->type->filter_insert(efx, spec, replace_equal); +} + +/** + * efx_filter_remove_id_safe - remove a filter by ID, carefully + * @efx: NIC from which to remove the filter + * @priority: Priority of filter, as passed to @efx_filter_insert_filter + * @filter_id: ID of filter, as returned by @efx_filter_insert_filter + * + * This function will range-check @filter_id, so it is safe to call + * with a value passed from userland. + */ +static inline int efx_filter_remove_id_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id) +{ + return efx->type->filter_remove_safe(efx, priority, filter_id); +} + +/** + * efx_filter_get_filter_safe - retrieve a filter by ID, carefully + * @efx: NIC from which to remove the filter + * @priority: Priority of filter, as passed to @efx_filter_insert_filter + * @filter_id: ID of filter, as returned by @efx_filter_insert_filter + * @spec: Buffer in which to store filter specification + * + * This function will range-check @filter_id, so it is safe to call + * with a value passed from userland. + */ +static inline int +efx_filter_get_filter_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id, struct efx_filter_spec *spec) +{ + return efx->type->filter_get_safe(efx, priority, filter_id, spec); +} + +/** + * efx_farch_filter_clear_rx - remove RX filters by priority + * @efx: NIC from which to remove the filters + * @priority: Maximum priority to remove + */ +static inline void efx_filter_clear_rx(struct efx_nic *efx, + enum efx_filter_priority priority) +{ + return efx->type->filter_clear_rx(efx, priority); +} + +static inline u32 efx_filter_count_rx_used(struct efx_nic *efx, + enum efx_filter_priority priority) +{ + return efx->type->filter_count_rx_used(efx, priority); +} +static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx) +{ + return efx->type->filter_get_rx_id_limit(efx); +} +static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 *buf, u32 size) +{ + return efx->type->filter_get_rx_ids(efx, priority, buf, size); +} #ifdef CONFIG_RFS_ACCEL extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, u16 rxq_index, u32 flow_id); diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index 0fd8a88..6ea28f8 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -2400,6 +2400,21 @@ const struct efx_nic_type falcon_a1_nic_type = { .ev_read_ack = efx_farch_ev_read_ack, .ev_test_generate = efx_farch_ev_test_generate, + /* We don't expose the filter table on Falcon A1 as it is not + * mapped into function 0, but these implementations still + * work with a degenerate case of all tables set to size 0. 
+ */ + .filter_table_probe = efx_farch_filter_table_probe, + .filter_table_restore = efx_farch_filter_table_restore, + .filter_table_remove = efx_farch_filter_table_remove, + .filter_insert = efx_farch_filter_insert, + .filter_remove_safe = efx_farch_filter_remove_safe, + .filter_get_safe = efx_farch_filter_get_safe, + .filter_clear_rx = efx_farch_filter_clear_rx, + .filter_count_rx_used = efx_farch_filter_count_rx_used, + .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit, + .filter_get_rx_ids = efx_farch_filter_get_rx_ids, + .revision = EFX_REV_FALCON_A1, .txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER, .rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER, @@ -2468,6 +2483,21 @@ const struct efx_nic_type falcon_b0_nic_type = { .ev_process = efx_farch_ev_process, .ev_read_ack = efx_farch_ev_read_ack, .ev_test_generate = efx_farch_ev_test_generate, + .filter_table_probe = efx_farch_filter_table_probe, + .filter_table_restore = efx_farch_filter_table_restore, + .filter_table_remove = efx_farch_filter_table_remove, + .filter_update_rx_scatter = efx_farch_filter_update_rx_scatter, + .filter_insert = efx_farch_filter_insert, + .filter_remove_safe = efx_farch_filter_remove_safe, + .filter_get_safe = efx_farch_filter_get_safe, + .filter_clear_rx = efx_farch_filter_clear_rx, + .filter_count_rx_used = efx_farch_filter_count_rx_used, + .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit, + .filter_get_rx_ids = efx_farch_filter_get_rx_ids, +#ifdef CONFIG_RFS_ACCEL + .filter_rfs_insert = efx_farch_filter_rfs_insert, + .filter_rfs_expire_one = efx_farch_filter_rfs_expire_one, +#endif .revision = EFX_REV_FALCON_B0, .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, @@ -2483,5 +2513,6 @@ const struct efx_nic_type falcon_b0_nic_type = { .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH, .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE, .mcdi_max_ver = -1, + .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS, }; diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 7f50882..1941804 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -1779,3 +1779,1111 @@ void efx_farch_init_common(struct efx_nic *efx) efx_writeo(efx, &temp, FR_BZ_TX_PACE); } } + +/************************************************************************** + * + * Filter tables + * + ************************************************************************** + */ + +/* "Fudge factors" - difference between programmed value and actual depth. + * Due to pipelined implementation we need to program H/W with a value that + * is larger than the hop limit we want. + */ +#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3 +#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1 + +/* Hard maximum search limit. Hardware will time-out beyond 200-something. + * We also need to avoid infinite loops in efx_farch_filter_search() when the + * table is full. + */ +#define EFX_FARCH_FILTER_CTL_SRCH_MAX 200 + +/* Don't try very hard to find space for performance hints, as this is + * counter-productive. 
*/ +#define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5 + +enum efx_farch_filter_type { + EFX_FARCH_FILTER_TCP_FULL = 0, + EFX_FARCH_FILTER_TCP_WILD, + EFX_FARCH_FILTER_UDP_FULL, + EFX_FARCH_FILTER_UDP_WILD, + EFX_FARCH_FILTER_MAC_FULL = 4, + EFX_FARCH_FILTER_MAC_WILD, + EFX_FARCH_FILTER_UC_DEF = 8, + EFX_FARCH_FILTER_MC_DEF, + EFX_FARCH_FILTER_TYPE_COUNT, /* number of specific types */ +}; + +enum efx_farch_filter_table_id { + EFX_FARCH_FILTER_TABLE_RX_IP = 0, + EFX_FARCH_FILTER_TABLE_RX_MAC, + EFX_FARCH_FILTER_TABLE_RX_DEF, + EFX_FARCH_FILTER_TABLE_TX_MAC, + EFX_FARCH_FILTER_TABLE_COUNT, +}; + +enum efx_farch_filter_index { + EFX_FARCH_FILTER_INDEX_UC_DEF, + EFX_FARCH_FILTER_INDEX_MC_DEF, + EFX_FARCH_FILTER_SIZE_RX_DEF, +}; + +struct efx_farch_filter_spec { + u8 type:4; + u8 priority:4; + u8 flags; + u16 dmaq_id; + u32 data[3]; +}; + +struct efx_farch_filter_table { + enum efx_farch_filter_table_id id; + u32 offset; /* address of table relative to BAR */ + unsigned size; /* number of entries */ + unsigned step; /* step between entries */ + unsigned used; /* number currently used */ + unsigned long *used_bitmap; + struct efx_farch_filter_spec *spec; + unsigned search_limit[EFX_FARCH_FILTER_TYPE_COUNT]; +}; + +struct efx_farch_filter_state { + struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT]; +}; + +static void +efx_farch_filter_table_clear_entry(struct efx_nic *efx, + struct efx_farch_filter_table *table, + unsigned int filter_idx); + +/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit + * key derived from the n-tuple. The initial LFSR state is 0xffff. */ +static u16 efx_farch_filter_hash(u32 key) +{ + u16 tmp; + + /* First 16 rounds */ + tmp = 0x1fff ^ key >> 16; + tmp = tmp ^ tmp >> 3 ^ tmp >> 6; + tmp = tmp ^ tmp >> 9; + /* Last 16 rounds */ + tmp = tmp ^ tmp << 13 ^ key; + tmp = tmp ^ tmp >> 3 ^ tmp >> 6; + return tmp ^ tmp >> 9; +} + +/* To allow for hash collisions, filter search continues at these + * increments from the first possible entry selected by the hash. */ +static u16 efx_farch_filter_increment(u32 key) +{ + return key * 2 - 1; +} + +static enum efx_farch_filter_table_id +efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec) +{ + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != + (EFX_FARCH_FILTER_TCP_FULL >> 2)); + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != + (EFX_FARCH_FILTER_TCP_WILD >> 2)); + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != + (EFX_FARCH_FILTER_UDP_FULL >> 2)); + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != + (EFX_FARCH_FILTER_UDP_WILD >> 2)); + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC != + (EFX_FARCH_FILTER_MAC_FULL >> 2)); + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC != + (EFX_FARCH_FILTER_MAC_WILD >> 2)); + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC != + EFX_FARCH_FILTER_TABLE_RX_MAC + 2); + return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 
2 : 0); +} + +static void efx_farch_filter_push_rx_config(struct efx_nic *efx) +{ + struct efx_farch_filter_state *state = efx->filter_state; + struct efx_farch_filter_table *table; + efx_oword_t filter_ctl; + + efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); + + table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; + EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT, + table->search_limit[EFX_FARCH_FILTER_TCP_FULL] + + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); + EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT, + table->search_limit[EFX_FARCH_FILTER_TCP_WILD] + + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); + EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT, + table->search_limit[EFX_FARCH_FILTER_UDP_FULL] + + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); + EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT, + table->search_limit[EFX_FARCH_FILTER_UDP_WILD] + + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); + + table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC]; + if (table->size) { + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT, + table->search_limit[EFX_FARCH_FILTER_MAC_FULL] + + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT, + table->search_limit[EFX_FARCH_FILTER_MAC_WILD] + + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); + } + + table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; + if (table->size) { + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID, + table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id); + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED, + !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags & + EFX_FILTER_FLAG_RX_RSS)); + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID, + table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id); + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED, + !!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags & + EFX_FILTER_FLAG_RX_RSS)); + + /* There is a single bit to enable RX scatter for all + * unmatched packets. Only set it if scatter is + * enabled in both filter specs. + */ + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, + !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags & + table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags & + EFX_FILTER_FLAG_RX_SCATTER)); + } else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { + /* We don't expose 'default' filters because unmatched + * packets always go to the queue number found in the + * RSS table. But we still need to set the RX scatter + * bit here. 
+ */ + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, + efx->rx_scatter); + } + + efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); +} + +static void efx_farch_filter_push_tx_limits(struct efx_nic *efx) +{ + struct efx_farch_filter_state *state = efx->filter_state; + struct efx_farch_filter_table *table; + efx_oword_t tx_cfg; + + efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG); + + table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC]; + if (table->size) { + EFX_SET_OWORD_FIELD( + tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE, + table->search_limit[EFX_FARCH_FILTER_MAC_FULL] + + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); + EFX_SET_OWORD_FIELD( + tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE, + table->search_limit[EFX_FARCH_FILTER_MAC_WILD] + + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); + } + + efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG); +} + +static int +efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec, + const struct efx_filter_spec *gen_spec) +{ + bool is_full = false; + + if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) && + gen_spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT) + return -EINVAL; + + spec->priority = gen_spec->priority; + spec->flags = gen_spec->flags; + spec->dmaq_id = gen_spec->dmaq_id; + + switch (gen_spec->match_flags) { + case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | + EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT): + is_full = true; + /* fall through */ + case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): { + __be32 rhost, host1, host2; + __be16 rport, port1, port2; + + EFX_BUG_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX)); + + if (gen_spec->ether_type != htons(ETH_P_IP)) + return -EPROTONOSUPPORT; + if (gen_spec->loc_port == 0 || + (is_full && gen_spec->rem_port == 0)) + return -EADDRNOTAVAIL; + switch (gen_spec->ip_proto) { + case IPPROTO_TCP: + spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL : + EFX_FARCH_FILTER_TCP_WILD); + break; + case IPPROTO_UDP: + spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL : + EFX_FARCH_FILTER_UDP_WILD); + break; + default: + return -EPROTONOSUPPORT; + } + + /* Filter is constructed in terms of source and destination, + * with the odd wrinkle that the ports are swapped in a UDP + * wildcard filter. We need to convert from local and remote + * (= zero for wildcard) addresses. + */ + rhost = is_full ? gen_spec->rem_host[0] : 0; + rport = is_full ? gen_spec->rem_port : 0; + host1 = rhost; + host2 = gen_spec->loc_host[0]; + if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) { + port1 = gen_spec->loc_port; + port2 = rport; + } else { + port1 = rport; + port2 = gen_spec->loc_port; + } + spec->data[0] = ntohl(host1) << 16 | ntohs(port1); + spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16; + spec->data[2] = ntohl(host2); + + break; + } + + case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID: + is_full = true; + /* fall through */ + case EFX_FILTER_MATCH_LOC_MAC: + spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL : + EFX_FARCH_FILTER_MAC_WILD); + spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0; + spec->data[1] = (gen_spec->loc_mac[2] << 24 | + gen_spec->loc_mac[3] << 16 | + gen_spec->loc_mac[4] << 8 | + gen_spec->loc_mac[5]); + spec->data[2] = (gen_spec->loc_mac[0] << 8 | + gen_spec->loc_mac[1]); + break; + + case EFX_FILTER_MATCH_LOC_MAC_IG: + spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ? 
+ EFX_FARCH_FILTER_MC_DEF : + EFX_FARCH_FILTER_UC_DEF); + memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */ + break; + + default: + return -EPROTONOSUPPORT; + } + + return 0; +} + +static void +efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec, + const struct efx_farch_filter_spec *spec) +{ + bool is_full = false; + + /* *gen_spec should be completely initialised, to be consistent + * with efx_filter_init_{rx,tx}() and in case we want to copy + * it back to userland. + */ + memset(gen_spec, 0, sizeof(*gen_spec)); + + gen_spec->priority = spec->priority; + gen_spec->flags = spec->flags; + gen_spec->dmaq_id = spec->dmaq_id; + + switch (spec->type) { + case EFX_FARCH_FILTER_TCP_FULL: + case EFX_FARCH_FILTER_UDP_FULL: + is_full = true; + /* fall through */ + case EFX_FARCH_FILTER_TCP_WILD: + case EFX_FARCH_FILTER_UDP_WILD: { + __be32 host1, host2; + __be16 port1, port2; + + gen_spec->match_flags = + EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT; + if (is_full) + gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST | + EFX_FILTER_MATCH_REM_PORT); + gen_spec->ether_type = htons(ETH_P_IP); + gen_spec->ip_proto = + (spec->type == EFX_FARCH_FILTER_TCP_FULL || + spec->type == EFX_FARCH_FILTER_TCP_WILD) ? + IPPROTO_TCP : IPPROTO_UDP; + + host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16); + port1 = htons(spec->data[0]); + host2 = htonl(spec->data[2]); + port2 = htons(spec->data[1] >> 16); + if (spec->flags & EFX_FILTER_FLAG_TX) { + gen_spec->loc_host[0] = host1; + gen_spec->rem_host[0] = host2; + } else { + gen_spec->loc_host[0] = host2; + gen_spec->rem_host[0] = host1; + } + if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^ + (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) { + gen_spec->loc_port = port1; + gen_spec->rem_port = port2; + } else { + gen_spec->loc_port = port2; + gen_spec->rem_port = port1; + } + + break; + } + + case EFX_FARCH_FILTER_MAC_FULL: + is_full = true; + /* fall through */ + case EFX_FARCH_FILTER_MAC_WILD: + gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC; + if (is_full) + gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID; + gen_spec->loc_mac[0] = spec->data[2] >> 8; + gen_spec->loc_mac[1] = spec->data[2]; + gen_spec->loc_mac[2] = spec->data[1] >> 24; + gen_spec->loc_mac[3] = spec->data[1] >> 16; + gen_spec->loc_mac[4] = spec->data[1] >> 8; + gen_spec->loc_mac[5] = spec->data[1]; + gen_spec->outer_vid = htons(spec->data[0]); + break; + + case EFX_FARCH_FILTER_UC_DEF: + case EFX_FARCH_FILTER_MC_DEF: + gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG; + gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF; + break; + + default: + WARN_ON(1); + break; + } +} + +static void +efx_farch_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx) +{ + struct efx_farch_filter_state *state = efx->filter_state; + struct efx_farch_filter_table *table = + &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; + struct efx_farch_filter_spec *spec = &table->spec[filter_idx]; + + /* If there's only one channel then disable RSS for non VF + * traffic, thereby allowing VFs to use RSS when the PF can't. + */ + spec->type = EFX_FARCH_FILTER_UC_DEF + filter_idx; + spec->priority = EFX_FILTER_PRI_MANUAL; + spec->flags = (EFX_FILTER_FLAG_RX | + (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) | + (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0)); + spec->dmaq_id = 0; + table->used_bitmap[0] |= 1 << filter_idx; +} + +/* Build a filter entry and return its n-tuple key. 
*/ +static u32 efx_farch_filter_build(efx_oword_t *filter, + struct efx_farch_filter_spec *spec) +{ + u32 data3; + + switch (efx_farch_filter_spec_table_id(spec)) { + case EFX_FARCH_FILTER_TABLE_RX_IP: { + bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL || + spec->type == EFX_FARCH_FILTER_UDP_WILD); + EFX_POPULATE_OWORD_7( + *filter, + FRF_BZ_RSS_EN, + !!(spec->flags & EFX_FILTER_FLAG_RX_RSS), + FRF_BZ_SCATTER_EN, + !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER), + FRF_BZ_TCP_UDP, is_udp, + FRF_BZ_RXQ_ID, spec->dmaq_id, + EFX_DWORD_2, spec->data[2], + EFX_DWORD_1, spec->data[1], + EFX_DWORD_0, spec->data[0]); + data3 = is_udp; + break; + } + + case EFX_FARCH_FILTER_TABLE_RX_MAC: { + bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD; + EFX_POPULATE_OWORD_7( + *filter, + FRF_CZ_RMFT_RSS_EN, + !!(spec->flags & EFX_FILTER_FLAG_RX_RSS), + FRF_CZ_RMFT_SCATTER_EN, + !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER), + FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id, + FRF_CZ_RMFT_WILDCARD_MATCH, is_wild, + FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2], + FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1], + FRF_CZ_RMFT_VLAN_ID, spec->data[0]); + data3 = is_wild; + break; + } + + case EFX_FARCH_FILTER_TABLE_TX_MAC: { + bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD; + EFX_POPULATE_OWORD_5(*filter, + FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id, + FRF_CZ_TMFT_WILDCARD_MATCH, is_wild, + FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2], + FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1], + FRF_CZ_TMFT_VLAN_ID, spec->data[0]); + data3 = is_wild | spec->dmaq_id << 1; + break; + } + + default: + BUG(); + } + + return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3; +} + +static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left, + const struct efx_farch_filter_spec *right) +{ + if (left->type != right->type || + memcmp(left->data, right->data, sizeof(left->data))) + return false; + + if (left->flags & EFX_FILTER_FLAG_TX && + left->dmaq_id != right->dmaq_id) + return false; + + return true; +} + +/* + * Construct/deconstruct external filter IDs. At least the RX filter + * IDs must be ordered by matching priority, for RX NFC semantics. + * + * Deconstruction needs to be robust against invalid IDs so that + * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can + * accept user-provided IDs. 
+ */ + +#define EFX_FARCH_FILTER_MATCH_PRI_COUNT 5 + +static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = { + [EFX_FARCH_FILTER_TCP_FULL] = 0, + [EFX_FARCH_FILTER_UDP_FULL] = 0, + [EFX_FARCH_FILTER_TCP_WILD] = 1, + [EFX_FARCH_FILTER_UDP_WILD] = 1, + [EFX_FARCH_FILTER_MAC_FULL] = 2, + [EFX_FARCH_FILTER_MAC_WILD] = 3, + [EFX_FARCH_FILTER_UC_DEF] = 4, + [EFX_FARCH_FILTER_MC_DEF] = 4, +}; + +static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = { + EFX_FARCH_FILTER_TABLE_RX_IP, /* RX match pri 0 */ + EFX_FARCH_FILTER_TABLE_RX_IP, + EFX_FARCH_FILTER_TABLE_RX_MAC, + EFX_FARCH_FILTER_TABLE_RX_MAC, + EFX_FARCH_FILTER_TABLE_RX_DEF, /* RX match pri 4 */ + EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 0 */ + EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 1 */ +}; + +#define EFX_FARCH_FILTER_INDEX_WIDTH 13 +#define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1) + +static inline u32 +efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec, + unsigned int index) +{ + unsigned int range; + + range = efx_farch_filter_type_match_pri[spec->type]; + if (!(spec->flags & EFX_FILTER_FLAG_RX)) + range += EFX_FARCH_FILTER_MATCH_PRI_COUNT; + + return range << EFX_FARCH_FILTER_INDEX_WIDTH | index; +} + +static inline enum efx_farch_filter_table_id +efx_farch_filter_id_table_id(u32 id) +{ + unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH; + + if (range < ARRAY_SIZE(efx_farch_filter_range_table)) + return efx_farch_filter_range_table[range]; + else + return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */ +} + +static inline unsigned int efx_farch_filter_id_index(u32 id) +{ + return id & EFX_FARCH_FILTER_INDEX_MASK; +} + +u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx) +{ + struct efx_farch_filter_state *state = efx->filter_state; + unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1; + enum efx_farch_filter_table_id table_id; + + do { + table_id = efx_farch_filter_range_table[range]; + if (state->table[table_id].size != 0) + return range << EFX_FARCH_FILTER_INDEX_WIDTH | + state->table[table_id].size; + } while (range--); + + return 0; +} + +s32 efx_farch_filter_insert(struct efx_nic *efx, + struct efx_filter_spec *gen_spec, + bool replace_equal) +{ + struct efx_farch_filter_state *state = efx->filter_state; + struct efx_farch_filter_table *table; + struct efx_farch_filter_spec spec; + efx_oword_t filter; + int rep_index, ins_index; + unsigned int depth = 0; + int rc; + + rc = efx_farch_filter_from_gen_spec(&spec, gen_spec); + if (rc) + return rc; + + table = &state->table[efx_farch_filter_spec_table_id(&spec)]; + if (table->size == 0) + return -EINVAL; + + netif_vdbg(efx, hw, efx->net_dev, + "%s: type %d search_limit=%d", __func__, spec.type, + table->search_limit[spec.type]); + + if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) { + /* One filter spec per type */ + BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0); + BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF != + EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF); + rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF; + ins_index = rep_index; + + spin_lock_bh(&efx->filter_lock); + } else { + /* Search concurrently for + * (1) a filter to be replaced (rep_index): any filter + * with the same match values, up to the current + * search depth for this type, and + * (2) the insertion point (ins_index): (1) or any + * free slot before it or up to the maximum search + * depth for this priority + * We fail if we cannot find (2). 
+ * + * We can stop once either + * (a) we find (1), in which case we have definitely + * found (2) as well; or + * (b) we have searched exhaustively for (1), and have + * either found (2) or searched exhaustively for it + */ + u32 key = efx_farch_filter_build(&filter, &spec); + unsigned int hash = efx_farch_filter_hash(key); + unsigned int incr = efx_farch_filter_increment(key); + unsigned int max_rep_depth = table->search_limit[spec.type]; + unsigned int max_ins_depth = + spec.priority <= EFX_FILTER_PRI_HINT ? + EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX : + EFX_FARCH_FILTER_CTL_SRCH_MAX; + unsigned int i = hash & (table->size - 1); + + ins_index = -1; + depth = 1; + + spin_lock_bh(&efx->filter_lock); + + for (;;) { + if (!test_bit(i, table->used_bitmap)) { + if (ins_index < 0) + ins_index = i; + } else if (efx_farch_filter_equal(&spec, + &table->spec[i])) { + /* Case (a) */ + if (ins_index < 0) + ins_index = i; + rep_index = i; + break; + } + + if (depth >= max_rep_depth && + (ins_index >= 0 || depth >= max_ins_depth)) { + /* Case (b) */ + if (ins_index < 0) { + rc = -EBUSY; + goto out; + } + rep_index = -1; + break; + } + + i = (i + incr) & (table->size - 1); + ++depth; + } + } + + /* If we found a filter to be replaced, check whether we + * should do so + */ + if (rep_index >= 0) { + struct efx_farch_filter_spec *saved_spec = + &table->spec[rep_index]; + + if (spec.priority == saved_spec->priority && !replace_equal) { + rc = -EEXIST; + goto out; + } + if (spec.priority < saved_spec->priority) { + rc = -EPERM; + goto out; + } + } + + /* Insert the filter */ + if (ins_index != rep_index) { + __set_bit(ins_index, table->used_bitmap); + ++table->used; + } + table->spec[ins_index] = spec; + + if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) { + efx_farch_filter_push_rx_config(efx); + } else { + if (table->search_limit[spec.type] < depth) { + table->search_limit[spec.type] = depth; + if (spec.flags & EFX_FILTER_FLAG_TX) + efx_farch_filter_push_tx_limits(efx); + else + efx_farch_filter_push_rx_config(efx); + } + + efx_writeo(efx, &filter, + table->offset + table->step * ins_index); + + /* If we were able to replace a filter by inserting + * at a lower depth, clear the replaced filter + */ + if (ins_index != rep_index && rep_index >= 0) + efx_farch_filter_table_clear_entry(efx, table, + rep_index); + } + + netif_vdbg(efx, hw, efx->net_dev, + "%s: filter type %d index %d rxq %u set", + __func__, spec.type, ins_index, spec.dmaq_id); + rc = efx_farch_filter_make_id(&spec, ins_index); + +out: + spin_unlock_bh(&efx->filter_lock); + return rc; +} + +static void +efx_farch_filter_table_clear_entry(struct efx_nic *efx, + struct efx_farch_filter_table *table, + unsigned int filter_idx) +{ + static efx_oword_t filter; + + if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) { + /* RX default filters must always exist */ + efx_farch_filter_reset_rx_def(efx, filter_idx); + efx_farch_filter_push_rx_config(efx); + } else if (test_bit(filter_idx, table->used_bitmap)) { + __clear_bit(filter_idx, table->used_bitmap); + --table->used; + memset(&table->spec[filter_idx], 0, sizeof(table->spec[0])); + + efx_writeo(efx, &filter, + table->offset + table->step * filter_idx); + + /* If this filter required a greater search depth than + * any other, the search limit for its type can now be + * decreased. However, it is hard to determine that + * unless the table has become completely empty - in + * which case, all its search limits can be set to 0. 
+ */ + if (unlikely(table->used == 0)) { + memset(table->search_limit, 0, + sizeof(table->search_limit)); + if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC) + efx_farch_filter_push_tx_limits(efx); + else + efx_farch_filter_push_rx_config(efx); + } + } +} + +int efx_farch_filter_remove_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id) +{ + struct efx_farch_filter_state *state = efx->filter_state; + enum efx_farch_filter_table_id table_id; + struct efx_farch_filter_table *table; + unsigned int filter_idx; + struct efx_farch_filter_spec *spec; + int rc; + + table_id = efx_farch_filter_id_table_id(filter_id); + if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT) + return -ENOENT; + table = &state->table[table_id]; + + filter_idx = efx_farch_filter_id_index(filter_id); + if (filter_idx >= table->size) + return -ENOENT; + spec = &table->spec[filter_idx]; + + spin_lock_bh(&efx->filter_lock); + + if (test_bit(filter_idx, table->used_bitmap) && + spec->priority == priority) { + efx_farch_filter_table_clear_entry(efx, table, filter_idx); + rc = 0; + } else { + rc = -ENOENT; + } + + spin_unlock_bh(&efx->filter_lock); + + return rc; +} + +int efx_farch_filter_get_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id, struct efx_filter_spec *spec_buf) +{ + struct efx_farch_filter_state *state = efx->filter_state; + enum efx_farch_filter_table_id table_id; + struct efx_farch_filter_table *table; + struct efx_farch_filter_spec *spec; + unsigned int filter_idx; + int rc; + + table_id = efx_farch_filter_id_table_id(filter_id); + if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT) + return -ENOENT; + table = &state->table[table_id]; + + filter_idx = efx_farch_filter_id_index(filter_id); + if (filter_idx >= table->size) + return -ENOENT; + spec = &table->spec[filter_idx]; + + spin_lock_bh(&efx->filter_lock); + + if (test_bit(filter_idx, table->used_bitmap) && + spec->priority == priority) { + efx_farch_filter_to_gen_spec(spec_buf, spec); + rc = 0; + } else { + rc = -ENOENT; + } + + spin_unlock_bh(&efx->filter_lock); + + return rc; +} + +static void +efx_farch_filter_table_clear(struct efx_nic *efx, + enum efx_farch_filter_table_id table_id, + enum efx_filter_priority priority) +{ + struct efx_farch_filter_state *state = efx->filter_state; + struct efx_farch_filter_table *table = &state->table[table_id]; + unsigned int filter_idx; + + spin_lock_bh(&efx->filter_lock); + for (filter_idx = 0; filter_idx < table->size; ++filter_idx) + if (table->spec[filter_idx].priority <= priority) + efx_farch_filter_table_clear_entry(efx, table, + filter_idx); + spin_unlock_bh(&efx->filter_lock); +} + +void efx_farch_filter_clear_rx(struct efx_nic *efx, + enum efx_filter_priority priority) +{ + efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP, + priority); + efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC, + priority); +} + +u32 efx_farch_filter_count_rx_used(struct efx_nic *efx, + enum efx_filter_priority priority) +{ + struct efx_farch_filter_state *state = efx->filter_state; + enum efx_farch_filter_table_id table_id; + struct efx_farch_filter_table *table; + unsigned int filter_idx; + u32 count = 0; + + spin_lock_bh(&efx->filter_lock); + + for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; + table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; + table_id++) { + table = &state->table[table_id]; + for (filter_idx = 0; filter_idx < table->size; filter_idx++) { + if (test_bit(filter_idx, table->used_bitmap) && + table->spec[filter_idx].priority 
== priority) + ++count; + } + } + + spin_unlock_bh(&efx->filter_lock); + + return count; +} + +s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 *buf, u32 size) +{ + struct efx_farch_filter_state *state = efx->filter_state; + enum efx_farch_filter_table_id table_id; + struct efx_farch_filter_table *table; + unsigned int filter_idx; + s32 count = 0; + + spin_lock_bh(&efx->filter_lock); + + for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; + table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; + table_id++) { + table = &state->table[table_id]; + for (filter_idx = 0; filter_idx < table->size; filter_idx++) { + if (test_bit(filter_idx, table->used_bitmap) && + table->spec[filter_idx].priority == priority) { + if (count == size) { + count = -EMSGSIZE; + goto out; + } + buf[count++] = efx_farch_filter_make_id( + &table->spec[filter_idx], filter_idx); + } + } + } +out: + spin_unlock_bh(&efx->filter_lock); + + return count; +} + +/* Restore filter stater after reset */ +void efx_farch_filter_table_restore(struct efx_nic *efx) +{ + struct efx_farch_filter_state *state = efx->filter_state; + enum efx_farch_filter_table_id table_id; + struct efx_farch_filter_table *table; + efx_oword_t filter; + unsigned int filter_idx; + + spin_lock_bh(&efx->filter_lock); + + for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) { + table = &state->table[table_id]; + + /* Check whether this is a regular register table */ + if (table->step == 0) + continue; + + for (filter_idx = 0; filter_idx < table->size; filter_idx++) { + if (!test_bit(filter_idx, table->used_bitmap)) + continue; + efx_farch_filter_build(&filter, &table->spec[filter_idx]); + efx_writeo(efx, &filter, + table->offset + table->step * filter_idx); + } + } + + efx_farch_filter_push_rx_config(efx); + efx_farch_filter_push_tx_limits(efx); + + spin_unlock_bh(&efx->filter_lock); +} + +void efx_farch_filter_table_remove(struct efx_nic *efx) +{ + struct efx_farch_filter_state *state = efx->filter_state; + enum efx_farch_filter_table_id table_id; + + for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) { + kfree(state->table[table_id].used_bitmap); + vfree(state->table[table_id].spec); + } + kfree(state); +} + +int efx_farch_filter_table_probe(struct efx_nic *efx) +{ + struct efx_farch_filter_state *state; + struct efx_farch_filter_table *table; + unsigned table_id; + + state = kzalloc(sizeof(struct efx_farch_filter_state), GFP_KERNEL); + if (!state) + return -ENOMEM; + efx->filter_state = state; + + if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { + table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; + table->id = EFX_FARCH_FILTER_TABLE_RX_IP; + table->offset = FR_BZ_RX_FILTER_TBL0; + table->size = FR_BZ_RX_FILTER_TBL0_ROWS; + table->step = FR_BZ_RX_FILTER_TBL0_STEP; + } + + if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { + table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC]; + table->id = EFX_FARCH_FILTER_TABLE_RX_MAC; + table->offset = FR_CZ_RX_MAC_FILTER_TBL0; + table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS; + table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP; + + table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; + table->id = EFX_FARCH_FILTER_TABLE_RX_DEF; + table->size = EFX_FARCH_FILTER_SIZE_RX_DEF; + + table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC]; + table->id = EFX_FARCH_FILTER_TABLE_TX_MAC; + table->offset = FR_CZ_TX_MAC_FILTER_TBL0; + table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS; + table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP; + } + + for (table_id = 0; table_id < 
EFX_FARCH_FILTER_TABLE_COUNT; table_id++) { + table = &state->table[table_id]; + if (table->size == 0) + continue; + table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size), + sizeof(unsigned long), + GFP_KERNEL); + if (!table->used_bitmap) + goto fail; + table->spec = vzalloc(table->size * sizeof(*table->spec)); + if (!table->spec) + goto fail; + } + + if (state->table[EFX_FARCH_FILTER_TABLE_RX_DEF].size) { + /* RX default filters must always exist */ + unsigned i; + for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) + efx_farch_filter_reset_rx_def(efx, i); + } + + efx_farch_filter_push_rx_config(efx); + + return 0; + +fail: + efx_farch_filter_table_remove(efx); + return -ENOMEM; +} + +/* Update scatter enable flags for filters pointing to our own RX queues */ +void efx_farch_filter_update_rx_scatter(struct efx_nic *efx) +{ + struct efx_farch_filter_state *state = efx->filter_state; + enum efx_farch_filter_table_id table_id; + struct efx_farch_filter_table *table; + efx_oword_t filter; + unsigned int filter_idx; + + spin_lock_bh(&efx->filter_lock); + + for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; + table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; + table_id++) { + table = &state->table[table_id]; + + for (filter_idx = 0; filter_idx < table->size; filter_idx++) { + if (!test_bit(filter_idx, table->used_bitmap) || + table->spec[filter_idx].dmaq_id >= + efx->n_rx_channels) + continue; + + if (efx->rx_scatter) + table->spec[filter_idx].flags |= + EFX_FILTER_FLAG_RX_SCATTER; + else + table->spec[filter_idx].flags &= + ~EFX_FILTER_FLAG_RX_SCATTER; + + if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF) + /* Pushed by efx_farch_filter_push_rx_config() */ + continue; + + efx_farch_filter_build(&filter, &table->spec[filter_idx]); + efx_writeo(efx, &filter, + table->offset + table->step * filter_idx); + } + } + + efx_farch_filter_push_rx_config(efx); + + spin_unlock_bh(&efx->filter_lock); +} + +#ifdef CONFIG_RFS_ACCEL + +s32 efx_farch_filter_rfs_insert(struct efx_nic *efx, + struct efx_filter_spec *gen_spec) +{ + return efx_farch_filter_insert(efx, gen_spec, true); +} + +bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, + unsigned int index) +{ + struct efx_farch_filter_state *state = efx->filter_state; + struct efx_farch_filter_table *table = + &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; + + if (test_bit(index, table->used_bitmap) && + table->spec[index].priority == EFX_FILTER_PRI_HINT && + rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id, + flow_id, index)) { + efx_farch_filter_table_clear_entry(efx, table, index); + return true; + } + + return false; +} + +#endif /* CONFIG_RFS_ACCEL */ diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c deleted file mode 100644 index ad66376..0000000 --- a/drivers/net/ethernet/sfc/filter.c +++ /dev/null @@ -1,1244 +0,0 @@ -/**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards - * Copyright 2005-2010 Solarflare Communications Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation, incorporated herein by reference. - */ - -#include -#include -#include "efx.h" -#include "filter.h" -#include "io.h" -#include "nic.h" -#include "farch_regs.h" - -/* "Fudge factors" - difference between programmed value and actual depth. 
- * Due to pipelined implementation we need to program H/W with a value that - * is larger than the hop limit we want. - */ -#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3 -#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1 - -/* Hard maximum search limit. Hardware will time-out beyond 200-something. - * We also need to avoid infinite loops in efx_farch_filter_search() when the - * table is full. - */ -#define EFX_FARCH_FILTER_CTL_SRCH_MAX 200 - -/* Don't try very hard to find space for performance hints, as this is - * counter-productive. */ -#define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5 - -enum efx_farch_filter_type { - EFX_FARCH_FILTER_TCP_FULL = 0, - EFX_FARCH_FILTER_TCP_WILD, - EFX_FARCH_FILTER_UDP_FULL, - EFX_FARCH_FILTER_UDP_WILD, - EFX_FARCH_FILTER_MAC_FULL = 4, - EFX_FARCH_FILTER_MAC_WILD, - EFX_FARCH_FILTER_UC_DEF = 8, - EFX_FARCH_FILTER_MC_DEF, - EFX_FARCH_FILTER_TYPE_COUNT, /* number of specific types */ -}; - -enum efx_farch_filter_table_id { - EFX_FARCH_FILTER_TABLE_RX_IP = 0, - EFX_FARCH_FILTER_TABLE_RX_MAC, - EFX_FARCH_FILTER_TABLE_RX_DEF, - EFX_FARCH_FILTER_TABLE_TX_MAC, - EFX_FARCH_FILTER_TABLE_COUNT, -}; - -enum efx_farch_filter_index { - EFX_FARCH_FILTER_INDEX_UC_DEF, - EFX_FARCH_FILTER_INDEX_MC_DEF, - EFX_FARCH_FILTER_SIZE_RX_DEF, -}; - -struct efx_farch_filter_spec { - u8 type:4; - u8 priority:4; - u8 flags; - u16 dmaq_id; - u32 data[3]; -}; - -struct efx_farch_filter_table { - enum efx_farch_filter_table_id id; - u32 offset; /* address of table relative to BAR */ - unsigned size; /* number of entries */ - unsigned step; /* step between entries */ - unsigned used; /* number currently used */ - unsigned long *used_bitmap; - struct efx_farch_filter_spec *spec; - unsigned search_limit[EFX_FARCH_FILTER_TYPE_COUNT]; -}; - -struct efx_farch_filter_state { - struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT]; -}; - -static void -efx_farch_filter_table_clear_entry(struct efx_nic *efx, - struct efx_farch_filter_table *table, - unsigned int filter_idx); - -/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit - * key derived from the n-tuple. The initial LFSR state is 0xffff. */ -static u16 efx_farch_filter_hash(u32 key) -{ - u16 tmp; - - /* First 16 rounds */ - tmp = 0x1fff ^ key >> 16; - tmp = tmp ^ tmp >> 3 ^ tmp >> 6; - tmp = tmp ^ tmp >> 9; - /* Last 16 rounds */ - tmp = tmp ^ tmp << 13 ^ key; - tmp = tmp ^ tmp >> 3 ^ tmp >> 6; - return tmp ^ tmp >> 9; -} - -/* To allow for hash collisions, filter search continues at these - * increments from the first possible entry selected by the hash. */ -static u16 efx_farch_filter_increment(u32 key) -{ - return key * 2 - 1; -} - -static enum efx_farch_filter_table_id -efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec) -{ - BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != - (EFX_FARCH_FILTER_TCP_FULL >> 2)); - BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != - (EFX_FARCH_FILTER_TCP_WILD >> 2)); - BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != - (EFX_FARCH_FILTER_UDP_FULL >> 2)); - BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != - (EFX_FARCH_FILTER_UDP_WILD >> 2)); - BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC != - (EFX_FARCH_FILTER_MAC_FULL >> 2)); - BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC != - (EFX_FARCH_FILTER_MAC_WILD >> 2)); - BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC != - EFX_FARCH_FILTER_TABLE_RX_MAC + 2); - return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 
2 : 0); -} - -static void efx_farch_filter_push_rx_config(struct efx_nic *efx) -{ - struct efx_farch_filter_state *state = efx->filter_state; - struct efx_farch_filter_table *table; - efx_oword_t filter_ctl; - - efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); - - table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; - EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT, - table->search_limit[EFX_FARCH_FILTER_TCP_FULL] + - EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); - EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT, - table->search_limit[EFX_FARCH_FILTER_TCP_WILD] + - EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); - EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT, - table->search_limit[EFX_FARCH_FILTER_UDP_FULL] + - EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); - EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT, - table->search_limit[EFX_FARCH_FILTER_UDP_WILD] + - EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); - - table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC]; - if (table->size) { - EFX_SET_OWORD_FIELD( - filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT, - table->search_limit[EFX_FARCH_FILTER_MAC_FULL] + - EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); - EFX_SET_OWORD_FIELD( - filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT, - table->search_limit[EFX_FARCH_FILTER_MAC_WILD] + - EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); - } - - table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; - if (table->size) { - EFX_SET_OWORD_FIELD( - filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID, - table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id); - EFX_SET_OWORD_FIELD( - filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED, - !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags & - EFX_FILTER_FLAG_RX_RSS)); - EFX_SET_OWORD_FIELD( - filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID, - table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id); - EFX_SET_OWORD_FIELD( - filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED, - !!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags & - EFX_FILTER_FLAG_RX_RSS)); - - /* There is a single bit to enable RX scatter for all - * unmatched packets. Only set it if scatter is - * enabled in both filter specs. - */ - EFX_SET_OWORD_FIELD( - filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, - !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags & - table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags & - EFX_FILTER_FLAG_RX_SCATTER)); - } else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { - /* We don't expose 'default' filters because unmatched - * packets always go to the queue number found in the - * RSS table. But we still need to set the RX scatter - * bit here. 
- */ - EFX_SET_OWORD_FIELD( - filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, - efx->rx_scatter); - } - - efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); -} - -static void efx_farch_filter_push_tx_limits(struct efx_nic *efx) -{ - struct efx_farch_filter_state *state = efx->filter_state; - struct efx_farch_filter_table *table; - efx_oword_t tx_cfg; - - efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG); - - table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC]; - if (table->size) { - EFX_SET_OWORD_FIELD( - tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE, - table->search_limit[EFX_FARCH_FILTER_MAC_FULL] + - EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); - EFX_SET_OWORD_FIELD( - tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE, - table->search_limit[EFX_FARCH_FILTER_MAC_WILD] + - EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); - } - - efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG); -} - -static int -efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec, - const struct efx_filter_spec *gen_spec) -{ - bool is_full = false; - - if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) && - gen_spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT) - return -EINVAL; - - spec->priority = gen_spec->priority; - spec->flags = gen_spec->flags; - spec->dmaq_id = gen_spec->dmaq_id; - - switch (gen_spec->match_flags) { - case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | - EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | - EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT): - is_full = true; - /* fall through */ - case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | - EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): { - __be32 rhost, host1, host2; - __be16 rport, port1, port2; - - EFX_BUG_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX)); - - if (gen_spec->ether_type != htons(ETH_P_IP)) - return -EPROTONOSUPPORT; - if (gen_spec->loc_port == 0 || - (is_full && gen_spec->rem_port == 0)) - return -EADDRNOTAVAIL; - switch (gen_spec->ip_proto) { - case IPPROTO_TCP: - spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL : - EFX_FARCH_FILTER_TCP_WILD); - break; - case IPPROTO_UDP: - spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL : - EFX_FARCH_FILTER_UDP_WILD); - break; - default: - return -EPROTONOSUPPORT; - } - - /* Filter is constructed in terms of source and destination, - * with the odd wrinkle that the ports are swapped in a UDP - * wildcard filter. We need to convert from local and remote - * (= zero for wildcard) addresses. - */ - rhost = is_full ? gen_spec->rem_host[0] : 0; - rport = is_full ? gen_spec->rem_port : 0; - host1 = rhost; - host2 = gen_spec->loc_host[0]; - if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) { - port1 = gen_spec->loc_port; - port2 = rport; - } else { - port1 = rport; - port2 = gen_spec->loc_port; - } - spec->data[0] = ntohl(host1) << 16 | ntohs(port1); - spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16; - spec->data[2] = ntohl(host2); - - break; - } - - case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID: - is_full = true; - /* fall through */ - case EFX_FILTER_MATCH_LOC_MAC: - spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL : - EFX_FARCH_FILTER_MAC_WILD); - spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0; - spec->data[1] = (gen_spec->loc_mac[2] << 24 | - gen_spec->loc_mac[3] << 16 | - gen_spec->loc_mac[4] << 8 | - gen_spec->loc_mac[5]); - spec->data[2] = (gen_spec->loc_mac[0] << 8 | - gen_spec->loc_mac[1]); - break; - - case EFX_FILTER_MATCH_LOC_MAC_IG: - spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ? 
- EFX_FARCH_FILTER_MC_DEF : - EFX_FARCH_FILTER_UC_DEF); - memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */ - break; - - default: - return -EPROTONOSUPPORT; - } - - return 0; -} - -static void -efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec, - const struct efx_farch_filter_spec *spec) -{ - bool is_full = false; - - /* *gen_spec should be completely initialised, to be consistent - * with efx_filter_init_{rx,tx}() and in case we want to copy - * it back to userland. - */ - memset(gen_spec, 0, sizeof(*gen_spec)); - - gen_spec->priority = spec->priority; - gen_spec->flags = spec->flags; - gen_spec->dmaq_id = spec->dmaq_id; - - switch (spec->type) { - case EFX_FARCH_FILTER_TCP_FULL: - case EFX_FARCH_FILTER_UDP_FULL: - is_full = true; - /* fall through */ - case EFX_FARCH_FILTER_TCP_WILD: - case EFX_FARCH_FILTER_UDP_WILD: { - __be32 host1, host2; - __be16 port1, port2; - - gen_spec->match_flags = - EFX_FILTER_MATCH_ETHER_TYPE | - EFX_FILTER_MATCH_IP_PROTO | - EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT; - if (is_full) - gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST | - EFX_FILTER_MATCH_REM_PORT); - gen_spec->ether_type = htons(ETH_P_IP); - gen_spec->ip_proto = - (spec->type == EFX_FARCH_FILTER_TCP_FULL || - spec->type == EFX_FARCH_FILTER_TCP_WILD) ? - IPPROTO_TCP : IPPROTO_UDP; - - host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16); - port1 = htons(spec->data[0]); - host2 = htonl(spec->data[2]); - port2 = htons(spec->data[1] >> 16); - if (spec->flags & EFX_FILTER_FLAG_TX) { - gen_spec->loc_host[0] = host1; - gen_spec->rem_host[0] = host2; - } else { - gen_spec->loc_host[0] = host2; - gen_spec->rem_host[0] = host1; - } - if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^ - (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) { - gen_spec->loc_port = port1; - gen_spec->rem_port = port2; - } else { - gen_spec->loc_port = port2; - gen_spec->rem_port = port1; - } - - break; - } - - case EFX_FARCH_FILTER_MAC_FULL: - is_full = true; - /* fall through */ - case EFX_FARCH_FILTER_MAC_WILD: - gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC; - if (is_full) - gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID; - gen_spec->loc_mac[0] = spec->data[2] >> 8; - gen_spec->loc_mac[1] = spec->data[2]; - gen_spec->loc_mac[2] = spec->data[1] >> 24; - gen_spec->loc_mac[3] = spec->data[1] >> 16; - gen_spec->loc_mac[4] = spec->data[1] >> 8; - gen_spec->loc_mac[5] = spec->data[1]; - gen_spec->outer_vid = htons(spec->data[0]); - break; - - case EFX_FARCH_FILTER_UC_DEF: - case EFX_FARCH_FILTER_MC_DEF: - gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG; - gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF; - break; - - default: - WARN_ON(1); - break; - } -} - -static void -efx_farch_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx) -{ - struct efx_farch_filter_state *state = efx->filter_state; - struct efx_farch_filter_table *table = - &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; - struct efx_farch_filter_spec *spec = &table->spec[filter_idx]; - - /* If there's only one channel then disable RSS for non VF - * traffic, thereby allowing VFs to use RSS when the PF can't. - */ - spec->type = EFX_FARCH_FILTER_UC_DEF + filter_idx; - spec->priority = EFX_FILTER_PRI_MANUAL; - spec->flags = (EFX_FILTER_FLAG_RX | - (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) | - (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0)); - spec->dmaq_id = 0; - table->used_bitmap[0] |= 1 << filter_idx; -} - -/* Build a filter entry and return its n-tuple key. 
*/ -static u32 efx_farch_filter_build(efx_oword_t *filter, - struct efx_farch_filter_spec *spec) -{ - u32 data3; - - switch (efx_farch_filter_spec_table_id(spec)) { - case EFX_FARCH_FILTER_TABLE_RX_IP: { - bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL || - spec->type == EFX_FARCH_FILTER_UDP_WILD); - EFX_POPULATE_OWORD_7( - *filter, - FRF_BZ_RSS_EN, - !!(spec->flags & EFX_FILTER_FLAG_RX_RSS), - FRF_BZ_SCATTER_EN, - !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER), - FRF_BZ_TCP_UDP, is_udp, - FRF_BZ_RXQ_ID, spec->dmaq_id, - EFX_DWORD_2, spec->data[2], - EFX_DWORD_1, spec->data[1], - EFX_DWORD_0, spec->data[0]); - data3 = is_udp; - break; - } - - case EFX_FARCH_FILTER_TABLE_RX_MAC: { - bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD; - EFX_POPULATE_OWORD_7( - *filter, - FRF_CZ_RMFT_RSS_EN, - !!(spec->flags & EFX_FILTER_FLAG_RX_RSS), - FRF_CZ_RMFT_SCATTER_EN, - !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER), - FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id, - FRF_CZ_RMFT_WILDCARD_MATCH, is_wild, - FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2], - FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1], - FRF_CZ_RMFT_VLAN_ID, spec->data[0]); - data3 = is_wild; - break; - } - - case EFX_FARCH_FILTER_TABLE_TX_MAC: { - bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD; - EFX_POPULATE_OWORD_5(*filter, - FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id, - FRF_CZ_TMFT_WILDCARD_MATCH, is_wild, - FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2], - FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1], - FRF_CZ_TMFT_VLAN_ID, spec->data[0]); - data3 = is_wild | spec->dmaq_id << 1; - break; - } - - default: - BUG(); - } - - return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3; -} - -static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left, - const struct efx_farch_filter_spec *right) -{ - if (left->type != right->type || - memcmp(left->data, right->data, sizeof(left->data))) - return false; - - if (left->flags & EFX_FILTER_FLAG_TX && - left->dmaq_id != right->dmaq_id) - return false; - - return true; -} - -/* - * Construct/deconstruct external filter IDs. At least the RX filter - * IDs must be ordered by matching priority, for RX NFC semantics. - * - * Deconstruction needs to be robust against invalid IDs so that - * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can - * accept user-provided IDs. 
- */ - -#define EFX_FARCH_FILTER_MATCH_PRI_COUNT 5 - -static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = { - [EFX_FARCH_FILTER_TCP_FULL] = 0, - [EFX_FARCH_FILTER_UDP_FULL] = 0, - [EFX_FARCH_FILTER_TCP_WILD] = 1, - [EFX_FARCH_FILTER_UDP_WILD] = 1, - [EFX_FARCH_FILTER_MAC_FULL] = 2, - [EFX_FARCH_FILTER_MAC_WILD] = 3, - [EFX_FARCH_FILTER_UC_DEF] = 4, - [EFX_FARCH_FILTER_MC_DEF] = 4, -}; - -static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = { - EFX_FARCH_FILTER_TABLE_RX_IP, /* RX match pri 0 */ - EFX_FARCH_FILTER_TABLE_RX_IP, - EFX_FARCH_FILTER_TABLE_RX_MAC, - EFX_FARCH_FILTER_TABLE_RX_MAC, - EFX_FARCH_FILTER_TABLE_RX_DEF, /* RX match pri 4 */ - EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 0 */ - EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 1 */ -}; - -#define EFX_FARCH_FILTER_INDEX_WIDTH 13 -#define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1) - -static inline u32 -efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec, - unsigned int index) -{ - unsigned int range; - - range = efx_farch_filter_type_match_pri[spec->type]; - if (!(spec->flags & EFX_FILTER_FLAG_RX)) - range += EFX_FARCH_FILTER_MATCH_PRI_COUNT; - - return range << EFX_FARCH_FILTER_INDEX_WIDTH | index; -} - -static inline enum efx_farch_filter_table_id -efx_farch_filter_id_table_id(u32 id) -{ - unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH; - - if (range < ARRAY_SIZE(efx_farch_filter_range_table)) - return efx_farch_filter_range_table[range]; - else - return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */ -} - -static inline unsigned int efx_farch_filter_id_index(u32 id) -{ - return id & EFX_FARCH_FILTER_INDEX_MASK; -} - -u32 efx_filter_get_rx_id_limit(struct efx_nic *efx) -{ - struct efx_farch_filter_state *state = efx->filter_state; - unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1; - enum efx_farch_filter_table_id table_id; - - do { - table_id = efx_farch_filter_range_table[range]; - if (state->table[table_id].size != 0) - return range << EFX_FARCH_FILTER_INDEX_WIDTH | - state->table[table_id].size; - } while (range--); - - return 0; -} - -/** - * efx_filter_insert_filter - add or replace a filter - * @efx: NIC in which to insert the filter - * @spec: Specification for the filter - * @replace_equal: Flag for whether the specified filter may replace an - * existing filter with equal priority - * - * On success, return the filter ID. - * On failure, return a negative error code. - * - * If an existing filter has equal match values to the new filter - * spec, then the new filter might replace it, depending on the - * relative priorities. If the existing filter has lower priority, or - * if @replace_equal is set and it has equal priority, then it is - * replaced. Otherwise the function fails, returning -%EPERM if - * the existing filter has higher priority or -%EEXIST if it has - * equal priority. 
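The external ID scheme above packs a match-priority range and a table index into a single u32, with TX ranges placed after all RX ranges so that RX IDs sort by matching priority as the RX NFC interface expects. A small sketch of the encode side, with the two constants copied from the code above and everything else (names, the example values) purely illustrative:

#include <stdint.h>
#include <stdio.h>

#define FILTER_INDEX_WIDTH      13
#define FILTER_INDEX_MASK       ((1u << FILTER_INDEX_WIDTH) - 1)
#define RX_MATCH_PRI_COUNT      5

/* range is the match priority of the filter type (0..4 for RX types);
 * TX ranges are offset past the RX ones.
 */
static uint32_t make_filter_id(unsigned int range, int is_rx,
                               unsigned int index)
{
        if (!is_rx)
                range += RX_MATCH_PRI_COUNT;
        return range << FILTER_INDEX_WIDTH | index;
}

int main(void)
{
        /* e.g. a wildcard TCP RX filter (match pri 1) at table index 42 */
        uint32_t id = make_filter_id(1, 1, 42);

        printf("id=%#x range=%u index=%u\n",
               id, id >> FILTER_INDEX_WIDTH, id & FILTER_INDEX_MASK);
        return 0;
}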
- */ -s32 efx_filter_insert_filter(struct efx_nic *efx, - struct efx_filter_spec *gen_spec, - bool replace_equal) -{ - struct efx_farch_filter_state *state = efx->filter_state; - struct efx_farch_filter_table *table; - struct efx_farch_filter_spec spec; - efx_oword_t filter; - int rep_index, ins_index; - unsigned int depth = 0; - int rc; - - rc = efx_farch_filter_from_gen_spec(&spec, gen_spec); - if (rc) - return rc; - - table = &state->table[efx_farch_filter_spec_table_id(&spec)]; - if (table->size == 0) - return -EINVAL; - - netif_vdbg(efx, hw, efx->net_dev, - "%s: type %d search_limit=%d", __func__, spec.type, - table->search_limit[spec.type]); - - if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) { - /* One filter spec per type */ - BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0); - BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF != - EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF); - rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF; - ins_index = rep_index; - - spin_lock_bh(&efx->filter_lock); - } else { - /* Search concurrently for - * (1) a filter to be replaced (rep_index): any filter - * with the same match values, up to the current - * search depth for this type, and - * (2) the insertion point (ins_index): (1) or any - * free slot before it or up to the maximum search - * depth for this priority - * We fail if we cannot find (2). - * - * We can stop once either - * (a) we find (1), in which case we have definitely - * found (2) as well; or - * (b) we have searched exhaustively for (1), and have - * either found (2) or searched exhaustively for it - */ - u32 key = efx_farch_filter_build(&filter, &spec); - unsigned int hash = efx_farch_filter_hash(key); - unsigned int incr = efx_farch_filter_increment(key); - unsigned int max_rep_depth = table->search_limit[spec.type]; - unsigned int max_ins_depth = - spec.priority <= EFX_FILTER_PRI_HINT ? 
- EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX : - EFX_FARCH_FILTER_CTL_SRCH_MAX; - unsigned int i = hash & (table->size - 1); - - ins_index = -1; - depth = 1; - - spin_lock_bh(&efx->filter_lock); - - for (;;) { - if (!test_bit(i, table->used_bitmap)) { - if (ins_index < 0) - ins_index = i; - } else if (efx_farch_filter_equal(&spec, - &table->spec[i])) { - /* Case (a) */ - if (ins_index < 0) - ins_index = i; - rep_index = i; - break; - } - - if (depth >= max_rep_depth && - (ins_index >= 0 || depth >= max_ins_depth)) { - /* Case (b) */ - if (ins_index < 0) { - rc = -EBUSY; - goto out; - } - rep_index = -1; - break; - } - - i = (i + incr) & (table->size - 1); - ++depth; - } - } - - /* If we found a filter to be replaced, check whether we - * should do so - */ - if (rep_index >= 0) { - struct efx_farch_filter_spec *saved_spec = - &table->spec[rep_index]; - - if (spec.priority == saved_spec->priority && !replace_equal) { - rc = -EEXIST; - goto out; - } - if (spec.priority < saved_spec->priority) { - rc = -EPERM; - goto out; - } - } - - /* Insert the filter */ - if (ins_index != rep_index) { - __set_bit(ins_index, table->used_bitmap); - ++table->used; - } - table->spec[ins_index] = spec; - - if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) { - efx_farch_filter_push_rx_config(efx); - } else { - if (table->search_limit[spec.type] < depth) { - table->search_limit[spec.type] = depth; - if (spec.flags & EFX_FILTER_FLAG_TX) - efx_farch_filter_push_tx_limits(efx); - else - efx_farch_filter_push_rx_config(efx); - } - - efx_writeo(efx, &filter, - table->offset + table->step * ins_index); - - /* If we were able to replace a filter by inserting - * at a lower depth, clear the replaced filter - */ - if (ins_index != rep_index && rep_index >= 0) - efx_farch_filter_table_clear_entry(efx, table, - rep_index); - } - - netif_vdbg(efx, hw, efx->net_dev, - "%s: filter type %d index %d rxq %u set", - __func__, spec.type, ins_index, spec.dmaq_id); - rc = efx_farch_filter_make_id(&spec, ins_index); - -out: - spin_unlock_bh(&efx->filter_lock); - return rc; -} - -static void -efx_farch_filter_table_clear_entry(struct efx_nic *efx, - struct efx_farch_filter_table *table, - unsigned int filter_idx) -{ - static efx_oword_t filter; - - if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) { - /* RX default filters must always exist */ - efx_farch_filter_reset_rx_def(efx, filter_idx); - efx_farch_filter_push_rx_config(efx); - } else if (test_bit(filter_idx, table->used_bitmap)) { - __clear_bit(filter_idx, table->used_bitmap); - --table->used; - memset(&table->spec[filter_idx], 0, sizeof(table->spec[0])); - - efx_writeo(efx, &filter, - table->offset + table->step * filter_idx); - - /* If this filter required a greater search depth than - * any other, the search limit for its type can now be - * decreased. However, it is hard to determine that - * unless the table has become completely empty - in - * which case, all its search limits can be set to 0. 
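The insertion loop above interleaves two lookups over the same probe sequence: it hunts for an equal filter to replace and, along the way, for the first free slot to insert into. A compact userspace restatement of cases (a) and (b), where the table, the key, and the equality test are simplified stand-ins rather than the driver's structures (the table size is assumed to be a power of two):

#include <stdint.h>

struct slot {
        int used;
        uint32_t key;           /* stands in for the full filter spec */
};

/* Probe for (1) an existing entry with the same key and (2) the first
 * free slot, stopping as soon as the match is found (case a) or both
 * search depths are exhausted (case b).  Returns 0 and fills *rep/*ins
 * on success, -1 if no insertion point exists within max_ins_depth.
 */
static int probe(const struct slot *table, unsigned int size,
                 uint32_t key, unsigned int hash, unsigned int incr,
                 unsigned int max_rep_depth, unsigned int max_ins_depth,
                 int *rep, int *ins)
{
        unsigned int i = hash & (size - 1);
        unsigned int depth = 1;

        *rep = *ins = -1;

        for (;;) {
                if (!table[i].used) {
                        if (*ins < 0)
                                *ins = i;
                } else if (table[i].key == key) {
                        if (*ins < 0)
                                *ins = i;
                        *rep = i;                       /* case (a) */
                        return 0;
                }

                if (depth >= max_rep_depth &&
                    (*ins >= 0 || depth >= max_ins_depth))
                        return *ins < 0 ? -1 : 0;       /* case (b) */

                i = (i + incr) & (size - 1);
                ++depth;
        }
}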
- */ - if (unlikely(table->used == 0)) { - memset(table->search_limit, 0, - sizeof(table->search_limit)); - if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC) - efx_farch_filter_push_tx_limits(efx); - else - efx_farch_filter_push_rx_config(efx); - } - } -} - -/** - * efx_filter_remove_id_safe - remove a filter by ID, carefully - * @efx: NIC from which to remove the filter - * @priority: Priority of filter, as passed to @efx_filter_insert_filter - * @filter_id: ID of filter, as returned by @efx_filter_insert_filter - * - * This function will range-check @filter_id, so it is safe to call - * with a value passed from userland. - */ -int efx_filter_remove_id_safe(struct efx_nic *efx, - enum efx_filter_priority priority, - u32 filter_id) -{ - struct efx_farch_filter_state *state = efx->filter_state; - enum efx_farch_filter_table_id table_id; - struct efx_farch_filter_table *table; - unsigned int filter_idx; - struct efx_farch_filter_spec *spec; - int rc; - - table_id = efx_farch_filter_id_table_id(filter_id); - if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT) - return -ENOENT; - table = &state->table[table_id]; - - filter_idx = efx_farch_filter_id_index(filter_id); - if (filter_idx >= table->size) - return -ENOENT; - spec = &table->spec[filter_idx]; - - spin_lock_bh(&efx->filter_lock); - - if (test_bit(filter_idx, table->used_bitmap) && - spec->priority == priority) { - efx_farch_filter_table_clear_entry(efx, table, filter_idx); - rc = 0; - } else { - rc = -ENOENT; - } - - spin_unlock_bh(&efx->filter_lock); - - return rc; -} - -/** - * efx_filter_get_filter_safe - retrieve a filter by ID, carefully - * @efx: NIC from which to remove the filter - * @priority: Priority of filter, as passed to @efx_filter_insert_filter - * @filter_id: ID of filter, as returned by @efx_filter_insert_filter - * @spec: Buffer in which to store filter specification - * - * This function will range-check @filter_id, so it is safe to call - * with a value passed from userland. 
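Both *_safe() entry points above apply the same range checks to the caller-supplied ID before touching any state, which is what makes them safe against values passed in from userland. A sketch of that validation; the range-to-table map and the table sizes below are only stand-ins for the driver's own tables, not authoritative values:

#include <stdint.h>

#define FILTER_INDEX_WIDTH      13
#define FILTER_INDEX_MASK       ((1u << FILTER_INDEX_WIDTH) - 1)

/* Illustrative stand-ins for the range->table mapping and per-table
 * sizes held in the driver's filter state.
 */
static const unsigned int range_to_table[] = { 0, 0, 1, 1, 2, 3, 3 };
static const unsigned int table_size[]     = { 8192, 512, 2, 512 };

/* Reject out-of-range IDs with an error (-ENOENT in the driver) rather
 * than trusting them as array indices.
 */
static int validate_filter_id(uint32_t id, unsigned int *table,
                              unsigned int *index)
{
        unsigned int range = id >> FILTER_INDEX_WIDTH;

        if (range >= sizeof(range_to_table) / sizeof(range_to_table[0]))
                return -1;
        *table = range_to_table[range];
        *index = id & FILTER_INDEX_MASK;
        if (*index >= table_size[*table])
                return -1;
        return 0;
}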
- */ -int efx_filter_get_filter_safe(struct efx_nic *efx, - enum efx_filter_priority priority, - u32 filter_id, struct efx_filter_spec *spec_buf) -{ - struct efx_farch_filter_state *state = efx->filter_state; - enum efx_farch_filter_table_id table_id; - struct efx_farch_filter_table *table; - struct efx_farch_filter_spec *spec; - unsigned int filter_idx; - int rc; - - table_id = efx_farch_filter_id_table_id(filter_id); - if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT) - return -ENOENT; - table = &state->table[table_id]; - - filter_idx = efx_farch_filter_id_index(filter_id); - if (filter_idx >= table->size) - return -ENOENT; - spec = &table->spec[filter_idx]; - - spin_lock_bh(&efx->filter_lock); - - if (test_bit(filter_idx, table->used_bitmap) && - spec->priority == priority) { - efx_farch_filter_to_gen_spec(spec_buf, spec); - rc = 0; - } else { - rc = -ENOENT; - } - - spin_unlock_bh(&efx->filter_lock); - - return rc; -} - -static void -efx_farch_filter_table_clear(struct efx_nic *efx, - enum efx_farch_filter_table_id table_id, - enum efx_filter_priority priority) -{ - struct efx_farch_filter_state *state = efx->filter_state; - struct efx_farch_filter_table *table = &state->table[table_id]; - unsigned int filter_idx; - - spin_lock_bh(&efx->filter_lock); - for (filter_idx = 0; filter_idx < table->size; ++filter_idx) - if (table->spec[filter_idx].priority <= priority) - efx_farch_filter_table_clear_entry(efx, table, - filter_idx); - spin_unlock_bh(&efx->filter_lock); -} - -/** - * efx_filter_clear_rx - remove RX filters by priority - * @efx: NIC from which to remove the filters - * @priority: Maximum priority to remove - */ -void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority) -{ - efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP, - priority); - efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC, - priority); -} - -u32 efx_filter_count_rx_used(struct efx_nic *efx, - enum efx_filter_priority priority) -{ - struct efx_farch_filter_state *state = efx->filter_state; - enum efx_farch_filter_table_id table_id; - struct efx_farch_filter_table *table; - unsigned int filter_idx; - u32 count = 0; - - spin_lock_bh(&efx->filter_lock); - - for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; - table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; - table_id++) { - table = &state->table[table_id]; - for (filter_idx = 0; filter_idx < table->size; filter_idx++) { - if (test_bit(filter_idx, table->used_bitmap) && - table->spec[filter_idx].priority == priority) - ++count; - } - } - - spin_unlock_bh(&efx->filter_lock); - - return count; -} - -s32 efx_filter_get_rx_ids(struct efx_nic *efx, - enum efx_filter_priority priority, - u32 *buf, u32 size) -{ - struct efx_farch_filter_state *state = efx->filter_state; - enum efx_farch_filter_table_id table_id; - struct efx_farch_filter_table *table; - unsigned int filter_idx; - s32 count = 0; - - spin_lock_bh(&efx->filter_lock); - - for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; - table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; - table_id++) { - table = &state->table[table_id]; - for (filter_idx = 0; filter_idx < table->size; filter_idx++) { - if (test_bit(filter_idx, table->used_bitmap) && - table->spec[filter_idx].priority == priority) { - if (count == size) { - count = -EMSGSIZE; - goto out; - } - buf[count++] = efx_farch_filter_make_id( - &table->spec[filter_idx], filter_idx); - } - } - } -out: - spin_unlock_bh(&efx->filter_lock); - - return count; -} - -/* Restore filter stater after reset */ -void 
efx_restore_filters(struct efx_nic *efx) -{ - struct efx_farch_filter_state *state = efx->filter_state; - enum efx_farch_filter_table_id table_id; - struct efx_farch_filter_table *table; - efx_oword_t filter; - unsigned int filter_idx; - - spin_lock_bh(&efx->filter_lock); - - for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) { - table = &state->table[table_id]; - - /* Check whether this is a regular register table */ - if (table->step == 0) - continue; - - for (filter_idx = 0; filter_idx < table->size; filter_idx++) { - if (!test_bit(filter_idx, table->used_bitmap)) - continue; - efx_farch_filter_build(&filter, &table->spec[filter_idx]); - efx_writeo(efx, &filter, - table->offset + table->step * filter_idx); - } - } - - efx_farch_filter_push_rx_config(efx); - efx_farch_filter_push_tx_limits(efx); - - spin_unlock_bh(&efx->filter_lock); -} - -int efx_probe_filters(struct efx_nic *efx) -{ - struct efx_farch_filter_state *state; - struct efx_farch_filter_table *table; - unsigned table_id; - - state = kzalloc(sizeof(struct efx_farch_filter_state), GFP_KERNEL); - if (!state) - return -ENOMEM; - efx->filter_state = state; - - spin_lock_init(&efx->filter_lock); - - if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { -#ifdef CONFIG_RFS_ACCEL - efx->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS, - sizeof(*efx->rps_flow_id), - GFP_KERNEL); - if (!efx->rps_flow_id) - goto fail; -#endif - table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; - table->id = EFX_FARCH_FILTER_TABLE_RX_IP; - table->offset = FR_BZ_RX_FILTER_TBL0; - table->size = FR_BZ_RX_FILTER_TBL0_ROWS; - table->step = FR_BZ_RX_FILTER_TBL0_STEP; - } - - if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { - table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC]; - table->id = EFX_FARCH_FILTER_TABLE_RX_MAC; - table->offset = FR_CZ_RX_MAC_FILTER_TBL0; - table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS; - table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP; - - table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; - table->id = EFX_FARCH_FILTER_TABLE_RX_DEF; - table->size = EFX_FARCH_FILTER_SIZE_RX_DEF; - - table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC]; - table->id = EFX_FARCH_FILTER_TABLE_TX_MAC; - table->offset = FR_CZ_TX_MAC_FILTER_TBL0; - table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS; - table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP; - } - - for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) { - table = &state->table[table_id]; - if (table->size == 0) - continue; - table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size), - sizeof(unsigned long), - GFP_KERNEL); - if (!table->used_bitmap) - goto fail; - table->spec = vzalloc(table->size * sizeof(*table->spec)); - if (!table->spec) - goto fail; - } - - if (state->table[EFX_FARCH_FILTER_TABLE_RX_DEF].size) { - /* RX default filters must always exist */ - unsigned i; - for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) - efx_farch_filter_reset_rx_def(efx, i); - } - - efx_farch_filter_push_rx_config(efx); - - return 0; - -fail: - efx_remove_filters(efx); - return -ENOMEM; -} - -void efx_remove_filters(struct efx_nic *efx) -{ - struct efx_farch_filter_state *state = efx->filter_state; - enum efx_farch_filter_table_id table_id; - - for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) { - kfree(state->table[table_id].used_bitmap); - vfree(state->table[table_id].spec); - } -#ifdef CONFIG_RFS_ACCEL - kfree(efx->rps_flow_id); -#endif - kfree(state); -} - -/* Update scatter enable flags for filters pointing to our own RX queues */ -void 
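Each hardware table probed above is described by an (offset, size, step) triple, and entry i lives at offset + step * i; restoring after a reset is simply a replay of every used entry to that address. A tiny sketch of the addressing, with the register write replaced by a printf and the numeric values chosen only for illustration:

#include <stdint.h>
#include <stdio.h>

struct table_geom {
        const char *name;
        unsigned int offset;    /* base address of the table */
        unsigned int size;      /* number of entries */
        unsigned int step;      /* bytes between consecutive entries */
};

int main(void)
{
        /* illustrative numbers; the real ones come from the FR_BZ_/FR_CZ_
         * register definitions used by the probe code above
         */
        struct table_geom rx_ip = { "RX_IP", 0xf00000, 8192, 32 };
        unsigned int idx = 42;

        printf("%s[%u] -> %#x\n", rx_ip.name, idx,
               rx_ip.offset + rx_ip.step * idx);
        return 0;
}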
efx_filter_update_rx_scatter(struct efx_nic *efx) -{ - struct efx_farch_filter_state *state = efx->filter_state; - enum efx_farch_filter_table_id table_id; - struct efx_farch_filter_table *table; - efx_oword_t filter; - unsigned int filter_idx; - - spin_lock_bh(&efx->filter_lock); - - for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; - table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; - table_id++) { - table = &state->table[table_id]; - - for (filter_idx = 0; filter_idx < table->size; filter_idx++) { - if (!test_bit(filter_idx, table->used_bitmap) || - table->spec[filter_idx].dmaq_id >= - efx->n_rx_channels) - continue; - - if (efx->rx_scatter) - table->spec[filter_idx].flags |= - EFX_FILTER_FLAG_RX_SCATTER; - else - table->spec[filter_idx].flags &= - ~EFX_FILTER_FLAG_RX_SCATTER; - - if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF) - /* Pushed by efx_farch_filter_push_rx_config() */ - continue; - - efx_farch_filter_build(&filter, &table->spec[filter_idx]); - efx_writeo(efx, &filter, - table->offset + table->step * filter_idx); - } - } - - efx_farch_filter_push_rx_config(efx); - - spin_unlock_bh(&efx->filter_lock); -} - -#ifdef CONFIG_RFS_ACCEL - -int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, - u16 rxq_index, u32 flow_id) -{ - struct efx_nic *efx = netdev_priv(net_dev); - struct efx_channel *channel; - struct efx_filter_spec spec; - const struct iphdr *ip; - const __be16 *ports; - int nhoff; - int rc; - - nhoff = skb_network_offset(skb); - - if (skb->protocol == htons(ETH_P_8021Q)) { - EFX_BUG_ON_PARANOID(skb_headlen(skb) < - nhoff + sizeof(struct vlan_hdr)); - if (((const struct vlan_hdr *)skb->data + nhoff)-> - h_vlan_encapsulated_proto != htons(ETH_P_IP)) - return -EPROTONOSUPPORT; - - /* This is IP over 802.1q VLAN. We can't filter on the - * IP 5-tuple and the vlan together, so just strip the - * vlan header and filter on the IP part. - */ - nhoff += sizeof(struct vlan_hdr); - } else if (skb->protocol != htons(ETH_P_IP)) { - return -EPROTONOSUPPORT; - } - - /* RFS must validate the IP header length before calling us */ - EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip)); - ip = (const struct iphdr *)(skb->data + nhoff); - if (ip_is_fragment(ip)) - return -EPROTONOSUPPORT; - EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4); - ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl); - - efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, - efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0, - rxq_index); - rc = efx_filter_set_ipv4_full(&spec, ip->protocol, - ip->daddr, ports[1], ip->saddr, ports[0]); - if (rc) - return rc; - - rc = efx_filter_insert_filter(efx, &spec, true); - if (rc < 0) - return rc; - - /* Remember this so we can check whether to expire the filter later */ - efx->rps_flow_id[rc] = flow_id; - channel = efx_get_channel(efx, skb_get_rx_queue(skb)); - ++channel->rfs_filters_added; - - netif_info(efx, rx_status, efx->net_dev, - "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n", - (ip->protocol == IPPROTO_TCP) ? 
"TCP" : "UDP", - &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]), - rxq_index, flow_id, rc); - - return rc; -} - -bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota) -{ - struct efx_farch_filter_state *state = efx->filter_state; - struct efx_farch_filter_table *table = - &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; - unsigned mask = table->size - 1; - unsigned index; - unsigned stop; - - if (!spin_trylock_bh(&efx->filter_lock)) - return false; - - index = efx->rps_expire_index; - stop = (index + quota) & mask; - - while (index != stop) { - if (test_bit(index, table->used_bitmap) && - table->spec[index].priority == EFX_FILTER_PRI_HINT && - rps_may_expire_flow(efx->net_dev, - table->spec[index].dmaq_id, - efx->rps_flow_id[index], index)) { - netif_info(efx, rx_status, efx->net_dev, - "expiring filter %d [flow %u]\n", - index, efx->rps_flow_id[index]); - efx_farch_filter_table_clear_entry(efx, table, index); - } - index = (index + 1) & mask; - } - - efx->rps_expire_index = stop; - - spin_unlock_bh(&efx->filter_lock); - return true; -} - -#endif /* CONFIG_RFS_ACCEL */ diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 5287a3c..d35ce14 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -30,6 +30,7 @@ #include "enum.h" #include "bitfield.h" +#include "filter.h" /************************************************************************** * @@ -1025,6 +1026,24 @@ static inline unsigned int efx_port_num(struct efx_nic *efx) * @ev_process: Process events for a queue, up to the given NAPI quota * @ev_read_ack: Acknowledge read events on a queue, rearming its IRQ * @ev_test_generate: Generate a test event + * @filter_table_probe: Probe filter capabilities and set up filter software state + * @filter_table_restore: Restore filters removed from hardware + * @filter_table_remove: Remove filters from hardware and tear down software state + * @filter_update_rx_scatter: Update filters after change to rx scatter setting + * @filter_insert: add or replace a filter + * @filter_remove_safe: remove a filter by ID, carefully + * @filter_get_safe: retrieve a filter by ID, carefully + * @filter_clear_rx: remove RX filters by priority + * @filter_count_rx_used: Get the number of filters in use at a given priority + * @filter_get_rx_id_limit: Get maximum value of a filter id, plus 1 + * @filter_get_rx_ids: Get list of RX filters at a given priority + * @filter_rfs_insert: Add or replace a filter for RFS. This must be + * atomic. The hardware change may be asynchronous but should + * not be delayed for long. It may fail if this can't be done + * atomically. + * @filter_rfs_expire_one: Consider expiring a filter inserted for RFS. + * This must check whether the specified table entry is used by RFS + * and that rps_may_expire_flow() returns true for it. 
* @revision: Hardware architecture revision * @txd_ptr_tbl_base: TX descriptor ring base address * @rxd_ptr_tbl_base: RX descriptor ring base address @@ -1102,6 +1121,32 @@ struct efx_nic_type { int (*ev_process)(struct efx_channel *channel, int quota); void (*ev_read_ack)(struct efx_channel *channel); void (*ev_test_generate)(struct efx_channel *channel); + int (*filter_table_probe)(struct efx_nic *efx); + void (*filter_table_restore)(struct efx_nic *efx); + void (*filter_table_remove)(struct efx_nic *efx); + void (*filter_update_rx_scatter)(struct efx_nic *efx); + s32 (*filter_insert)(struct efx_nic *efx, + struct efx_filter_spec *spec, bool replace); + int (*filter_remove_safe)(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id); + int (*filter_get_safe)(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id, struct efx_filter_spec *); + void (*filter_clear_rx)(struct efx_nic *efx, + enum efx_filter_priority priority); + u32 (*filter_count_rx_used)(struct efx_nic *efx, + enum efx_filter_priority priority); + u32 (*filter_get_rx_id_limit)(struct efx_nic *efx); + s32 (*filter_get_rx_ids)(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 *buf, u32 size); +#ifdef CONFIG_RFS_ACCEL + s32 (*filter_rfs_insert)(struct efx_nic *efx, + struct efx_filter_spec *spec); + bool (*filter_rfs_expire_one)(struct efx_nic *efx, u32 flow_id, + unsigned int index); +#endif int revision; unsigned int txd_ptr_tbl_base; @@ -1117,6 +1162,7 @@ struct efx_nic_type { unsigned int timer_period_max; netdev_features_t offload_features; int mcdi_max_ver; + unsigned int max_rx_ip_filters; }; /************************************************************************** diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 25e25b6..69298c9 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -404,6 +404,34 @@ extern int efx_farch_ev_process(struct efx_channel *channel, int quota); extern void efx_farch_ev_read_ack(struct efx_channel *channel); extern void efx_farch_ev_test_generate(struct efx_channel *channel); +/* Falcon/Siena filter operations */ +extern int efx_farch_filter_table_probe(struct efx_nic *efx); +extern void efx_farch_filter_table_restore(struct efx_nic *efx); +extern void efx_farch_filter_table_remove(struct efx_nic *efx); +extern void efx_farch_filter_update_rx_scatter(struct efx_nic *efx); +extern s32 efx_farch_filter_insert(struct efx_nic *efx, + struct efx_filter_spec *spec, bool replace); +extern int efx_farch_filter_remove_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id); +extern int efx_farch_filter_get_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id, struct efx_filter_spec *); +extern void efx_farch_filter_clear_rx(struct efx_nic *efx, + enum efx_filter_priority priority); +extern u32 efx_farch_filter_count_rx_used(struct efx_nic *efx, + enum efx_filter_priority priority); +extern u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx); +extern s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 *buf, u32 size); +#ifdef CONFIG_RFS_ACCEL +extern s32 efx_farch_filter_rfs_insert(struct efx_nic *efx, + struct efx_filter_spec *spec); +extern bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, + unsigned int index); +#endif + extern bool efx_nic_event_present(struct efx_channel *channel); /* Some statistics are computed as A - B where A and B each increase diff --git 
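The filter entry points documented and declared above are reached only through the per-architecture ops in efx_nic_type, so the core code never names a hardware family directly and an EF10 implementation can later slot in beside the Falcon-arch one. A shape sketch of that indirection with simplified types that are not the driver's:

#include <stddef.h>

struct nic;                             /* opaque in this sketch */

struct nic_type {
        /* per-architecture filter operations, all filled in at compile
         * time by each supported NIC type
         */
        int  (*filter_table_probe)(struct nic *nic);
        long (*filter_insert)(struct nic *nic, const void *spec,
                              int replace_equal);
        int  (*filter_remove_safe)(struct nic *nic, int priority,
                                   unsigned int filter_id);
};

struct nic {
        const struct nic_type *type;
};

/* Core code only ever goes through the ops table: */
static long insert_filter(struct nic *nic, const void *spec, int replace)
{
        return nic->type->filter_insert(nic, spec, replace);
}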
a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index f2b78cd..1299092 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -21,6 +21,7 @@ #include #include "net_driver.h" #include "efx.h" +#include "filter.h" #include "nic.h" #include "selftest.h" #include "workarounds.h" @@ -802,3 +803,96 @@ module_param(rx_refill_threshold, uint, 0444); MODULE_PARM_DESC(rx_refill_threshold, "RX descriptor ring refill threshold (%)"); +#ifdef CONFIG_RFS_ACCEL + +int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, + u16 rxq_index, u32 flow_id) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_channel *channel; + struct efx_filter_spec spec; + const struct iphdr *ip; + const __be16 *ports; + int nhoff; + int rc; + + nhoff = skb_network_offset(skb); + + if (skb->protocol == htons(ETH_P_8021Q)) { + EFX_BUG_ON_PARANOID(skb_headlen(skb) < + nhoff + sizeof(struct vlan_hdr)); + if (((const struct vlan_hdr *)skb->data + nhoff)-> + h_vlan_encapsulated_proto != htons(ETH_P_IP)) + return -EPROTONOSUPPORT; + + /* This is IP over 802.1q VLAN. We can't filter on the + * IP 5-tuple and the vlan together, so just strip the + * vlan header and filter on the IP part. + */ + nhoff += sizeof(struct vlan_hdr); + } else if (skb->protocol != htons(ETH_P_IP)) { + return -EPROTONOSUPPORT; + } + + /* RFS must validate the IP header length before calling us */ + EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip)); + ip = (const struct iphdr *)(skb->data + nhoff); + if (ip_is_fragment(ip)) + return -EPROTONOSUPPORT; + EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4); + ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl); + + efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, + efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0, + rxq_index); + rc = efx_filter_set_ipv4_full(&spec, ip->protocol, + ip->daddr, ports[1], ip->saddr, ports[0]); + if (rc) + return rc; + + rc = efx->type->filter_rfs_insert(efx, &spec); + if (rc < 0) + return rc; + + /* Remember this so we can check whether to expire the filter later */ + efx->rps_flow_id[rc] = flow_id; + channel = efx_get_channel(efx, skb_get_rx_queue(skb)); + ++channel->rfs_filters_added; + + netif_info(efx, rx_status, efx->net_dev, + "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n", + (ip->protocol == IPPROTO_TCP) ? 
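The relocated efx_filter_rfs() above reduces to: pull the IPv4 4-tuple out of the packet, describe it as a hint-priority filter, hand it to the per-NIC-type RFS insert hook, and record the returned filter index against the flow id for later expiry. A standalone outline under those assumptions; the types and helper names below are simplified stand-ins, not the driver API:

#include <stdint.h>

struct four_tuple {
        uint32_t saddr, daddr;  /* network order in the real driver */
        uint16_t sport, dport;
        uint8_t  proto;
};

struct steer_ctx {
        long (*insert_hint_filter)(const struct four_tuple *t,
                                   unsigned int rxq);   /* stands in for
                                                         * type->filter_rfs_insert */
        uint32_t *flow_id_by_filter;                    /* stands in for
                                                         * efx->rps_flow_id */
};

/* Steer one flow to @rxq: insert a hint-priority filter for its 4-tuple
 * and remember the flow id under the returned filter index so a later
 * expiry pass can ask whether the flow is still live.
 */
static long steer_flow(struct steer_ctx *ctx, const struct four_tuple *t,
                       unsigned int rxq, uint32_t flow_id)
{
        long filter_idx = ctx->insert_hint_filter(t, rxq);

        if (filter_idx >= 0)
                ctx->flow_id_by_filter[filter_idx] = flow_id;
        return filter_idx;
}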
"TCP" : "UDP", + &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]), + rxq_index, flow_id, rc); + + return rc; +} + +bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota) +{ + bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index); + unsigned int index, size; + u32 flow_id; + + if (!spin_trylock_bh(&efx->filter_lock)) + return false; + + expire_one = efx->type->filter_rfs_expire_one; + index = efx->rps_expire_index; + size = efx->type->max_rx_ip_filters; + while (quota--) { + flow_id = efx->rps_flow_id[index]; + if (expire_one(efx, flow_id, index)) + netif_info(efx, rx_status, efx->net_dev, + "expired filter %d [flow %u]\n", + index, flow_id); + if (++index == size) + index = 0; + } + efx->rps_expire_index = index; + + spin_unlock_bh(&efx->filter_lock); + return true; +} + +#endif /* CONFIG_RFS_ACCEL */ diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 23e5731..5120cd8 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -736,6 +736,21 @@ const struct efx_nic_type siena_a0_nic_type = { .ev_process = efx_farch_ev_process, .ev_read_ack = efx_farch_ev_read_ack, .ev_test_generate = efx_farch_ev_test_generate, + .filter_table_probe = efx_farch_filter_table_probe, + .filter_table_restore = efx_farch_filter_table_restore, + .filter_table_remove = efx_farch_filter_table_remove, + .filter_update_rx_scatter = efx_farch_filter_update_rx_scatter, + .filter_insert = efx_farch_filter_insert, + .filter_remove_safe = efx_farch_filter_remove_safe, + .filter_get_safe = efx_farch_filter_get_safe, + .filter_clear_rx = efx_farch_filter_clear_rx, + .filter_count_rx_used = efx_farch_filter_count_rx_used, + .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit, + .filter_get_rx_ids = efx_farch_filter_get_rx_ids, +#ifdef CONFIG_RFS_ACCEL + .filter_rfs_insert = efx_farch_filter_rfs_insert, + .filter_rfs_expire_one = efx_farch_filter_rfs_expire_one, +#endif .revision = EFX_REV_SIENA_A0, .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, @@ -752,4 +767,5 @@ const struct efx_nic_type siena_a0_nic_type = { .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE), .mcdi_max_ver = 1, + .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS, }; -- cgit v0.10.2 From 14990a5d66b5b51a422bc46a9e8fedb20d8a770c Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Mon, 19 Nov 2012 23:08:19 +0000 Subject: sfc: Refactor Falcon-arch filter removal Move the special case for removal of default filters from efx_farch_filter_table_clear_entry() into a wrapper function, efx_farch_filter_table_remove(). Move the existence and priority checks into the latter and use it where appropriate. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 1941804..d91bced 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -2516,33 +2516,49 @@ efx_farch_filter_table_clear_entry(struct efx_nic *efx, { static efx_oword_t filter; + EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap)); + + __clear_bit(filter_idx, table->used_bitmap); + --table->used; + memset(&table->spec[filter_idx], 0, sizeof(table->spec[0])); + + efx_writeo(efx, &filter, table->offset + table->step * filter_idx); + + /* If this filter required a greater search depth than + * any other, the search limit for its type can now be + * decreased. 
However, it is hard to determine that + * unless the table has become completely empty - in + * which case, all its search limits can be set to 0. + */ + if (unlikely(table->used == 0)) { + memset(table->search_limit, 0, sizeof(table->search_limit)); + if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC) + efx_farch_filter_push_tx_limits(efx); + else + efx_farch_filter_push_rx_config(efx); + } +} + +static int efx_farch_filter_remove(struct efx_nic *efx, + struct efx_farch_filter_table *table, + unsigned int filter_idx, + enum efx_filter_priority priority) +{ + struct efx_farch_filter_spec *spec = &table->spec[filter_idx]; + + if (!test_bit(filter_idx, table->used_bitmap) || + spec->priority > priority) + return -ENOENT; + if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) { /* RX default filters must always exist */ efx_farch_filter_reset_rx_def(efx, filter_idx); efx_farch_filter_push_rx_config(efx); - } else if (test_bit(filter_idx, table->used_bitmap)) { - __clear_bit(filter_idx, table->used_bitmap); - --table->used; - memset(&table->spec[filter_idx], 0, sizeof(table->spec[0])); - - efx_writeo(efx, &filter, - table->offset + table->step * filter_idx); - - /* If this filter required a greater search depth than - * any other, the search limit for its type can now be - * decreased. However, it is hard to determine that - * unless the table has become completely empty - in - * which case, all its search limits can be set to 0. - */ - if (unlikely(table->used == 0)) { - memset(table->search_limit, 0, - sizeof(table->search_limit)); - if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC) - efx_farch_filter_push_tx_limits(efx); - else - efx_farch_filter_push_rx_config(efx); - } + } else { + efx_farch_filter_table_clear_entry(efx, table, filter_idx); } + + return 0; } int efx_farch_filter_remove_safe(struct efx_nic *efx, @@ -2567,15 +2583,7 @@ int efx_farch_filter_remove_safe(struct efx_nic *efx, spec = &table->spec[filter_idx]; spin_lock_bh(&efx->filter_lock); - - if (test_bit(filter_idx, table->used_bitmap) && - spec->priority == priority) { - efx_farch_filter_table_clear_entry(efx, table, filter_idx); - rc = 0; - } else { - rc = -ENOENT; - } - + rc = efx_farch_filter_remove(efx, table, filter_idx, priority); spin_unlock_bh(&efx->filter_lock); return rc; @@ -2628,9 +2636,7 @@ efx_farch_filter_table_clear(struct efx_nic *efx, spin_lock_bh(&efx->filter_lock); for (filter_idx = 0; filter_idx < table->size; ++filter_idx) - if (table->spec[filter_idx].priority <= priority) - efx_farch_filter_table_clear_entry(efx, table, - filter_idx); + efx_farch_filter_remove(efx, table, filter_idx, priority); spin_unlock_bh(&efx->filter_lock); } -- cgit v0.10.2 From 8803e15042fbf5bd53b9b43147a91e3f50cffb03 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Mon, 19 Nov 2012 23:08:20 +0000 Subject: sfc: Add flag for stack-owned RX MAC filters MAC filters inserted on request from the stack (ndo_set_rx_mode) should allow manual steering but not removal. Currently we have a special case for Siena's all-multicast and all-unicast MAC filters, but on EF10 we need to allow for steering of precise MAC filters as well. 
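The removal helper introduced in the patch above concentrates the decision in one place: reject slots that are unused or held at a higher priority, re-initialise default filters in place because they must always exist, and only actually free ordinary entries. In outline, using simplified stand-in types rather than the driver's structures:

#include <errno.h>
#include <stdbool.h>

struct filter_slot {
        bool used;
        bool is_default;        /* RX default filters must always exist */
        int priority;
        unsigned int queue;
};

static int filter_remove(struct filter_slot *slot, int priority)
{
        if (!slot->used || slot->priority > priority)
                return -ENOENT;

        if (slot->is_default)
                slot->queue = 0;        /* reset steering, keep the slot */
        else
                slot->used = false;     /* clear the entry */

        return 0;
}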
The EFX_FILTER_FLAG_RX_STACK flag changes the behaviour of replacement and removal requests: - Replacement *of* a filter with this flag never clears the flag but does change steering and saved priority - Replacement *by* a filter with this flag only sets the flag but does not change steering - Removal with priority < EFX_FILTER_PRI_REQUIRED really resets RX steering and saved priority This could support precise MAC filtering on Siena in future. As a side-benefit, the default MAC filters are hidden from ethtool until they are steered. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index d91bced..1e21e51 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -2186,23 +2186,17 @@ efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec, } static void -efx_farch_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx) +efx_farch_filter_init_rx_for_stack(struct efx_nic *efx, + struct efx_farch_filter_spec *spec) { - struct efx_farch_filter_state *state = efx->filter_state; - struct efx_farch_filter_table *table = - &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; - struct efx_farch_filter_spec *spec = &table->spec[filter_idx]; - /* If there's only one channel then disable RSS for non VF * traffic, thereby allowing VFs to use RSS when the PF can't. */ - spec->type = EFX_FARCH_FILTER_UC_DEF + filter_idx; - spec->priority = EFX_FILTER_PRI_MANUAL; - spec->flags = (EFX_FILTER_FLAG_RX | + spec->priority = EFX_FILTER_PRI_REQUIRED; + spec->flags = (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_STACK | (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) | (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0)); spec->dmaq_id = 0; - table->used_bitmap[0] |= 1 << filter_idx; } /* Build a filter entry and return its n-tuple key. 
*/ @@ -2464,10 +2458,20 @@ s32 efx_farch_filter_insert(struct efx_nic *efx, rc = -EEXIST; goto out; } - if (spec.priority < saved_spec->priority) { + if (spec.priority < saved_spec->priority && + !(saved_spec->priority == EFX_FILTER_PRI_REQUIRED && + saved_spec->flags & EFX_FILTER_FLAG_RX_STACK)) { rc = -EPERM; goto out; } + if (spec.flags & EFX_FILTER_FLAG_RX_STACK) { + /* Just make sure it won't be removed */ + saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK; + rc = 0; + goto out; + } + /* Retain the RX_STACK flag */ + spec.flags |= saved_spec->flags & EFX_FILTER_FLAG_RX_STACK; } /* Insert the filter */ @@ -2517,6 +2521,7 @@ efx_farch_filter_table_clear_entry(struct efx_nic *efx, static efx_oword_t filter; EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap)); + BUG_ON(table->offset == 0); /* can't clear MAC default filters */ __clear_bit(filter_idx, table->used_bitmap); --table->used; @@ -2550,9 +2555,8 @@ static int efx_farch_filter_remove(struct efx_nic *efx, spec->priority > priority) return -ENOENT; - if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) { - /* RX default filters must always exist */ - efx_farch_filter_reset_rx_def(efx, filter_idx); + if (spec->flags & EFX_FILTER_FLAG_RX_STACK) { + efx_farch_filter_init_rx_for_stack(efx, spec); efx_farch_filter_push_rx_config(efx); } else { efx_farch_filter_table_clear_entry(efx, table, filter_idx); @@ -2647,6 +2651,8 @@ void efx_farch_filter_clear_rx(struct efx_nic *efx, priority); efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC, priority); + efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF, + priority); } u32 efx_farch_filter_count_rx_used(struct efx_nic *efx, @@ -2806,11 +2812,18 @@ int efx_farch_filter_table_probe(struct efx_nic *efx) goto fail; } - if (state->table[EFX_FARCH_FILTER_TABLE_RX_DEF].size) { + table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; + if (table->size) { /* RX default filters must always exist */ + struct efx_farch_filter_spec *spec; unsigned i; - for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) - efx_farch_filter_reset_rx_def(efx, i); + + for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) { + spec = &table->spec[i]; + spec->type = EFX_FARCH_FILTER_UC_DEF + i; + efx_farch_filter_init_rx_for_stack(efx, spec); + __set_bit(i, table->used_bitmap); + } } efx_farch_filter_push_rx_config(efx); diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h index 1b410de..e459e43 100644 --- a/drivers/net/ethernet/sfc/filter.h +++ b/drivers/net/ethernet/sfc/filter.h @@ -78,12 +78,19 @@ enum efx_filter_priority { * according to the indirection table. * @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving * queue. + * @EFX_FILTER_FLAG_RX_STACK: Indicates a filter inserted for the + * network stack. The filter must have a priority of + * %EFX_FILTER_PRI_REQUIRED. It can be steered by a replacement + * request with priority %EFX_FILTER_PRI_MANUAL, and a removal + * request with priority %EFX_FILTER_PRI_MANUAL will reset the + * steering (but not remove the filter). 
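The insert-path hunk above carries the replacement rules for stack-owned filters: a stack-owned filter may be re-steered even by a lower-priority request, a stack-flagged request against an existing filter only marks it, and an ordinary replacement inherits the mark so the filter is never removed out from under the stack. Reduced to the essentials, again with illustrative types rather than the driver's:

#include <errno.h>
#include <stdbool.h>

struct spec {
        int priority;
        bool rx_stack;  /* stands in for EFX_FILTER_FLAG_RX_STACK */
};

static int replace(struct spec *saved, struct spec *req, bool replace_equal)
{
        if (req->priority == saved->priority && !replace_equal)
                return -EEXIST;
        if (req->priority < saved->priority && !saved->rx_stack)
                return -EPERM;
        if (req->rx_stack) {
                saved->rx_stack = true;         /* just make sure it stays */
                return 0;
        }
        req->rx_stack = saved->rx_stack;        /* retain the flag */
        *saved = *req;                          /* perform the replacement */
        return 0;
}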
* @EFX_FILTER_FLAG_RX: Filter is for RX * @EFX_FILTER_FLAG_TX: Filter is for TX */ enum efx_filter_flags { EFX_FILTER_FLAG_RX_RSS = 0x01, EFX_FILTER_FLAG_RX_SCATTER = 0x02, + EFX_FILTER_FLAG_RX_STACK = 0x04, EFX_FILTER_FLAG_RX = 0x08, EFX_FILTER_FLAG_TX = 0x10, }; -- cgit v0.10.2 From f5253d92567b193c6aa137a08e7bb3b06fafc985 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Wed, 10 Oct 2012 23:24:51 +0100 Subject: sfc: Define and use MCDI_POPULATE_DWORD_{1,2,3,4,5,6,7} There is only one user now, but we're about to add many more. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 69586e0..a465cc1 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -128,6 +128,60 @@ extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value) #define MCDI_DWORD(_buf, _field) \ EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0) +#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \ + EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1) +#define MCDI_POPULATE_DWORD_2(_buf, _field, _name1, _value1, \ + _name2, _value2) \ + EFX_POPULATE_DWORD_2(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2) +#define MCDI_POPULATE_DWORD_3(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3) \ + EFX_POPULATE_DWORD_3(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3) +#define MCDI_POPULATE_DWORD_4(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3, \ + _name4, _value4) \ + EFX_POPULATE_DWORD_4(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3, \ + MC_CMD_ ## _name4, _value4) +#define MCDI_POPULATE_DWORD_5(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3, \ + _name4, _value4, _name5, _value5) \ + EFX_POPULATE_DWORD_5(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3, \ + MC_CMD_ ## _name4, _value4, \ + MC_CMD_ ## _name5, _value5) +#define MCDI_POPULATE_DWORD_6(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3, \ + _name4, _value4, _name5, _value5, \ + _name6, _value6) \ + EFX_POPULATE_DWORD_6(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3, \ + MC_CMD_ ## _name4, _value4, \ + MC_CMD_ ## _name5, _value5, \ + MC_CMD_ ## _name6, _value6) +#define MCDI_POPULATE_DWORD_7(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3, \ + _name4, _value4, _name5, _value5, \ + _name6, _value6, _name7, _value7) \ + EFX_POPULATE_DWORD_7(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3, \ + MC_CMD_ ## _name4, _value4, \ + MC_CMD_ ## _name5, _value5, \ + MC_CMD_ ## _name6, _value6, \ + MC_CMD_ ## _name7, _value7) #define MCDI_SET_QWORD(_buf, _field, _value) \ do { \ EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \ diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c index e359227..30e8a19 100644 --- a/drivers/net/ethernet/sfc/mcdi_port.c +++ b/drivers/net/ethernet/sfc/mcdi_port.c @@ -926,21 +926,19 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr, { MCDI_DECLARE_BUF(inbuf, 
MC_CMD_MAC_STATS_IN_LEN); int rc; - efx_dword_t *cmd_ptr; int period = enable ? 1000 : 0; BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0); MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, dma_addr); - cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD); - EFX_POPULATE_DWORD_7(*cmd_ptr, - MC_CMD_MAC_STATS_IN_DMA, !!enable, - MC_CMD_MAC_STATS_IN_CLEAR, clear, - MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE, 1, - MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE, !!enable, - MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR, 0, - MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT, 1, - MC_CMD_MAC_STATS_IN_PERIOD_MS, period); + MCDI_POPULATE_DWORD_7(inbuf, MAC_STATS_IN_CMD, + MAC_STATS_IN_DMA, !!enable, + MAC_STATS_IN_CLEAR, clear, + MAC_STATS_IN_PERIODIC_CHANGE, 1, + MAC_STATS_IN_PERIODIC_ENABLE, !!enable, + MAC_STATS_IN_PERIODIC_CLEAR, 0, + MAC_STATS_IN_PERIODIC_NOEVENT, 1, + MAC_STATS_IN_PERIOD_MS, period); MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), -- cgit v0.10.2 From 964e61355e94905e4234839d4b41678998d617b7 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Mon, 19 Nov 2012 23:08:22 +0000 Subject: sfc: Cleanup Falcon-arch simple MAC filter state On Falcon we implement MAC filtering requested by the stack using the MAC wrapper's single unicast filter and multicast hash filter. Siena is very similar, though MAC configuration is mediated by the MC. Since MCDI operations may sleep, reconfiguration is deferred from ndo_set_rx_mode to a work item. However, it still updates the private variables describing the filter state synchronously. Contrary to comments, the later use of these variables is not protected using the address lock, resulting in race conditions. Move the state update to a new function efx_farch_filter_sync_rx_mode() and make the Falcon-arch MAC configuration functions call that, so that its use is consistently serialised by the mac_lock. Invert and rename the promiscuous flag to the more accurate unicast_filter, and comment that both this and multicast_hash are not used on EF10. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index a2daaae..1d4f388 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include #include @@ -876,10 +875,9 @@ void efx_link_status_changed(struct efx_nic *efx) /* Status message for kernel log */ if (link_state->up) netif_info(efx, link, efx->net_dev, - "link up at %uMbps %s-duplex (MTU %d)%s\n", + "link up at %uMbps %s-duplex (MTU %d)\n", link_state->speed, link_state->fd ? "full" : "half", - efx->net_dev->mtu, - (efx->promiscuous ? " [PROMISC]" : "")); + efx->net_dev->mtu); else netif_info(efx, link, efx->net_dev, "link down\n"); } @@ -928,10 +926,6 @@ int __efx_reconfigure_port(struct efx_nic *efx) WARN_ON(!mutex_is_locked(&efx->mac_lock)); - /* Serialise the promiscuous flag with efx_set_rx_mode. 
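The MCDI_POPULATE_DWORD_* helpers introduced above do nothing more than paste the MC_CMD_ namespace prefix onto each short field name before handing off to the EFX_POPULATE_DWORD_* macros, which keeps call sites readable while the full identifier is still checked by the compiler. A trivial preprocessor example of the same pattern; the field name and bit position below are made up for the sketch, not real MCDI definitions:

#include <stdio.h>

#define MC_CMD_EXAMPLE_FIELD_LBN        4       /* bit position, made up */
#define POPULATE_1(_out, _name, _value) \
        ((_out) |= (unsigned int)(_value) << MC_CMD_ ## _name ## _LBN)

int main(void)
{
        unsigned int dword = 0;

        POPULATE_1(dword, EXAMPLE_FIELD, 1);    /* expands to MC_CMD_EXAMPLE_FIELD_LBN */
        printf("%#x\n", dword);                 /* prints 0x10 */
        return 0;
}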
*/ - netif_addr_lock_bh(efx->net_dev); - netif_addr_unlock_bh(efx->net_dev); - /* Disable PHY transmit in mac level loopbacks */ phy_mode = efx->phy_mode; if (LOOPBACK_INTERNAL(efx)) @@ -2027,30 +2021,6 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data) static void efx_set_rx_mode(struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); - struct netdev_hw_addr *ha; - union efx_multicast_hash *mc_hash = &efx->multicast_hash; - u32 crc; - int bit; - - efx->promiscuous = !!(net_dev->flags & IFF_PROMISC); - - /* Build multicast hash table */ - if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) { - memset(mc_hash, 0xff, sizeof(*mc_hash)); - } else { - memset(mc_hash, 0x00, sizeof(*mc_hash)); - netdev_for_each_mc_addr(ha, net_dev) { - crc = ether_crc_le(ETH_ALEN, ha->addr); - bit = crc & (EFX_MCAST_HASH_ENTRIES - 1); - __set_bit_le(bit, mc_hash); - } - - /* Broadcast packets go through the multicast hash filter. - * ether_crc_le() of the broadcast address is 0xbe2612ff - * so we always add bit 0xff to the mask. - */ - __set_bit_le(0xff, mc_hash); - } if (efx->port_enabled) queue_work(efx->workqueue, &efx->mac_work); diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index 6ea28f8..983e7f5 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -764,7 +764,7 @@ static void falcon_reconfigure_xmac_core(struct efx_nic *efx) FRF_AB_XM_RXEN, 1, FRF_AB_XM_AUTO_DEPAD, 0, FRF_AB_XM_ACPT_ALL_MCAST, 1, - FRF_AB_XM_ACPT_ALL_UCAST, efx->promiscuous, + FRF_AB_XM_ACPT_ALL_UCAST, !efx->unicast_filter, FRF_AB_XM_PASS_CRC_ERR, 1); efx_writeo(efx, ®, FR_AB_XM_RX_CFG); @@ -864,6 +864,8 @@ static int falcon_reconfigure_xmac(struct efx_nic *efx) { struct falcon_nic_data *nic_data = efx->nic_data; + efx_farch_filter_sync_rx_mode(efx); + falcon_reconfigure_xgxs_core(efx); falcon_reconfigure_xmac_core(efx); @@ -1081,7 +1083,7 @@ static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx) EFX_POPULATE_OWORD_5(reg, FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */, FRF_AB_MAC_BCAD_ACPT, 1, - FRF_AB_MAC_UC_PROM, efx->promiscuous, + FRF_AB_MAC_UC_PROM, !efx->unicast_filter, FRF_AB_MAC_LINK_STATUS, 1, /* always set */ FRF_AB_MAC_SPEED, link_speed); /* On B0, MAC backpressure can be disabled and packets get diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 1e21e51..b6af8f4 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -14,6 +14,7 @@ #include #include #include +#include #include "net_driver.h" #include "bitfield.h" #include "efx.h" @@ -2906,3 +2907,36 @@ bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, } #endif /* CONFIG_RFS_ACCEL */ + +void efx_farch_filter_sync_rx_mode(struct efx_nic *efx) +{ + struct net_device *net_dev = efx->net_dev; + struct netdev_hw_addr *ha; + union efx_multicast_hash *mc_hash = &efx->multicast_hash; + u32 crc; + int bit; + + netif_addr_lock_bh(net_dev); + + efx->unicast_filter = !(net_dev->flags & IFF_PROMISC); + + /* Build multicast hash table */ + if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { + memset(mc_hash, 0xff, sizeof(*mc_hash)); + } else { + memset(mc_hash, 0x00, sizeof(*mc_hash)); + netdev_for_each_mc_addr(ha, net_dev) { + crc = ether_crc_le(ETH_ALEN, ha->addr); + bit = crc & (EFX_MCAST_HASH_ENTRIES - 1); + __set_bit_le(bit, mc_hash); + } + + /* Broadcast packets go through the multicast hash filter. 
+ * ether_crc_le() of the broadcast address is 0xbe2612ff + * so we always add bit 0xff to the mask. + */ + __set_bit_le(0xff, mc_hash); + } + + netif_addr_unlock_bh(net_dev); +} diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c index 30e8a19..42d52f3 100644 --- a/drivers/net/ethernet/sfc/mcdi_port.c +++ b/drivers/net/ethernet/sfc/mcdi_port.c @@ -861,7 +861,7 @@ void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev) int efx_mcdi_set_mac(struct efx_nic *efx) { - u32 reject, fcntl; + u32 fcntl; MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN); BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0); @@ -873,12 +873,9 @@ int efx_mcdi_set_mac(struct efx_nic *efx) EFX_MAX_FRAME_LEN(efx->net_dev->mtu)); MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0); - /* The MCDI command provides for controlling accept/reject - * of broadcast packets too, but the driver doesn't currently - * expose this. */ - reject = (efx->promiscuous) ? 0 : - (1 << MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN); - MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_REJECT, reject); + /* Set simple MAC filter for Siena */ + MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_REJECT, + SET_MAC_IN_REJECT_UNCST, efx->unicast_filter); switch (efx->wanted_fc) { case EFX_FC_RX | EFX_FC_TX: diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index d35ce14..555dd01 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -753,8 +753,10 @@ struct vfdi_status; * @link_advertising: Autonegotiation advertising flags * @link_state: Current state of the link * @n_link_state_changes: Number of times the link has changed state - * @promiscuous: Promiscuous flag. Protected by netif_tx_lock. - * @multicast_hash: Multicast hash table + * @unicast_filter: Flag for Falcon-arch simple unicast filter. + * Protected by @mac_lock. + * @multicast_hash: Multicast hash table for Falcon-arch. + * Protected by @mac_lock. * @wanted_fc: Wanted flow control flags * @fc_disable: When non-zero flow control is disabled. Typically used to * ensure that network back pressure doesn't delay dma queue flushes. 
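The hash built in efx_farch_filter_sync_rx_mode() above takes the low bits of a little-endian CRC-32 of each multicast address as the bucket number, and always sets bucket 0xff so that broadcast frames (whose CRC the original comment gives as 0xbe2612ff) pass the filter. A standalone sketch of that computation; crc32_le_mac() below is a plain reflected CRC-32 written for the example and should match what ether_crc_le() computes in the kernel, and the 256-bucket size mirrors EFX_MCAST_HASH_ENTRIES:

#include <stdint.h>
#include <stdio.h>

#define MCAST_HASH_BITS 256

/* Reflected CRC-32: polynomial 0xedb88320, initial value ~0, no final
 * inversion.
 */
static uint32_t crc32_le_mac(const uint8_t addr[6])
{
        uint32_t crc = 0xffffffff;
        int i, bit;

        for (i = 0; i < 6; i++) {
                crc ^= addr[i];
                for (bit = 0; bit < 8; bit++)
                        crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
        }
        return crc;
}

int main(void)
{
        const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
        uint8_t hash[MCAST_HASH_BITS / 8] = { 0 };
        unsigned int bit = crc32_le_mac(bcast) & (MCAST_HASH_BITS - 1);

        /* one bit per hash bucket, just like the driver's mc_hash */
        hash[bit / 8] |= 1u << (bit % 8);
        printf("broadcast hashes to bucket %#x\n", bit);
        return 0;
}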
@@ -892,7 +894,7 @@ struct efx_nic { struct efx_link_state link_state; unsigned int n_link_state_changes; - bool promiscuous; + bool unicast_filter; union efx_multicast_hash multicast_hash; u8 wanted_fc; unsigned fc_disable; diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 69298c9..8baf6a1 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -431,6 +431,7 @@ extern s32 efx_farch_filter_rfs_insert(struct efx_nic *efx, extern bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, unsigned int index); #endif +extern void efx_farch_filter_sync_rx_mode(struct efx_nic *efx); extern bool efx_nic_event_present(struct efx_channel *channel); diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 5120cd8..1be81e4 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -503,6 +503,8 @@ static int siena_mac_reconfigure(struct efx_nic *efx) MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST + sizeof(efx->multicast_hash)); + efx_farch_filter_sync_rx_mode(efx); + WARN_ON(!mutex_is_locked(&efx->mac_lock)); rc = efx_mcdi_set_mac(efx); -- cgit v0.10.2 From ecd0a6f0f2c70a3b713bc77d8a32d6b4ad5ad49b Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Wed, 28 Nov 2012 04:12:41 +0000 Subject: sfc: Rename SPI stuff to show that it is Falcon-specific Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index 983e7f5..304131b 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -434,7 +434,7 @@ static int falcon_spi_wait(struct efx_nic *efx) } } -int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi, +int falcon_spi_cmd(struct efx_nic *efx, const struct falcon_spi_device *spi, unsigned int command, int address, const void *in, void *out, size_t len) { @@ -491,22 +491,22 @@ int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi, } static size_t -falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start) +falcon_spi_write_limit(const struct falcon_spi_device *spi, size_t start) { return min(FALCON_SPI_MAX_LEN, (spi->block_size - (start & (spi->block_size - 1)))); } static inline u8 -efx_spi_munge_command(const struct efx_spi_device *spi, - const u8 command, const unsigned int address) +falcon_spi_munge_command(const struct falcon_spi_device *spi, + const u8 command, const unsigned int address) { return command | (((address >> 8) & spi->munge_address) << 3); } /* Wait up to 10 ms for buffered write completion */ int -falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi) +falcon_spi_wait_write(struct efx_nic *efx, const struct falcon_spi_device *spi) { unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100); u8 status; @@ -530,7 +530,7 @@ falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi) } } -int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi, +int falcon_spi_read(struct efx_nic *efx, const struct falcon_spi_device *spi, loff_t start, size_t len, size_t *retlen, u8 *buffer) { size_t block_len, pos = 0; @@ -540,7 +540,7 @@ int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi, while (pos < len) { block_len = min(len - pos, FALCON_SPI_MAX_LEN); - command = efx_spi_munge_command(spi, SPI_READ, start + pos); + command = falcon_spi_munge_command(spi, SPI_READ, start + pos); rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL, buffer + pos, block_len); if 
(rc) @@ -561,7 +561,7 @@ int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi, } int -falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi, +falcon_spi_write(struct efx_nic *efx, const struct falcon_spi_device *spi, loff_t start, size_t len, size_t *retlen, const u8 *buffer) { u8 verify_buffer[FALCON_SPI_MAX_LEN]; @@ -576,7 +576,7 @@ falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi, block_len = min(len - pos, falcon_spi_write_limit(spi, start + pos)); - command = efx_spi_munge_command(spi, SPI_WRITE, start + pos); + command = falcon_spi_munge_command(spi, SPI_WRITE, start + pos); rc = falcon_spi_cmd(efx, spi, command, start + pos, buffer + pos, NULL, block_len); if (rc) @@ -586,7 +586,7 @@ falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi, if (rc) break; - command = efx_spi_munge_command(spi, SPI_READ, start + pos); + command = falcon_spi_munge_command(spi, SPI_READ, start + pos); rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL, verify_buffer, block_len); if (memcmp(verify_buffer, buffer + pos, block_len)) { @@ -1481,15 +1481,15 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out) { struct falcon_nic_data *nic_data = efx->nic_data; struct falcon_nvconfig *nvconfig; - struct efx_spi_device *spi; + struct falcon_spi_device *spi; void *region; int rc, magic_num, struct_ver; __le16 *word, *limit; u32 csum; - if (efx_spi_present(&nic_data->spi_flash)) + if (falcon_spi_present(&nic_data->spi_flash)) spi = &nic_data->spi_flash; - else if (efx_spi_present(&nic_data->spi_eeprom)) + else if (falcon_spi_present(&nic_data->spi_eeprom)) spi = &nic_data->spi_eeprom; else return -EINVAL; @@ -1504,7 +1504,7 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out) mutex_unlock(&nic_data->spi_lock); if (rc) { netif_err(efx, hw, efx->net_dev, "Failed to read %s\n", - efx_spi_present(&nic_data->spi_flash) ? + falcon_spi_present(&nic_data->spi_flash) ? 
"flash" : "EEPROM"); rc = -EIO; goto out; @@ -1849,7 +1849,7 @@ static int falcon_reset_sram(struct efx_nic *efx) } static void falcon_spi_device_init(struct efx_nic *efx, - struct efx_spi_device *spi_device, + struct falcon_spi_device *spi_device, unsigned int device_id, u32 device_type) { if (device_type != 0) { diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c index 08f825b..e4b35c6 100644 --- a/drivers/net/ethernet/sfc/mtd.c +++ b/drivers/net/ethernet/sfc/mtd.c @@ -22,7 +22,7 @@ #include "mcdi.h" #include "mcdi_pcol.h" -#define EFX_SPI_VERIFY_BUF_LEN 16 +#define FALCON_SPI_VERIFY_BUF_LEN 16 struct efx_mtd_partition { struct mtd_info mtd; @@ -50,7 +50,7 @@ struct efx_mtd_ops { struct efx_mtd { struct list_head node; struct efx_nic *efx; - const struct efx_spi_device *spi; + const struct falcon_spi_device *spi; const char *name; const struct efx_mtd_ops *ops; size_t n_parts; @@ -71,10 +71,10 @@ static int siena_mtd_probe(struct efx_nic *efx); /* SPI utilities */ static int -efx_spi_slow_wait(struct efx_mtd_partition *part, bool uninterruptible) +falcon_spi_slow_wait(struct efx_mtd_partition *part, bool uninterruptible) { struct efx_mtd *efx_mtd = part->mtd.priv; - const struct efx_spi_device *spi = efx_mtd->spi; + const struct falcon_spi_device *spi = efx_mtd->spi; struct efx_nic *efx = efx_mtd->efx; u8 status; int rc, i; @@ -98,7 +98,7 @@ efx_spi_slow_wait(struct efx_mtd_partition *part, bool uninterruptible) } static int -efx_spi_unlock(struct efx_nic *efx, const struct efx_spi_device *spi) +falcon_spi_unlock(struct efx_nic *efx, const struct falcon_spi_device *spi) { const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 | SPI_STATUS_BP0); @@ -133,14 +133,14 @@ efx_spi_unlock(struct efx_nic *efx, const struct efx_spi_device *spi) } static int -efx_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len) +falcon_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len) { struct efx_mtd *efx_mtd = part->mtd.priv; - const struct efx_spi_device *spi = efx_mtd->spi; + const struct falcon_spi_device *spi = efx_mtd->spi; struct efx_nic *efx = efx_mtd->efx; unsigned pos, block_len; - u8 empty[EFX_SPI_VERIFY_BUF_LEN]; - u8 buffer[EFX_SPI_VERIFY_BUF_LEN]; + u8 empty[FALCON_SPI_VERIFY_BUF_LEN]; + u8 buffer[FALCON_SPI_VERIFY_BUF_LEN]; int rc; if (len != spi->erase_size) @@ -149,7 +149,7 @@ efx_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len) if (spi->erase_command == 0) return -EOPNOTSUPP; - rc = efx_spi_unlock(efx, spi); + rc = falcon_spi_unlock(efx, spi); if (rc) return rc; rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0); @@ -159,7 +159,7 @@ efx_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len) NULL, 0); if (rc) return rc; - rc = efx_spi_slow_wait(part, false); + rc = falcon_spi_slow_wait(part, false); /* Verify the entire region has been wiped */ memset(empty, 0xff, sizeof(empty)); @@ -319,7 +319,7 @@ static int falcon_mtd_read(struct mtd_info *mtd, loff_t start, { struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); struct efx_mtd *efx_mtd = mtd->priv; - const struct efx_spi_device *spi = efx_mtd->spi; + const struct falcon_spi_device *spi = efx_mtd->spi; struct efx_nic *efx = efx_mtd->efx; struct falcon_nic_data *nic_data = efx->nic_data; int rc; @@ -344,7 +344,7 @@ static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len) rc = mutex_lock_interruptible(&nic_data->spi_lock); if (rc) return rc; - rc = efx_spi_erase(part, part->offset + start, len); + rc = 
falcon_spi_erase(part, part->offset + start, len); mutex_unlock(&nic_data->spi_lock); return rc; } @@ -354,7 +354,7 @@ static int falcon_mtd_write(struct mtd_info *mtd, loff_t start, { struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); struct efx_mtd *efx_mtd = mtd->priv; - const struct efx_spi_device *spi = efx_mtd->spi; + const struct falcon_spi_device *spi = efx_mtd->spi; struct efx_nic *efx = efx_mtd->efx; struct falcon_nic_data *nic_data = efx->nic_data; int rc; @@ -377,7 +377,7 @@ static int falcon_mtd_sync(struct mtd_info *mtd) int rc; mutex_lock(&nic_data->spi_lock); - rc = efx_spi_slow_wait(part, true); + rc = falcon_spi_slow_wait(part, true); mutex_unlock(&nic_data->spi_lock); return rc; } @@ -392,14 +392,14 @@ static const struct efx_mtd_ops falcon_mtd_ops = { static int falcon_mtd_probe(struct efx_nic *efx) { struct falcon_nic_data *nic_data = efx->nic_data; - struct efx_spi_device *spi; + struct falcon_spi_device *spi; struct efx_mtd *efx_mtd; int rc = -ENODEV; ASSERT_RTNL(); spi = &nic_data->spi_flash; - if (efx_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) { + if (falcon_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) { efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]), GFP_KERNEL); if (!efx_mtd) @@ -425,7 +425,7 @@ static int falcon_mtd_probe(struct efx_nic *efx) } spi = &nic_data->spi_eeprom; - if (efx_spi_present(spi) && spi->size > EFX_EEPROM_BOOTCONFIG_START) { + if (falcon_spi_present(spi) && spi->size > FALCON_EEPROM_BOOTCONFIG_START) { efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]), GFP_KERNEL); if (!efx_mtd) @@ -439,10 +439,10 @@ static int falcon_mtd_probe(struct efx_nic *efx) efx_mtd->part[0].mtd.type = MTD_RAM; efx_mtd->part[0].mtd.flags = MTD_CAP_RAM; efx_mtd->part[0].mtd.size = - min(spi->size, EFX_EEPROM_BOOTCONFIG_END) - - EFX_EEPROM_BOOTCONFIG_START; + min(spi->size, FALCON_EEPROM_BOOTCONFIG_END) - + FALCON_EEPROM_BOOTCONFIG_START; efx_mtd->part[0].mtd.erasesize = spi->erase_size; - efx_mtd->part[0].offset = EFX_EEPROM_BOOTCONFIG_START; + efx_mtd->part[0].offset = FALCON_EEPROM_BOOTCONFIG_START; efx_mtd->part[0].type_name = "sfc_bootconfig"; rc = efx_mtd_probe_device(efx, efx_mtd); diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 8baf6a1..b90dc8a 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -184,8 +184,8 @@ struct falcon_nic_data { bool stats_pending; struct timer_list stats_timer; u32 *stats_dma_done; - struct efx_spi_device spi_flash; - struct efx_spi_device spi_eeprom; + struct falcon_spi_device spi_flash; + struct falcon_spi_device spi_eeprom; struct mutex spi_lock; struct mutex mdio_lock; bool xmac_poll_required; diff --git a/drivers/net/ethernet/sfc/spi.h b/drivers/net/ethernet/sfc/spi.h index 5431a1b..ee951feb 100644 --- a/drivers/net/ethernet/sfc/spi.h +++ b/drivers/net/ethernet/sfc/spi.h @@ -35,7 +35,7 @@ #define SPI_STATUS_NRDY 0x01 /* Device busy flag */ /** - * struct efx_spi_device - an Efx SPI (Serial Peripheral Interface) device + * struct falcon_spi_device - a Falcon SPI (Serial Peripheral Interface) device * @device_id: Controller's id for the device * @size: Size (in bytes) * @addr_len: Number of address bytes in read/write commands @@ -51,7 +51,7 @@ * @block_size: Write block size (in bytes). * Write commands are limited to blocks with this size and alignment. 
*/ -struct efx_spi_device { +struct falcon_spi_device { int device_id; unsigned int size; unsigned int addr_len; @@ -61,21 +61,21 @@ struct efx_spi_device { unsigned int block_size; }; -static inline bool efx_spi_present(const struct efx_spi_device *spi) +static inline bool falcon_spi_present(const struct falcon_spi_device *spi) { return spi->size != 0; } int falcon_spi_cmd(struct efx_nic *efx, - const struct efx_spi_device *spi, unsigned int command, + const struct falcon_spi_device *spi, unsigned int command, int address, const void *in, void *out, size_t len); int falcon_spi_wait_write(struct efx_nic *efx, - const struct efx_spi_device *spi); + const struct falcon_spi_device *spi); int falcon_spi_read(struct efx_nic *efx, - const struct efx_spi_device *spi, loff_t start, + const struct falcon_spi_device *spi, loff_t start, size_t len, size_t *retlen, u8 *buffer); int falcon_spi_write(struct efx_nic *efx, - const struct efx_spi_device *spi, loff_t start, + const struct falcon_spi_device *spi, loff_t start, size_t len, size_t *retlen, const u8 *buffer); /* @@ -93,7 +93,7 @@ int falcon_spi_write(struct efx_nic *efx, */ #define FALCON_NVCONFIG_END 0x400U #define FALCON_FLASH_BOOTCODE_START 0x8000U -#define EFX_EEPROM_BOOTCONFIG_START 0x800U -#define EFX_EEPROM_BOOTCONFIG_END 0x1800U +#define FALCON_EEPROM_BOOTCONFIG_START 0x800U +#define FALCON_EEPROM_BOOTCONFIG_END 0x1800U #endif /* EFX_SPI_H */ -- cgit v0.10.2 From b766630b351c68c0383831dba9b81a905e5e84c6 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Wed, 28 Nov 2012 04:38:10 +0000 Subject: sfc: Eliminate struct efx_mtd Currently we use struct efx_mtd to represent a physical NVRAM device and struct efx_mtd_partition to represent a partition on that device. But this only really makes sense for Falcon, as we don't know or care whether MC-managed NVRAM partitions are on one or more physical devices. It complicates iteration and provides little benefit. 
Therefore: - Replace the pointer to efx_mtd in mtd_info::priv with a pointer to efx_nic - Move the falcon_spi_device pointer into the union in struct efx_mtd_partition - Move the device name to efx_mtd_partition::dev_type_name - Move the efx_mtd_ops pointer to efx_nic::mtd_ops - Make efx_nic::mtd_list a list of partitions Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c index e4b35c6..ba6c87a 100644 --- a/drivers/net/ethernet/sfc/mtd.c +++ b/drivers/net/ethernet/sfc/mtd.c @@ -25,6 +25,7 @@ #define FALCON_SPI_VERIFY_BUF_LEN 16 struct efx_mtd_partition { + struct list_head node; struct mtd_info mtd; union { struct { @@ -32,8 +33,12 @@ struct efx_mtd_partition { u8 nvram_type; u16 fw_subtype; } mcdi; - size_t offset; + struct { + const struct falcon_spi_device *spi; + size_t offset; + } falcon; }; + const char *dev_type_name; const char *type_name; char name[IFNAMSIZ + 20]; }; @@ -47,21 +52,6 @@ struct efx_mtd_ops { int (*sync)(struct mtd_info *mtd); }; -struct efx_mtd { - struct list_head node; - struct efx_nic *efx; - const struct falcon_spi_device *spi; - const char *name; - const struct efx_mtd_ops *ops; - size_t n_parts; - struct efx_mtd_partition part[0]; -}; - -#define efx_for_each_partition(part, efx_mtd) \ - for ((part) = &(efx_mtd)->part[0]; \ - (part) != &(efx_mtd)->part[(efx_mtd)->n_parts]; \ - (part)++) - #define to_efx_mtd_partition(mtd) \ container_of(mtd, struct efx_mtd_partition, mtd) @@ -73,9 +63,8 @@ static int siena_mtd_probe(struct efx_nic *efx); static int falcon_spi_slow_wait(struct efx_mtd_partition *part, bool uninterruptible) { - struct efx_mtd *efx_mtd = part->mtd.priv; - const struct falcon_spi_device *spi = efx_mtd->spi; - struct efx_nic *efx = efx_mtd->efx; + const struct falcon_spi_device *spi = part->falcon.spi; + struct efx_nic *efx = part->mtd.priv; u8 status; int rc, i; @@ -93,7 +82,8 @@ falcon_spi_slow_wait(struct efx_mtd_partition *part, bool uninterruptible) if (signal_pending(current)) return -EINTR; } - pr_err("%s: timed out waiting for %s\n", part->name, efx_mtd->name); + pr_err("%s: timed out waiting for %s\n", + part->name, part->dev_type_name); return -ETIMEDOUT; } @@ -135,9 +125,8 @@ falcon_spi_unlock(struct efx_nic *efx, const struct falcon_spi_device *spi) static int falcon_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len) { - struct efx_mtd *efx_mtd = part->mtd.priv; - const struct falcon_spi_device *spi = efx_mtd->spi; - struct efx_nic *efx = efx_mtd->efx; + const struct falcon_spi_device *spi = part->falcon.spi; + struct efx_nic *efx = part->mtd.priv; unsigned pos, block_len; u8 empty[FALCON_SPI_VERIFY_BUF_LEN]; u8 buffer[FALCON_SPI_VERIFY_BUF_LEN]; @@ -185,10 +174,10 @@ falcon_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len) static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase) { - struct efx_mtd *efx_mtd = mtd->priv; + struct efx_nic *efx = mtd->priv; int rc; - rc = efx_mtd->ops->erase(mtd, erase->addr, erase->len); + rc = efx->mtd_ops->erase(mtd, erase->addr, erase->len); if (rc == 0) { erase->state = MTD_ERASE_DONE; } else { @@ -202,13 +191,13 @@ static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase) static void efx_mtd_sync(struct mtd_info *mtd) { struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); - struct efx_mtd *efx_mtd = mtd->priv; + struct efx_nic *efx = mtd->priv; int rc; - rc = efx_mtd->ops->sync(mtd); + rc = efx->mtd_ops->sync(mtd); if (rc) pr_err("%s: %s sync failed (%d)\n", - part->name, 
efx_mtd->name, rc); + part->name, part->dev_type_name, rc); } static void efx_mtd_remove_partition(struct efx_mtd_partition *part) @@ -222,86 +211,84 @@ static void efx_mtd_remove_partition(struct efx_mtd_partition *part) ssleep(1); } WARN_ON(rc); + list_del(&part->node); } -static void efx_mtd_remove_device(struct efx_mtd *efx_mtd) -{ - struct efx_mtd_partition *part; - - efx_for_each_partition(part, efx_mtd) - efx_mtd_remove_partition(part); - list_del(&efx_mtd->node); - kfree(efx_mtd); -} - -static void efx_mtd_rename_device(struct efx_mtd *efx_mtd) +static void efx_mtd_rename_partition(struct efx_mtd_partition *part) { - struct efx_mtd_partition *part; + struct efx_nic *efx = part->mtd.priv; - efx_for_each_partition(part, efx_mtd) - if (efx_nic_rev(efx_mtd->efx) >= EFX_REV_SIENA_A0) - snprintf(part->name, sizeof(part->name), - "%s %s:%02x", efx_mtd->efx->name, - part->type_name, part->mcdi.fw_subtype); - else - snprintf(part->name, sizeof(part->name), - "%s %s", efx_mtd->efx->name, - part->type_name); + if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) + snprintf(part->name, sizeof(part->name), "%s %s:%02x", + efx->name, part->type_name, part->mcdi.fw_subtype); + else + snprintf(part->name, sizeof(part->name), "%s %s", + efx->name, part->type_name); } -static int efx_mtd_probe_device(struct efx_nic *efx, struct efx_mtd *efx_mtd) +static int efx_mtd_add(struct efx_nic *efx, + struct efx_mtd_partition *parts, size_t n_parts) { struct efx_mtd_partition *part; + size_t i; - efx_mtd->efx = efx; + for (i = 0; i < n_parts; i++) { + part = &parts[i]; - efx_mtd_rename_device(efx_mtd); - - efx_for_each_partition(part, efx_mtd) { part->mtd.writesize = 1; part->mtd.owner = THIS_MODULE; - part->mtd.priv = efx_mtd; + part->mtd.priv = efx; part->mtd.name = part->name; part->mtd._erase = efx_mtd_erase; - part->mtd._read = efx_mtd->ops->read; - part->mtd._write = efx_mtd->ops->write; + part->mtd._read = efx->mtd_ops->read; + part->mtd._write = efx->mtd_ops->write; part->mtd._sync = efx_mtd_sync; + efx_mtd_rename_partition(part); + if (mtd_device_register(&part->mtd, NULL, 0)) goto fail; + + /* Add to list in order - efx_mtd_remove() depends on this */ + list_add_tail(&part->node, &efx->mtd_list); } - list_add(&efx_mtd->node, &efx->mtd_list); return 0; fail: - while (part != &efx_mtd->part[0]) { - --part; - efx_mtd_remove_partition(part); - } + while (i--) + efx_mtd_remove_partition(&parts[i]); /* Failure is unlikely here, but probably means we're out of memory */ return -ENOMEM; } void efx_mtd_remove(struct efx_nic *efx) { - struct efx_mtd *efx_mtd, *next; + struct efx_mtd_partition *parts, *part, *next; WARN_ON(efx_dev_registered(efx)); - list_for_each_entry_safe(efx_mtd, next, &efx->mtd_list, node) - efx_mtd_remove_device(efx_mtd); + if (list_empty(&efx->mtd_list)) + return; + + parts = list_first_entry(&efx->mtd_list, struct efx_mtd_partition, + node); + + list_for_each_entry_safe(part, next, &efx->mtd_list, node) + efx_mtd_remove_partition(part); + + kfree(parts); } void efx_mtd_rename(struct efx_nic *efx) { - struct efx_mtd *efx_mtd; + struct efx_mtd_partition *part; ASSERT_RTNL(); - list_for_each_entry(efx_mtd, &efx->mtd_list, node) - efx_mtd_rename_device(efx_mtd); + list_for_each_entry(part, &efx->mtd_list, node) + efx_mtd_rename_partition(part); } int efx_mtd_probe(struct efx_nic *efx) @@ -318,17 +305,15 @@ static int falcon_mtd_read(struct mtd_info *mtd, loff_t start, size_t len, size_t *retlen, u8 *buffer) { struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); - struct efx_mtd *efx_mtd = 
mtd->priv; - const struct falcon_spi_device *spi = efx_mtd->spi; - struct efx_nic *efx = efx_mtd->efx; + struct efx_nic *efx = mtd->priv; struct falcon_nic_data *nic_data = efx->nic_data; int rc; rc = mutex_lock_interruptible(&nic_data->spi_lock); if (rc) return rc; - rc = falcon_spi_read(efx, spi, part->offset + start, len, - retlen, buffer); + rc = falcon_spi_read(efx, part->falcon.spi, part->falcon.offset + start, + len, retlen, buffer); mutex_unlock(&nic_data->spi_lock); return rc; } @@ -336,15 +321,14 @@ static int falcon_mtd_read(struct mtd_info *mtd, loff_t start, static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len) { struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); - struct efx_mtd *efx_mtd = mtd->priv; - struct efx_nic *efx = efx_mtd->efx; + struct efx_nic *efx = mtd->priv; struct falcon_nic_data *nic_data = efx->nic_data; int rc; rc = mutex_lock_interruptible(&nic_data->spi_lock); if (rc) return rc; - rc = falcon_spi_erase(part, part->offset + start, len); + rc = falcon_spi_erase(part, part->falcon.offset + start, len); mutex_unlock(&nic_data->spi_lock); return rc; } @@ -353,17 +337,15 @@ static int falcon_mtd_write(struct mtd_info *mtd, loff_t start, size_t len, size_t *retlen, const u8 *buffer) { struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); - struct efx_mtd *efx_mtd = mtd->priv; - const struct falcon_spi_device *spi = efx_mtd->spi; - struct efx_nic *efx = efx_mtd->efx; + struct efx_nic *efx = mtd->priv; struct falcon_nic_data *nic_data = efx->nic_data; int rc; rc = mutex_lock_interruptible(&nic_data->spi_lock); if (rc) return rc; - rc = falcon_spi_write(efx, spi, part->offset + start, len, - retlen, buffer); + rc = falcon_spi_write(efx, part->falcon.spi, + part->falcon.offset + start, len, retlen, buffer); mutex_unlock(&nic_data->spi_lock); return rc; } @@ -371,8 +353,7 @@ static int falcon_mtd_write(struct mtd_info *mtd, loff_t start, static int falcon_mtd_sync(struct mtd_info *mtd) { struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); - struct efx_mtd *efx_mtd = mtd->priv; - struct efx_nic *efx = efx_mtd->efx; + struct efx_nic *efx = mtd->priv; struct falcon_nic_data *nic_data = efx->nic_data; int rc; @@ -392,66 +373,50 @@ static const struct efx_mtd_ops falcon_mtd_ops = { static int falcon_mtd_probe(struct efx_nic *efx) { struct falcon_nic_data *nic_data = efx->nic_data; + struct efx_mtd_partition *parts; struct falcon_spi_device *spi; - struct efx_mtd *efx_mtd; + size_t n_parts; int rc = -ENODEV; ASSERT_RTNL(); + efx->mtd_ops = &falcon_mtd_ops; + + /* Allocate space for maximum number of partitions */ + parts = kcalloc(2, sizeof(*parts), GFP_KERNEL); + n_parts = 0; + spi = &nic_data->spi_flash; if (falcon_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) { - efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]), - GFP_KERNEL); - if (!efx_mtd) - return -ENOMEM; - - efx_mtd->spi = spi; - efx_mtd->name = "flash"; - efx_mtd->ops = &falcon_mtd_ops; - - efx_mtd->n_parts = 1; - efx_mtd->part[0].mtd.type = MTD_NORFLASH; - efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH; - efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START; - efx_mtd->part[0].mtd.erasesize = spi->erase_size; - efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START; - efx_mtd->part[0].type_name = "sfc_flash_bootrom"; - - rc = efx_mtd_probe_device(efx, efx_mtd); - if (rc) { - kfree(efx_mtd); - return rc; - } + parts[n_parts].falcon.spi = spi; + parts[n_parts].falcon.offset = FALCON_FLASH_BOOTCODE_START; + 
parts[n_parts].dev_type_name = "flash"; + parts[n_parts].type_name = "sfc_flash_bootrom"; + parts[n_parts].mtd.type = MTD_NORFLASH; + parts[n_parts].mtd.flags = MTD_CAP_NORFLASH; + parts[n_parts].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START; + parts[n_parts].mtd.erasesize = spi->erase_size; + n_parts++; } spi = &nic_data->spi_eeprom; if (falcon_spi_present(spi) && spi->size > FALCON_EEPROM_BOOTCONFIG_START) { - efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]), - GFP_KERNEL); - if (!efx_mtd) - return -ENOMEM; - - efx_mtd->spi = spi; - efx_mtd->name = "EEPROM"; - efx_mtd->ops = &falcon_mtd_ops; - - efx_mtd->n_parts = 1; - efx_mtd->part[0].mtd.type = MTD_RAM; - efx_mtd->part[0].mtd.flags = MTD_CAP_RAM; - efx_mtd->part[0].mtd.size = + parts[n_parts].falcon.spi = spi; + parts[n_parts].falcon.offset = FALCON_EEPROM_BOOTCONFIG_START; + parts[n_parts].dev_type_name = "EEPROM"; + parts[n_parts].type_name = "sfc_bootconfig"; + parts[n_parts].mtd.type = MTD_RAM; + parts[n_parts].mtd.flags = MTD_CAP_RAM; + parts[n_parts].mtd.size = min(spi->size, FALCON_EEPROM_BOOTCONFIG_END) - FALCON_EEPROM_BOOTCONFIG_START; - efx_mtd->part[0].mtd.erasesize = spi->erase_size; - efx_mtd->part[0].offset = FALCON_EEPROM_BOOTCONFIG_START; - efx_mtd->part[0].type_name = "sfc_bootconfig"; - - rc = efx_mtd_probe_device(efx, efx_mtd); - if (rc) { - kfree(efx_mtd); - return rc; - } + parts[n_parts].mtd.erasesize = spi->erase_size; + n_parts++; } + rc = efx_mtd_add(efx, parts, n_parts); + if (rc) + kfree(parts); return rc; } @@ -461,8 +426,7 @@ static int siena_mtd_read(struct mtd_info *mtd, loff_t start, size_t len, size_t *retlen, u8 *buffer) { struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); - struct efx_mtd *efx_mtd = mtd->priv; - struct efx_nic *efx = efx_mtd->efx; + struct efx_nic *efx = mtd->priv; loff_t offset = start; loff_t end = min_t(loff_t, start + len, mtd->size); size_t chunk; @@ -485,8 +449,7 @@ out: static int siena_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len) { struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); - struct efx_mtd *efx_mtd = mtd->priv; - struct efx_nic *efx = efx_mtd->efx; + struct efx_nic *efx = mtd->priv; loff_t offset = start & ~((loff_t)(mtd->erasesize - 1)); loff_t end = min_t(loff_t, start + len, mtd->size); size_t chunk = part->mtd.erasesize; @@ -517,8 +480,7 @@ static int siena_mtd_write(struct mtd_info *mtd, loff_t start, size_t len, size_t *retlen, const u8 *buffer) { struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); - struct efx_mtd *efx_mtd = mtd->priv; - struct efx_nic *efx = efx_mtd->efx; + struct efx_nic *efx = mtd->priv; loff_t offset = start; loff_t end = min_t(loff_t, start + len, mtd->size); size_t chunk; @@ -548,8 +510,7 @@ out: static int siena_mtd_sync(struct mtd_info *mtd) { struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); - struct efx_mtd *efx_mtd = mtd->priv; - struct efx_nic *efx = efx_mtd->efx; + struct efx_nic *efx = mtd->priv; int rc = 0; if (part->mcdi.updating) { @@ -589,11 +550,9 @@ static const struct siena_nvram_type_info siena_nvram_types[] = { }; static int siena_mtd_probe_partition(struct efx_nic *efx, - struct efx_mtd *efx_mtd, - unsigned int part_id, + struct efx_mtd_partition *part, unsigned int type) { - struct efx_mtd_partition *part = &efx_mtd->part[part_id]; const struct siena_nvram_type_info *info; size_t size, erase_size; bool protected; @@ -615,6 +574,7 @@ static int siena_mtd_probe_partition(struct efx_nic *efx, return -ENODEV; /* hide it */ part->mcdi.nvram_type = type; + 
part->dev_type_name = "Siena NVRAM manager"; part->type_name = info->name; part->mtd.type = MTD_NORFLASH; @@ -626,55 +586,54 @@ static int siena_mtd_probe_partition(struct efx_nic *efx, } static int siena_mtd_get_fw_subtypes(struct efx_nic *efx, - struct efx_mtd *efx_mtd) + struct efx_mtd_partition *parts, + size_t n_parts) { - struct efx_mtd_partition *part; uint16_t fw_subtype_list[ MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM]; + size_t i; int rc; rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL); if (rc) return rc; - efx_for_each_partition(part, efx_mtd) - part->mcdi.fw_subtype = fw_subtype_list[part->mcdi.nvram_type]; + for (i = 0; i < n_parts; i++) + parts[i].mcdi.fw_subtype = + fw_subtype_list[parts[i].mcdi.nvram_type]; return 0; } static int siena_mtd_probe(struct efx_nic *efx) { - struct efx_mtd *efx_mtd; - int rc = -ENODEV; + struct efx_mtd_partition *parts; u32 nvram_types; unsigned int type; + size_t n_parts; + int rc; ASSERT_RTNL(); + efx->mtd_ops = &siena_mtd_ops; + rc = efx_mcdi_nvram_types(efx, &nvram_types); if (rc) return rc; - efx_mtd = kzalloc(sizeof(*efx_mtd) + - hweight32(nvram_types) * sizeof(efx_mtd->part[0]), - GFP_KERNEL); - if (!efx_mtd) + parts = kcalloc(hweight32(nvram_types), sizeof(*parts), GFP_KERNEL); + if (!parts) return -ENOMEM; - efx_mtd->name = "Siena NVRAM manager"; - - efx_mtd->ops = &siena_mtd_ops; - type = 0; - efx_mtd->n_parts = 0; + n_parts = 0; while (nvram_types != 0) { if (nvram_types & 1) { - rc = siena_mtd_probe_partition(efx, efx_mtd, - efx_mtd->n_parts, type); + rc = siena_mtd_probe_partition(efx, &parts[n_parts], + type); if (rc == 0) - efx_mtd->n_parts++; + n_parts++; else if (rc != -ENODEV) goto fail; } @@ -682,14 +641,14 @@ static int siena_mtd_probe(struct efx_nic *efx) nvram_types >>= 1; } - rc = siena_mtd_get_fw_subtypes(efx, efx_mtd); + rc = siena_mtd_get_fw_subtypes(efx, parts, n_parts); if (rc) goto fail; - rc = efx_mtd_probe_device(efx, efx_mtd); + rc = efx_mtd_add(efx, parts, n_parts); fail: if (rc) - kfree(efx_mtd); + kfree(parts); return rc; } diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 555dd01..958ede1 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -868,6 +868,7 @@ struct efx_nic { struct delayed_work selftest_work; #ifdef CONFIG_SFC_MTD + const struct efx_mtd_ops *mtd_ops; struct list_head mtd_list; #endif -- cgit v0.10.2 From 141d748e70a22629ef1e1823f88b3d5741ac38af Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Wed, 28 Nov 2012 04:38:14 +0000 Subject: sfc: Move NIC-type-specific MTD partition date into separate structures Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c index ba6c87a..f5819c7 100644 --- a/drivers/net/ethernet/sfc/mtd.c +++ b/drivers/net/ethernet/sfc/mtd.c @@ -27,23 +27,19 @@ struct efx_mtd_partition { struct list_head node; struct mtd_info mtd; - union { - struct { - bool updating; - u8 nvram_type; - u16 fw_subtype; - } mcdi; - struct { - const struct falcon_spi_device *spi; - size_t offset; - } falcon; - }; const char *dev_type_name; const char *type_name; char name[IFNAMSIZ + 20]; }; +struct falcon_mtd_partition { + struct efx_mtd_partition common; + const struct falcon_spi_device *spi; + size_t offset; +}; + struct efx_mtd_ops { + void (*rename)(struct efx_mtd_partition *part); int (*read)(struct mtd_info *mtd, loff_t start, size_t len, size_t *retlen, u8 *buffer); int (*erase)(struct mtd_info *mtd, loff_t start, size_t len); 
@@ -55,16 +51,19 @@ struct efx_mtd_ops { #define to_efx_mtd_partition(mtd) \ container_of(mtd, struct efx_mtd_partition, mtd) +#define to_falcon_mtd_partition(mtd) \ + container_of(mtd, struct falcon_mtd_partition, common.mtd) + static int falcon_mtd_probe(struct efx_nic *efx); static int siena_mtd_probe(struct efx_nic *efx); /* SPI utilities */ static int -falcon_spi_slow_wait(struct efx_mtd_partition *part, bool uninterruptible) +falcon_spi_slow_wait(struct falcon_mtd_partition *part, bool uninterruptible) { - const struct falcon_spi_device *spi = part->falcon.spi; - struct efx_nic *efx = part->mtd.priv; + const struct falcon_spi_device *spi = part->spi; + struct efx_nic *efx = part->common.mtd.priv; u8 status; int rc, i; @@ -83,7 +82,7 @@ falcon_spi_slow_wait(struct efx_mtd_partition *part, bool uninterruptible) return -EINTR; } pr_err("%s: timed out waiting for %s\n", - part->name, part->dev_type_name); + part->common.name, part->common.dev_type_name); return -ETIMEDOUT; } @@ -123,10 +122,10 @@ falcon_spi_unlock(struct efx_nic *efx, const struct falcon_spi_device *spi) } static int -falcon_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len) +falcon_spi_erase(struct falcon_mtd_partition *part, loff_t start, size_t len) { - const struct falcon_spi_device *spi = part->falcon.spi; - struct efx_nic *efx = part->mtd.priv; + const struct falcon_spi_device *spi = part->spi; + struct efx_nic *efx = part->common.mtd.priv; unsigned pos, block_len; u8 empty[FALCON_SPI_VERIFY_BUF_LEN]; u8 buffer[FALCON_SPI_VERIFY_BUF_LEN]; @@ -218,22 +217,18 @@ static void efx_mtd_rename_partition(struct efx_mtd_partition *part) { struct efx_nic *efx = part->mtd.priv; - if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) - snprintf(part->name, sizeof(part->name), "%s %s:%02x", - efx->name, part->type_name, part->mcdi.fw_subtype); - else - snprintf(part->name, sizeof(part->name), "%s %s", - efx->name, part->type_name); + efx->mtd_ops->rename(part); } -static int efx_mtd_add(struct efx_nic *efx, - struct efx_mtd_partition *parts, size_t n_parts) +static int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts, + size_t n_parts, size_t sizeof_part) { struct efx_mtd_partition *part; size_t i; for (i = 0; i < n_parts; i++) { - part = &parts[i]; + part = (struct efx_mtd_partition *)((char *)parts + + i * sizeof_part); part->mtd.writesize = 1; @@ -257,8 +252,11 @@ static int efx_mtd_add(struct efx_nic *efx, return 0; fail: - while (i--) - efx_mtd_remove_partition(&parts[i]); + while (i--) { + part = (struct efx_mtd_partition *)((char *)parts + + i * sizeof_part); + efx_mtd_remove_partition(part); + } /* Failure is unlikely here, but probably means we're out of memory */ return -ENOMEM; } @@ -301,10 +299,18 @@ int efx_mtd_probe(struct efx_nic *efx) /* Implementation of MTD operations for Falcon */ +static void falcon_mtd_rename(struct efx_mtd_partition *part) +{ + struct efx_nic *efx = part->mtd.priv; + + snprintf(part->name, sizeof(part->name), "%s %s", + efx->name, part->type_name); +} + static int falcon_mtd_read(struct mtd_info *mtd, loff_t start, size_t len, size_t *retlen, u8 *buffer) { - struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); + struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd); struct efx_nic *efx = mtd->priv; struct falcon_nic_data *nic_data = efx->nic_data; int rc; @@ -312,7 +318,7 @@ static int falcon_mtd_read(struct mtd_info *mtd, loff_t start, rc = mutex_lock_interruptible(&nic_data->spi_lock); if (rc) return rc; - rc = falcon_spi_read(efx, part->falcon.spi, 
part->falcon.offset + start, + rc = falcon_spi_read(efx, part->spi, part->offset + start, len, retlen, buffer); mutex_unlock(&nic_data->spi_lock); return rc; @@ -320,7 +326,7 @@ static int falcon_mtd_read(struct mtd_info *mtd, loff_t start, static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len) { - struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); + struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd); struct efx_nic *efx = mtd->priv; struct falcon_nic_data *nic_data = efx->nic_data; int rc; @@ -328,7 +334,7 @@ static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len) rc = mutex_lock_interruptible(&nic_data->spi_lock); if (rc) return rc; - rc = falcon_spi_erase(part, part->falcon.offset + start, len); + rc = falcon_spi_erase(part, part->offset + start, len); mutex_unlock(&nic_data->spi_lock); return rc; } @@ -336,7 +342,7 @@ static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len) static int falcon_mtd_write(struct mtd_info *mtd, loff_t start, size_t len, size_t *retlen, const u8 *buffer) { - struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); + struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd); struct efx_nic *efx = mtd->priv; struct falcon_nic_data *nic_data = efx->nic_data; int rc; @@ -344,15 +350,15 @@ static int falcon_mtd_write(struct mtd_info *mtd, loff_t start, rc = mutex_lock_interruptible(&nic_data->spi_lock); if (rc) return rc; - rc = falcon_spi_write(efx, part->falcon.spi, - part->falcon.offset + start, len, retlen, buffer); + rc = falcon_spi_write(efx, part->spi, part->offset + start, + len, retlen, buffer); mutex_unlock(&nic_data->spi_lock); return rc; } static int falcon_mtd_sync(struct mtd_info *mtd) { - struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); + struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd); struct efx_nic *efx = mtd->priv; struct falcon_nic_data *nic_data = efx->nic_data; int rc; @@ -364,6 +370,7 @@ static int falcon_mtd_sync(struct mtd_info *mtd) } static const struct efx_mtd_ops falcon_mtd_ops = { + .rename = falcon_mtd_rename, .read = falcon_mtd_read, .erase = falcon_mtd_erase, .write = falcon_mtd_write, @@ -373,7 +380,7 @@ static const struct efx_mtd_ops falcon_mtd_ops = { static int falcon_mtd_probe(struct efx_nic *efx) { struct falcon_nic_data *nic_data = efx->nic_data; - struct efx_mtd_partition *parts; + struct falcon_mtd_partition *parts; struct falcon_spi_device *spi; size_t n_parts; int rc = -ENODEV; @@ -388,33 +395,33 @@ static int falcon_mtd_probe(struct efx_nic *efx) spi = &nic_data->spi_flash; if (falcon_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) { - parts[n_parts].falcon.spi = spi; - parts[n_parts].falcon.offset = FALCON_FLASH_BOOTCODE_START; - parts[n_parts].dev_type_name = "flash"; - parts[n_parts].type_name = "sfc_flash_bootrom"; - parts[n_parts].mtd.type = MTD_NORFLASH; - parts[n_parts].mtd.flags = MTD_CAP_NORFLASH; - parts[n_parts].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START; - parts[n_parts].mtd.erasesize = spi->erase_size; + parts[n_parts].spi = spi; + parts[n_parts].offset = FALCON_FLASH_BOOTCODE_START; + parts[n_parts].common.dev_type_name = "flash"; + parts[n_parts].common.type_name = "sfc_flash_bootrom"; + parts[n_parts].common.mtd.type = MTD_NORFLASH; + parts[n_parts].common.mtd.flags = MTD_CAP_NORFLASH; + parts[n_parts].common.mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START; + parts[n_parts].common.mtd.erasesize = spi->erase_size; n_parts++; } spi = &nic_data->spi_eeprom; if 
(falcon_spi_present(spi) && spi->size > FALCON_EEPROM_BOOTCONFIG_START) { - parts[n_parts].falcon.spi = spi; - parts[n_parts].falcon.offset = FALCON_EEPROM_BOOTCONFIG_START; - parts[n_parts].dev_type_name = "EEPROM"; - parts[n_parts].type_name = "sfc_bootconfig"; - parts[n_parts].mtd.type = MTD_RAM; - parts[n_parts].mtd.flags = MTD_CAP_RAM; - parts[n_parts].mtd.size = + parts[n_parts].spi = spi; + parts[n_parts].offset = FALCON_EEPROM_BOOTCONFIG_START; + parts[n_parts].common.dev_type_name = "EEPROM"; + parts[n_parts].common.type_name = "sfc_bootconfig"; + parts[n_parts].common.mtd.type = MTD_RAM; + parts[n_parts].common.mtd.flags = MTD_CAP_RAM; + parts[n_parts].common.mtd.size = min(spi->size, FALCON_EEPROM_BOOTCONFIG_END) - FALCON_EEPROM_BOOTCONFIG_START; - parts[n_parts].mtd.erasesize = spi->erase_size; + parts[n_parts].common.mtd.erasesize = spi->erase_size; n_parts++; } - rc = efx_mtd_add(efx, parts, n_parts); + rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); if (rc) kfree(parts); return rc; @@ -422,10 +429,30 @@ static int falcon_mtd_probe(struct efx_nic *efx) /* Implementation of MTD operations for Siena */ +struct efx_mcdi_mtd_partition { + struct efx_mtd_partition common; + bool updating; + u8 nvram_type; + u16 fw_subtype; +}; + +#define to_efx_mcdi_mtd_partition(mtd) \ + container_of(mtd, struct efx_mcdi_mtd_partition, common.mtd) + +static void siena_mtd_rename(struct efx_mtd_partition *part) +{ + struct efx_mcdi_mtd_partition *mcdi_part = + container_of(part, struct efx_mcdi_mtd_partition, common); + struct efx_nic *efx = part->mtd.priv; + + snprintf(part->name, sizeof(part->name), "%s %s:%02x", + efx->name, part->type_name, mcdi_part->fw_subtype); +} + static int siena_mtd_read(struct mtd_info *mtd, loff_t start, size_t len, size_t *retlen, u8 *buffer) { - struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); + struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); struct efx_nic *efx = mtd->priv; loff_t offset = start; loff_t end = min_t(loff_t, start + len, mtd->size); @@ -434,7 +461,7 @@ static int siena_mtd_read(struct mtd_info *mtd, loff_t start, while (offset < end) { chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX); - rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset, + rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset, buffer, chunk); if (rc) goto out; @@ -448,25 +475,25 @@ out: static int siena_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len) { - struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); + struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); struct efx_nic *efx = mtd->priv; loff_t offset = start & ~((loff_t)(mtd->erasesize - 1)); loff_t end = min_t(loff_t, start + len, mtd->size); - size_t chunk = part->mtd.erasesize; + size_t chunk = part->common.mtd.erasesize; int rc = 0; - if (!part->mcdi.updating) { - rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type); + if (!part->updating) { + rc = efx_mcdi_nvram_update_start(efx, part->nvram_type); if (rc) goto out; - part->mcdi.updating = true; + part->updating = true; } /* The MCDI interface can in fact do multiple erase blocks at once; * but erasing may be slow, so we make multiple calls here to avoid * tripping the MCDI RPC timeout. 
*/ while (offset < end) { - rc = efx_mcdi_nvram_erase(efx, part->mcdi.nvram_type, offset, + rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset, chunk); if (rc) goto out; @@ -479,23 +506,23 @@ out: static int siena_mtd_write(struct mtd_info *mtd, loff_t start, size_t len, size_t *retlen, const u8 *buffer) { - struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); + struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); struct efx_nic *efx = mtd->priv; loff_t offset = start; loff_t end = min_t(loff_t, start + len, mtd->size); size_t chunk; int rc = 0; - if (!part->mcdi.updating) { - rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type); + if (!part->updating) { + rc = efx_mcdi_nvram_update_start(efx, part->nvram_type); if (rc) goto out; - part->mcdi.updating = true; + part->updating = true; } while (offset < end) { chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX); - rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset, + rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset, buffer, chunk); if (rc) goto out; @@ -509,19 +536,20 @@ out: static int siena_mtd_sync(struct mtd_info *mtd) { - struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); + struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); struct efx_nic *efx = mtd->priv; int rc = 0; - if (part->mcdi.updating) { - part->mcdi.updating = false; - rc = efx_mcdi_nvram_update_finish(efx, part->mcdi.nvram_type); + if (part->updating) { + part->updating = false; + rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type); } return rc; } static const struct efx_mtd_ops siena_mtd_ops = { + .rename = siena_mtd_rename, .read = siena_mtd_read, .erase = siena_mtd_erase, .write = siena_mtd_write, @@ -550,7 +578,7 @@ static const struct siena_nvram_type_info siena_nvram_types[] = { }; static int siena_mtd_probe_partition(struct efx_nic *efx, - struct efx_mtd_partition *part, + struct efx_mcdi_mtd_partition *part, unsigned int type) { const struct siena_nvram_type_info *info; @@ -573,20 +601,20 @@ static int siena_mtd_probe_partition(struct efx_nic *efx, if (protected) return -ENODEV; /* hide it */ - part->mcdi.nvram_type = type; - part->dev_type_name = "Siena NVRAM manager"; - part->type_name = info->name; + part->nvram_type = type; + part->common.dev_type_name = "Siena NVRAM manager"; + part->common.type_name = info->name; - part->mtd.type = MTD_NORFLASH; - part->mtd.flags = MTD_CAP_NORFLASH; - part->mtd.size = size; - part->mtd.erasesize = erase_size; + part->common.mtd.type = MTD_NORFLASH; + part->common.mtd.flags = MTD_CAP_NORFLASH; + part->common.mtd.size = size; + part->common.mtd.erasesize = erase_size; return 0; } static int siena_mtd_get_fw_subtypes(struct efx_nic *efx, - struct efx_mtd_partition *parts, + struct efx_mcdi_mtd_partition *parts, size_t n_parts) { uint16_t fw_subtype_list[ @@ -599,15 +627,14 @@ static int siena_mtd_get_fw_subtypes(struct efx_nic *efx, return rc; for (i = 0; i < n_parts; i++) - parts[i].mcdi.fw_subtype = - fw_subtype_list[parts[i].mcdi.nvram_type]; + parts[i].fw_subtype = fw_subtype_list[parts[i].nvram_type]; return 0; } static int siena_mtd_probe(struct efx_nic *efx) { - struct efx_mtd_partition *parts; + struct efx_mcdi_mtd_partition *parts; u32 nvram_types; unsigned int type; size_t n_parts; @@ -645,7 +672,7 @@ static int siena_mtd_probe(struct efx_nic *efx) if (rc) goto fail; - rc = efx_mtd_add(efx, parts, n_parts); + rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); fail: if (rc) kfree(parts); -- cgit v0.10.2 From 
91a3fa39ddf2f85a15cb20ccc3a54c1f0497af1e Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Sat, 17 Aug 2013 00:15:49 +0200 Subject: rt2x00: rt2800: rename HW_BEACON_OFFSET macro The name of the HW_BEACON_OFFSET macro is a bit confusing. It returns with one of the HW_BEACON_BASE* values, so rename the macro to HW_BEACON_BASE to reflect that. The patch contains no functional changes. Signed-off-by: Gabor Juhos Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h index a313241..6e69b96 100644 --- a/drivers/net/wireless/rt2x00/rt2800.h +++ b/drivers/net/wireless/rt2x00/rt2800.h @@ -2019,7 +2019,7 @@ struct mac_iveiv_entry { #define HW_BEACON_BASE6 0x5dc0 #define HW_BEACON_BASE7 0x5bc0 -#define HW_BEACON_OFFSET(__index) \ +#define HW_BEACON_BASE(__index) \ (((__index) < 4) ? (HW_BEACON_BASE0 + (__index * 0x0200)) : \ (((__index) < 6) ? (HW_BEACON_BASE4 + ((__index - 4) * 0x0200)) : \ (HW_BEACON_BASE6 - ((__index - 6) * 0x0200)))) diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 313da6a..704fcfb 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -992,7 +992,7 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc) return; } - beacon_base = HW_BEACON_OFFSET(entry->entry_idx); + beacon_base = HW_BEACON_BASE(entry->entry_idx); rt2800_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data, entry->skb->len + padding_len); @@ -1042,7 +1042,7 @@ void rt2800_clear_beacon(struct queue_entry *entry) * Clear beacon. */ rt2800_clear_beacon_register(rt2x00dev, - HW_BEACON_OFFSET(entry->entry_idx)); + HW_BEACON_BASE(entry->entry_idx)); /* * Enabled beaconing again. -- cgit v0.10.2 From 77f7c0f3b8f2d464e841c5c35f3da8b4999a885c Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Sat, 17 Aug 2013 00:15:50 +0200 Subject: rt2x00: rt2800lib: pass beacon index to rt2800_clear_beacon_register Instead of precomputing the beacon base in each caller, pass the beacon index to the 'rt2800_clear_beacon_register' function and compute the beacon base in there. This allows to simplify the caller functions a bit. Signed-off-by: Gabor Juhos Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 704fcfb..2f64034 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -1011,10 +1011,13 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc) EXPORT_SYMBOL_GPL(rt2800_write_beacon); static inline void rt2800_clear_beacon_register(struct rt2x00_dev *rt2x00dev, - unsigned int beacon_base) + unsigned int index) { int i; const int txwi_desc_size = rt2x00dev->bcn->winfo_size; + unsigned int beacon_base; + + beacon_base = HW_BEACON_BASE(index); /* * For the Beacon base registers we only need to clear @@ -1041,8 +1044,7 @@ void rt2800_clear_beacon(struct queue_entry *entry) /* * Clear beacon. */ - rt2800_clear_beacon_register(rt2x00dev, - HW_BEACON_BASE(entry->entry_idx)); + rt2800_clear_beacon_register(rt2x00dev, entry->entry_idx); /* * Enabled beaconing again. 
@@ -4803,14 +4805,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) /* * Clear all beacons */ - rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE0); - rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE1); - rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE2); - rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE3); - rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE4); - rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE5); - rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE6); - rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE7); + for (i = 0; i < 8; i++) + rt2800_clear_beacon_register(rt2x00dev, i); if (rt2x00_is_usb(rt2x00dev)) { rt2800_register_read(rt2x00dev, US_CYC_CNT, &reg); -- cgit v0.10.2 From c1fada4e5e53d88a8edd3ff01cee9d316cbf6025 Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Sat, 17 Aug 2013 14:09:28 +0200 Subject: rt2x00: rt2800lib: fix frequency offset boundary calculation The current code in the 'rt2800_adjust_freq_offset' function limits the device specific frequency offset value to FREQ_BOUND but ignores the fact that the uppermost bit is not part of the frequency offset value. As a result, the driver always uses the FREQ_BOUND value if the uppermost bit is set. Update the code to use the correct source value for calculating the boundary. Based on the DPO_RT5572_LinuxSTA_2.6.0.1_20120629 driver. Reference: RTMPAdjustFrequencyOffset function in common/rt_rf.c Signed-off-by: Gabor Juhos Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 2f64034..de27277 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -2496,13 +2496,14 @@ static void rt2800_config_channel_rf3053(struct rt2x00_dev *rt2x00dev, static void rt2800_adjust_freq_offset(struct rt2x00_dev *rt2x00dev) { + u8 freq_offset; u8 rfcsr; + freq_offset = rt2x00_get_field8(rt2x00dev->freq_offset, RFCSR17_CODE); + freq_offset = min_t(u8, freq_offset, FREQ_OFFSET_BOUND); + rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr); - if (rt2x00dev->freq_offset > FREQ_OFFSET_BOUND) - rt2x00_set_field8(&rfcsr, RFCSR17_CODE, FREQ_OFFSET_BOUND); - else - rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset); + rt2x00_set_field8(&rfcsr, RFCSR17_CODE, freq_offset); rt2800_rfcsr_write(rt2x00dev, 17, rfcsr); } -- cgit v0.10.2 From 6af1bdccabe956a08a37f2ae049d37307ec0c91c Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Sat, 17 Aug 2013 14:09:29 +0200 Subject: rt2x00: rt2800lib: optimize frequency offset adjustment Don't write the new value into the register if it is the same as the old value to avoid unnecessary USB bus traffic with USB devices. The change also saves a few cycles on MMIO based devices. Based on the DPO_RT5572_LinuxSTA_2.6.0.1_20120629 driver. Reference: RTMPAdjustFrequencyOffset function in common/rt_rf.c Signed-off-by: Gabor Juhos Signed-off-by: John W.
Linville diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index de27277..3407ac9 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -2497,13 +2497,18 @@ static void rt2800_adjust_freq_offset(struct rt2x00_dev *rt2x00dev) { u8 freq_offset; - u8 rfcsr; + u8 rfcsr, prev_rfcsr; freq_offset = rt2x00_get_field8(rt2x00dev->freq_offset, RFCSR17_CODE); freq_offset = min_t(u8, freq_offset, FREQ_OFFSET_BOUND); rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr); + prev_rfcsr = rfcsr; + rt2x00_set_field8(&rfcsr, RFCSR17_CODE, freq_offset); + if (rfcsr == prev_rfcsr) + return; + rt2800_rfcsr_write(rt2x00dev, 17, rfcsr); } -- cgit v0.10.2 From 76773f301f2210dcc20c466aebda7118062673eb Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Sat, 17 Aug 2013 14:09:30 +0200 Subject: rt2x00: rt2800lib: use an MCU command for frequency adjustment on USB devices According to the Ralink driver, there is an MCU command which can be used to send the frequency offset value directly to the USB device without going through the RFCSR writing sequence. Based on the DPO_RT5572_LinuxSTA_2.6.0.1_20120629 driver. Reference: RTMPAdjustFrequencyOffset function in common/rt_rf.c Signed-off-by: Gabor Juhos Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h index 6e69b96..e25e5bf 100644 --- a/drivers/net/wireless/rt2x00/rt2800.h +++ b/drivers/net/wireless/rt2x00/rt2800.h @@ -2794,6 +2794,7 @@ enum rt2800_eeprom_word { #define MCU_RADAR 0x60 #define MCU_BOOT_SIGNAL 0x72 #define MCU_ANT_SELECT 0X73 +#define MCU_FREQ_OFFSET 0x74 #define MCU_BBP_SIGNAL 0x80 #define MCU_POWER_SAVE 0x83 #define MCU_BAND_SELECT 0x91 diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 3407ac9..bebc56f 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -2509,7 +2509,11 @@ static void rt2800_adjust_freq_offset(struct rt2x00_dev *rt2x00dev) if (rfcsr == prev_rfcsr) return; - rt2800_rfcsr_write(rt2x00dev, 17, rfcsr); + if (rt2x00_is_usb(rt2x00dev)) + rt2800_mcu_request(rt2x00dev, MCU_FREQ_OFFSET, 0xff, + freq_offset, prev_rfcsr); + else + rt2800_rfcsr_write(rt2x00dev, 17, rfcsr); } static void rt2800_config_channel_rf3290(struct rt2x00_dev *rt2x00dev, -- cgit v0.10.2 From 8d38eca8e089179b6858ca5f3ea03f571a5892a5 Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Sat, 17 Aug 2013 14:09:31 +0200 Subject: rt2x00: rt2800lib: use step-by-step frequency offset adjustment on MMIO devices According to the DPO_RT5572_LinuxSTA_2.6.0.1_20120629 driver, the RFCSR17 register can't be programmed in one step on devices which are using the frequency offset adjustment code. Update the code to use step-by-step adjustment. Reference: RT30xxWriteRFRegister function in common/rt_rf.c Signed-off-by: Gabor Juhos Signed-off-by: John W.
Linville diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index bebc56f..623ad9d 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -2496,7 +2496,7 @@ static void rt2800_config_channel_rf3053(struct rt2x00_dev *rt2x00dev, static void rt2800_adjust_freq_offset(struct rt2x00_dev *rt2x00dev) { - u8 freq_offset; + u8 freq_offset, prev_freq_offset; u8 rfcsr, prev_rfcsr; freq_offset = rt2x00_get_field8(rt2x00dev->freq_offset, RFCSR17_CODE); @@ -2509,11 +2509,24 @@ static void rt2800_adjust_freq_offset(struct rt2x00_dev *rt2x00dev) if (rfcsr == prev_rfcsr) return; - if (rt2x00_is_usb(rt2x00dev)) + if (rt2x00_is_usb(rt2x00dev)) { rt2800_mcu_request(rt2x00dev, MCU_FREQ_OFFSET, 0xff, freq_offset, prev_rfcsr); - else + return; + } + + prev_freq_offset = rt2x00_get_field8(prev_rfcsr, RFCSR17_CODE); + while (prev_freq_offset != freq_offset) { + if (prev_freq_offset < freq_offset) + prev_freq_offset++; + else + prev_freq_offset--; + + rt2x00_set_field8(&rfcsr, RFCSR17_CODE, prev_freq_offset); rt2800_rfcsr_write(rt2x00dev, 17, rfcsr); + + usleep_range(1000, 1500); + } } static void rt2800_config_channel_rf3290(struct rt2x00_dev *rt2x00dev, -- cgit v0.10.2 From 3f1b8739a498c7570ca2fae6c49fd1561ef2358c Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Sat, 17 Aug 2013 14:09:32 +0200 Subject: rt2x00: rt2800lib: move rt2800_adjust_freq_offset function Move the rt2800_adjust_freq_offset function before the channel configuration functions to make it usable from those without a forward declaration. The patch contains no functional changes. Signed-off-by: Gabor Juhos Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 623ad9d..1b81fc9 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -1875,6 +1875,43 @@ static void rt2800_config_lna_gain(struct rt2x00_dev *rt2x00dev, rt2x00dev->lna_gain = lna_gain; } +#define FREQ_OFFSET_BOUND 0x5f + +static void rt2800_adjust_freq_offset(struct rt2x00_dev *rt2x00dev) +{ + u8 freq_offset, prev_freq_offset; + u8 rfcsr, prev_rfcsr; + + freq_offset = rt2x00_get_field8(rt2x00dev->freq_offset, RFCSR17_CODE); + freq_offset = min_t(u8, freq_offset, FREQ_OFFSET_BOUND); + + rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr); + prev_rfcsr = rfcsr; + + rt2x00_set_field8(&rfcsr, RFCSR17_CODE, freq_offset); + if (rfcsr == prev_rfcsr) + return; + + if (rt2x00_is_usb(rt2x00dev)) { + rt2800_mcu_request(rt2x00dev, MCU_FREQ_OFFSET, 0xff, + freq_offset, prev_rfcsr); + return; + } + + prev_freq_offset = rt2x00_get_field8(prev_rfcsr, RFCSR17_CODE); + while (prev_freq_offset != freq_offset) { + if (prev_freq_offset < freq_offset) + prev_freq_offset++; + else + prev_freq_offset--; + + rt2x00_set_field8(&rfcsr, RFCSR17_CODE, prev_freq_offset); + rt2800_rfcsr_write(rt2x00dev, 17, rfcsr); + + usleep_range(1000, 1500); + } +} + static void rt2800_config_channel_rf2xxx(struct rt2x00_dev *rt2x00dev, struct ieee80211_conf *conf, struct rf_channel *rf, @@ -2492,42 +2529,6 @@ static void rt2800_config_channel_rf3053(struct rt2x00_dev *rt2x00dev, #define POWER_BOUND 0x27 #define POWER_BOUND_5G 0x2b -#define FREQ_OFFSET_BOUND 0x5f - -static void rt2800_adjust_freq_offset(struct rt2x00_dev *rt2x00dev) -{ - u8 freq_offset, prev_freq_offset; - u8 rfcsr, prev_rfcsr; - - freq_offset = rt2x00_get_field8(rt2x00dev->freq_offset, RFCSR17_CODE); - freq_offset = min_t(u8, freq_offset, FREQ_OFFSET_BOUND); - - 
rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr); - prev_rfcsr = rfcsr; - - rt2x00_set_field8(&rfcsr, RFCSR17_CODE, freq_offset); - if (rfcsr == prev_rfcsr) - return; - - if (rt2x00_is_usb(rt2x00dev)) { - rt2800_mcu_request(rt2x00dev, MCU_FREQ_OFFSET, 0xff, - freq_offset, prev_rfcsr); - return; - } - - prev_freq_offset = rt2x00_get_field8(prev_rfcsr, RFCSR17_CODE); - while (prev_freq_offset != freq_offset) { - if (prev_freq_offset < freq_offset) - prev_freq_offset++; - else - prev_freq_offset--; - - rt2x00_set_field8(&rfcsr, RFCSR17_CODE, prev_freq_offset); - rt2800_rfcsr_write(rt2x00dev, 17, rfcsr); - - usleep_range(1000, 1500); - } -} static void rt2800_config_channel_rf3290(struct rt2x00_dev *rt2x00dev, struct ieee80211_conf *conf, -- cgit v0.10.2 From e979a8ab204edbf7b0815162109ee9c85e4d7ea5 Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Sat, 17 Aug 2013 14:09:33 +0200 Subject: rt2x00: rt2800lib: adjust frequency offset for RF3053 Along with other chipsets, the Ralink driver uses the frequency adjustment code for RF3053 as well. Remove the bogus place-holder comment from the RF3053 specific channel configuration function and call the frequency adjustment function instead Based on the DPO_RT5572_LinuxSTA_2.6.0.1_20120629 driver. Reference: RT3593_ChipSwitchChannel function in chips/rt3593.c Signed-off-by: Gabor Juhos Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 1b81fc9..aa6b6b0 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -2360,7 +2360,7 @@ static void rt2800_config_channel_rf3053(struct rt2x00_dev *rt2x00dev, } rt2800_rfcsr_write(rt2x00dev, 1, rfcsr); - /* TODO: frequency calibration? */ + rt2800_adjust_freq_offset(rt2x00dev); if (conf_is_ht40(conf)) { txrx_agc_fc = rt2x00_get_field8(drv_data->calibration_bw40, -- cgit v0.10.2 From fb5a2dcbbcf19f8ff7e5312b2340460bc03a4b89 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Mon, 19 Aug 2013 11:03:43 +0530 Subject: ath9k: Add support for AR9485 1.2 Signed-off-by: Sujith Manoharan Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c index d402cb3..738aa7e 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c @@ -153,7 +153,7 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah) if (!ah->is_clk_25mhz) INIT_INI_ARRAY(&ah->iniAdditional, ar9340_1p0_radio_core_40M); - } else if (AR_SREV_9485_11(ah)) { + } else if (AR_SREV_9485_11_OR_LATER(ah)) { /* mac */ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], ar9485_1_1_mac_core); @@ -424,7 +424,7 @@ static void ar9003_tx_gain_table_mode0(struct ath_hw *ah) else if (AR_SREV_9340(ah)) INIT_INI_ARRAY(&ah->iniModesTxGain, ar9340Modes_lowest_ob_db_tx_gain_table_1p0); - else if (AR_SREV_9485_11(ah)) + else if (AR_SREV_9485_11_OR_LATER(ah)) INIT_INI_ARRAY(&ah->iniModesTxGain, ar9485_modes_lowest_ob_db_tx_gain_1_1); else if (AR_SREV_9550(ah)) @@ -458,7 +458,7 @@ static void ar9003_tx_gain_table_mode1(struct ath_hw *ah) else if (AR_SREV_9340(ah)) INIT_INI_ARRAY(&ah->iniModesTxGain, ar9340Modes_high_ob_db_tx_gain_table_1p0); - else if (AR_SREV_9485_11(ah)) + else if (AR_SREV_9485_11_OR_LATER(ah)) INIT_INI_ARRAY(&ah->iniModesTxGain, ar9485Modes_high_ob_db_tx_gain_1_1); else if (AR_SREV_9580(ah)) @@ -492,7 +492,7 @@ static void ar9003_tx_gain_table_mode2(struct ath_hw *ah) else if (AR_SREV_9340(ah)) INIT_INI_ARRAY(&ah->iniModesTxGain, ar9340Modes_low_ob_db_tx_gain_table_1p0); - else if (AR_SREV_9485_11(ah)) + else if (AR_SREV_9485_11_OR_LATER(ah)) INIT_INI_ARRAY(&ah->iniModesTxGain, ar9485Modes_low_ob_db_tx_gain_1_1); else if (AR_SREV_9580(ah)) @@ -517,7 +517,7 @@ static void ar9003_tx_gain_table_mode3(struct ath_hw *ah) else if (AR_SREV_9340(ah)) INIT_INI_ARRAY(&ah->iniModesTxGain, ar9340Modes_high_power_tx_gain_table_1p0); - else if (AR_SREV_9485_11(ah)) + else if (AR_SREV_9485_11_OR_LATER(ah)) INIT_INI_ARRAY(&ah->iniModesTxGain, ar9485Modes_high_power_tx_gain_1_1); else if (AR_SREV_9580(ah)) @@ -552,7 +552,7 @@ static void ar9003_tx_gain_table_mode4(struct ath_hw *ah) static void ar9003_tx_gain_table_mode5(struct ath_hw *ah) { - if (AR_SREV_9485_11(ah)) + if (AR_SREV_9485_11_OR_LATER(ah)) INIT_INI_ARRAY(&ah->iniModesTxGain, ar9485Modes_green_ob_db_tx_gain_1_1); else if (AR_SREV_9340(ah)) @@ -571,7 +571,7 @@ static void ar9003_tx_gain_table_mode6(struct ath_hw *ah) if (AR_SREV_9340(ah)) INIT_INI_ARRAY(&ah->iniModesTxGain, ar9340Modes_low_ob_db_and_spur_tx_gain_table_1p0); - else if (AR_SREV_9485_11(ah)) + else if (AR_SREV_9485_11_OR_LATER(ah)) INIT_INI_ARRAY(&ah->iniModesTxGain, ar9485Modes_green_spur_ob_db_tx_gain_1_1); else if (AR_SREV_9580(ah)) @@ -611,7 +611,7 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah) else if (AR_SREV_9340(ah)) INIT_INI_ARRAY(&ah->iniModesRxGain, ar9340Common_rx_gain_table_1p0); - else if (AR_SREV_9485_11(ah)) + else if (AR_SREV_9485_11_OR_LATER(ah)) INIT_INI_ARRAY(&ah->iniModesRxGain, ar9485_common_rx_gain_1_1); else if (AR_SREV_9550(ah)) { @@ -644,7 +644,7 @@ static void ar9003_rx_gain_table_mode1(struct ath_hw *ah) else if (AR_SREV_9340(ah)) INIT_INI_ARRAY(&ah->iniModesRxGain, ar9340Common_wo_xlna_rx_gain_table_1p0); - else if (AR_SREV_9485_11(ah)) + else if (AR_SREV_9485_11_OR_LATER(ah)) INIT_INI_ARRAY(&ah->iniModesRxGain, ar9485Common_wo_xlna_rx_gain_1_1); else if (AR_SREV_9462_21(ah)) diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 18a5aa4..46b910a 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ 
b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -1449,7 +1449,7 @@ static void ar9003_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable) regval |= (ant_div_ctl1 & 0x3f) << AR_ANT_DIV_CTRL_ALL_S; REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval); - if (AR_SREV_9485_11(ah)) { + if (AR_SREV_9485_11_OR_LATER(ah)) { /* * Enable LNA diversity. */ diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h index 5af9744..a13b2d1 100644 --- a/drivers/net/wireless/ath/ath9k/reg.h +++ b/drivers/net/wireless/ath/ath9k/reg.h @@ -893,9 +893,9 @@ #define AR_SREV_9485(_ah) \ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9485)) -#define AR_SREV_9485_11(_ah) \ - (AR_SREV_9485(_ah) && \ - ((_ah)->hw_version.macRev == AR_SREV_REVISION_9485_11)) +#define AR_SREV_9485_11_OR_LATER(_ah) \ + (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9485) && \ + ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9485_11)) #define AR_SREV_9485_OR_LATER(_ah) \ (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9485)) -- cgit v0.10.2 From e083a42ef616b6987c024cccfec72cec75a1f1f5 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Mon, 19 Aug 2013 11:04:01 +0530 Subject: ath9k: Add antenna diversity tweak for CUS198 This improves RX diversity and performance for AR9485. Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index abdc7ee..a6846ab 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -3825,6 +3825,11 @@ static void ar9003_hw_atten_apply(struct ath_hw *ah, struct ath9k_channel *chan) else value = ar9003_hw_atten_chain_get_margin(ah, i, chan); + if (ah->config.alt_mingainidx) + REG_RMW_FIELD(ah, AR_PHY_EXT_ATTEN_CTL_0, + AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN, + value); + REG_RMW_FIELD(ah, ext_atten_reg[i], AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN, value); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h index 23c019d..6fd7523 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h @@ -148,6 +148,8 @@ #define AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28 #define AR_PHY_EXT_CCA_THRESH62 0x007F0000 #define AR_PHY_EXT_CCA_THRESH62_S 16 +#define AR_PHY_EXTCHN_PWRTHR1_ANT_DIV_ALT_ANT_MINGAINIDX 0x0000FF00 +#define AR_PHY_EXTCHN_PWRTHR1_ANT_DIV_ALT_ANT_MINGAINIDX_S 8 #define AR_PHY_EXT_MINCCA_PWR 0x01FF0000 #define AR_PHY_EXT_MINCCA_PWR_S 16 #define AR_PHY_EXT_CYCPWR_THR1 0x0000FE00L diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index 64ff8e6..fa543a6 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -314,6 +314,7 @@ struct ath9k_ops_config { u32 xlna_gpio; u32 ant_ctrl_comm2g_switch_enable; bool xatten_margin_cfg; + bool alt_mingainidx; }; enum ath9k_int { diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index abf1eb5..19b46c7 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -534,6 +534,7 @@ static void ath9k_init_platform(struct ath_softc *sc) ATH9K_PCI_CUS230)) { ah->config.xlna_gpio = 9; ah->config.xatten_margin_cfg = true; + ah->config.alt_mingainidx = true; ah->config.ant_ctrl_comm2g_switch_enable = 0x000BBB88; sc->ant_comb.low_rssi_thresh = 20; sc->ant_comb.fast_div_bias = 3; -- cgit v0.10.2 From 1211c961170cedb21c30d5bb7e2033c8720b38db Mon Sep 17 
00:00:00 2001 From: Bing Zhao Date: Mon, 19 Aug 2013 16:10:21 -0700 Subject: mwifiex: do not create AP and P2P interfaces upon driver loading Bug 60747 - 1286:2044 [Microsoft Surface Pro] Marvell 88W8797 wifi show 3 interface under network https://bugzilla.kernel.org/show_bug.cgi?id=60747 This issue was also reported previously by OLPC and some folks from the community. There are 3 network interfaces with different types being created when the mwifiex driver is loaded: 1. mlan0 (infra. STA) 2. uap0 (AP) 3. p2p0 (P2P_CLIENT) The Network Manager attempts to use all 3 interfaces above without filtering the managed interface type. As a result, 3 identical interfaces are displayed under Network Manager. If a user happens to click on an entry whose underlying interface is uap0 or p2p0, the association will fail. Work around it by removing the creation of AP and P2P interfaces at driver loading time. These interfaces can be added with 'iw' or other applications manually when they are needed. Signed-off-by: Bing Zhao Signed-off-by: Avinash Patil Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index 3402bff..fd77833 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c @@ -477,20 +477,6 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) dev_err(adapter->dev, "cannot create default STA interface\n"); goto err_add_intf; } - - /* Create AP interface by default */ - if (!mwifiex_add_virtual_intf(adapter->wiphy, "uap%d", - NL80211_IFTYPE_AP, NULL, NULL)) { - dev_err(adapter->dev, "cannot create default AP interface\n"); - goto err_add_intf; - } - - /* Create P2P interface by default */ - if (!mwifiex_add_virtual_intf(adapter->wiphy, "p2p%d", - NL80211_IFTYPE_P2P_CLIENT, NULL, NULL)) { - dev_err(adapter->dev, "cannot create default P2P interface\n"); - goto err_add_intf; - } rtnl_unlock(); mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1); -- cgit v0.10.2 From f4d907046c3918005a1e4df0990461183c06e1bc Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Tue, 20 Aug 2013 13:05:58 +0530 Subject: ath9k: Add one more PCI ID for CUS198 This is an AR9485/WB225 based card. Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index 76e8c35..3280798 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -59,6 +59,11 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = { PCI_VENDOR_ID_AZWAVE, 0x2126), .driver_data = ATH9K_PCI_CUS198 | ATH9K_PCI_BT_ANT_DIV }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_AZWAVE, + 0x126A), + .driver_data = ATH9K_PCI_CUS198 | ATH9K_PCI_BT_ANT_DIV }, /* PCI-E CUS230 */ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, -- cgit v0.10.2 From 02ab8567ef14001d0b2e655bb726f52083bdd101 Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Tue, 20 Aug 2013 16:00:35 +0200 Subject: brcmsmac: cosmetic change in phy_lcn.c Clean up some code fragments, reducing indentation and uncluttering some lines. Apart from whitespace there are no actual code changes made. Cc: Jonas Gorski Tested-by: David Herrmann Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Arend van Spriel Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c index 3d6b16c..e646ba0 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c @@ -1137,8 +1137,9 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi, gain0_15 = ((biq1 & 0xf) << 12) | ((tia & 0xf) << 8) | ((lna2 & 0x3) << 6) | - ((lna2 & - 0x3) << 4) | ((lna1 & 0x3) << 2) | ((lna1 & 0x3) << 0); + ((lna2 & 0x3) << 4) | + ((lna1 & 0x3) << 2) | + ((lna1 & 0x3) << 0); mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0); mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0); @@ -1368,126 +1369,124 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi, goto cal_done; } - if (module == 1) { + WARN_ON(module != 1); + tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi); + wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF); - tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi); - wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF); + for (i = 0; i < 11; i++) + values_to_save[i] = + read_radio_reg(pi, rxiq_cal_rf_reg[i]); + Core1TxControl_old = read_phy_reg(pi, 0x631); + + or_phy_reg(pi, 0x631, 0x0015); + + RFOverride0_old = read_phy_reg(pi, 0x44c); + RFOverrideVal0_old = read_phy_reg(pi, 0x44d); + rfoverride2_old = read_phy_reg(pi, 0x4b0); + rfoverride2val_old = read_phy_reg(pi, 0x4b1); + rfoverride3_old = read_phy_reg(pi, 0x4f9); + rfoverride3val_old = read_phy_reg(pi, 0x4fa); + rfoverride4_old = read_phy_reg(pi, 0x938); + rfoverride4val_old = read_phy_reg(pi, 0x939); + afectrlovr_old = read_phy_reg(pi, 0x43b); + afectrlovrval_old = read_phy_reg(pi, 0x43c); + old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da); + old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db); - for (i = 0; i < 11; i++) - values_to_save[i] = - read_radio_reg(pi, rxiq_cal_rf_reg[i]); - Core1TxControl_old = read_phy_reg(pi, 0x631); - - or_phy_reg(pi, 0x631, 0x0015); - - RFOverride0_old = read_phy_reg(pi, 0x44c); - RFOverrideVal0_old = read_phy_reg(pi, 0x44d); - rfoverride2_old = read_phy_reg(pi, 0x4b0); - rfoverride2val_old = read_phy_reg(pi, 0x4b1); - rfoverride3_old = read_phy_reg(pi, 0x4f9); - rfoverride3val_old = read_phy_reg(pi, 0x4fa); - rfoverride4_old = read_phy_reg(pi, 0x938); - rfoverride4val_old = read_phy_reg(pi, 0x939); - afectrlovr_old = read_phy_reg(pi, 0x43b); - afectrlovrval_old = read_phy_reg(pi, 0x43c); - old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da); - old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db); - - tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi); - if (tx_gain_override_old) { - wlc_lcnphy_get_tx_gain(pi, &old_gains); - tx_gain_index_old = pi_lcn->lcnphy_current_index; - } + tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi); + if (tx_gain_override_old) { + wlc_lcnphy_get_tx_gain(pi, &old_gains); + tx_gain_index_old = pi_lcn->lcnphy_current_index; + } - wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx); + wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx); - mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0); - mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0); + mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0); + mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0); - mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1); - mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1); + mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1); + mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1); - write_radio_reg(pi, RADIO_2064_REG116, 0x06); - write_radio_reg(pi, RADIO_2064_REG12C, 0x07); - write_radio_reg(pi, RADIO_2064_REG06A, 0xd3); - write_radio_reg(pi, 
RADIO_2064_REG098, 0x03); - write_radio_reg(pi, RADIO_2064_REG00B, 0x7); - mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4); - write_radio_reg(pi, RADIO_2064_REG01D, 0x01); - write_radio_reg(pi, RADIO_2064_REG114, 0x01); - write_radio_reg(pi, RADIO_2064_REG02E, 0x10); - write_radio_reg(pi, RADIO_2064_REG12A, 0x08); - - mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0); - mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0); - mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1); - mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1); - mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2); - mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2); - mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3); - mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3); - mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5); - mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5); - - mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0); - mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0); - - wlc_lcnphy_start_tx_tone(pi, 2000, 120, 0); - write_phy_reg(pi, 0x6da, 0xffff); - or_phy_reg(pi, 0x6db, 0x3); - wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch); - wlc_lcnphy_rx_gain_override_enable(pi, true); - - tia_gain = 8; - rx_pwr_threshold = 950; - while (tia_gain > 0) { - tia_gain -= 1; - wlc_lcnphy_set_rx_gain_by_distribution(pi, - 0, 0, 2, 2, - (u16) - tia_gain, 1, 0); - udelay(500); + write_radio_reg(pi, RADIO_2064_REG116, 0x06); + write_radio_reg(pi, RADIO_2064_REG12C, 0x07); + write_radio_reg(pi, RADIO_2064_REG06A, 0xd3); + write_radio_reg(pi, RADIO_2064_REG098, 0x03); + write_radio_reg(pi, RADIO_2064_REG00B, 0x7); + mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4); + write_radio_reg(pi, RADIO_2064_REG01D, 0x01); + write_radio_reg(pi, RADIO_2064_REG114, 0x01); + write_radio_reg(pi, RADIO_2064_REG02E, 0x10); + write_radio_reg(pi, RADIO_2064_REG12A, 0x08); + + mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0); + mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0); + mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1); + mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1); + mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2); + mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2); + mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3); + mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3); + mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5); + mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5); - received_power = - wlc_lcnphy_measure_digital_power(pi, 2000); - if (received_power < rx_pwr_threshold) - break; - } - result = wlc_lcnphy_calc_rx_iq_comp(pi, 0xffff); + mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0); + mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0); - wlc_lcnphy_stop_tx_tone(pi); + wlc_lcnphy_start_tx_tone(pi, 2000, 120, 0); + write_phy_reg(pi, 0x6da, 0xffff); + or_phy_reg(pi, 0x6db, 0x3); + wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch); + wlc_lcnphy_rx_gain_override_enable(pi, true); - write_phy_reg(pi, 0x631, Core1TxControl_old); + tia_gain = 8; + rx_pwr_threshold = 950; + while (tia_gain > 0) { + tia_gain -= 1; + wlc_lcnphy_set_rx_gain_by_distribution(pi, + 0, 0, 2, 2, + (u16) + tia_gain, 1, 0); + udelay(500); - write_phy_reg(pi, 0x44c, RFOverrideVal0_old); - write_phy_reg(pi, 0x44d, RFOverrideVal0_old); - write_phy_reg(pi, 0x4b0, rfoverride2_old); - write_phy_reg(pi, 0x4b1, rfoverride2val_old); - write_phy_reg(pi, 0x4f9, rfoverride3_old); - write_phy_reg(pi, 0x4fa, rfoverride3val_old); - write_phy_reg(pi, 0x938, rfoverride4_old); - write_phy_reg(pi, 0x939, rfoverride4val_old); - write_phy_reg(pi, 0x43b, afectrlovr_old); - write_phy_reg(pi, 0x43c, afectrlovrval_old); - write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl); - write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl); + 
received_power = + wlc_lcnphy_measure_digital_power(pi, 2000); + if (received_power < rx_pwr_threshold) + break; + } + result = wlc_lcnphy_calc_rx_iq_comp(pi, 0xffff); - wlc_lcnphy_clear_trsw_override(pi); + wlc_lcnphy_stop_tx_tone(pi); - mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2); + write_phy_reg(pi, 0x631, Core1TxControl_old); + + write_phy_reg(pi, 0x44c, RFOverrideVal0_old); + write_phy_reg(pi, 0x44d, RFOverrideVal0_old); + write_phy_reg(pi, 0x4b0, rfoverride2_old); + write_phy_reg(pi, 0x4b1, rfoverride2val_old); + write_phy_reg(pi, 0x4f9, rfoverride3_old); + write_phy_reg(pi, 0x4fa, rfoverride3val_old); + write_phy_reg(pi, 0x938, rfoverride4_old); + write_phy_reg(pi, 0x939, rfoverride4val_old); + write_phy_reg(pi, 0x43b, afectrlovr_old); + write_phy_reg(pi, 0x43c, afectrlovrval_old); + write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl); + write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl); - for (i = 0; i < 11; i++) - write_radio_reg(pi, rxiq_cal_rf_reg[i], - values_to_save[i]); + wlc_lcnphy_clear_trsw_override(pi); - if (tx_gain_override_old) - wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old); - else - wlc_lcnphy_disable_tx_gain_override(pi); + mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2); - wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl); - wlc_lcnphy_rx_gain_override_enable(pi, false); - } + for (i = 0; i < 11; i++) + write_radio_reg(pi, rxiq_cal_rf_reg[i], + values_to_save[i]); + + if (tx_gain_override_old) + wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old); + else + wlc_lcnphy_disable_tx_gain_override(pi); + + wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl); + wlc_lcnphy_rx_gain_override_enable(pi, false); cal_done: kfree(ptr); -- cgit v0.10.2 From acf97e9b5a5246da1193cb564377a4e7793eb29e Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Tue, 20 Aug 2013 16:00:36 +0200 Subject: brcmsmac: change pa_gain for bcm4313 iPA The function wlc_lcnphy_load_tx_gain_table() has a target PA gain specified for the iPA variant of the bcm4313. This gain value is reduced to avoid PA distortion. The if-statement is removed because it was rather redundant in the first place. Please note that this patch does not provide full iPA support. Cc: Jonas Gorski Tested-by: David Herrmann Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Hante Meuleman Signed-off-by: Arend van Spriel Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c index e646ba0..8dc5d0f 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c @@ -4282,13 +4282,10 @@ wlc_lcnphy_load_tx_gain_table(struct brcms_phy *pi, u16 pa_gain; u16 gm_gain; - if (CHSPEC_IS5G(pi->radio_chanspec)) - pa_gain = 0x70; - else - pa_gain = 0x70; - if (pi->sh->boardflags & BFL_FEM) pa_gain = 0x10; + else + pa_gain = 0x60; tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL; tab.tbl_width = 32; tab.tbl_len = 1; -- cgit v0.10.2 From d6b81daaa2afaa17d39b34e36ca17bc6062b4bc5 Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Tue, 20 Aug 2013 16:00:37 +0200 Subject: brcmsmac: use ARRAY_SIZE in phytbl_lcn.c This patch converts all sizeof(x)/sizeof(x[0]) instances to ARRAY_SIZE macro in phytbl_lcn.c. The patch was made using spatch with ARRAY_SIZE.cocci (see [1]). [1] https://github.com/coccinelle/coccinelle/tree/master/demos/janitorings Cc: Jonas Gorski Tested-by: David Herrmann Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Arend van Spriel Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c index 622c01c..deae3a7 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c @@ -1507,117 +1507,103 @@ static const u32 dot11lcn_gain_tbl_5G[] = { const struct phytbl_info dot11lcnphytbl_rx_gain_info_rev0[] = { {&dot11lcn_gain_tbl_rev0, - sizeof(dot11lcn_gain_tbl_rev0) / sizeof(dot11lcn_gain_tbl_rev0[0]), 18, + ARRAY_SIZE(dot11lcn_gain_tbl_rev0), 18, 0, 32} , {&dot11lcn_aux_gain_idx_tbl_rev0, - sizeof(dot11lcn_aux_gain_idx_tbl_rev0) / - sizeof(dot11lcn_aux_gain_idx_tbl_rev0[0]), 14, 0, 16} + ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_rev0), 14, 0, 16} , {&dot11lcn_gain_idx_tbl_rev0, - sizeof(dot11lcn_gain_idx_tbl_rev0) / - sizeof(dot11lcn_gain_idx_tbl_rev0[0]), 13, 0, 32} + ARRAY_SIZE(dot11lcn_gain_idx_tbl_rev0), 13, 0, 32} , }; static const struct phytbl_info dot11lcnphytbl_rx_gain_info_rev1[] = { {&dot11lcn_gain_tbl_rev1, - sizeof(dot11lcn_gain_tbl_rev1) / sizeof(dot11lcn_gain_tbl_rev1[0]), 18, + ARRAY_SIZE(dot11lcn_gain_tbl_rev1), 18, 0, 32} , {&dot11lcn_aux_gain_idx_tbl_rev0, - sizeof(dot11lcn_aux_gain_idx_tbl_rev0) / - sizeof(dot11lcn_aux_gain_idx_tbl_rev0[0]), 14, 0, 16} + ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_rev0), 14, 0, 16} , {&dot11lcn_gain_idx_tbl_rev0, - sizeof(dot11lcn_gain_idx_tbl_rev0) / - sizeof(dot11lcn_gain_idx_tbl_rev0[0]), 13, 0, 32} + ARRAY_SIZE(dot11lcn_gain_idx_tbl_rev0), 13, 0, 32} , }; const struct phytbl_info dot11lcnphytbl_rx_gain_info_2G_rev2[] = { {&dot11lcn_gain_tbl_2G, - sizeof(dot11lcn_gain_tbl_2G) / sizeof(dot11lcn_gain_tbl_2G[0]), 18, 0, + ARRAY_SIZE(dot11lcn_gain_tbl_2G), 18, 0, 32} , {&dot11lcn_aux_gain_idx_tbl_2G, - sizeof(dot11lcn_aux_gain_idx_tbl_2G) / - sizeof(dot11lcn_aux_gain_idx_tbl_2G[0]), 14, 0, 16} + ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_2G), 14, 0, 16} , {&dot11lcn_gain_idx_tbl_2G, - sizeof(dot11lcn_gain_idx_tbl_2G) / sizeof(dot11lcn_gain_idx_tbl_2G[0]), + ARRAY_SIZE(dot11lcn_gain_idx_tbl_2G), 13, 0, 32} , {&dot11lcn_gain_val_tbl_2G, - sizeof(dot11lcn_gain_val_tbl_2G) / sizeof(dot11lcn_gain_val_tbl_2G[0]), + ARRAY_SIZE(dot11lcn_gain_val_tbl_2G), 17, 0, 8} }; const struct phytbl_info dot11lcnphytbl_rx_gain_info_5G_rev2[] = { {&dot11lcn_gain_tbl_5G, - sizeof(dot11lcn_gain_tbl_5G) / sizeof(dot11lcn_gain_tbl_5G[0]), 18, 0, + ARRAY_SIZE(dot11lcn_gain_tbl_5G), 18, 0, 32} , {&dot11lcn_aux_gain_idx_tbl_5G, - sizeof(dot11lcn_aux_gain_idx_tbl_5G) / - sizeof(dot11lcn_aux_gain_idx_tbl_5G[0]), 14, 0, 16} + ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_5G), 14, 0, 16} , {&dot11lcn_gain_idx_tbl_5G, - sizeof(dot11lcn_gain_idx_tbl_5G) / sizeof(dot11lcn_gain_idx_tbl_5G[0]), + ARRAY_SIZE(dot11lcn_gain_idx_tbl_5G), 13, 0, 32} , {&dot11lcn_gain_val_tbl_5G, - sizeof(dot11lcn_gain_val_tbl_5G) / sizeof(dot11lcn_gain_val_tbl_5G[0]), + ARRAY_SIZE(dot11lcn_gain_val_tbl_5G), 17, 0, 8} }; const struct phytbl_info dot11lcnphytbl_rx_gain_info_extlna_2G_rev2[] = { {&dot11lcn_gain_tbl_extlna_2G, - sizeof(dot11lcn_gain_tbl_extlna_2G) / - sizeof(dot11lcn_gain_tbl_extlna_2G[0]), 18, 0, 32} + ARRAY_SIZE(dot11lcn_gain_tbl_extlna_2G), 18, 0, 32} , {&dot11lcn_aux_gain_idx_tbl_extlna_2G, - sizeof(dot11lcn_aux_gain_idx_tbl_extlna_2G) / - sizeof(dot11lcn_aux_gain_idx_tbl_extlna_2G[0]), 14, 0, 16} + ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_extlna_2G), 14, 0, 16} , {&dot11lcn_gain_idx_tbl_extlna_2G, - sizeof(dot11lcn_gain_idx_tbl_extlna_2G) / - sizeof(dot11lcn_gain_idx_tbl_extlna_2G[0]), 13, 0, 
32} + ARRAY_SIZE(dot11lcn_gain_idx_tbl_extlna_2G), 13, 0, 32} , {&dot11lcn_gain_val_tbl_extlna_2G, - sizeof(dot11lcn_gain_val_tbl_extlna_2G) / - sizeof(dot11lcn_gain_val_tbl_extlna_2G[0]), 17, 0, 8} + ARRAY_SIZE(dot11lcn_gain_val_tbl_extlna_2G), 17, 0, 8} }; const struct phytbl_info dot11lcnphytbl_rx_gain_info_extlna_5G_rev2[] = { {&dot11lcn_gain_tbl_5G, - sizeof(dot11lcn_gain_tbl_5G) / sizeof(dot11lcn_gain_tbl_5G[0]), 18, 0, + ARRAY_SIZE(dot11lcn_gain_tbl_5G), 18, 0, 32} , {&dot11lcn_aux_gain_idx_tbl_5G, - sizeof(dot11lcn_aux_gain_idx_tbl_5G) / - sizeof(dot11lcn_aux_gain_idx_tbl_5G[0]), 14, 0, 16} + ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_5G), 14, 0, 16} , {&dot11lcn_gain_idx_tbl_5G, - sizeof(dot11lcn_gain_idx_tbl_5G) / sizeof(dot11lcn_gain_idx_tbl_5G[0]), + ARRAY_SIZE(dot11lcn_gain_idx_tbl_5G), 13, 0, 32} , {&dot11lcn_gain_val_tbl_5G, - sizeof(dot11lcn_gain_val_tbl_5G) / sizeof(dot11lcn_gain_val_tbl_5G[0]), + ARRAY_SIZE(dot11lcn_gain_val_tbl_5G), 17, 0, 8} }; const u32 dot11lcnphytbl_rx_gain_info_sz_rev0 = - sizeof(dot11lcnphytbl_rx_gain_info_rev0) / - sizeof(dot11lcnphytbl_rx_gain_info_rev0[0]); + ARRAY_SIZE(dot11lcnphytbl_rx_gain_info_rev0); const u32 dot11lcnphytbl_rx_gain_info_2G_rev2_sz = - sizeof(dot11lcnphytbl_rx_gain_info_2G_rev2) / - sizeof(dot11lcnphytbl_rx_gain_info_2G_rev2[0]); + ARRAY_SIZE(dot11lcnphytbl_rx_gain_info_2G_rev2); const u32 dot11lcnphytbl_rx_gain_info_5G_rev2_sz = - sizeof(dot11lcnphytbl_rx_gain_info_5G_rev2) / - sizeof(dot11lcnphytbl_rx_gain_info_5G_rev2[0]); + ARRAY_SIZE(dot11lcnphytbl_rx_gain_info_5G_rev2); static const u16 dot11lcn_min_sig_sq_tbl_rev0[] = { 0x014d, @@ -2771,89 +2757,74 @@ static const u32 dot11lcn_papd_compdelta_tbl_rev0[] = { const struct phytbl_info dot11lcnphytbl_info_rev0[] = { {&dot11lcn_min_sig_sq_tbl_rev0, - sizeof(dot11lcn_min_sig_sq_tbl_rev0) / - sizeof(dot11lcn_min_sig_sq_tbl_rev0[0]), 2, 0, 16} + ARRAY_SIZE(dot11lcn_min_sig_sq_tbl_rev0), 2, 0, 16} , {&dot11lcn_noise_scale_tbl_rev0, - sizeof(dot11lcn_noise_scale_tbl_rev0) / - sizeof(dot11lcn_noise_scale_tbl_rev0[0]), 1, 0, 16} + ARRAY_SIZE(dot11lcn_noise_scale_tbl_rev0), 1, 0, 16} , {&dot11lcn_fltr_ctrl_tbl_rev0, - sizeof(dot11lcn_fltr_ctrl_tbl_rev0) / - sizeof(dot11lcn_fltr_ctrl_tbl_rev0[0]), 11, 0, 32} + ARRAY_SIZE(dot11lcn_fltr_ctrl_tbl_rev0), 11, 0, 32} , {&dot11lcn_ps_ctrl_tbl_rev0, - sizeof(dot11lcn_ps_ctrl_tbl_rev0) / - sizeof(dot11lcn_ps_ctrl_tbl_rev0[0]), 12, 0, 32} + ARRAY_SIZE(dot11lcn_ps_ctrl_tbl_rev0), 12, 0, 32} , {&dot11lcn_gain_idx_tbl_rev0, - sizeof(dot11lcn_gain_idx_tbl_rev0) / - sizeof(dot11lcn_gain_idx_tbl_rev0[0]), 13, 0, 32} + ARRAY_SIZE(dot11lcn_gain_idx_tbl_rev0), 13, 0, 32} , {&dot11lcn_aux_gain_idx_tbl_rev0, - sizeof(dot11lcn_aux_gain_idx_tbl_rev0) / - sizeof(dot11lcn_aux_gain_idx_tbl_rev0[0]), 14, 0, 16} + ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_rev0), 14, 0, 16} , {&dot11lcn_sw_ctrl_tbl_rev0, - sizeof(dot11lcn_sw_ctrl_tbl_rev0) / - sizeof(dot11lcn_sw_ctrl_tbl_rev0[0]), 15, 0, 16} + ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_rev0), 15, 0, 16} , {&dot11lcn_nf_table_rev0, - sizeof(dot11lcn_nf_table_rev0) / sizeof(dot11lcn_nf_table_rev0[0]), 16, + ARRAY_SIZE(dot11lcn_nf_table_rev0), 16, 0, 8} , {&dot11lcn_gain_val_tbl_rev0, - sizeof(dot11lcn_gain_val_tbl_rev0) / - sizeof(dot11lcn_gain_val_tbl_rev0[0]), 17, 0, 8} + ARRAY_SIZE(dot11lcn_gain_val_tbl_rev0), 17, 0, 8} , {&dot11lcn_gain_tbl_rev0, - sizeof(dot11lcn_gain_tbl_rev0) / sizeof(dot11lcn_gain_tbl_rev0[0]), 18, + ARRAY_SIZE(dot11lcn_gain_tbl_rev0), 18, 0, 32} , {&dot11lcn_spur_tbl_rev0, - sizeof(dot11lcn_spur_tbl_rev0) / 
sizeof(dot11lcn_spur_tbl_rev0[0]), 20, + ARRAY_SIZE(dot11lcn_spur_tbl_rev0), 20, 0, 8} , {&dot11lcn_unsup_mcs_tbl_rev0, - sizeof(dot11lcn_unsup_mcs_tbl_rev0) / - sizeof(dot11lcn_unsup_mcs_tbl_rev0[0]), 23, 0, 16} + ARRAY_SIZE(dot11lcn_unsup_mcs_tbl_rev0), 23, 0, 16} , {&dot11lcn_iq_local_tbl_rev0, - sizeof(dot11lcn_iq_local_tbl_rev0) / - sizeof(dot11lcn_iq_local_tbl_rev0[0]), 0, 0, 16} + ARRAY_SIZE(dot11lcn_iq_local_tbl_rev0), 0, 0, 16} , {&dot11lcn_papd_compdelta_tbl_rev0, - sizeof(dot11lcn_papd_compdelta_tbl_rev0) / - sizeof(dot11lcn_papd_compdelta_tbl_rev0[0]), 24, 0, 32} + ARRAY_SIZE(dot11lcn_papd_compdelta_tbl_rev0), 24, 0, 32} , }; const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313 = { &dot11lcn_sw_ctrl_tbl_4313_rev0, - sizeof(dot11lcn_sw_ctrl_tbl_4313_rev0) / - sizeof(dot11lcn_sw_ctrl_tbl_4313_rev0[0]), 15, 0, 16 + ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_rev0), 15, 0, 16 }; const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_epa = { &dot11lcn_sw_ctrl_tbl_4313_epa_rev0, - sizeof(dot11lcn_sw_ctrl_tbl_4313_epa_rev0) / - sizeof(dot11lcn_sw_ctrl_tbl_4313_epa_rev0[0]), 15, 0, 16 + ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_epa_rev0), 15, 0, 16 }; const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_epa = { &dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo, - sizeof(dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo) / - sizeof(dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo[0]), 15, 0, 16 + ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo), 15, 0, 16 }; const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_epa_p250 = { &dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0, - sizeof(dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0) / - sizeof(dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0[0]), 15, 0, 16 + ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0), 15, 0, 16 }; const u32 dot11lcnphytbl_info_sz_rev0 = - sizeof(dot11lcnphytbl_info_rev0) / sizeof(dot11lcnphytbl_info_rev0[0]); + ARRAY_SIZE(dot11lcnphytbl_info_rev0); const struct lcnphy_tx_gain_tbl_entry dot11lcnphy_2GHz_extPA_gaintable_rev0[128] = { -- cgit v0.10.2 From 67e39c5ae7369bad2a1c2d631e5b971be2e1edc1 Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Tue, 20 Aug 2013 16:00:38 +0200 Subject: brcmsmac: add debug info message providing phy and radio info For debug purposes it is good to have the phy and radio information available in the log. Only logged when driver is built when BRCMDBG or BRCM_TRACING kconfig are set. Tested-by: David Herrmann Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Arend van Spriel Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c index 7ca10bf..c3c6123 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/main.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c @@ -4652,7 +4652,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core, wlc->band->phyrev = wlc_hw->band->phyrev; wlc->band->radioid = wlc_hw->band->radioid; wlc->band->radiorev = wlc_hw->band->radiorev; - + brcms_dbg_info(core, "wl%d: phy %u/%u radio %x/%u\n", unit, + wlc->band->phytype, wlc->band->phyrev, + wlc->band->radioid, wlc->band->radiorev); /* default contention windows size limits */ wlc_hw->band->CWmin = APHY_CWMIN; wlc_hw->band->CWmax = PHY_CWMAX; -- cgit v0.10.2 From ab9a50e387dce5f33a9d7efd64375456ac9d996e Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Tue, 20 Aug 2013 16:00:39 +0200 Subject: brcmsmac: update transmit gain table for lcn phy Update the transmit gain table for bcm4313 chip family. 
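Each row of the updated dot11lcnphy_2GHz_gaintable_rev0 below is one transmit gain step. As a reading aid only, here is a minimal standalone sketch of how such a row is packed into a 32-bit table word; the gm/pga/pad names and the packing come from wlc_lcnphy_load_tx_gain_table() shown in a later patch in this series, while the dac/bb_mult names and the overall field order are assumptions, not part of this patch:

	/* Reading aid only, not part of the patch. Standalone C using stdint
	 * types in place of the kernel's u8/u16/u32. The dac/bb_mult names
	 * and the field order are assumptions. */
	#include <stdint.h>

	struct lcnphy_tx_gain_tbl_entry {
		uint8_t gm;       /* first column of each row below */
		uint8_t pga;      /* assumed order */
		uint8_t pad;      /* assumed order */
		uint8_t dac;      /* assumed */
		uint8_t bb_mult;  /* assumed */
	};

	/* Same packing as wlc_lcnphy_load_tx_gain_table() in a later patch:
	 * (pa_gain << 24) | (pad << 16) | (pga << 8) | gm */
	static uint32_t lcnphy_pack_tx_gain(uint16_t pa_gain,
					    const struct lcnphy_tx_gain_tbl_entry *e)
	{
		return ((uint32_t)pa_gain << 24) | ((uint32_t)e->pad << 16) |
		       ((uint32_t)e->pga << 8) | e->gm;
	}
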
Tested-by: David Herrmann Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Arend van Spriel Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c index deae3a7..efd5a58 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c @@ -2959,134 +2959,134 @@ dot11lcnphy_2GHz_extPA_gaintable_rev0[128] = { }; const struct lcnphy_tx_gain_tbl_entry dot11lcnphy_2GHz_gaintable_rev0[128] = { - {7, 0, 31, 0, 72}, - {7, 0, 31, 0, 70}, - {7, 0, 31, 0, 68}, - {7, 0, 30, 0, 67}, - {7, 0, 29, 0, 68}, - {7, 0, 28, 0, 68}, - {7, 0, 27, 0, 69}, - {7, 0, 26, 0, 70}, - {7, 0, 25, 0, 70}, - {7, 0, 24, 0, 71}, - {7, 0, 23, 0, 72}, - {7, 0, 23, 0, 70}, - {7, 0, 22, 0, 71}, - {7, 0, 21, 0, 72}, - {7, 0, 21, 0, 70}, - {7, 0, 21, 0, 68}, - {7, 0, 21, 0, 66}, - {7, 0, 21, 0, 64}, - {7, 0, 21, 0, 63}, - {7, 0, 20, 0, 64}, - {7, 0, 19, 0, 65}, - {7, 0, 19, 0, 64}, - {7, 0, 18, 0, 65}, - {7, 0, 18, 0, 64}, - {7, 0, 17, 0, 65}, - {7, 0, 17, 0, 64}, - {7, 0, 16, 0, 65}, - {7, 0, 16, 0, 64}, - {7, 0, 16, 0, 62}, - {7, 0, 16, 0, 60}, - {7, 0, 16, 0, 58}, - {7, 0, 15, 0, 61}, - {7, 0, 15, 0, 59}, - {7, 0, 14, 0, 61}, - {7, 0, 14, 0, 60}, - {7, 0, 14, 0, 58}, - {7, 0, 13, 0, 60}, - {7, 0, 13, 0, 59}, - {7, 0, 12, 0, 62}, - {7, 0, 12, 0, 60}, - {7, 0, 12, 0, 58}, - {7, 0, 11, 0, 62}, - {7, 0, 11, 0, 60}, - {7, 0, 11, 0, 59}, - {7, 0, 11, 0, 57}, - {7, 0, 10, 0, 61}, - {7, 0, 10, 0, 59}, - {7, 0, 10, 0, 57}, - {7, 0, 9, 0, 62}, - {7, 0, 9, 0, 60}, - {7, 0, 9, 0, 58}, - {7, 0, 9, 0, 57}, - {7, 0, 8, 0, 62}, - {7, 0, 8, 0, 60}, - {7, 0, 8, 0, 58}, - {7, 0, 8, 0, 57}, - {7, 0, 8, 0, 55}, - {7, 0, 7, 0, 61}, + {15, 0, 31, 0, 72}, + {15, 0, 31, 0, 70}, + {15, 0, 31, 0, 68}, + {15, 0, 30, 0, 68}, + {15, 0, 29, 0, 69}, + {15, 0, 28, 0, 69}, + {15, 0, 27, 0, 70}, + {15, 0, 26, 0, 70}, + {15, 0, 25, 0, 71}, + {15, 0, 24, 0, 72}, + {15, 0, 23, 0, 73}, + {15, 0, 23, 0, 71}, + {15, 0, 22, 0, 72}, + {15, 0, 21, 0, 73}, + {15, 0, 21, 0, 71}, + {15, 0, 21, 0, 69}, + {15, 0, 21, 0, 67}, + {15, 0, 21, 0, 65}, + {15, 0, 21, 0, 63}, + {15, 0, 20, 0, 65}, + {15, 0, 19, 0, 66}, + {15, 0, 19, 0, 64}, + {15, 0, 18, 0, 66}, + {15, 0, 18, 0, 64}, + {15, 0, 17, 0, 66}, + {15, 0, 17, 0, 64}, + {15, 0, 16, 0, 66}, + {15, 0, 16, 0, 64}, + {15, 0, 16, 0, 62}, + {15, 0, 16, 0, 61}, + {15, 0, 16, 0, 59}, + {15, 0, 15, 0, 61}, + {15, 0, 15, 0, 59}, + {15, 0, 14, 0, 62}, + {15, 0, 14, 0, 60}, + {15, 0, 14, 0, 58}, + {15, 0, 13, 0, 61}, + {15, 0, 13, 0, 59}, + {15, 0, 12, 0, 62}, + {15, 0, 12, 0, 61}, + {15, 0, 12, 0, 59}, + {15, 0, 11, 0, 62}, + {15, 0, 11, 0, 61}, + {15, 0, 11, 0, 59}, + {15, 0, 11, 0, 57}, + {15, 0, 10, 0, 61}, + {15, 0, 10, 0, 59}, + {15, 0, 10, 0, 58}, + {15, 0, 9, 0, 62}, + {15, 0, 9, 0, 61}, + {15, 0, 9, 0, 59}, + {15, 0, 9, 0, 57}, + {15, 0, 8, 0, 62}, + {15, 0, 8, 0, 61}, + {15, 0, 8, 0, 59}, + {15, 0, 8, 0, 57}, + {15, 0, 8, 0, 56}, + {15, 0, 8, 0, 54}, + {15, 0, 8, 0, 53}, + {15, 0, 8, 0, 51}, + {15, 0, 8, 0, 50}, + {7, 0, 7, 0, 69}, + {7, 0, 7, 0, 67}, + {7, 0, 7, 0, 65}, + {7, 0, 7, 0, 64}, + {7, 0, 7, 0, 62}, {7, 0, 7, 0, 60}, {7, 0, 7, 0, 58}, - {7, 0, 7, 0, 56}, + {7, 0, 7, 0, 57}, {7, 0, 7, 0, 55}, {7, 0, 6, 0, 62}, - {7, 0, 6, 0, 60}, - {7, 0, 6, 0, 58}, + {7, 0, 6, 0, 61}, + {7, 0, 6, 0, 59}, {7, 0, 6, 0, 57}, - {7, 0, 6, 0, 55}, + {7, 0, 6, 0, 56}, {7, 0, 6, 0, 54}, - {7, 0, 6, 0, 52}, + {7, 0, 6, 0, 53}, {7, 0, 5, 0, 61}, - {7, 0, 5, 0, 59}, - {7, 
0, 5, 0, 57}, + {7, 0, 5, 0, 60}, + {7, 0, 5, 0, 58}, {7, 0, 5, 0, 56}, - {7, 0, 5, 0, 54}, + {7, 0, 5, 0, 55}, {7, 0, 5, 0, 53}, - {7, 0, 5, 0, 51}, - {7, 0, 4, 0, 62}, - {7, 0, 4, 0, 60}, - {7, 0, 4, 0, 58}, + {7, 0, 5, 0, 52}, + {7, 0, 5, 0, 50}, + {7, 0, 5, 0, 49}, + {7, 0, 5, 0, 47}, {7, 0, 4, 0, 57}, - {7, 0, 4, 0, 55}, + {7, 0, 4, 0, 56}, {7, 0, 4, 0, 54}, - {7, 0, 4, 0, 52}, + {7, 0, 4, 0, 53}, {7, 0, 4, 0, 51}, - {7, 0, 4, 0, 49}, + {7, 0, 4, 0, 50}, {7, 0, 4, 0, 48}, + {7, 0, 4, 0, 47}, {7, 0, 4, 0, 46}, - {7, 0, 3, 0, 60}, - {7, 0, 3, 0, 58}, - {7, 0, 3, 0, 57}, - {7, 0, 3, 0, 55}, - {7, 0, 3, 0, 54}, - {7, 0, 3, 0, 52}, + {7, 0, 4, 0, 44}, + {7, 0, 4, 0, 43}, + {7, 0, 4, 0, 42}, + {7, 0, 4, 0, 41}, + {7, 0, 4, 0, 40}, {7, 0, 3, 0, 51}, - {7, 0, 3, 0, 49}, + {7, 0, 3, 0, 50}, {7, 0, 3, 0, 48}, + {7, 0, 3, 0, 47}, {7, 0, 3, 0, 46}, - {7, 0, 3, 0, 45}, {7, 0, 3, 0, 44}, {7, 0, 3, 0, 43}, + {7, 0, 3, 0, 42}, {7, 0, 3, 0, 41}, - {7, 0, 2, 0, 61}, - {7, 0, 2, 0, 59}, - {7, 0, 2, 0, 57}, - {7, 0, 2, 0, 56}, - {7, 0, 2, 0, 54}, - {7, 0, 2, 0, 53}, - {7, 0, 2, 0, 51}, - {7, 0, 2, 0, 50}, - {7, 0, 2, 0, 48}, - {7, 0, 2, 0, 47}, - {7, 0, 2, 0, 46}, - {7, 0, 2, 0, 44}, - {7, 0, 2, 0, 43}, - {7, 0, 2, 0, 42}, - {7, 0, 2, 0, 41}, - {7, 0, 2, 0, 39}, - {7, 0, 2, 0, 38}, - {7, 0, 2, 0, 37}, - {7, 0, 2, 0, 36}, - {7, 0, 2, 0, 35}, - {7, 0, 2, 0, 34}, - {7, 0, 2, 0, 33}, - {7, 0, 2, 0, 32}, - {7, 0, 1, 0, 63}, - {7, 0, 1, 0, 61}, - {7, 0, 1, 0, 59}, - {7, 0, 1, 0, 57}, + {3, 0, 3, 0, 56}, + {3, 0, 3, 0, 54}, + {3, 0, 3, 0, 53}, + {3, 0, 3, 0, 51}, + {3, 0, 3, 0, 50}, + {3, 0, 3, 0, 48}, + {3, 0, 3, 0, 47}, + {3, 0, 3, 0, 46}, + {3, 0, 3, 0, 44}, + {3, 0, 3, 0, 43}, + {3, 0, 3, 0, 42}, + {3, 0, 3, 0, 41}, + {3, 0, 3, 0, 39}, + {3, 0, 3, 0, 38}, + {3, 0, 3, 0, 37}, + {3, 0, 3, 0, 36}, + {3, 0, 3, 0, 35}, + {3, 0, 3, 0, 34}, }; const struct lcnphy_tx_gain_tbl_entry dot11lcnphy_5GHz_gaintable_rev0[128] = { -- cgit v0.10.2 From 7de646854bb1405ccafaa5e3545df6657141f510 Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Tue, 20 Aug 2013 16:00:40 +0200 Subject: brcmsmac: change lcnphy receive i/q calibration routine The gain level control for the test tone has been changed. This calbration test tone is used to determine the i/q compensation. The i/q calibration routine has been reworked to accomodate this. Cc: Jonas Gorski Tested-by: David Herrmann Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Hante Meuleman Signed-off-by: Arend van Spriel Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c index 8dc5d0f..236e8d9 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c @@ -1329,6 +1329,43 @@ static u32 wlc_lcnphy_measure_digital_power(struct brcms_phy *pi, u16 nsamples) return (iq_est.i_pwr + iq_est.q_pwr) / nsamples; } +static bool wlc_lcnphy_rx_iq_cal_gain(struct brcms_phy *pi, u16 biq1_gain, + u16 tia_gain, u16 lna2_gain) +{ + u32 i_thresh_l, q_thresh_l; + u32 i_thresh_h, q_thresh_h; + struct lcnphy_iq_est iq_est_h, iq_est_l; + + wlc_lcnphy_set_rx_gain_by_distribution(pi, 0, 0, 0, biq1_gain, tia_gain, + lna2_gain, 0); + + wlc_lcnphy_rx_gain_override_enable(pi, true); + wlc_lcnphy_start_tx_tone(pi, 2000, (40 >> 1), 0); + udelay(500); + write_radio_reg(pi, RADIO_2064_REG112, 0); + if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_l)) + return false; + + wlc_lcnphy_start_tx_tone(pi, 2000, 40, 0); + udelay(500); + write_radio_reg(pi, RADIO_2064_REG112, 0); + if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_h)) + return false; + + i_thresh_l = (iq_est_l.i_pwr << 1); + i_thresh_h = (iq_est_l.i_pwr << 2) + iq_est_l.i_pwr; + + q_thresh_l = (iq_est_l.q_pwr << 1); + q_thresh_h = (iq_est_l.q_pwr << 2) + iq_est_l.q_pwr; + if ((iq_est_h.i_pwr > i_thresh_l) && + (iq_est_h.i_pwr < i_thresh_h) && + (iq_est_h.q_pwr > q_thresh_l) && + (iq_est_h.q_pwr < q_thresh_h)) + return true; + + return false; +} + static bool wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi, const struct lcnphy_rx_iqcomp *iqcomp, @@ -1343,8 +1380,8 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi, RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old, rfoverride3_old, rfoverride3val_old, rfoverride4_old, rfoverride4val_old, afectrlovr_old, afectrlovrval_old; - int tia_gain; - u32 received_power, rx_pwr_threshold; + int tia_gain, lna2_gain, biq1_gain; + bool set_gain; u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl; u16 values_to_save[11]; s16 *ptr; @@ -1432,29 +1469,30 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi, mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0); mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0); - wlc_lcnphy_start_tx_tone(pi, 2000, 120, 0); write_phy_reg(pi, 0x6da, 0xffff); or_phy_reg(pi, 0x6db, 0x3); - wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch); - wlc_lcnphy_rx_gain_override_enable(pi, true); - - tia_gain = 8; - rx_pwr_threshold = 950; - while (tia_gain > 0) { - tia_gain -= 1; - wlc_lcnphy_set_rx_gain_by_distribution(pi, - 0, 0, 2, 2, - (u16) - tia_gain, 1, 0); - udelay(500); - received_power = - wlc_lcnphy_measure_digital_power(pi, 2000); - if (received_power < rx_pwr_threshold) - break; + wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch); + for (lna2_gain = 3; lna2_gain >= 0; lna2_gain--) { + for (tia_gain = 4; tia_gain >= 0; tia_gain--) { + for (biq1_gain = 6; biq1_gain >= 0; biq1_gain--) { + set_gain = wlc_lcnphy_rx_iq_cal_gain(pi, + (u16) + biq1_gain, + (u16) + tia_gain, + (u16) + lna2_gain); + if (!set_gain) + continue; + + result = wlc_lcnphy_calc_rx_iq_comp(pi, 1024); + goto stop_tone; + } + } } - result = wlc_lcnphy_calc_rx_iq_comp(pi, 0xffff); +stop_tone: wlc_lcnphy_stop_tx_tone(pi); write_phy_reg(pi, 0x631, Core1TxControl_old); -- cgit v0.10.2 From d50ec00160b1842d492c8b871bcd16301454cdcf Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Tue, 20 Aug 2013 16:00:41 +0200 Subject: brcmsmac: fix TSSI idle estimation The baseband multiplier must be zero during TSSI idle estimation and restored 
afterwards. Reviewed-by: Pieter-Paul Giesberts Tested-by: David Herrmann Signed-off-by: Arend van Spriel Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c index 236e8d9..e08b73a 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c @@ -2836,6 +2836,8 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi) read_radio_reg(pi, RADIO_2064_REG007) & 1; u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10; u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4; + u8 SAVE_bbmult = wlc_lcnphy_get_bbmult(pi); + idleTssi = read_phy_reg(pi, 0x4ab); suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & MCTL_EN_MAC)); @@ -2853,6 +2855,12 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi) mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 1 << 4); mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 1 << 2); wlc_lcnphy_tssi_setup(pi); + + mod_phy_reg(pi, 0x4d7, (0x1 << 0), (1 << 0)); + mod_phy_reg(pi, 0x4d7, (0x1 << 6), (1 << 6)); + + wlc_lcnphy_set_bbmult(pi, 0x0); + wlc_phy_do_dummy_tx(pi, true, OFF); idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0)) >> 0); @@ -2874,6 +2882,7 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi) mod_phy_reg(pi, 0x44c, (0x1 << 12), (0) << 12); + wlc_lcnphy_set_bbmult(pi, SAVE_bbmult); wlc_lcnphy_set_tx_gain_override(pi, tx_gain_override_old); wlc_lcnphy_set_tx_gain(pi, &old_gains); wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl); -- cgit v0.10.2 From 3e72ef73c3414be5932ac75d0938747fb232d4ae Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Tue, 20 Aug 2013 16:00:42 +0200 Subject: brcmsmac: avoid calling set_txpwr_by_index() twice For lcnphy revision 1 or when hardware supports i/q calibration the function wlc_lcnphy_set_txpwr_by_index() was called twice. Tested-by: David Herrmann Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Arend van Spriel Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c index e08b73a..caced82 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c @@ -3897,7 +3897,6 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi) target_gains.pad_gain = 21; target_gains.dac_gain = 0; wlc_lcnphy_set_tx_gain(pi, &target_gains); - wlc_lcnphy_set_tx_pwr_by_index(pi, 16); if (LCNREV_IS(pi->pubpi.phy_rev, 1) || pi_lcn->lcnphy_hw_iqcal_en) { @@ -3908,6 +3907,7 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi) lcnphy_recal ? LCNPHY_CAL_RECAL : LCNPHY_CAL_FULL), false); } else { + wlc_lcnphy_set_tx_pwr_by_index(pi, 16); wlc_lcnphy_tx_iqlo_soft_cal_full(pi); } -- cgit v0.10.2 From 02fcc7535e92031695c50eb518e56ea09fd3c6e2 Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Tue, 20 Aug 2013 16:00:43 +0200 Subject: brcmsmac: rework switch control table init including iPA BT-combo Rework the code path in lcnphy tbl_init() for switch control table programming. This also takes the iPA BT-combo card into account. Tested-by: David Herrmann Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Arend van Spriel Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c index caced82..08bcae4 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c @@ -4573,6 +4573,7 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi) uint idx; u8 phybw40; struct phytbl_info tab; + const struct phytbl_info *tb; u32 val; phybw40 = CHSPEC_IS40(pi->radio_chanspec); @@ -4619,7 +4620,6 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi) } if (LCNREV_IS(pi->pubpi.phy_rev, 2)) { - const struct phytbl_info *tb; int l; if (CHSPEC_IS2G(pi->radio_chanspec)) { @@ -4640,21 +4640,22 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi) wlc_lcnphy_write_table(pi, &tb[idx]); } - if ((pi->sh->boardflags & BFL_FEM) - && !(pi->sh->boardflags & BFL_FEM_BT)) - wlc_lcnphy_write_table(pi, &dot11lcn_sw_ctrl_tbl_info_4313_epa); - else if (pi->sh->boardflags & BFL_FEM_BT) { - if (pi->sh->boardrev < 0x1250) - wlc_lcnphy_write_table( - pi, - &dot11lcn_sw_ctrl_tbl_info_4313_bt_epa); + if (pi->sh->boardflags & BFL_FEM) { + if (pi->sh->boardflags & BFL_FEM_BT) { + if (pi->sh->boardrev < 0x1250) + tb = &dot11lcn_sw_ctrl_tbl_info_4313_bt_epa; + else + tb = &dot11lcn_sw_ctrl_tbl_info_4313_bt_epa_p250; + } else { + tb = &dot11lcn_sw_ctrl_tbl_info_4313_epa; + } + } else { + if (pi->sh->boardflags & BFL_FEM_BT) + tb = &dot11lcn_sw_ctrl_tbl_info_4313_bt_ipa; else - wlc_lcnphy_write_table( - pi, - &dot11lcn_sw_ctrl_tbl_info_4313_bt_epa_p250); - } else - wlc_lcnphy_write_table(pi, &dot11lcn_sw_ctrl_tbl_info_4313); - + tb = &dot11lcn_sw_ctrl_tbl_info_4313; + } + wlc_lcnphy_write_table(pi, tb); wlc_lcnphy_load_rfpower(pi); wlc_lcnphy_clear_papd_comptable(pi); diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c index efd5a58..d7fa312 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c @@ -2044,6 +2044,73 @@ static const u16 dot11lcn_sw_ctrl_tbl_4313_rev0[] = { 0x0005, }; +static const u16 dot11lcn_sw_ctrl_tbl_4313_ipa_rev0_combo[] = { + 0x0005, + 0x0006, + 0x0009, + 0x000a, + 0x0005, + 0x0006, + 0x0009, + 0x000a, + 0x0005, + 0x0006, + 0x0009, + 0x000a, + 0x0005, + 0x0006, + 0x0009, + 0x000a, + 0x0005, + 0x0006, + 0x0009, + 0x000a, + 0x0005, + 0x0006, + 0x0009, + 0x000a, + 0x0005, + 0x0006, + 0x0009, + 0x000a, + 0x0005, + 0x0006, + 0x0009, + 0x000a, + 0x0005, + 0x0006, + 0x0009, + 0x000a, + 0x0005, + 0x0006, + 0x0009, + 0x000a, + 0x0005, + 0x0006, + 0x0009, + 0x000a, + 0x0005, + 0x0006, + 0x0009, + 0x000a, + 0x0005, + 0x0006, + 0x0009, + 0x000a, + 0x0005, + 0x0006, + 0x0009, + 0x000a, + 0x0005, + 0x0006, + 0x0009, + 0x000a, + 0x0005, + 0x0006, + 0x0009, + 0x000a, +}; + static const u16 dot11lcn_sw_ctrl_tbl_rev0[] = { 0x0004, 0x0004, @@ -2808,6 +2875,11 @@ const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313 = { ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_rev0), 15, 0, 16 }; +const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_ipa = { + &dot11lcn_sw_ctrl_tbl_4313_ipa_rev0_combo, + ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_ipa_rev0_combo), 15, 0, 16 +}; + const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_epa = { &dot11lcn_sw_ctrl_tbl_4313_epa_rev0, ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_epa_rev0), 15, 0, 16 diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.h index 
5f75e16..489422a 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.h +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.h @@ -20,6 +20,7 @@ extern const struct phytbl_info dot11lcnphytbl_rx_gain_info_rev0[]; extern const u32 dot11lcnphytbl_rx_gain_info_sz_rev0; extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313; +extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_ipa; extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_epa; extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_epa_combo; extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_epa; -- cgit v0.10.2 From 118e545a2cf34c8a660d050a72d8e711850f8f93 Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Tue, 20 Aug 2013 16:00:44 +0200 Subject: brcmsmac: correct phy registers for TSSI-based power control A number of additional phy registers needs to be programmed when using TSSI-based power control. Tested-by: David Herrmann Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Arend van Spriel Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c index 08bcae4..2917f5c 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c @@ -2020,6 +2020,16 @@ wlc_lcnphy_set_tssi_mux(struct brcms_phy *pi, enum lcnphy_tssi_mode pos) } else { mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0x1); mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8); + mod_radio_reg(pi, RADIO_2064_REG028, 0x1, 0x0); + mod_radio_reg(pi, RADIO_2064_REG11A, 0x4, 1<<2); + mod_radio_reg(pi, RADIO_2064_REG036, 0x10, 0x0); + mod_radio_reg(pi, RADIO_2064_REG11A, 0x10, 1<<4); + mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0); + mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x77); + mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, 0xe<<1); + mod_radio_reg(pi, RADIO_2064_REG112, 0x80, 1<<7); + mod_radio_reg(pi, RADIO_2064_REG005, 0x7, 1<<1); + mod_radio_reg(pi, RADIO_2064_REG029, 0xf0, 0<<4); } } else { mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0x1) << 2); @@ -2106,6 +2116,7 @@ static void wlc_lcnphy_pwrctrl_rssiparams(struct brcms_phy *pi) (auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12)); mod_radio_reg(pi, RADIO_2064_REG082, (1 << 5), (1 << 5)); + mod_radio_reg(pi, RADIO_2064_REG07C, (1 << 0), (1 << 0)); } static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) @@ -2218,6 +2229,10 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) mod_phy_reg(pi, 0x4d7, (0xf << 8), (0) << 8); + mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x0); + mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0); + mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8); + wlc_lcnphy_pwrctrl_rssiparams(pi); } @@ -3096,6 +3111,11 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi) wlc_lcnphy_write_table(pi, &tab); tab.tbl_offset++; } + mod_phy_reg(pi, 0x4d0, (0x1 << 0), (0) << 0); + mod_phy_reg(pi, 0x4d3, (0xff << 0), (0) << 0); + mod_phy_reg(pi, 0x4d3, (0xff << 8), (0) << 8); + mod_phy_reg(pi, 0x4d0, (0x1 << 4), (0) << 4); + mod_phy_reg(pi, 0x4d0, (0x1 << 2), (0) << 2); mod_phy_reg(pi, 0x410, (0x1 << 7), (0) << 7); -- cgit v0.10.2 From d37c8f0826becb82be0ea06b074f775fb4f75730 Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Tue, 20 Aug 2013 16:00:45 +0200 Subject: brcmsmac: reinitialize TSSI power control upon channel switch When changing channels the TSSI based power control needs to be reinitialized. 
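The change itself is small: the tail of wlc_phy_chanspec_set_lcnphy() gains a guarded call so the per-channel TSSI state is rebuilt only when TSSI-based power control is actually in use. A sketch of the intent, with function names taken from the diff below:

	/* Sketch of the guarded re-initialization added at the end of the
	 * channel-switch path; boards not using TSSI-based power control
	 * skip the extra setup. See the hunk below for the actual change. */
	if (wlc_lcnphy_tssi_based_pwr_ctrl_enabled(pi))
		wlc_lcnphy_tssi_setup(pi);
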
Tested-by: David Herrmann Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Arend van Spriel Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c index 2917f5c..8fb1048 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c @@ -5019,6 +5019,8 @@ void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec) wlc_lcnphy_load_tx_iir_filter(pi, true, 3); mod_phy_reg(pi, 0x4eb, (0x7 << 3), (1) << 3); + if (wlc_lcnphy_tssi_based_pwr_ctrl_enabled(pi)) + wlc_lcnphy_tssi_setup(pi); } void wlc_phy_detach_lcnphy(struct brcms_phy *pi) -- cgit v0.10.2 From 20c7d42a5005938ea6734ee0463f2031d843878f Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Wed, 21 Aug 2013 11:45:47 +0200 Subject: brcmsmac: add support for BCM4313 iPA variant This patch completes the changes needed for supporting the iPA variant cards of the BCM4313 wireless chipset. Tested-by: David Herrmann Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Arend van Spriel Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c index 8fb1048..b2d6d6d 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c @@ -1826,6 +1826,19 @@ wlc_lcnphy_radio_2064_channel_tune_4313(struct brcms_phy *pi, u8 channel) write_radio_reg(pi, RADIO_2064_REG038, 3); write_radio_reg(pi, RADIO_2064_REG091, 7); } + + if (!(pi->sh->boardflags & BFL_FEM)) { + static const u8 reg038[14] = { + 0xd, 0xe, 0xd, 0xd, 0xd, 0xc, 0xa, + 0xb, 0xb, 0x3, 0x3, 0x2, 0x0, 0x0 + }; + + write_radio_reg(pi, RADIO_2064_REG02A, 0xf); + write_radio_reg(pi, RADIO_2064_REG091, 0x3); + write_radio_reg(pi, RADIO_2064_REG038, 0x3); + + write_radio_reg(pi, RADIO_2064_REG038, reg038[channel - 1]); + } } static int @@ -2123,7 +2136,16 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) { struct phytbl_info tab; u32 rfseq, ind; + enum lcnphy_tssi_mode mode; + u8 tssi_sel; + if (pi->sh->boardflags & BFL_FEM) { + tssi_sel = 0x1; + mode = LCNPHY_TSSI_EXT; + } else { + tssi_sel = 0xe; + mode = LCNPHY_TSSI_POST_PA; + } tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL; tab.tbl_width = 32; tab.tbl_ptr = &ind; @@ -2144,7 +2166,7 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) mod_phy_reg(pi, 0x503, (0x1 << 4), (1) << 4); - wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT); + wlc_lcnphy_set_tssi_mux(pi, mode); mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0) << 14); mod_phy_reg(pi, 0x4a4, (0x1 << 15), (1) << 15); @@ -2180,9 +2202,10 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) mod_phy_reg(pi, 0x49a, (0x1ff << 0), (0xff) << 0); if (LCNREV_IS(pi->pubpi.phy_rev, 2)) { - mod_radio_reg(pi, RADIO_2064_REG028, 0xf, 0xe); + mod_radio_reg(pi, RADIO_2064_REG028, 0xf, tssi_sel); mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4); } else { + mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, tssi_sel << 1); mod_radio_reg(pi, RADIO_2064_REG03A, 0x1, 1); mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 1 << 3); } @@ -4358,8 +4381,11 @@ wlc_lcnphy_load_tx_gain_table(struct brcms_phy *pi, tab.tbl_len = 1; tab.tbl_ptr = &val; + /* fixed gm_gain value for iPA */ + gm_gain = 15; for (j = 0; j < 128; j++) { - gm_gain = gain_table[j].gm; + if (pi->sh->boardflags & BFL_FEM) + gm_gain = gain_table[j].gm; val = (((u32) pa_gain << 24) | (gain_table[j].pad << 16) | (gain_table[j].pga << 8) | gm_gain); @@ 
-4570,7 +4596,10 @@ static void wlc_radio_2064_init(struct brcms_phy *pi) write_phy_reg(pi, 0x4ea, 0x4688); - mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0); + if (pi->sh->boardflags & BFL_FEM) + mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0); + else + mod_phy_reg(pi, 0x4eb, (0x7 << 0), 3 << 0); mod_phy_reg(pi, 0x4eb, (0x7 << 6), 0 << 6); @@ -4581,6 +4610,13 @@ static void wlc_radio_2064_init(struct brcms_phy *pi) wlc_lcnphy_rcal(pi); wlc_lcnphy_rc_cal(pi); + + if (!(pi->sh->boardflags & BFL_FEM)) { + write_radio_reg(pi, RADIO_2064_REG032, 0x6f); + write_radio_reg(pi, RADIO_2064_REG033, 0x19); + write_radio_reg(pi, RADIO_2064_REG039, 0xe); + } + } static void wlc_lcnphy_radio_init(struct brcms_phy *pi) @@ -4611,22 +4647,20 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi) wlc_lcnphy_write_table(pi, &tab); } - tab.tbl_id = LCNPHY_TBL_ID_RFSEQ; - tab.tbl_width = 16; - tab.tbl_ptr = &val; - tab.tbl_len = 1; - - val = 114; - tab.tbl_offset = 0; - wlc_lcnphy_write_table(pi, &tab); + if (!(pi->sh->boardflags & BFL_FEM)) { + tab.tbl_id = LCNPHY_TBL_ID_RFSEQ; + tab.tbl_width = 16; + tab.tbl_ptr = &val; + tab.tbl_len = 1; - val = 130; - tab.tbl_offset = 1; - wlc_lcnphy_write_table(pi, &tab); + val = 150; + tab.tbl_offset = 0; + wlc_lcnphy_write_table(pi, &tab); - val = 6; - tab.tbl_offset = 8; - wlc_lcnphy_write_table(pi, &tab); + val = 220; + tab.tbl_offset = 1; + wlc_lcnphy_write_table(pi, &tab); + } if (CHSPEC_IS2G(pi->radio_chanspec)) { if (pi->sh->boardflags & BFL_FEM) @@ -5059,8 +5093,7 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi) if (!wlc_phy_txpwr_srom_read_lcnphy(pi)) return false; - if ((pi->sh->boardflags & BFL_FEM) && - (LCNREV_IS(pi->pubpi.phy_rev, 1))) { + if (LCNREV_IS(pi->pubpi.phy_rev, 1)) { if (pi_lcn->lcnphy_tempsense_option == 3) { pi->hwpwrctrl = true; pi->hwpwrctrl_capable = true; -- cgit v0.10.2 From 15edae91cb2d91131ab3b0c6eebf1fbfc12d643e Mon Sep 17 00:00:00 2001 From: Peter Wu Date: Wed, 21 Aug 2013 23:17:11 +0200 Subject: r8169: fix invalid register dump For some reason, my PCIe RTL8111E onboard NIC on a GA-Z68X-UD3H-B3 motherboard reads as FFs when reading from MMIO with a block size larger than 7. Therefore change to reading blocks of four bytes. Ben Hutchings noted that the buffer is large enough to hold all registers, so now all registers are read. Signed-off-by: Peter Wu Acked-by: Francois Romieu Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 93ee49d..c0c9e14 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1897,9 +1897,13 @@ static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) { struct rtl8169_private *tp = netdev_priv(dev); + u32 __iomem *data = tp->mmio_addr; + u32 *dw = p; + int i; rtl_lock_work(tp); - memcpy_fromio(p, tp->mmio_addr, regs->len); + for (i = 0; i < R8169_REGS_SIZE; i += 4) + memcpy_fromio(dw++, data++, 4); rtl_unlock_work(tp); } -- cgit v0.10.2 From 0bf2bbd27717e7766c74205b9aa6f7c462c28f6d Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Thu, 22 Aug 2013 02:17:25 +0400 Subject: SH7619: fix typo in Ether platform data Commit 06a64f91da72cb5827e2bedef2ead60a123fd66e (SH7619: fix Ether support) has a typo in the 'phy_interface' field name of the platform data which causes build error -- fix it. Signed-off-by: Sergei Shtylyov Signed-off-by: David S. 
Miller diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c index 1947281..4df4d4f 100644 --- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c +++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c @@ -114,7 +114,7 @@ static struct platform_device scif2_device = { static struct sh_eth_plat_data eth_platform_data = { .phy = 1, .edmac_endian = EDMAC_LITTLE_ENDIAN, - .phy_interace = PHY_INTERFACE_MODE_MII, + .phy_interface = PHY_INTERFACE_MODE_MII, }; static struct resource eth_resources[] = { -- cgit v0.10.2 From be23ab5131621c0dadacdd72de7c94ab168627ee Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Thu, 22 Aug 2013 02:18:30 +0400 Subject: SolutionEngine7724: fix typo in Ether platform data Commit bd61224b1cbec096694e89c4187119c8576fe186 (SolutionEngine7724: fix Ether support) has a typo in the 'phy_interface' field name of the platform data which causes build error -- fix it. Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c index e96e053..21e4230 100644 --- a/arch/sh/boards/mach-se/7724/setup.c +++ b/arch/sh/boards/mach-se/7724/setup.c @@ -377,7 +377,7 @@ static struct resource sh_eth_resources[] = { static struct sh_eth_plat_data sh_eth_plat = { .phy = 0x1f, /* SMSC LAN8187 */ .edmac_endian = EDMAC_LITTLE_ENDIAN, - .phy_interace = PHY_INTERFACE_MODE_MII, + .phy_interface = PHY_INTERFACE_MODE_MII, }; static struct platform_device sh_eth_device = { -- cgit v0.10.2 From 0f7cc9a3c2bd89b15720dbf358e9b9e62af27126 Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Wed, 21 Aug 2013 17:29:23 -0700 Subject: tcp: increase throughput when reordering is high The stack currently detects reordering and avoid spurious retransmission very well. However the throughput is sub-optimal under high reordering because cwnd is increased only if the data is deliverd in order. I.e., FLAG_DATA_ACKED check in tcp_ack(). The more packet are reordered the worse the throughput is. Therefore when reordering is proven high, cwnd should advance whenever the data is delivered regardless of its ordering. If reordering is low, conservatively advance cwnd only on ordered deliveries in Open state, and retain cwnd in Disordered state (RFC5681). Using netperf on a qdisc setup of 20Mbps BW and random RTT from 45ms to 55ms (for reordering effect). This change increases TCP throughput by 20 - 25% to near bottleneck BW. A special case is the stretched ACK with new SACK and/or ECE mark. For example, a receiver may receive an out of order or ECN packet with unacked data buffered because of LRO or delayed ACK. The principle on such an ACK is to advance cwnd on the cummulative acked part first, then reduce cwnd in tcp_fastretrans_alert(). Signed-off-by: Yuchung Cheng Acked-by: Neal Cardwell Acked-by: Eric Dumazet Signed-off-by: David S. Miller diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index e965cc7..ec492ea 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2485,8 +2485,6 @@ static void tcp_try_to_open(struct sock *sk, int flag, const int prior_unsacked) if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) { tcp_try_keep_open(sk); - if (inet_csk(sk)->icsk_ca_state != TCP_CA_Open) - tcp_moderate_cwnd(tp); } else { tcp_cwnd_reduction(sk, prior_unsacked, 0); } @@ -3128,11 +3126,24 @@ static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag) inet_csk(sk)->icsk_ca_state != TCP_CA_Open; } +/* Decide wheather to run the increase function of congestion control. 
*/ static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag) { - const struct tcp_sock *tp = tcp_sk(sk); - return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) && - !tcp_in_cwnd_reduction(sk); + if (tcp_in_cwnd_reduction(sk)) + return false; + + /* If reordering is high then always grow cwnd whenever data is + * delivered regardless of its ordering. Otherwise stay conservative + * and only grow cwnd on in-order delivery in Open state, and retain + * cwnd in Disordered state (RFC5681). A stretched ACK with + * new SACK or ECE mark may first advance cwnd here and later reduce + * cwnd in tcp_fastretrans_alert() based on more states. + */ + if (tcp_sk(sk)->reordering > sysctl_tcp_reordering) + return flag & FLAG_FORWARD_PROGRESS; + + return inet_csk(sk)->icsk_ca_state == TCP_CA_Open && + flag & FLAG_DATA_ACKED; } /* Check that window update is acceptable. @@ -3352,18 +3363,15 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, sack_rtt); acked -= tp->packets_out; + /* Advance cwnd if state allows */ + if (tcp_may_raise_cwnd(sk, flag)) + tcp_cong_avoid(sk, ack, prior_in_flight); + if (tcp_ack_is_dubious(sk, flag)) { - /* Advance CWND, if state allows this. */ - if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(sk, flag)) - tcp_cong_avoid(sk, ack, prior_in_flight); is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); tcp_fastretrans_alert(sk, acked, prior_unsacked, is_dupack, flag); - } else { - if (flag & FLAG_DATA_ACKED) - tcp_cong_avoid(sk, ack, prior_in_flight); } - if (tp->tlp_high_seq) tcp_process_tlp_ack(sk, ack, flag); -- cgit v0.10.2 From 092270656d7f05f6d0c4eb79c8a74a2ce1b45bec Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Thu, 22 Aug 2013 10:55:21 +0900 Subject: ethernet: moxa: remove unnecessary platform_set_drvdata() The driver core clears the driver data to NULL after device_release or on probe failure. Thus, it is not needed to manually clear the device driver data to NULL. Signed-off-by: Jingoo Han Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 6eee686..83c2091 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -533,7 +533,6 @@ static int moxart_remove(struct platform_device *pdev) unregister_netdev(ndev); free_irq(ndev->irq, ndev); moxart_mac_free_memory(ndev); - platform_set_drvdata(pdev, NULL); free_netdev(ndev); return 0; -- cgit v0.10.2 From 3bca8de22082f6c02a176dbb344e5234d9d5cc02 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Thu, 22 Aug 2013 10:57:20 +0900 Subject: ethernet: broadcom: remove unnecessary platform_set_drvdata() The driver core clears the driver data to NULL after device_release or on probe failure. Thus, it is not needed to manually clear the device driver data to NULL. Signed-off-by: Jingoo Han Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 89ff09e..190219e 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -2836,7 +2836,6 @@ static int bcm_enetsw_remove(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); - platform_set_drvdata(pdev, NULL); free_netdev(dev); return 0; } -- cgit v0.10.2 From b4c1c1d03842e6b24e87e48c07769e105df99985 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 21 Aug 2013 19:47:58 +0200 Subject: net: tcp_probe: also include rcv_wnd next to snd_wnd It is helpful to sometimes know the TCP window sizes of an established socket e.g. to confirm that window scaling is working or to tweak the window size to improve high-latency connections, etc etc. Currently the TCP snooper only exports the send window size, but not the receive window size. Therefore, also add the receive window size to the end of the output line. Signed-off-by: Daniel Borkmann Signed-off-by: David S. Miller diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c index d4943f6..fae788b 100644 --- a/net/ipv4/tcp_probe.c +++ b/net/ipv4/tcp_probe.c @@ -60,6 +60,7 @@ struct tcp_log { u32 snd_nxt; u32 snd_una; u32 snd_wnd; + u32 rcv_wnd; u32 snd_cwnd; u32 ssthresh; u32 srtt; @@ -116,6 +117,7 @@ static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb, p->snd_una = tp->snd_una; p->snd_cwnd = tp->snd_cwnd; p->snd_wnd = tp->snd_wnd; + p->rcv_wnd = tp->rcv_wnd; p->ssthresh = tcp_current_ssthresh(sk); p->srtt = tp->srtt >> 3; @@ -157,13 +159,13 @@ static int tcpprobe_sprint(char *tbuf, int n) = ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start)); return scnprintf(tbuf, n, - "%lu.%09lu %pI4:%u %pI4:%u %d %#x %#x %u %u %u %u\n", + "%lu.%09lu %pI4:%u %pI4:%u %d %#x %#x %u %u %u %u %u\n", (unsigned long) tv.tv_sec, (unsigned long) tv.tv_nsec, &p->saddr, ntohs(p->sport), &p->daddr, ntohs(p->dport), p->length, p->snd_nxt, p->snd_una, - p->snd_cwnd, p->ssthresh, p->snd_wnd, p->srtt); + p->snd_cwnd, p->ssthresh, p->snd_wnd, p->srtt, p->rcv_wnd); } static ssize_t tcpprobe_read(struct file *file, char __user *buf, -- cgit v0.10.2 From d8cdeda6ddbcb33debbb87590a7d42ff7f5b5cfd Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 21 Aug 2013 19:47:59 +0200 Subject: net: tcp_probe: kprobes: adapt jtcp_rcv_established signature This patches fixes a rather unproblematic function signature mismatch as the const specifier was missing for the th variable; and next to that it adds a build-time assertion so that future function signature mismatches for kprobes will not end badly, similarly as commit 22222997 ("net: sctp: add build check for sctp_sf_eat_sack_6_2/jsctp_sf_eat_sack") did it for SCTP. Signed-off-by: Daniel Borkmann Signed-off-by: David S. Miller diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c index fae788b..a2392f4 100644 --- a/net/ipv4/tcp_probe.c +++ b/net/ipv4/tcp_probe.c @@ -92,7 +92,7 @@ static inline int tcp_probe_avail(void) * Note: arguments must match tcp_rcv_established()! 
*/ static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb, - struct tcphdr *th, unsigned int len) + const struct tcphdr *th, unsigned int len) { const struct tcp_sock *tp = tcp_sk(sk); const struct inet_sock *inet = inet_sk(sk); @@ -225,6 +225,13 @@ static __init int tcpprobe_init(void) { int ret = -ENOMEM; + /* Warning: if the function signature of tcp_rcv_established, + * has been changed, you also have to change the signature of + * jtcp_rcv_established, otherwise you end up right here! + */ + BUILD_BUG_ON(__same_type(tcp_rcv_established, + jtcp_rcv_established) == 0); + init_waitqueue_head(&tcp_probe.wait); spin_lock_init(&tcp_probe.lock); -- cgit v0.10.2 From f925d0a62db3f1b6e463ef956d0855006538d002 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 21 Aug 2013 19:48:00 +0200 Subject: net: tcp_probe: add IPv6 support The tcp_probe currently only supports analysis of IPv4 connections. Therefore, it would be nice to have IPv6 supported as well. Since we have the recently added %pISpc specifier that is IPv4/IPv6 generic, build related sockaddress structures from the flow information and pass this to our format string. Tested with SSH and HTTP sessions on IPv4 and IPv6. Signed-off-by: Daniel Borkmann Signed-off-by: David S. Miller diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c index a2392f4..301a3ef 100644 --- a/net/ipv4/tcp_probe.c +++ b/net/ipv4/tcp_probe.c @@ -54,8 +54,11 @@ static const char procname[] = "tcpprobe"; struct tcp_log { ktime_t tstamp; - __be32 saddr, daddr; - __be16 sport, dport; + union { + struct sockaddr raw; + struct sockaddr_in v4; + struct sockaddr_in6 v6; + } src, dst; u16 length; u32 snd_nxt; u32 snd_una; @@ -87,6 +90,30 @@ static inline int tcp_probe_avail(void) return bufsize - tcp_probe_used() - 1; } +#define tcp_probe_copy_fl_to_si4(inet, si4, mem) \ + do { \ + si4.sin_family = AF_INET; \ + si4.sin_port = inet->inet_##mem##port; \ + si4.sin_addr.s_addr = inet->inet_##mem##addr; \ + } while (0) \ + +#if IS_ENABLED(CONFIG_IPV6) +#define tcp_probe_copy_fl_to_si6(inet, si6, mem) \ + do { \ + struct ipv6_pinfo *pi6 = inet->pinet6; \ + si6.sin6_family = AF_INET6; \ + si6.sin6_port = inet->inet_##mem##port; \ + si6.sin6_addr = pi6->mem##addr; \ + si6.sin6_flowinfo = 0; /* No need here. */ \ + si6.sin6_scope_id = 0; /* No need here. */ \ + } while (0) +#else +#define tcp_probe_copy_fl_to_si6(fl, si6, mem) \ + do { \ + memset(&si6, 0, sizeof(si6)); \ + } while (0) +#endif + /* * Hook inserted to be called before each receive packet. * Note: arguments must match tcp_rcv_established()! 
@@ -108,10 +135,19 @@ static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb, struct tcp_log *p = tcp_probe.log + tcp_probe.head; p->tstamp = ktime_get(); - p->saddr = inet->inet_saddr; - p->sport = inet->inet_sport; - p->daddr = inet->inet_daddr; - p->dport = inet->inet_dport; + switch (sk->sk_family) { + case AF_INET: + tcp_probe_copy_fl_to_si4(inet, p->src.v4, s); + tcp_probe_copy_fl_to_si4(inet, p->dst.v4, d); + break; + case AF_INET6: + tcp_probe_copy_fl_to_si6(inet, p->src.v6, s); + tcp_probe_copy_fl_to_si6(inet, p->dst.v6, d); + break; + default: + BUG(); + } + p->length = skb->len; p->snd_nxt = tp->snd_nxt; p->snd_una = tp->snd_una; @@ -159,12 +195,10 @@ static int tcpprobe_sprint(char *tbuf, int n) = ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start)); return scnprintf(tbuf, n, - "%lu.%09lu %pI4:%u %pI4:%u %d %#x %#x %u %u %u %u %u\n", + "%lu.%09lu %pISpc %pISpc %d %#x %#x %u %u %u %u %u\n", (unsigned long) tv.tv_sec, (unsigned long) tv.tv_nsec, - &p->saddr, ntohs(p->sport), - &p->daddr, ntohs(p->dport), - p->length, p->snd_nxt, p->snd_una, + &p->src, &p->dst, p->length, p->snd_nxt, p->snd_una, p->snd_cwnd, p->ssthresh, p->snd_wnd, p->srtt, p->rcv_wnd); } -- cgit v0.10.2 From 0ca04b63801b6cbdaa539c89945982ad9dc8d858 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Thu, 22 Aug 2013 13:47:00 +0200 Subject: net: ethernet: davinci_cpdma: export cpdma_chan_get_stats This is needed when the cpsw driver is built as module. Signed-off-by: Daniel Mack Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 031ebc8..90a7946 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -591,6 +591,7 @@ int cpdma_chan_get_stats(struct cpdma_chan *chan, spin_unlock_irqrestore(&chan->lock, flags); return 0; } +EXPORT_SYMBOL_GPL(cpdma_chan_get_stats); int cpdma_chan_dump(struct cpdma_chan *chan) { -- cgit v0.10.2 From 05f147ef7c5289e9aba488b3e71a4259be5a1420 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 22 Aug 2013 17:12:50 +0200 Subject: net: sctp_probe: simplify code by using %pISc format specifier We can simply use the %pISc format specifier that was recently added and thus remove some code that distinguishes between IPv4 and IPv6. Signed-off-by: Daniel Borkmann Acked-by: Neil Horman Signed-off-by: David S. 
Miller diff --git a/net/sctp/probe.c b/net/sctp/probe.c index e62c225..cd72ae5 100644 --- a/net/sctp/probe.c +++ b/net/sctp/probe.c @@ -155,13 +155,8 @@ static sctp_disposition_t jsctp_sf_eat_sack(struct net *net, if (sp == asoc->peer.primary_path) printl("*"); - if (sp->ipaddr.sa.sa_family == AF_INET) - printl("%pI4 ", &sp->ipaddr.v4.sin_addr); - else - printl("%pI6 ", &sp->ipaddr.v6.sin6_addr); - - printl("%2u %8u %8u %8u %8u %8u ", - sp->state, sp->cwnd, sp->ssthresh, + printl("%pISc %2u %8u %8u %8u %8u %8u ", + &sp->ipaddr, sp->state, sp->cwnd, sp->ssthresh, sp->flight_size, sp->partial_bytes_acked, sp->pathmtu); } -- cgit v0.10.2 From 35b9eb0eee736e3d984d24873e0eff85dba6037f Mon Sep 17 00:00:00 2001 From: Gerhard Sittig Date: Thu, 22 Aug 2013 21:55:12 +0200 Subject: fs_enet: silence a build warning (unused variable) Since commit 720a43efd30f04a0a492c85fb997361c44fbae05 (drivers:net: Remove unnecessary OOM messages after netdev_alloc_skb) there is a build warning: drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c: In function 'tx_skb_align_workaround': drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c:586:26: warning: unused variable 'fep' Fix it. Signed-off-by: Gerhard Sittig Signed-off-by: Anatolij Gustschin Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 8de53a1..c04eb3a 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -583,7 +583,6 @@ static struct sk_buff *tx_skb_align_workaround(struct net_device *dev, struct sk_buff *skb) { struct sk_buff *new_skb; - struct fs_enet_private *fep = netdev_priv(dev); /* Alloc new skb */ new_skb = netdev_alloc_skb(dev, skb->len + 4); -- cgit v0.10.2 From 2771399ac9986c75437a83b1c723493cfcdfa439 Mon Sep 17 00:00:00 2001 From: Gerhard Sittig Date: Thu, 22 Aug 2013 21:55:13 +0200 Subject: fs_enet: cleanup clock API use make the Freescale ethernet driver get, prepare and enable the FEC clock during probe(); disable and unprepare the clock upon remove(), put is done by the devm approach; hold a reference to the clock over the period of use. clock lookup is non-fatal as not all platforms provide clock specs in their device tree; failure to enable specified clocks is fatal. Signed-off-by: Gerhard Sittig Signed-off-by: Anatolij Gustschin Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index c04eb3a..6b60582 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -999,6 +999,8 @@ static int fs_enet_probe(struct platform_device *ofdev) struct fs_enet_private *fep; struct fs_platform_info *fpi; const u32 *data; + struct clk *clk; + int err; const u8 *mac_addr; const char *phy_connection_type; int privsize, len, ret = -ENODEV; @@ -1036,6 +1038,20 @@ static int fs_enet_probe(struct platform_device *ofdev) fpi->use_rmii = 1; } + /* make clock lookup non-fatal (the driver is shared among platforms), + * but require enable to succeed when a clock was specified/found, + * keep a reference to the clock upon successful acquisition + */ + clk = devm_clk_get(&ofdev->dev, "per"); + if (!IS_ERR(clk)) { + err = clk_prepare_enable(clk); + if (err) { + ret = err; + goto out_free_fpi; + } + fpi->clk_per = clk; + } + privsize = sizeof(*fep) + sizeof(struct sk_buff **) * (fpi->rx_ring + fpi->tx_ring); @@ -1107,6 +1123,8 @@ out_free_dev: free_netdev(ndev); out_put: of_node_put(fpi->phy_node); + if (fpi->clk_per) + clk_disable_unprepare(fpi->clk_per); out_free_fpi: kfree(fpi); return ret; @@ -1123,6 +1141,8 @@ static int fs_enet_remove(struct platform_device *ofdev) fep->ops->cleanup_data(ndev); dev_set_drvdata(fep->dev, NULL); of_node_put(fep->fpi->phy_node); + if (fep->fpi->clk_per) + clk_disable_unprepare(fep->fpi->clk_per); free_netdev(ndev); return 0; } diff --git a/include/linux/fs_enet_pd.h b/include/linux/fs_enet_pd.h index 343d82a..efb0596 100644 --- a/include/linux/fs_enet_pd.h +++ b/include/linux/fs_enet_pd.h @@ -16,6 +16,7 @@ #ifndef FS_ENET_PD_H #define FS_ENET_PD_H +#include #include #include #include @@ -143,6 +144,8 @@ struct fs_platform_info { int use_rmii; /* use RMII mode */ int has_phy; /* if the network is phy container as well...*/ + + struct clk *clk_per; /* 'per' clock for register access */ }; struct fs_mii_fec_platform_info { u32 irq[32]; -- cgit v0.10.2 From 1fb9026000e66ffe032b11ec724c1bc7d068198e Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 21 Aug 2013 11:24:01 +0200 Subject: mac80211: move setting WIPHY_FLAG_SUPPORTS_SCHED_SCAN into drivers mac80211 currently sets WIPHY_FLAG_SUPPORTS_SCHED_SCAN based on whether the start_sched_scan operation is supported or not, but that will not be correct for all drivers, we're adding scheduled scan to the iwlmvm driver but it depends on firmware support. Therefore, move setting WIPHY_FLAG_SUPPORTS_SCHED_SCAN into the drivers so that they can control it regardless of implementing the operation. This currently only affects the TI drivers since they're the only ones implementing scheduled scan (in a mac80211 driver.) 
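For illustration, a driver that gates the capability on firmware support can now advertise it along these lines (a sketch only: the helper name and firmware flag are made up, while WIPHY_FLAG_SUPPORTS_SCHED_SCAN and the wiphy pointer in struct ieee80211_hw are the real mac80211/cfg80211 interface):

	static void foo_init_wiphy(struct ieee80211_hw *hw, bool fw_has_sched_scan)
	{
		/* mac80211 no longer sets this from ops->sched_scan_start;
		 * the driver decides, e.g. after reading firmware capabilities,
		 * and still has to implement the sched_scan_start/stop ops.
		 */
		if (fw_has_sched_scan)
			hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
	}
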
Acked-by: Luciano Coelho Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index d1b19c3..38995f9 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -5623,7 +5623,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl) wl->hw->wiphy->max_remain_on_channel_duration = 5000; wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD | - WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; + WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | + WIPHY_FLAG_SUPPORTS_SCHED_SCAN; /* make sure all our channels fit in the scanned_ch bitmask */ BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) + diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 25eb35b..21d5d44 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -892,9 +892,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) if (!local->ops->remain_on_channel) local->hw.wiphy->max_remain_on_channel_duration = 5000; - if (local->ops->sched_scan_start) - local->hw.wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; - /* mac80211 based drivers don't support internal TDLS setup */ if (local->hw.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) local->hw.wiphy->flags |= WIPHY_FLAG_TDLS_EXTERNAL_SETUP; -- cgit v0.10.2 From a4ef66a915b957416a89a48365aea2ec2dc551f6 Mon Sep 17 00:00:00 2001 From: Chun-Yeow Yeoh Date: Thu, 22 Aug 2013 10:28:58 -0700 Subject: mac80211: only respond to probe request with mesh ID Previously, the mesh STA responds to probe request from legacy STA but now it will only respond to legacy STA if the legacy STA does include the specific mesh ID or wildcard mesh ID in the probe request. The iw patch "iw: scan using meshid" can be used either by legacy STA or by mesh STA to do active scanning by inserting the mesh ID in the probe request frame. Signed-off-by: Chun-Yeow Yeoh Acked-by: Thomas Pedersen Acked-by: Javier Cardona Signed-off-by: Johannes Berg diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 885a5f6..707ac61 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -832,6 +832,9 @@ ieee80211_mesh_rx_probe_req(struct ieee80211_sub_if_data *sdata, ieee802_11_parse_elems(pos, len - baselen, false, &elems); + if (!elems.mesh_id) + return; + /* 802.11-2012 10.1.4.3.2 */ if ((!ether_addr_equal(mgmt->da, sdata->vif.addr) && !is_broadcast_ether_addr(mgmt->da)) || -- cgit v0.10.2 From c4c205f3cd17b567b8e20098522416eac2e73960 Mon Sep 17 00:00:00 2001 From: Bob Copeland Date: Fri, 23 Aug 2013 09:35:38 -0400 Subject: mac80211: assign seqnums for group QoS frames According to 802.11-2012 9.3.2.10, paragraph 4, QoS data frames with a group address in the Address 1 field have sequence numbers allocated from the same counter as non-QoS data and management frames. Without this flag, some drivers may not assign sequence numbers, and in rare cases frames might get dropped. Set the control flag accordingly. Signed-off-by: Bob Copeland Signed-off-by: Johannes Berg diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 098ae85..3456c04 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -781,9 +781,11 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) /* * Anything but QoS data that has a sequence number field * (is long enough) gets a sequence number from the global - * counter. + * counter. QoS data frames with a multicast destination + * also use the global counter (802.11-2012 9.3.2.10). 
*/ - if (!ieee80211_is_data_qos(hdr->frame_control)) { + if (!ieee80211_is_data_qos(hdr->frame_control) || + is_multicast_ether_addr(hdr->addr1)) { /* driver should assign sequence number */ info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; /* for pure STA mode without beacons, we can do it */ -- cgit v0.10.2 From 19504cf5f35fbe85db811fce9f4392a0cbdada2f Mon Sep 17 00:00:00 2001 From: Vladimir Kondratiev Date: Thu, 15 Aug 2013 14:51:28 +0300 Subject: cfg80211: add flags to cfg80211_rx_mgmt() Add flags intended to report various auxiliary information and introduce the NL80211_RXMGMT_FLAG_ANSWERED flag to report that the frame was already answered by the device. Signed-off-by: Vladimir Kondratiev [REPLIED->ANSWERED, reword commit message] Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c index 87aefb4..546d5da 100644 --- a/drivers/net/wireless/ath/ath6kl/wmi.c +++ b/drivers/net/wireless/ath/ath6kl/wmi.c @@ -568,8 +568,8 @@ static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len, dlen, freq, vif->probe_req_report); if (vif->probe_req_report || vif->nw_type == AP_NETWORK) - cfg80211_rx_mgmt(&vif->wdev, freq, 0, - ev->data, dlen, GFP_ATOMIC); + cfg80211_rx_mgmt(&vif->wdev, freq, 0, ev->data, dlen, 0, + GFP_ATOMIC); return 0; } @@ -608,8 +608,7 @@ static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len, return -EINVAL; } ath6kl_dbg(ATH6KL_DBG_WMI, "rx_action: len=%u freq=%u\n", dlen, freq); - cfg80211_rx_mgmt(&vif->wdev, freq, 0, - ev->data, dlen, GFP_ATOMIC); + cfg80211_rx_mgmt(&vif->wdev, freq, 0, ev->data, dlen, 0, GFP_ATOMIC); return 0; } diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index dc8059a..21c791e 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -339,7 +339,7 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len) } } else { cfg80211_rx_mgmt(wil->wdev, freq, signal, - (void *)rx_mgmt_frame, d_len, GFP_KERNEL); + (void *)rx_mgmt_frame, d_len, 0, GFP_KERNEL); } } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c index 79555f0..d7a9745 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c @@ -1430,7 +1430,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp, IEEE80211_BAND_5GHZ); wdev = &ifp->vif->wdev; - cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len, + cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len, 0, GFP_ATOMIC); kfree(mgmt_frame); @@ -1895,7 +1895,7 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp, IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ); - cfg80211_rx_mgmt(&vif->wdev, freq, 0, mgmt_frame, mgmt_frame_len, + cfg80211_rx_mgmt(&vif->wdev, freq, 0, mgmt_frame, mgmt_frame_len, 0, GFP_ATOMIC); brcmf_dbg(INFO, "mgmt_frame_len (%d) , e->datalen (%d), chanspec (%04x), freq (%d)\n", diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c index e57ac0d..5d9e150 100644 --- a/drivers/net/wireless/mwifiex/util.c +++ b/drivers/net/wireless/mwifiex/util.c @@ -171,8 +171,8 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv, rx_pd->rx_pkt_length = cpu_to_le16(pkt_len); cfg80211_rx_mgmt(priv->wdev, priv->roc_cfg.chan.center_freq, - CAL_RSSI(rx_pd->snr, rx_pd->nf), - skb->data, pkt_len, GFP_ATOMIC); + CAL_RSSI(rx_pd->snr, rx_pd->nf), skb->data, pkt_len, + 
0, GFP_ATOMIC); return 0; } diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 9ab7a06..d530c54 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -4056,6 +4056,7 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr, * @sig_dbm: signal strength in mBm, or 0 if unknown * @buf: Management frame (header + body) * @len: length of the frame data + * @flags: flags, as defined in enum nl80211_rxmgmt_flags * @gfp: context flags * * This function is called whenever an Action frame is received for a station @@ -4067,7 +4068,7 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr, * driver is responsible for rejecting the frame. */ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_dbm, - const u8 *buf, size_t len, gfp_t gfp); + const u8 *buf, size_t len, u32 flags, gfp_t gfp); /** * cfg80211_mgmt_tx_status - notification of TX status for management frame diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 1f42bc3..fde2c02 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -1493,6 +1493,9 @@ enum nl80211_commands { * @NL80211_ATTR_CSA_C_OFF_PRESP: Offset of the channel switch counter * field in the probe response (%NL80211_ATTR_PROBE_RESP). * + * @NL80211_ATTR_RXMGMT_FLAGS: flags for nl80211_send_mgmt(), u32. + * As specified in the &enum nl80211_rxmgmt_flags. + * * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use */ @@ -1801,6 +1804,8 @@ enum nl80211_attrs { NL80211_ATTR_CSA_C_OFF_BEACON, NL80211_ATTR_CSA_C_OFF_PRESP, + NL80211_ATTR_RXMGMT_FLAGS, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -3901,4 +3906,15 @@ enum nl80211_crit_proto_id { /* maximum duration for critical protocol measures */ #define NL80211_CRIT_PROTO_MAX_DURATION 5000 /* msec */ +/** + * enum nl80211_rxmgmt_flags - flags for received management frame. + * + * Used by cfg80211_rx_mgmt() + * + * @NL80211_RXMGMT_FLAG_ANSWERED: frame was answered by device/driver. 
+ */ +enum nl80211_rxmgmt_flags { + NL80211_RXMGMT_FLAG_ANSWERED = 1 << 0, +}; + #endif /* __LINUX_NL80211_H */ diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index ffad155..0790105 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2678,8 +2678,7 @@ ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx) sig = status->signal; if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig, - rx->skb->data, rx->skb->len, - GFP_ATOMIC)) { + rx->skb->data, rx->skb->len, 0, GFP_ATOMIC)) { if (rx->sta) rx->sta->rx_packets++; dev_kfree_skb(rx->skb); diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index bfac5e1..8d49c1c 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -621,7 +621,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, } bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm, - const u8 *buf, size_t len, gfp_t gfp) + const u8 *buf, size_t len, u32 flags, gfp_t gfp) { struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); @@ -664,7 +664,7 @@ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm, /* Indicate the received Action frame to user space */ if (nl80211_send_mgmt(rdev, wdev, reg->nlportid, freq, sig_mbm, - buf, len, gfp)) + buf, len, flags, gfp)) continue; result = true; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 334697d..a51269d 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -10446,7 +10446,7 @@ EXPORT_SYMBOL(cfg80211_rx_unexpected_4addr_frame); int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, u32 nlportid, int freq, int sig_dbm, - const u8 *buf, size_t len, gfp_t gfp) + const u8 *buf, size_t len, u32 flags, gfp_t gfp) { struct net_device *netdev = wdev->netdev; struct sk_buff *msg; @@ -10469,7 +10469,9 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) || (sig_dbm && nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) || - nla_put(msg, NL80211_ATTR_FRAME, len, buf)) + nla_put(msg, NL80211_ATTR_FRAME, len, buf) || + (flags && + nla_put_u32(msg, NL80211_ATTR_RXMGMT_FLAGS, flags))) goto nla_put_failure; genlmsg_end(msg, hdr); diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h index 44341bf..2c0f2b3 100644 --- a/net/wireless/nl80211.h +++ b/net/wireless/nl80211.h @@ -66,7 +66,7 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev, int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, u32 nlpid, int freq, int sig_dbm, - const u8 *buf, size_t len, gfp_t gfp); + const u8 *buf, size_t len, u32 flags, gfp_t gfp); void nl80211_radar_notify(struct cfg80211_registered_device *rdev, -- cgit v0.10.2 From d70b7616d9080ec9f868fbd31db5fd4341435d61 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 23 Aug 2013 15:48:48 +0200 Subject: mac80211: ignore (E)CSA in probe response frames Seth reports that some APs, notably the Netgear WNDAP360, send invalid ECSA IEs in probe response frames with the operating class and channel number both set to zero, even when no channel switch is being done. As a result, any scan while connected to such an AP results in the connection being dropped. Fix this by ignoring any channel switch announcment in probe response frames entirely, since we're connected to the AP we will be receiving a beacon (and maybe even an action frame) if a channel switch is done, which is sufficient. 
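For reference, the element these APs mangle is tiny; per 802.11-2012 it is element ID 60 with a four-octet body (cf. struct ieee80211_ext_chansw_ie in include/linux/ieee80211.h), and the affected units send it with both class and channel zeroed:

	/* Extended Channel Switch Announcement element, ID 60, 4 octets */
	struct ieee80211_ext_chansw_ie {
		u8 mode;			/* channel switch mode */
		u8 new_operating_class;		/* 0 from the buggy AP */
		u8 new_ch_num;			/* 0 from the buggy AP */
		u8 count;			/* TBTTs until the switch */
	} __packed;
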
Cc: stable@vger.kernel.org # 3.10 Reported-by: Seth Forshee Tested-by: Seth Forshee Signed-off-by: Johannes Berg diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index cc9e02d..7a98d52 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -2851,14 +2851,6 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, ieee80211_rx_bss_put(local, bss); sdata->vif.bss_conf.beacon_rate = bss->beacon_rate; } - - if (!sdata->u.mgd.associated || - !ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid)) - return; - - ieee80211_sta_process_chanswitch(sdata, rx_status->mactime, - elems, true); - } @@ -3147,6 +3139,9 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems); + ieee80211_sta_process_chanswitch(sdata, rx_status->mactime, + &elems, true); + if (ieee80211_sta_wmm_params(local, sdata, elems.wmm_param, elems.wmm_param_len)) changed |= BSS_CHANGED_QOS; -- cgit v0.10.2 From d57170b1b1d71382a0d9cf31b01364a97add3f19 Mon Sep 17 00:00:00 2001 From: Pravin B Shelar Date: Tue, 30 Jul 2013 15:39:39 -0700 Subject: openvswitch: Use RCU lock for flow dump operation. Flow dump operation is read-only operation. There is no need to take ovs-lock. Following patch use rcu-lock for dumping flows. Signed-off-by: Pravin B Shelar Signed-off-by: Jesse Gross diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index f2ed760..e6fb866 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -1104,7 +1104,6 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp, u32 seq, u32 flags, u8 cmd) { const int skb_orig_len = skb->len; - const struct sw_flow_actions *sf_acts; struct nlattr *start; struct ovs_flow_stats stats; struct ovs_header *ovs_header; @@ -1113,8 +1112,6 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp, u8 tcp_flags; int err; - sf_acts = ovsl_dereference(flow->sf_acts); - ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd); if (!ovs_header) return -EMSGSIZE; @@ -1161,6 +1158,11 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp, */ start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS); if (start) { + const struct sw_flow_actions *sf_acts; + + sf_acts = rcu_dereference_check(flow->sf_acts, + lockdep_ovsl_is_held()); + err = actions_to_attr(sf_acts->actions, sf_acts->actions_len, skb); if (!err) nla_nest_end(skb, start); @@ -1440,15 +1442,14 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) struct datapath *dp; struct flow_table *table; - ovs_lock(); + rcu_read_lock(); dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); if (!dp) { - ovs_unlock(); + rcu_read_unlock(); return -ENODEV; } - table = ovsl_dereference(dp->table); - + table = rcu_dereference(dp->table); for (;;) { struct sw_flow *flow; u32 bucket, obj; @@ -1468,7 +1469,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) cb->args[0] = bucket; cb->args[1] = obj; } - ovs_unlock(); + rcu_read_unlock(); return skb->len; } -- cgit v0.10.2 From 59a35d60af3ab80037ad2fa2d60671ce2818b9e4 Mon Sep 17 00:00:00 2001 From: Pravin B Shelar Date: Tue, 30 Jul 2013 15:42:19 -0700 Subject: openvswitch: Use RCU lock for dp dump operation. RCUfy dp-dump operation which is already read-only. This makes all ovs dump operations lockless. 
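The read/update split behind both dump patches is the standard RCU list pattern; roughly (illustrative, visit() stands in for the real netlink fill callback):

	/* dump path: pure reader, no ovs_lock needed */
	rcu_read_lock();
	list_for_each_entry_rcu(dp, &ovs_net->dps, list_node)
		visit(dp);		/* must not sleep */
	rcu_read_unlock();

	/* Writers keep ovs_lock() but switch to list_add_tail_rcu() and
	 * list_del_rcu(), and free the datapath only after a grace period
	 * (call_rcu() -> destroy_dp_rcu()), so a concurrent reader never
	 * follows a freed entry.
	 */
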
Signed-off-by: Pravin B Shelar Signed-off-by: Jesse Gross diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index e6fb866..9d97ef3 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -951,9 +951,10 @@ static struct genl_ops dp_packet_genl_ops[] = { static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats) { + struct flow_table *table; int i; - struct flow_table *table = ovsl_dereference(dp->table); + table = rcu_dereference_check(dp->table, lockdep_ovsl_is_held()); stats->n_flows = ovs_flow_tbl_count(table); stats->n_hit = stats->n_missed = stats->n_lost = 0; @@ -1665,7 +1666,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info) goto err_destroy_local_port; ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id); - list_add_tail(&dp->list_node, &ovs_net->dps); + list_add_tail_rcu(&dp->list_node, &ovs_net->dps); ovs_unlock(); @@ -1703,7 +1704,7 @@ static void __dp_destroy(struct datapath *dp) ovs_dp_detach_port(vport); } - list_del(&dp->list_node); + list_del_rcu(&dp->list_node); /* OVSP_LOCAL is datapath internal port. We need to make sure that * all port in datapath are destroyed first before freeing datapath. @@ -1808,8 +1809,8 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) int skip = cb->args[0]; int i = 0; - ovs_lock(); - list_for_each_entry(dp, &ovs_net->dps, list_node) { + rcu_read_lock(); + list_for_each_entry_rcu(dp, &ovs_net->dps, list_node) { if (i >= skip && ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, @@ -1817,7 +1818,7 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) break; i++; } - ovs_unlock(); + rcu_read_unlock(); cb->args[0] = i; -- cgit v0.10.2 From 76a66c7e7f6bec35bbdb0ca91db327c7c56d8c45 Mon Sep 17 00:00:00 2001 From: Pravin B Shelar Date: Tue, 30 Jul 2013 15:45:59 -0700 Subject: openvswitch: Use non rcu hlist_del() flow table entry. Flow table destroy is done in rcu call-back context. Therefore there is no need to use rcu variant of hlist_del(). Signed-off-by: Pravin B Shelar Signed-off-by: Jesse Gross diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 1aa84dc..fca2825 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -302,7 +302,7 @@ void ovs_flow_tbl_destroy(struct flow_table *table) int ver = table->node_ver; hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) { - hlist_del_rcu(&flow->hash_node[ver]); + hlist_del(&flow->hash_node[ver]); ovs_flow_free(flow); } } -- cgit v0.10.2 From 2537b4dd0a70931a0c4d83d4402d0a5d78af3891 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 26 Jul 2013 14:01:54 +0200 Subject: openvswitch:: link upper device for port devices Link upper device properly. That will make IFLA_MASTER filled up. Set the master to port 0 of the datapath under which the port belongs. 
Signed-off-by: Jiri Pirko Signed-off-by: Jesse Gross diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c index 5982f3f..09d93c1 100644 --- a/net/openvswitch/vport-netdev.c +++ b/net/openvswitch/vport-netdev.c @@ -25,6 +25,7 @@ #include #include #include +#include #include @@ -74,6 +75,15 @@ static rx_handler_result_t netdev_frame_hook(struct sk_buff **pskb) return RX_HANDLER_CONSUMED; } +static struct net_device *get_dpdev(struct datapath *dp) +{ + struct vport *local; + + local = ovs_vport_ovsl(dp, OVSP_LOCAL); + BUG_ON(!local); + return netdev_vport_priv(local)->dev; +} + static struct vport *netdev_create(const struct vport_parms *parms) { struct vport *vport; @@ -103,10 +113,15 @@ static struct vport *netdev_create(const struct vport_parms *parms) } rtnl_lock(); + err = netdev_master_upper_dev_link(netdev_vport->dev, + get_dpdev(vport->dp)); + if (err) + goto error_unlock; + err = netdev_rx_handler_register(netdev_vport->dev, netdev_frame_hook, vport); if (err) - goto error_unlock; + goto error_master_upper_dev_unlink; dev_set_promiscuity(netdev_vport->dev, 1); netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH; @@ -114,6 +129,8 @@ static struct vport *netdev_create(const struct vport_parms *parms) return vport; +error_master_upper_dev_unlink: + netdev_upper_dev_unlink(netdev_vport->dev, get_dpdev(vport->dp)); error_unlock: rtnl_unlock(); error_put: @@ -140,6 +157,7 @@ static void netdev_destroy(struct vport *vport) rtnl_lock(); netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH; netdev_rx_handler_unregister(netdev_vport->dev); + netdev_upper_dev_unlink(netdev_vport->dev, get_dpdev(vport->dp)); dev_set_promiscuity(netdev_vport->dev, -1); rtnl_unlock(); -- cgit v0.10.2 From 2694838d608db701e5ed1e95d741cdbddd9aa864 Mon Sep 17 00:00:00 2001 From: Justin Pettit Date: Mon, 19 Aug 2013 17:49:29 -0700 Subject: openvswitch: Fix argument descriptions in vport.c. Signed-off-by: Justin Pettit Signed-off-by: Jesse Gross diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index d69e0c0..6f65dbe 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c @@ -203,7 +203,7 @@ out: * ovs_vport_set_options - modify existing vport device (for kernel callers) * * @vport: vport to modify. - * @port: New configuration. + * @options: New configuration. * * Modifies an existing device with the specified configuration (which is * dependent on device type). ovs_mutex must be held. @@ -328,6 +328,7 @@ int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb) * * @vport: vport that received the packet * @skb: skb that was received + * @tun_key: tunnel (if any) that carried packet * * Must be called with rcu_read_lock. The packet cannot be shared and * skb->data should point to the Ethernet header. 
-- cgit v0.10.2 From 3fa34de67861abfc4846ccec886ca549d46ae56c Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Tue, 20 Aug 2013 10:48:15 -0700 Subject: openvswitch: check CONFIG_OPENVSWITCH_GRE in makefile Cc: Jesse Gross Cc: Pravin B Shelar Signed-off-by: Cong Wang Signed-off-by: Jesse Gross diff --git a/net/openvswitch/Makefile b/net/openvswitch/Makefile index 82e4ee5..ea36e99 100644 --- a/net/openvswitch/Makefile +++ b/net/openvswitch/Makefile @@ -10,10 +10,13 @@ openvswitch-y := \ dp_notify.o \ flow.o \ vport.o \ - vport-gre.o \ vport-internal_dev.o \ vport-netdev.o ifneq ($(CONFIG_OPENVSWITCH_VXLAN),) openvswitch-y += vport-vxlan.o endif + +ifneq ($(CONFIG_OPENVSWITCH_GRE),) +openvswitch-y += vport-gre.o +endif diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c index 493e977..21d5073 100644 --- a/net/openvswitch/vport-gre.c +++ b/net/openvswitch/vport-gre.c @@ -16,7 +16,6 @@ * 02110-1301, USA */ -#ifdef CONFIG_OPENVSWITCH_GRE #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include @@ -271,5 +270,3 @@ const struct vport_ops ovs_gre_vport_ops = { .get_name = gre_get_name, .send = gre_tnl_send, }; - -#endif /* OPENVSWITCH_GRE */ -- cgit v0.10.2 From 03f0d916aa0317592dda11bd17c7357858719b6c Mon Sep 17 00:00:00 2001 From: Andy Zhou Date: Wed, 7 Aug 2013 20:01:00 -0700 Subject: openvswitch: Mega flow implementation Add wildcarded flow support in kernel datapath. Wildcarded flow can improve OVS flow set up performance by avoid sending matching new flows to the user space program. The exact performance boost will largely dependent on wildcarded flow hit rate. In case all new flows hits wildcard flows, the flow set up rate is within 5% of that of linux bridge module. Pravin has made significant contributions to this patch. Including API clean ups and bug fixes. Signed-off-by: Pravin B Shelar Signed-off-by: Andy Zhou Signed-off-by: Jesse Gross diff --git a/Documentation/networking/openvswitch.txt b/Documentation/networking/openvswitch.txt index 8fa2dd1..37c20ee 100644 --- a/Documentation/networking/openvswitch.txt +++ b/Documentation/networking/openvswitch.txt @@ -91,6 +91,46 @@ Often we ellipsize arguments not important to the discussion, e.g.: in_port(1), eth(...), eth_type(0x0800), ipv4(...), tcp(...) +Wildcarded flow key format +-------------------------- + +A wildcarded flow is described with two sequences of Netlink attributes +passed over the Netlink socket. A flow key, exactly as described above, and an +optional corresponding flow mask. + +A wildcarded flow can represent a group of exact match flows. Each '1' bit +in the mask specifies a exact match with the corresponding bit in the flow key. +A '0' bit specifies a don't care bit, which will match either a '1' or '0' bit +of a incoming packet. Using wildcarded flow can improve the flow set up rate +by reduce the number of new flows need to be processed by the user space program. + +Support for the mask Netlink attribute is optional for both the kernel and user +space program. The kernel can ignore the mask attribute, installing an exact +match flow, or reduce the number of don't care bits in the kernel to less than +what was specified by the user space program. In this case, variations in bits +that the kernel does not implement will simply result in additional flow setups. +The kernel module will also work with user space programs that neither support +nor supply flow mask attributes. 
+ +Since the kernel may ignore or modify wildcard bits, it can be difficult for +the userspace program to know exactly what matches are installed. There are +two possible approaches: reactively install flows as they miss the kernel +flow table (and therefore not attempt to determine wildcard changes at all) +or use the kernel's response messages to determine the installed wildcards. + +When interacting with userspace, the kernel should maintain the match portion +of the key exactly as originally installed. This will provides a handle to +identify the flow for all future operations. However, when reporting the +mask of an installed flow, the mask should include any restrictions imposed +by the kernel. + +The behavior when using overlapping wildcarded flows is undefined. It is the +responsibility of the user space program to ensure that any incoming packet +can match at most one flow, wildcarded or not. The current implementation +performs best-effort detection of overlapping wildcarded flows and may reject +some but not all of them. However, this behavior may change in future versions. + + Basic rule for evolving flow keys --------------------------------- diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 52490b0..de1fa5d 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -1,6 +1,6 @@ /* - * Copyright (c) 2007-2011 Nicira Networks. + * Copyright (c) 2007-2013 Nicira, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -379,6 +379,12 @@ struct ovs_key_nd { * @OVS_FLOW_ATTR_CLEAR: If present in a %OVS_FLOW_CMD_SET request, clears the * last-used time, accumulated TCP flags, and statistics for this flow. * Otherwise ignored in requests. Never present in notifications. + * @OVS_FLOW_ATTR_MASK: Nested %OVS_KEY_ATTR_* attributes specifying the + * mask bits for wildcarded flow match. Mask bit value '1' specifies exact + * match with corresponding flow key bit, while mask bit value '0' specifies + * a wildcarded match. Omitting attribute is treated as wildcarding all + * corresponding fields. Optional for all requests. If not present, + * all flow key bits are exact match bits. * * These attributes follow the &struct ovs_header within the Generic Netlink * payload for %OVS_FLOW_* commands. @@ -391,6 +397,7 @@ enum ovs_flow_attr { OVS_FLOW_ATTR_TCP_FLAGS, /* 8-bit OR'd TCP flags. */ OVS_FLOW_ATTR_USED, /* u64 msecs last used in monotonic time. */ OVS_FLOW_ATTR_CLEAR, /* Flag to clear stats, tcp_flags, used. */ + OVS_FLOW_ATTR_MASK, /* Sequence of OVS_KEY_ATTR_* attributes. */ __OVS_FLOW_ATTR_MAX }; diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index ab101f7..1f68022 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2012 Nicira, Inc. + * Copyright (c) 2007-2013 Nicira, Inc. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -376,8 +376,10 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb, const struct nlattr *a; int rem; + BUG_ON(!OVS_CB(skb)->pkt_key); + upcall.cmd = OVS_PACKET_CMD_ACTION; - upcall.key = &OVS_CB(skb)->flow->key; + upcall.key = OVS_CB(skb)->pkt_key; upcall.userdata = NULL; upcall.portid = 0; diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 9d97ef3..d29cd9a 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2012 Nicira, Inc. + * Copyright (c) 2007-2013 Nicira, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -165,7 +165,7 @@ static void destroy_dp_rcu(struct rcu_head *rcu) { struct datapath *dp = container_of(rcu, struct datapath, rcu); - ovs_flow_tbl_destroy((__force struct flow_table *)dp->table); + ovs_flow_tbl_destroy((__force struct flow_table *)dp->table, false); free_percpu(dp->stats_percpu); release_net(ovs_dp_get_net(dp)); kfree(dp->ports); @@ -226,19 +226,18 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb) struct sw_flow_key key; u64 *stats_counter; int error; - int key_len; stats = this_cpu_ptr(dp->stats_percpu); /* Extract flow from 'skb' into 'key'. */ - error = ovs_flow_extract(skb, p->port_no, &key, &key_len); + error = ovs_flow_extract(skb, p->port_no, &key); if (unlikely(error)) { kfree_skb(skb); return; } /* Look up flow. */ - flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len); + flow = ovs_flow_lookup(rcu_dereference(dp->table), &key); if (unlikely(!flow)) { struct dp_upcall_info upcall; @@ -253,6 +252,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb) } OVS_CB(skb)->flow = flow; + OVS_CB(skb)->pkt_key = &key; stats_counter = &stats->n_hit; ovs_flow_used(OVS_CB(skb)->flow, skb); @@ -435,7 +435,7 @@ static int queue_userspace_packet(struct net *net, int dp_ifindex, upcall->dp_ifindex = dp_ifindex; nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY); - ovs_flow_to_nlattrs(upcall_info->key, user_skb); + ovs_flow_to_nlattrs(upcall_info->key, upcall_info->key, user_skb); nla_nest_end(user_skb, nla); if (upcall_info->userdata) @@ -468,7 +468,7 @@ static int flush_flows(struct datapath *dp) rcu_assign_pointer(dp->table, new_table); - ovs_flow_tbl_deferred_destroy(old_table); + ovs_flow_tbl_destroy(old_table, true); return 0; } @@ -611,10 +611,12 @@ static int validate_tp_port(const struct sw_flow_key *flow_key) static int validate_and_copy_set_tun(const struct nlattr *attr, struct sw_flow_actions **sfa) { - struct ovs_key_ipv4_tunnel tun_key; + struct sw_flow_match match; + struct sw_flow_key key; int err, start; - err = ovs_ipv4_tun_from_nlattr(nla_data(attr), &tun_key); + ovs_match_init(&match, &key, NULL); + err = ovs_ipv4_tun_from_nlattr(nla_data(attr), &match, false); if (err) return err; @@ -622,7 +624,8 @@ static int validate_and_copy_set_tun(const struct nlattr *attr, if (start < 0) return start; - err = add_action(sfa, OVS_KEY_ATTR_IPV4_TUNNEL, &tun_key, sizeof(tun_key)); + err = add_action(sfa, OVS_KEY_ATTR_IPV4_TUNNEL, &match.key->tun_key, + sizeof(match.key->tun_key)); add_nested_action_end(*sfa, start); return err; @@ -857,7 +860,6 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) struct ethhdr *eth; int len; int err; - int 
key_len; err = -EINVAL; if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] || @@ -890,11 +892,11 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) if (IS_ERR(flow)) goto err_kfree_skb; - err = ovs_flow_extract(packet, -1, &flow->key, &key_len); + err = ovs_flow_extract(packet, -1, &flow->key); if (err) goto err_flow_free; - err = ovs_flow_metadata_from_nlattrs(flow, key_len, a[OVS_PACKET_ATTR_KEY]); + err = ovs_flow_metadata_from_nlattrs(flow, a[OVS_PACKET_ATTR_KEY]); if (err) goto err_flow_free; acts = ovs_flow_actions_alloc(nla_len(a[OVS_PACKET_ATTR_ACTIONS])); @@ -908,6 +910,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) goto err_flow_free; OVS_CB(packet)->flow = flow; + OVS_CB(packet)->pkt_key = &flow->key; packet->priority = flow->key.phy.priority; packet->mark = flow->key.phy.skb_mark; @@ -922,13 +925,13 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) local_bh_enable(); rcu_read_unlock(); - ovs_flow_free(flow); + ovs_flow_free(flow, false); return err; err_unlock: rcu_read_unlock(); err_flow_free: - ovs_flow_free(flow); + ovs_flow_free(flow, false); err_kfree_skb: kfree_skb(packet); err: @@ -1045,7 +1048,8 @@ static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb) if (!start) return -EMSGSIZE; - err = ovs_ipv4_tun_to_nlattr(skb, nla_data(ovs_key)); + err = ovs_ipv4_tun_to_nlattr(skb, nla_data(ovs_key), + nla_data(ovs_key)); if (err) return err; nla_nest_end(skb, start); @@ -1093,6 +1097,7 @@ static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts) { return NLMSG_ALIGN(sizeof(struct ovs_header)) + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_KEY */ + + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_MASK */ + nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */ + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */ + nla_total_size(8) /* OVS_FLOW_ATTR_USED */ @@ -1119,12 +1124,25 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp, ovs_header->dp_ifindex = get_dpifindex(dp); + /* Fill flow key. */ nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY); if (!nla) goto nla_put_failure; - err = ovs_flow_to_nlattrs(&flow->key, skb); + + err = ovs_flow_to_nlattrs(&flow->unmasked_key, + &flow->unmasked_key, skb); + if (err) + goto error; + nla_nest_end(skb, nla); + + nla = nla_nest_start(skb, OVS_FLOW_ATTR_MASK); + if (!nla) + goto nla_put_failure; + + err = ovs_flow_to_nlattrs(&flow->key, &flow->mask->key, skb); if (err) goto error; + nla_nest_end(skb, nla); spin_lock_bh(&flow->lock); @@ -1214,20 +1232,24 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) { struct nlattr **a = info->attrs; struct ovs_header *ovs_header = info->userhdr; - struct sw_flow_key key; - struct sw_flow *flow; + struct sw_flow_key key, masked_key; + struct sw_flow *flow = NULL; + struct sw_flow_mask mask; struct sk_buff *reply; struct datapath *dp; struct flow_table *table; struct sw_flow_actions *acts = NULL; + struct sw_flow_match match; int error; - int key_len; /* Extract key. 
*/ error = -EINVAL; if (!a[OVS_FLOW_ATTR_KEY]) goto error; - error = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]); + + ovs_match_init(&match, &key, &mask); + error = ovs_match_from_nlattrs(&match, + a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]); if (error) goto error; @@ -1238,9 +1260,13 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) if (IS_ERR(acts)) goto error; - error = validate_and_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, 0, &acts); - if (error) + ovs_flow_key_mask(&masked_key, &key, &mask); + error = validate_and_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], + &masked_key, 0, &acts); + if (error) { + OVS_NLERR("Flow actions may not be safe on all matching packets.\n"); goto err_kfree; + } } else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) { error = -EINVAL; goto error; @@ -1253,8 +1279,11 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) goto err_unlock_ovs; table = ovsl_dereference(dp->table); - flow = ovs_flow_tbl_lookup(table, &key, key_len); + + /* Check if this is a duplicate flow */ + flow = ovs_flow_lookup(table, &key); if (!flow) { + struct sw_flow_mask *mask_p; /* Bail out if we're not allowed to create a new flow. */ error = -ENOENT; if (info->genlhdr->cmd == OVS_FLOW_CMD_SET) @@ -1267,7 +1296,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) new_table = ovs_flow_tbl_expand(table); if (!IS_ERR(new_table)) { rcu_assign_pointer(dp->table, new_table); - ovs_flow_tbl_deferred_destroy(table); + ovs_flow_tbl_destroy(table, true); table = ovsl_dereference(dp->table); } } @@ -1280,14 +1309,30 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) } clear_stats(flow); + flow->key = masked_key; + flow->unmasked_key = key; + + /* Make sure mask is unique in the system */ + mask_p = ovs_sw_flow_mask_find(table, &mask); + if (!mask_p) { + /* Allocate a new mask if none exsits. */ + mask_p = ovs_sw_flow_mask_alloc(); + if (!mask_p) + goto err_flow_free; + mask_p->key = mask.key; + mask_p->range = mask.range; + ovs_sw_flow_mask_insert(table, mask_p); + } + + ovs_sw_flow_mask_add_ref(mask_p); + flow->mask = mask_p; rcu_assign_pointer(flow->sf_acts, acts); /* Put flow in bucket. */ - ovs_flow_tbl_insert(table, flow, &key, key_len); + ovs_flow_insert(table, flow); reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid, - info->snd_seq, - OVS_FLOW_CMD_NEW); + info->snd_seq, OVS_FLOW_CMD_NEW); } else { /* We found a matching flow. */ struct sw_flow_actions *old_acts; @@ -1303,6 +1348,13 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) goto err_unlock_ovs; + /* The unmasked key has to be the same for flow updates. */ + error = -EINVAL; + if (!ovs_flow_cmp_unmasked_key(flow, &key, match.range.end)) { + OVS_NLERR("Flow modification message rejected, unmasked key does not match.\n"); + goto err_unlock_ovs; + } + /* Update actions. 
*/ old_acts = ovsl_dereference(flow->sf_acts); rcu_assign_pointer(flow->sf_acts, acts); @@ -1327,6 +1379,8 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) ovs_dp_flow_multicast_group.id, PTR_ERR(reply)); return 0; +err_flow_free: + ovs_flow_free(flow, false); err_unlock_ovs: ovs_unlock(); err_kfree: @@ -1344,12 +1398,16 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info) struct sw_flow *flow; struct datapath *dp; struct flow_table *table; + struct sw_flow_match match; int err; - int key_len; - if (!a[OVS_FLOW_ATTR_KEY]) + if (!a[OVS_FLOW_ATTR_KEY]) { + OVS_NLERR("Flow get message rejected, Key attribute missing.\n"); return -EINVAL; - err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]); + } + + ovs_match_init(&match, &key, NULL); + err = ovs_match_from_nlattrs(&match, a[OVS_FLOW_ATTR_KEY], NULL); if (err) return err; @@ -1361,7 +1419,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info) } table = ovsl_dereference(dp->table); - flow = ovs_flow_tbl_lookup(table, &key, key_len); + flow = ovs_flow_lookup_unmasked_key(table, &match); if (!flow) { err = -ENOENT; goto unlock; @@ -1390,8 +1448,8 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info) struct sw_flow *flow; struct datapath *dp; struct flow_table *table; + struct sw_flow_match match; int err; - int key_len; ovs_lock(); dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); @@ -1404,12 +1462,14 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info) err = flush_flows(dp); goto unlock; } - err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]); + + ovs_match_init(&match, &key, NULL); + err = ovs_match_from_nlattrs(&match, a[OVS_FLOW_ATTR_KEY], NULL); if (err) goto unlock; table = ovsl_dereference(dp->table); - flow = ovs_flow_tbl_lookup(table, &key, key_len); + flow = ovs_flow_lookup_unmasked_key(table, &match); if (!flow) { err = -ENOENT; goto unlock; @@ -1421,13 +1481,13 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info) goto unlock; } - ovs_flow_tbl_remove(table, flow); + ovs_flow_remove(table, flow); err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid, info->snd_seq, 0, OVS_FLOW_CMD_DEL); BUG_ON(err < 0); - ovs_flow_deferred_free(flow); + ovs_flow_free(flow, true); ovs_unlock(); ovs_notify(reply, info, &ovs_dp_flow_multicast_group); @@ -1457,7 +1517,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) bucket = cb->args[0]; obj = cb->args[1]; - flow = ovs_flow_tbl_next(table, &bucket, &obj); + flow = ovs_flow_dump_next(table, &bucket, &obj); if (!flow) break; @@ -1680,7 +1740,7 @@ err_destroy_ports_array: err_destroy_percpu: free_percpu(dp->stats_percpu); err_destroy_table: - ovs_flow_tbl_destroy(ovsl_dereference(dp->table)); + ovs_flow_tbl_destroy(ovsl_dereference(dp->table), false); err_free_dp: release_net(ovs_dp_get_net(dp)); kfree(dp); @@ -2287,7 +2347,7 @@ static void rehash_flow_table(struct work_struct *work) new_table = ovs_flow_tbl_rehash(old_table); if (!IS_ERR(new_table)) { rcu_assign_pointer(dp->table, new_table); - ovs_flow_tbl_deferred_destroy(old_table); + ovs_flow_tbl_destroy(old_table, true); } } } diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h index a914864..4d109c1 100644 --- a/net/openvswitch/datapath.h +++ b/net/openvswitch/datapath.h @@ -88,11 +88,13 @@ struct datapath { /** * struct ovs_skb_cb - OVS data in skb CB * @flow: The flow associated with this packet. 
May be %NULL if no flow. + * @pkt_key: The flow information extracted from the packet. Must be nonnull. * @tun_key: Key for the tunnel that encapsulated this packet. NULL if the * packet is not being tunneled. */ struct ovs_skb_cb { struct sw_flow *flow; + struct sw_flow_key *pkt_key; struct ovs_key_ipv4_tunnel *tun_key; }; #define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb) @@ -183,4 +185,8 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *, u32 pid, u32 seq, int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb); void ovs_dp_notify_wq(struct work_struct *work); + +#define OVS_NLERR(fmt, ...) \ + pr_info_once("netlink: " fmt, ##__VA_ARGS__) + #endif /* datapath.h */ diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index fca2825..1fceb96 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2011 Nicira, Inc. + * Copyright (c) 2007-2013 Nicira, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -46,6 +46,184 @@ static struct kmem_cache *flow_cache; +static void ovs_sw_flow_mask_set(struct sw_flow_mask *mask, + struct sw_flow_key_range *range, u8 val); + +static void update_range__(struct sw_flow_match *match, + size_t offset, size_t size, bool is_mask) +{ + struct sw_flow_key_range *range = NULL; + size_t start = offset; + size_t end = offset + size; + + if (!is_mask) + range = &match->range; + else if (match->mask) + range = &match->mask->range; + + if (!range) + return; + + if (range->start == range->end) { + range->start = start; + range->end = end; + return; + } + + if (range->start > start) + range->start = start; + + if (range->end < end) + range->end = end; +} + +#define SW_FLOW_KEY_PUT(match, field, value, is_mask) \ + do { \ + update_range__(match, offsetof(struct sw_flow_key, field), \ + sizeof((match)->key->field), is_mask); \ + if (is_mask) { \ + if ((match)->mask) \ + (match)->mask->key.field = value; \ + } else { \ + (match)->key->field = value; \ + } \ + } while (0) + +#define SW_FLOW_KEY_MEMCPY(match, field, value_p, len, is_mask) \ + do { \ + update_range__(match, offsetof(struct sw_flow_key, field), \ + len, is_mask); \ + if (is_mask) { \ + if ((match)->mask) \ + memcpy(&(match)->mask->key.field, value_p, len);\ + } else { \ + memcpy(&(match)->key->field, value_p, len); \ + } \ + } while (0) + +void ovs_match_init(struct sw_flow_match *match, + struct sw_flow_key *key, + struct sw_flow_mask *mask) +{ + memset(match, 0, sizeof(*match)); + match->key = key; + match->mask = mask; + + memset(key, 0, sizeof(*key)); + + if (mask) { + memset(&mask->key, 0, sizeof(mask->key)); + mask->range.start = mask->range.end = 0; + } +} + +static bool ovs_match_validate(const struct sw_flow_match *match, + u64 key_attrs, u64 mask_attrs) +{ + u64 key_expected = 1 << OVS_KEY_ATTR_ETHERNET; + u64 mask_allowed = key_attrs; /* At most allow all key attributes */ + + /* The following mask attributes allowed only if they + * pass the validation tests. */ + mask_allowed &= ~((1 << OVS_KEY_ATTR_IPV4) + | (1 << OVS_KEY_ATTR_IPV6) + | (1 << OVS_KEY_ATTR_TCP) + | (1 << OVS_KEY_ATTR_UDP) + | (1 << OVS_KEY_ATTR_ICMP) + | (1 << OVS_KEY_ATTR_ICMPV6) + | (1 << OVS_KEY_ATTR_ARP) + | (1 << OVS_KEY_ATTR_ND)); + + /* Always allowed mask fields. */ + mask_allowed |= ((1 << OVS_KEY_ATTR_TUNNEL) + | (1 << OVS_KEY_ATTR_IN_PORT) + | (1 << OVS_KEY_ATTR_ETHERTYPE)); + + /* Check key attributes. 
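+ * An L3/L4 mask attribute (ARP, IPv4/IPv6, TCP/UDP, ICMP/ICMPv6, ND) is only
+ * accepted when the key selects that protocol and the prerequisite field
+ * (EtherType or ip.proto) is matched exactly; the branches below add the
+ * corresponding bits to key_expected and mask_allowed.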
*/ + if (match->key->eth.type == htons(ETH_P_ARP) + || match->key->eth.type == htons(ETH_P_RARP)) { + key_expected |= 1 << OVS_KEY_ATTR_ARP; + if (match->mask && (match->mask->key.eth.type == htons(0xffff))) + mask_allowed |= 1 << OVS_KEY_ATTR_ARP; + } + + if (match->key->eth.type == htons(ETH_P_IP)) { + key_expected |= 1 << OVS_KEY_ATTR_IPV4; + if (match->mask && (match->mask->key.eth.type == htons(0xffff))) + mask_allowed |= 1 << OVS_KEY_ATTR_IPV4; + + if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) { + if (match->key->ip.proto == IPPROTO_UDP) { + key_expected |= 1 << OVS_KEY_ATTR_UDP; + if (match->mask && (match->mask->key.ip.proto == 0xff)) + mask_allowed |= 1 << OVS_KEY_ATTR_UDP; + } + + if (match->key->ip.proto == IPPROTO_TCP) { + key_expected |= 1 << OVS_KEY_ATTR_TCP; + if (match->mask && (match->mask->key.ip.proto == 0xff)) + mask_allowed |= 1 << OVS_KEY_ATTR_TCP; + } + + if (match->key->ip.proto == IPPROTO_ICMP) { + key_expected |= 1 << OVS_KEY_ATTR_ICMP; + if (match->mask && (match->mask->key.ip.proto == 0xff)) + mask_allowed |= 1 << OVS_KEY_ATTR_ICMP; + } + } + } + + if (match->key->eth.type == htons(ETH_P_IPV6)) { + key_expected |= 1 << OVS_KEY_ATTR_IPV6; + if (match->mask && (match->mask->key.eth.type == htons(0xffff))) + mask_allowed |= 1 << OVS_KEY_ATTR_IPV6; + + if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) { + if (match->key->ip.proto == IPPROTO_UDP) { + key_expected |= 1 << OVS_KEY_ATTR_UDP; + if (match->mask && (match->mask->key.ip.proto == 0xff)) + mask_allowed |= 1 << OVS_KEY_ATTR_UDP; + } + + if (match->key->ip.proto == IPPROTO_TCP) { + key_expected |= 1 << OVS_KEY_ATTR_TCP; + if (match->mask && (match->mask->key.ip.proto == 0xff)) + mask_allowed |= 1 << OVS_KEY_ATTR_TCP; + } + + if (match->key->ip.proto == IPPROTO_ICMPV6) { + key_expected |= 1 << OVS_KEY_ATTR_ICMPV6; + if (match->mask && (match->mask->key.ip.proto == 0xff)) + mask_allowed |= 1 << OVS_KEY_ATTR_ICMPV6; + + if (match->key->ipv6.tp.src == + htons(NDISC_NEIGHBOUR_SOLICITATION) || + match->key->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) { + key_expected |= 1 << OVS_KEY_ATTR_ND; + if (match->mask && (match->mask->key.ipv6.tp.src == htons(0xffff))) + mask_allowed |= 1 << OVS_KEY_ATTR_ND; + } + } + } + } + + if ((key_attrs & key_expected) != key_expected) { + /* Key attributes check failed. */ + OVS_NLERR("Missing expected key attributes (key_attrs=%llx, expected=%llx).\n", + key_attrs, key_expected); + return false; + } + + if ((mask_attrs & mask_allowed) != mask_attrs) { + /* Mask attributes check failed. 
*/ + OVS_NLERR("Contain more than allowed mask fields (mask_attrs=%llx, mask_allowed=%llx).\n", + mask_attrs, mask_allowed); + return false; + } + + return true; +} + static int check_header(struct sk_buff *skb, int len) { if (unlikely(skb->len < len)) @@ -121,12 +299,7 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies) return cur_ms - idle_ms; } -#define SW_FLOW_KEY_OFFSET(field) \ - (offsetof(struct sw_flow_key, field) + \ - FIELD_SIZEOF(struct sw_flow_key, field)) - -static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key, - int *key_lenp) +static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key) { unsigned int nh_ofs = skb_network_offset(skb); unsigned int nh_len; @@ -136,8 +309,6 @@ static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key, __be16 frag_off; int err; - *key_lenp = SW_FLOW_KEY_OFFSET(ipv6.label); - err = check_header(skb, nh_ofs + sizeof(*nh)); if (unlikely(err)) return err; @@ -176,6 +347,21 @@ static bool icmp6hdr_ok(struct sk_buff *skb) sizeof(struct icmp6hdr)); } +void ovs_flow_key_mask(struct sw_flow_key *dst, const struct sw_flow_key *src, + const struct sw_flow_mask *mask) +{ + u8 *m = (u8 *)&mask->key + mask->range.start; + u8 *s = (u8 *)src + mask->range.start; + u8 *d = (u8 *)dst + mask->range.start; + int i; + + memset(dst, 0, sizeof(*dst)); + for (i = 0; i < ovs_sw_flow_mask_size_roundup(mask); i++) { + *d = *s & *m; + d++, s++, m++; + } +} + #define TCP_FLAGS_OFFSET 13 #define TCP_FLAG_MASK 0x3f @@ -224,6 +410,7 @@ struct sw_flow *ovs_flow_alloc(void) spin_lock_init(&flow->lock); flow->sf_acts = NULL; + flow->mask = NULL; return flow; } @@ -263,7 +450,7 @@ static void free_buckets(struct flex_array *buckets) flex_array_free(buckets); } -struct flow_table *ovs_flow_tbl_alloc(int new_size) +static struct flow_table *__flow_tbl_alloc(int new_size) { struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL); @@ -281,17 +468,15 @@ struct flow_table *ovs_flow_tbl_alloc(int new_size) table->node_ver = 0; table->keep_flows = false; get_random_bytes(&table->hash_seed, sizeof(u32)); + table->mask_list = NULL; return table; } -void ovs_flow_tbl_destroy(struct flow_table *table) +static void __flow_tbl_destroy(struct flow_table *table) { int i; - if (!table) - return; - if (table->keep_flows) goto skip_flows; @@ -303,31 +488,55 @@ void ovs_flow_tbl_destroy(struct flow_table *table) hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) { hlist_del(&flow->hash_node[ver]); - ovs_flow_free(flow); + ovs_flow_free(flow, false); } } + BUG_ON(!list_empty(table->mask_list)); + kfree(table->mask_list); + skip_flows: free_buckets(table->buckets); kfree(table); } +struct flow_table *ovs_flow_tbl_alloc(int new_size) +{ + struct flow_table *table = __flow_tbl_alloc(new_size); + + if (!table) + return NULL; + + table->mask_list = kmalloc(sizeof(struct list_head), GFP_KERNEL); + if (!table->mask_list) { + table->keep_flows = true; + __flow_tbl_destroy(table); + return NULL; + } + INIT_LIST_HEAD(table->mask_list); + + return table; +} + static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu) { struct flow_table *table = container_of(rcu, struct flow_table, rcu); - ovs_flow_tbl_destroy(table); + __flow_tbl_destroy(table); } -void ovs_flow_tbl_deferred_destroy(struct flow_table *table) +void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred) { if (!table) return; - call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb); + if (deferred) + call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb); + else + __flow_tbl_destroy(table); } -struct 
sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last) +struct sw_flow *ovs_flow_dump_next(struct flow_table *table, u32 *bucket, u32 *last) { struct sw_flow *flow; struct hlist_head *head; @@ -353,11 +562,13 @@ struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *la return NULL; } -static void __flow_tbl_insert(struct flow_table *table, struct sw_flow *flow) +static void __tbl_insert(struct flow_table *table, struct sw_flow *flow) { struct hlist_head *head; + head = find_bucket(table, flow->hash); hlist_add_head_rcu(&flow->hash_node[table->node_ver], head); + table->count++; } @@ -377,8 +588,10 @@ static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new head = flex_array_get(old->buckets, i); hlist_for_each_entry(flow, head, hash_node[old_ver]) - __flow_tbl_insert(new, flow); + __tbl_insert(new, flow); } + + new->mask_list = old->mask_list; old->keep_flows = true; } @@ -386,7 +599,7 @@ static struct flow_table *__flow_tbl_rehash(struct flow_table *table, int n_buck { struct flow_table *new_table; - new_table = ovs_flow_tbl_alloc(n_buckets); + new_table = __flow_tbl_alloc(n_buckets); if (!new_table) return ERR_PTR(-ENOMEM); @@ -405,28 +618,30 @@ struct flow_table *ovs_flow_tbl_expand(struct flow_table *table) return __flow_tbl_rehash(table, table->n_buckets * 2); } -void ovs_flow_free(struct sw_flow *flow) +static void __flow_free(struct sw_flow *flow) { - if (unlikely(!flow)) - return; - kfree((struct sf_flow_acts __force *)flow->sf_acts); kmem_cache_free(flow_cache, flow); } -/* RCU callback used by ovs_flow_deferred_free. */ static void rcu_free_flow_callback(struct rcu_head *rcu) { struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu); - ovs_flow_free(flow); + __flow_free(flow); } -/* Schedules 'flow' to be freed after the next RCU grace period. - * The caller must hold rcu_read_lock for this to be sensible. */ -void ovs_flow_deferred_free(struct sw_flow *flow) +void ovs_flow_free(struct sw_flow *flow, bool deferred) { - call_rcu(&flow->rcu, rcu_free_flow_callback); + if (!flow) + return; + + ovs_sw_flow_mask_del_ref(flow->mask, deferred); + + if (deferred) + call_rcu(&flow->rcu, rcu_free_flow_callback); + else + __flow_free(flow); } /* Schedules 'sf_acts' to be freed after the next RCU grace period. @@ -497,18 +712,15 @@ static __be16 parse_ethertype(struct sk_buff *skb) } static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key, - int *key_lenp, int nh_len) + int nh_len) { struct icmp6hdr *icmp = icmp6_hdr(skb); - int error = 0; - int key_len; /* The ICMPv6 type and code fields use the 16-bit transport port * fields, so we need to store them in 16-bit network byte order. */ key->ipv6.tp.src = htons(icmp->icmp6_type); key->ipv6.tp.dst = htons(icmp->icmp6_code); - key_len = SW_FLOW_KEY_OFFSET(ipv6.tp); if (icmp->icmp6_code == 0 && (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION || @@ -517,21 +729,17 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key, struct nd_msg *nd; int offset; - key_len = SW_FLOW_KEY_OFFSET(ipv6.nd); - /* In order to process neighbor discovery options, we need the * entire packet. 
*/ if (unlikely(icmp_len < sizeof(*nd))) - goto out; - if (unlikely(skb_linearize(skb))) { - error = -ENOMEM; - goto out; - } + return 0; + + if (unlikely(skb_linearize(skb))) + return -ENOMEM; nd = (struct nd_msg *)skb_transport_header(skb); key->ipv6.nd.target = nd->target; - key_len = SW_FLOW_KEY_OFFSET(ipv6.nd); icmp_len -= sizeof(*nd); offset = 0; @@ -541,7 +749,7 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key, int opt_len = nd_opt->nd_opt_len * 8; if (unlikely(!opt_len || opt_len > icmp_len)) - goto invalid; + return 0; /* Store the link layer address if the appropriate * option is provided. It is considered an error if @@ -566,16 +774,14 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key, } } - goto out; + return 0; invalid: memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target)); memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll)); memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll)); -out: - *key_lenp = key_len; - return error; + return 0; } /** @@ -584,7 +790,6 @@ out: * Ethernet header * @in_port: port number on which @skb was received. * @key: output flow key - * @key_lenp: length of output flow key * * The caller must ensure that skb->len >= ETH_HLEN. * @@ -602,11 +807,9 @@ out: * of a correct length, otherwise the same as skb->network_header. * For other key->eth.type values it is left untouched. */ -int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key, - int *key_lenp) +int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key) { - int error = 0; - int key_len = SW_FLOW_KEY_OFFSET(eth); + int error; struct ethhdr *eth; memset(key, 0, sizeof(*key)); @@ -649,15 +852,13 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key, struct iphdr *nh; __be16 offset; - key_len = SW_FLOW_KEY_OFFSET(ipv4.addr); - error = check_iphdr(skb); if (unlikely(error)) { if (error == -EINVAL) { skb->transport_header = skb->network_header; error = 0; } - goto out; + return error; } nh = ip_hdr(skb); @@ -671,7 +872,7 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key, offset = nh->frag_off & htons(IP_OFFSET); if (offset) { key->ip.frag = OVS_FRAG_TYPE_LATER; - goto out; + return 0; } if (nh->frag_off & htons(IP_MF) || skb_shinfo(skb)->gso_type & SKB_GSO_UDP) @@ -679,21 +880,18 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key, /* Transport layer. 
*/ if (key->ip.proto == IPPROTO_TCP) { - key_len = SW_FLOW_KEY_OFFSET(ipv4.tp); if (tcphdr_ok(skb)) { struct tcphdr *tcp = tcp_hdr(skb); key->ipv4.tp.src = tcp->source; key->ipv4.tp.dst = tcp->dest; } } else if (key->ip.proto == IPPROTO_UDP) { - key_len = SW_FLOW_KEY_OFFSET(ipv4.tp); if (udphdr_ok(skb)) { struct udphdr *udp = udp_hdr(skb); key->ipv4.tp.src = udp->source; key->ipv4.tp.dst = udp->dest; } } else if (key->ip.proto == IPPROTO_ICMP) { - key_len = SW_FLOW_KEY_OFFSET(ipv4.tp); if (icmphdr_ok(skb)) { struct icmphdr *icmp = icmp_hdr(skb); /* The ICMP type and code fields use the 16-bit @@ -722,53 +920,49 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key, memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst)); memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN); memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN); - key_len = SW_FLOW_KEY_OFFSET(ipv4.arp); } } else if (key->eth.type == htons(ETH_P_IPV6)) { int nh_len; /* IPv6 Header + Extensions */ - nh_len = parse_ipv6hdr(skb, key, &key_len); + nh_len = parse_ipv6hdr(skb, key); if (unlikely(nh_len < 0)) { - if (nh_len == -EINVAL) + if (nh_len == -EINVAL) { skb->transport_header = skb->network_header; - else + error = 0; + } else { error = nh_len; - goto out; + } + return error; } if (key->ip.frag == OVS_FRAG_TYPE_LATER) - goto out; + return 0; if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) key->ip.frag = OVS_FRAG_TYPE_FIRST; /* Transport layer. */ if (key->ip.proto == NEXTHDR_TCP) { - key_len = SW_FLOW_KEY_OFFSET(ipv6.tp); if (tcphdr_ok(skb)) { struct tcphdr *tcp = tcp_hdr(skb); key->ipv6.tp.src = tcp->source; key->ipv6.tp.dst = tcp->dest; } } else if (key->ip.proto == NEXTHDR_UDP) { - key_len = SW_FLOW_KEY_OFFSET(ipv6.tp); if (udphdr_ok(skb)) { struct udphdr *udp = udp_hdr(skb); key->ipv6.tp.src = udp->source; key->ipv6.tp.dst = udp->dest; } } else if (key->ip.proto == NEXTHDR_ICMP) { - key_len = SW_FLOW_KEY_OFFSET(ipv6.tp); if (icmp6hdr_ok(skb)) { - error = parse_icmpv6(skb, key, &key_len, nh_len); - if (error < 0) - goto out; + error = parse_icmpv6(skb, key, nh_len); + if (error) + return error; } } } -out: - *key_lenp = key_len; - return error; + return 0; } static u32 ovs_flow_hash(const struct sw_flow_key *key, int key_start, int key_len) @@ -777,7 +971,7 @@ static u32 ovs_flow_hash(const struct sw_flow_key *key, int key_start, int key_l DIV_ROUND_UP(key_len - key_start, sizeof(u32)), 0); } -static int flow_key_start(struct sw_flow_key *key) +static int flow_key_start(const struct sw_flow_key *key) { if (key->tun_key.ipv4_dst) return 0; @@ -785,39 +979,95 @@ static int flow_key_start(struct sw_flow_key *key) return offsetof(struct sw_flow_key, phy); } -struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table, - struct sw_flow_key *key, int key_len) +static bool __cmp_key(const struct sw_flow_key *key1, + const struct sw_flow_key *key2, int key_start, int key_len) +{ + return !memcmp((u8 *)key1 + key_start, + (u8 *)key2 + key_start, (key_len - key_start)); +} + +static bool __flow_cmp_key(const struct sw_flow *flow, + const struct sw_flow_key *key, int key_start, int key_len) +{ + return __cmp_key(&flow->key, key, key_start, key_len); +} + +static bool __flow_cmp_unmasked_key(const struct sw_flow *flow, + const struct sw_flow_key *key, int key_start, int key_len) +{ + return __cmp_key(&flow->unmasked_key, key, key_start, key_len); +} + +bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow, + const struct sw_flow_key *key, int key_len) +{ + int key_start; + key_start = 
flow_key_start(key); + + return __flow_cmp_unmasked_key(flow, key, key_start, key_len); + +} + +struct sw_flow *ovs_flow_lookup_unmasked_key(struct flow_table *table, + struct sw_flow_match *match) +{ + struct sw_flow_key *unmasked = match->key; + int key_len = match->range.end; + struct sw_flow *flow; + + flow = ovs_flow_lookup(table, unmasked); + if (flow && (!ovs_flow_cmp_unmasked_key(flow, unmasked, key_len))) + flow = NULL; + + return flow; +} + +static struct sw_flow *ovs_masked_flow_lookup(struct flow_table *table, + const struct sw_flow_key *flow_key, + struct sw_flow_mask *mask) { struct sw_flow *flow; struct hlist_head *head; - u8 *_key; - int key_start; + int key_start = mask->range.start; + int key_len = mask->range.end; u32 hash; + struct sw_flow_key masked_key; - key_start = flow_key_start(key); - hash = ovs_flow_hash(key, key_start, key_len); - - _key = (u8 *) key + key_start; + ovs_flow_key_mask(&masked_key, flow_key, mask); + hash = ovs_flow_hash(&masked_key, key_start, key_len); head = find_bucket(table, hash); hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) { - - if (flow->hash == hash && - !memcmp((u8 *)&flow->key + key_start, _key, key_len - key_start)) { + if (flow->mask == mask && + __flow_cmp_key(flow, &masked_key, key_start, key_len)) return flow; - } } return NULL; } -void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow, - struct sw_flow_key *key, int key_len) +struct sw_flow *ovs_flow_lookup(struct flow_table *tbl, + const struct sw_flow_key *key) { - flow->hash = ovs_flow_hash(key, flow_key_start(key), key_len); - memcpy(&flow->key, key, sizeof(flow->key)); - __flow_tbl_insert(table, flow); + struct sw_flow *flow = NULL; + struct sw_flow_mask *mask; + + list_for_each_entry_rcu(mask, tbl->mask_list, list) { + flow = ovs_masked_flow_lookup(tbl, key, mask); + if (flow) /* Found */ + break; + } + + return flow; } -void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow) + +void ovs_flow_insert(struct flow_table *table, struct sw_flow *flow) +{ + flow->hash = ovs_flow_hash(&flow->key, flow->mask->range.start, + flow->mask->range.end); + __tbl_insert(table, flow); +} + +void ovs_flow_remove(struct flow_table *table, struct sw_flow *flow) { BUG_ON(table->count == 0); hlist_del_rcu(&flow->hash_node[table->node_ver]); @@ -844,149 +1094,84 @@ const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = { [OVS_KEY_ATTR_TUNNEL] = -1, }; -static int ipv4_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len, - const struct nlattr *a[], u32 *attrs) -{ - const struct ovs_key_icmp *icmp_key; - const struct ovs_key_tcp *tcp_key; - const struct ovs_key_udp *udp_key; - - switch (swkey->ip.proto) { - case IPPROTO_TCP: - if (!(*attrs & (1 << OVS_KEY_ATTR_TCP))) - return -EINVAL; - *attrs &= ~(1 << OVS_KEY_ATTR_TCP); - - *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp); - tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]); - swkey->ipv4.tp.src = tcp_key->tcp_src; - swkey->ipv4.tp.dst = tcp_key->tcp_dst; - break; - - case IPPROTO_UDP: - if (!(*attrs & (1 << OVS_KEY_ATTR_UDP))) - return -EINVAL; - *attrs &= ~(1 << OVS_KEY_ATTR_UDP); - - *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp); - udp_key = nla_data(a[OVS_KEY_ATTR_UDP]); - swkey->ipv4.tp.src = udp_key->udp_src; - swkey->ipv4.tp.dst = udp_key->udp_dst; - break; - - case IPPROTO_ICMP: - if (!(*attrs & (1 << OVS_KEY_ATTR_ICMP))) - return -EINVAL; - *attrs &= ~(1 << OVS_KEY_ATTR_ICMP); - - *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp); - icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]); - swkey->ipv4.tp.src = 
htons(icmp_key->icmp_type); - swkey->ipv4.tp.dst = htons(icmp_key->icmp_code); - break; - } - - return 0; -} - -static int ipv6_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len, - const struct nlattr *a[], u32 *attrs) +static bool is_all_zero(const u8 *fp, size_t size) { - const struct ovs_key_icmpv6 *icmpv6_key; - const struct ovs_key_tcp *tcp_key; - const struct ovs_key_udp *udp_key; - - switch (swkey->ip.proto) { - case IPPROTO_TCP: - if (!(*attrs & (1 << OVS_KEY_ATTR_TCP))) - return -EINVAL; - *attrs &= ~(1 << OVS_KEY_ATTR_TCP); - - *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp); - tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]); - swkey->ipv6.tp.src = tcp_key->tcp_src; - swkey->ipv6.tp.dst = tcp_key->tcp_dst; - break; - - case IPPROTO_UDP: - if (!(*attrs & (1 << OVS_KEY_ATTR_UDP))) - return -EINVAL; - *attrs &= ~(1 << OVS_KEY_ATTR_UDP); - - *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp); - udp_key = nla_data(a[OVS_KEY_ATTR_UDP]); - swkey->ipv6.tp.src = udp_key->udp_src; - swkey->ipv6.tp.dst = udp_key->udp_dst; - break; - - case IPPROTO_ICMPV6: - if (!(*attrs & (1 << OVS_KEY_ATTR_ICMPV6))) - return -EINVAL; - *attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6); - - *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp); - icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]); - swkey->ipv6.tp.src = htons(icmpv6_key->icmpv6_type); - swkey->ipv6.tp.dst = htons(icmpv6_key->icmpv6_code); + int i; - if (swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_SOLICITATION) || - swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) { - const struct ovs_key_nd *nd_key; + if (!fp) + return false; - if (!(*attrs & (1 << OVS_KEY_ATTR_ND))) - return -EINVAL; - *attrs &= ~(1 << OVS_KEY_ATTR_ND); - - *key_len = SW_FLOW_KEY_OFFSET(ipv6.nd); - nd_key = nla_data(a[OVS_KEY_ATTR_ND]); - memcpy(&swkey->ipv6.nd.target, nd_key->nd_target, - sizeof(swkey->ipv6.nd.target)); - memcpy(swkey->ipv6.nd.sll, nd_key->nd_sll, ETH_ALEN); - memcpy(swkey->ipv6.nd.tll, nd_key->nd_tll, ETH_ALEN); - } - break; - } + for (i = 0; i < size; i++) + if (fp[i]) + return false; - return 0; + return true; } -static int parse_flow_nlattrs(const struct nlattr *attr, - const struct nlattr *a[], u32 *attrsp) +static int __parse_flow_nlattrs(const struct nlattr *attr, + const struct nlattr *a[], + u64 *attrsp, bool nz) { const struct nlattr *nla; u32 attrs; int rem; - attrs = 0; + attrs = *attrsp; nla_for_each_nested(nla, attr, rem) { u16 type = nla_type(nla); int expected_len; - if (type > OVS_KEY_ATTR_MAX || attrs & (1 << type)) + if (type > OVS_KEY_ATTR_MAX) { + OVS_NLERR("Unknown key attribute (type=%d, max=%d).\n", + type, OVS_KEY_ATTR_MAX); + } + + if (attrs & (1 << type)) { + OVS_NLERR("Duplicate key attribute (type %d).\n", type); return -EINVAL; + } expected_len = ovs_key_lens[type]; - if (nla_len(nla) != expected_len && expected_len != -1) + if (nla_len(nla) != expected_len && expected_len != -1) { + OVS_NLERR("Key attribute has unexpected length (type=%d" + ", length=%d, expected=%d).\n", type, + nla_len(nla), expected_len); return -EINVAL; + } - attrs |= 1 << type; - a[type] = nla; + if (!nz || !is_all_zero(nla_data(nla), expected_len)) { + attrs |= 1 << type; + a[type] = nla; + } } - if (rem) + if (rem) { + OVS_NLERR("Message has %d unknown bytes.\n", rem); return -EINVAL; + } *attrsp = attrs; return 0; } +static int parse_flow_mask_nlattrs(const struct nlattr *attr, + const struct nlattr *a[], u64 *attrsp) +{ + return __parse_flow_nlattrs(attr, a, attrsp, true); +} + +static int parse_flow_nlattrs(const struct nlattr *attr, + const struct nlattr *a[], u64 *attrsp) +{ + return 
__parse_flow_nlattrs(attr, a, attrsp, false); +} + int ovs_ipv4_tun_from_nlattr(const struct nlattr *attr, - struct ovs_key_ipv4_tunnel *tun_key) + struct sw_flow_match *match, bool is_mask) { struct nlattr *a; int rem; bool ttl = false; - - memset(tun_key, 0, sizeof(*tun_key)); + __be16 tun_flags = 0; nla_for_each_nested(a, attr, rem) { int type = nla_type(a); @@ -1000,53 +1185,78 @@ int ovs_ipv4_tun_from_nlattr(const struct nlattr *attr, [OVS_TUNNEL_KEY_ATTR_CSUM] = 0, }; - if (type > OVS_TUNNEL_KEY_ATTR_MAX || - ovs_tunnel_key_lens[type] != nla_len(a)) + if (type > OVS_TUNNEL_KEY_ATTR_MAX) { + OVS_NLERR("Unknown IPv4 tunnel attribute (type=%d, max=%d).\n", + type, OVS_TUNNEL_KEY_ATTR_MAX); return -EINVAL; + } + + if (ovs_tunnel_key_lens[type] != nla_len(a)) { + OVS_NLERR("IPv4 tunnel attribute type has unexpected " + " length (type=%d, length=%d, expected=%d).\n", + type, nla_len(a), ovs_tunnel_key_lens[type]); + return -EINVAL; + } switch (type) { case OVS_TUNNEL_KEY_ATTR_ID: - tun_key->tun_id = nla_get_be64(a); - tun_key->tun_flags |= TUNNEL_KEY; + SW_FLOW_KEY_PUT(match, tun_key.tun_id, + nla_get_be64(a), is_mask); + tun_flags |= TUNNEL_KEY; break; case OVS_TUNNEL_KEY_ATTR_IPV4_SRC: - tun_key->ipv4_src = nla_get_be32(a); + SW_FLOW_KEY_PUT(match, tun_key.ipv4_src, + nla_get_be32(a), is_mask); break; case OVS_TUNNEL_KEY_ATTR_IPV4_DST: - tun_key->ipv4_dst = nla_get_be32(a); + SW_FLOW_KEY_PUT(match, tun_key.ipv4_dst, + nla_get_be32(a), is_mask); break; case OVS_TUNNEL_KEY_ATTR_TOS: - tun_key->ipv4_tos = nla_get_u8(a); + SW_FLOW_KEY_PUT(match, tun_key.ipv4_tos, + nla_get_u8(a), is_mask); break; case OVS_TUNNEL_KEY_ATTR_TTL: - tun_key->ipv4_ttl = nla_get_u8(a); + SW_FLOW_KEY_PUT(match, tun_key.ipv4_ttl, + nla_get_u8(a), is_mask); ttl = true; break; case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT: - tun_key->tun_flags |= TUNNEL_DONT_FRAGMENT; + tun_flags |= TUNNEL_DONT_FRAGMENT; break; case OVS_TUNNEL_KEY_ATTR_CSUM: - tun_key->tun_flags |= TUNNEL_CSUM; + tun_flags |= TUNNEL_CSUM; break; default: return -EINVAL; - } } - if (rem > 0) - return -EINVAL; - if (!tun_key->ipv4_dst) - return -EINVAL; + SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask); - if (!ttl) + if (rem > 0) { + OVS_NLERR("IPv4 tunnel attribute has %d unknown bytes.\n", rem); return -EINVAL; + } + + if (!is_mask) { + if (!match->key->tun_key.ipv4_dst) { + OVS_NLERR("IPv4 tunnel destination address is zero.\n"); + return -EINVAL; + } + + if (!ttl) { + OVS_NLERR("IPv4 tunnel TTL not specified.\n"); + return -EINVAL; + } + } return 0; } int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb, - const struct ovs_key_ipv4_tunnel *tun_key) + const struct ovs_key_ipv4_tunnel *tun_key, + const struct ovs_key_ipv4_tunnel *output) { struct nlattr *nla; @@ -1054,23 +1264,24 @@ int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb, if (!nla) return -EMSGSIZE; - if (tun_key->tun_flags & TUNNEL_KEY && - nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, tun_key->tun_id)) + if (output->tun_flags & TUNNEL_KEY && + nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id)) return -EMSGSIZE; - if (tun_key->ipv4_src && - nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, tun_key->ipv4_src)) + if (output->ipv4_src && + nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, output->ipv4_src)) return -EMSGSIZE; - if (nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, tun_key->ipv4_dst)) + if (output->ipv4_dst && + nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, output->ipv4_dst)) return -EMSGSIZE; - if (tun_key->ipv4_tos && - nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, 
tun_key->ipv4_tos)) + if (output->ipv4_tos && + nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos)) return -EMSGSIZE; - if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, tun_key->ipv4_ttl)) + if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ipv4_ttl)) return -EMSGSIZE; - if ((tun_key->tun_flags & TUNNEL_DONT_FRAGMENT) && + if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) && nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT)) return -EMSGSIZE; - if ((tun_key->tun_flags & TUNNEL_CSUM) && + if ((output->tun_flags & TUNNEL_CSUM) && nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM)) return -EMSGSIZE; @@ -1078,176 +1289,372 @@ int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb, return 0; } -/** - * ovs_flow_from_nlattrs - parses Netlink attributes into a flow key. - * @swkey: receives the extracted flow key. - * @key_lenp: number of bytes used in @swkey. - * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute - * sequence. - */ -int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, - const struct nlattr *attr) +static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs, + const struct nlattr **a, bool is_mask) { - const struct nlattr *a[OVS_KEY_ATTR_MAX + 1]; - const struct ovs_key_ethernet *eth_key; - int key_len; - u32 attrs; - int err; + if (*attrs & (1 << OVS_KEY_ATTR_PRIORITY)) { + SW_FLOW_KEY_PUT(match, phy.priority, + nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]), is_mask); + *attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY); + } - memset(swkey, 0, sizeof(struct sw_flow_key)); - key_len = SW_FLOW_KEY_OFFSET(eth); + if (*attrs & (1 << OVS_KEY_ATTR_IN_PORT)) { + u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]); - err = parse_flow_nlattrs(attr, a, &attrs); - if (err) - return err; + if (is_mask) + in_port = 0xffffffff; /* Always exact match in_port. */ + else if (in_port >= DP_MAX_PORTS) + return -EINVAL; - /* Metadata attributes. */ - if (attrs & (1 << OVS_KEY_ATTR_PRIORITY)) { - swkey->phy.priority = nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]); - attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY); + SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask); + *attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT); + } else if (!is_mask) { + SW_FLOW_KEY_PUT(match, phy.in_port, DP_MAX_PORTS, is_mask); } - if (attrs & (1 << OVS_KEY_ATTR_IN_PORT)) { - u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]); - if (in_port >= DP_MAX_PORTS) - return -EINVAL; - swkey->phy.in_port = in_port; - attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT); - } else { - swkey->phy.in_port = DP_MAX_PORTS; + + if (*attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) { + uint32_t mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]); + + SW_FLOW_KEY_PUT(match, phy.skb_mark, mark, is_mask); + *attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK); } - if (attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) { - swkey->phy.skb_mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]); - attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK); + if (*attrs & (1 << OVS_KEY_ATTR_TUNNEL)) { + if (ovs_ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match, + is_mask)) + return -EINVAL; + *attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL); } + return 0; +} - if (attrs & (1 << OVS_KEY_ATTR_TUNNEL)) { - err = ovs_ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], &swkey->tun_key); - if (err) - return err; +static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs, + const struct nlattr **a, bool is_mask) +{ + int err; + u64 orig_attrs = attrs; - attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL); - } + err = metadata_from_nlattrs(match, &attrs, a, is_mask); + if (err) + return err; - /* Data attributes. 
*/ - if (!(attrs & (1 << OVS_KEY_ATTR_ETHERNET))) - return -EINVAL; - attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET); + if (attrs & (1 << OVS_KEY_ATTR_ETHERNET)) { + const struct ovs_key_ethernet *eth_key; - eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]); - memcpy(swkey->eth.src, eth_key->eth_src, ETH_ALEN); - memcpy(swkey->eth.dst, eth_key->eth_dst, ETH_ALEN); + eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]); + SW_FLOW_KEY_MEMCPY(match, eth.src, + eth_key->eth_src, ETH_ALEN, is_mask); + SW_FLOW_KEY_MEMCPY(match, eth.dst, + eth_key->eth_dst, ETH_ALEN, is_mask); + attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET); + } - if (attrs & (1u << OVS_KEY_ATTR_ETHERTYPE) && - nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q)) { - const struct nlattr *encap; + if (attrs & (1 << OVS_KEY_ATTR_VLAN)) { __be16 tci; - if (attrs != ((1 << OVS_KEY_ATTR_VLAN) | - (1 << OVS_KEY_ATTR_ETHERTYPE) | - (1 << OVS_KEY_ATTR_ENCAP))) - return -EINVAL; - - encap = a[OVS_KEY_ATTR_ENCAP]; tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]); - if (tci & htons(VLAN_TAG_PRESENT)) { - swkey->eth.tci = tci; - - err = parse_flow_nlattrs(encap, a, &attrs); - if (err) - return err; - } else if (!tci) { - /* Corner case for truncated 802.1Q header. */ - if (nla_len(encap)) - return -EINVAL; + if (!(tci & htons(VLAN_TAG_PRESENT))) { + if (is_mask) + OVS_NLERR("VLAN TCI mask does not have exact match for VLAN_TAG_PRESENT bit.\n"); + else + OVS_NLERR("VLAN TCI does not have VLAN_TAG_PRESENT bit set.\n"); - swkey->eth.type = htons(ETH_P_8021Q); - *key_lenp = key_len; - return 0; - } else { return -EINVAL; } - } + + SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask); + attrs &= ~(1 << OVS_KEY_ATTR_VLAN); + } else if (!is_mask) + SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true); if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) { - swkey->eth.type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]); - if (ntohs(swkey->eth.type) < ETH_P_802_3_MIN) + __be16 eth_type; + + eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]); + if (is_mask) { + /* Always exact match EtherType. 
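+ * For a mask, the EtherType is forced to 0xffff below so that the masked key
+ * still dispatches on the real protocol type; userspace cannot wildcard it.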
*/ + eth_type = htons(0xffff); + } else if (ntohs(eth_type) < ETH_P_802_3_MIN) { + OVS_NLERR("EtherType is less than minimum (type=%x, min=%x).\n", + ntohs(eth_type), ETH_P_802_3_MIN); return -EINVAL; + } + + SW_FLOW_KEY_PUT(match, eth.type, eth_type, is_mask); attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE); - } else { - swkey->eth.type = htons(ETH_P_802_2); + } else if (!is_mask) { + SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask); } - if (swkey->eth.type == htons(ETH_P_IP)) { + if (attrs & (1 << OVS_KEY_ATTR_IPV4)) { const struct ovs_key_ipv4 *ipv4_key; - if (!(attrs & (1 << OVS_KEY_ATTR_IPV4))) - return -EINVAL; - attrs &= ~(1 << OVS_KEY_ATTR_IPV4); - - key_len = SW_FLOW_KEY_OFFSET(ipv4.addr); ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]); - if (ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) + if (!is_mask && ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) { + OVS_NLERR("Unknown IPv4 fragment type (value=%d, max=%d).\n", + ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX); return -EINVAL; - swkey->ip.proto = ipv4_key->ipv4_proto; - swkey->ip.tos = ipv4_key->ipv4_tos; - swkey->ip.ttl = ipv4_key->ipv4_ttl; - swkey->ip.frag = ipv4_key->ipv4_frag; - swkey->ipv4.addr.src = ipv4_key->ipv4_src; - swkey->ipv4.addr.dst = ipv4_key->ipv4_dst; - - if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) { - err = ipv4_flow_from_nlattrs(swkey, &key_len, a, &attrs); - if (err) - return err; } - } else if (swkey->eth.type == htons(ETH_P_IPV6)) { - const struct ovs_key_ipv6 *ipv6_key; + SW_FLOW_KEY_PUT(match, ip.proto, + ipv4_key->ipv4_proto, is_mask); + SW_FLOW_KEY_PUT(match, ip.tos, + ipv4_key->ipv4_tos, is_mask); + SW_FLOW_KEY_PUT(match, ip.ttl, + ipv4_key->ipv4_ttl, is_mask); + SW_FLOW_KEY_PUT(match, ip.frag, + ipv4_key->ipv4_frag, is_mask); + SW_FLOW_KEY_PUT(match, ipv4.addr.src, + ipv4_key->ipv4_src, is_mask); + SW_FLOW_KEY_PUT(match, ipv4.addr.dst, + ipv4_key->ipv4_dst, is_mask); + attrs &= ~(1 << OVS_KEY_ATTR_IPV4); + } - if (!(attrs & (1 << OVS_KEY_ATTR_IPV6))) - return -EINVAL; - attrs &= ~(1 << OVS_KEY_ATTR_IPV6); + if (attrs & (1 << OVS_KEY_ATTR_IPV6)) { + const struct ovs_key_ipv6 *ipv6_key; - key_len = SW_FLOW_KEY_OFFSET(ipv6.label); ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]); - if (ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) + if (!is_mask && ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) { + OVS_NLERR("Unknown IPv6 fragment type (value=%d, max=%d).\n", + ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX); return -EINVAL; - swkey->ipv6.label = ipv6_key->ipv6_label; - swkey->ip.proto = ipv6_key->ipv6_proto; - swkey->ip.tos = ipv6_key->ipv6_tclass; - swkey->ip.ttl = ipv6_key->ipv6_hlimit; - swkey->ip.frag = ipv6_key->ipv6_frag; - memcpy(&swkey->ipv6.addr.src, ipv6_key->ipv6_src, - sizeof(swkey->ipv6.addr.src)); - memcpy(&swkey->ipv6.addr.dst, ipv6_key->ipv6_dst, - sizeof(swkey->ipv6.addr.dst)); - - if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) { - err = ipv6_flow_from_nlattrs(swkey, &key_len, a, &attrs); - if (err) - return err; } - } else if (swkey->eth.type == htons(ETH_P_ARP) || - swkey->eth.type == htons(ETH_P_RARP)) { + SW_FLOW_KEY_PUT(match, ipv6.label, + ipv6_key->ipv6_label, is_mask); + SW_FLOW_KEY_PUT(match, ip.proto, + ipv6_key->ipv6_proto, is_mask); + SW_FLOW_KEY_PUT(match, ip.tos, + ipv6_key->ipv6_tclass, is_mask); + SW_FLOW_KEY_PUT(match, ip.ttl, + ipv6_key->ipv6_hlimit, is_mask); + SW_FLOW_KEY_PUT(match, ip.frag, + ipv6_key->ipv6_frag, is_mask); + SW_FLOW_KEY_MEMCPY(match, ipv6.addr.src, + ipv6_key->ipv6_src, + sizeof(match->key->ipv6.addr.src), + is_mask); + SW_FLOW_KEY_MEMCPY(match, ipv6.addr.dst, + ipv6_key->ipv6_dst, + 
sizeof(match->key->ipv6.addr.dst), + is_mask); + + attrs &= ~(1 << OVS_KEY_ATTR_IPV6); + } + + if (attrs & (1 << OVS_KEY_ATTR_ARP)) { const struct ovs_key_arp *arp_key; - if (!(attrs & (1 << OVS_KEY_ATTR_ARP))) + arp_key = nla_data(a[OVS_KEY_ATTR_ARP]); + if (!is_mask && (arp_key->arp_op & htons(0xff00))) { + OVS_NLERR("Unknown ARP opcode (opcode=%d).\n", + arp_key->arp_op); return -EINVAL; + } + + SW_FLOW_KEY_PUT(match, ipv4.addr.src, + arp_key->arp_sip, is_mask); + SW_FLOW_KEY_PUT(match, ipv4.addr.dst, + arp_key->arp_tip, is_mask); + SW_FLOW_KEY_PUT(match, ip.proto, + ntohs(arp_key->arp_op), is_mask); + SW_FLOW_KEY_MEMCPY(match, ipv4.arp.sha, + arp_key->arp_sha, ETH_ALEN, is_mask); + SW_FLOW_KEY_MEMCPY(match, ipv4.arp.tha, + arp_key->arp_tha, ETH_ALEN, is_mask); + attrs &= ~(1 << OVS_KEY_ATTR_ARP); + } - key_len = SW_FLOW_KEY_OFFSET(ipv4.arp); - arp_key = nla_data(a[OVS_KEY_ATTR_ARP]); - swkey->ipv4.addr.src = arp_key->arp_sip; - swkey->ipv4.addr.dst = arp_key->arp_tip; - if (arp_key->arp_op & htons(0xff00)) + if (attrs & (1 << OVS_KEY_ATTR_TCP)) { + const struct ovs_key_tcp *tcp_key; + + tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]); + if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) { + SW_FLOW_KEY_PUT(match, ipv4.tp.src, + tcp_key->tcp_src, is_mask); + SW_FLOW_KEY_PUT(match, ipv4.tp.dst, + tcp_key->tcp_dst, is_mask); + } else { + SW_FLOW_KEY_PUT(match, ipv6.tp.src, + tcp_key->tcp_src, is_mask); + SW_FLOW_KEY_PUT(match, ipv6.tp.dst, + tcp_key->tcp_dst, is_mask); + } + attrs &= ~(1 << OVS_KEY_ATTR_TCP); + } + + if (attrs & (1 << OVS_KEY_ATTR_UDP)) { + const struct ovs_key_udp *udp_key; + + udp_key = nla_data(a[OVS_KEY_ATTR_UDP]); + if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) { + SW_FLOW_KEY_PUT(match, ipv4.tp.src, + udp_key->udp_src, is_mask); + SW_FLOW_KEY_PUT(match, ipv4.tp.dst, + udp_key->udp_dst, is_mask); + } else { + SW_FLOW_KEY_PUT(match, ipv6.tp.src, + udp_key->udp_src, is_mask); + SW_FLOW_KEY_PUT(match, ipv6.tp.dst, + udp_key->udp_dst, is_mask); + } + attrs &= ~(1 << OVS_KEY_ATTR_UDP); + } + + if (attrs & (1 << OVS_KEY_ATTR_ICMP)) { + const struct ovs_key_icmp *icmp_key; + + icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]); + SW_FLOW_KEY_PUT(match, ipv4.tp.src, + htons(icmp_key->icmp_type), is_mask); + SW_FLOW_KEY_PUT(match, ipv4.tp.dst, + htons(icmp_key->icmp_code), is_mask); + attrs &= ~(1 << OVS_KEY_ATTR_ICMP); + } + + if (attrs & (1 << OVS_KEY_ATTR_ICMPV6)) { + const struct ovs_key_icmpv6 *icmpv6_key; + + icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]); + SW_FLOW_KEY_PUT(match, ipv6.tp.src, + htons(icmpv6_key->icmpv6_type), is_mask); + SW_FLOW_KEY_PUT(match, ipv6.tp.dst, + htons(icmpv6_key->icmpv6_code), is_mask); + attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6); + } + + if (attrs & (1 << OVS_KEY_ATTR_ND)) { + const struct ovs_key_nd *nd_key; + + nd_key = nla_data(a[OVS_KEY_ATTR_ND]); + SW_FLOW_KEY_MEMCPY(match, ipv6.nd.target, + nd_key->nd_target, + sizeof(match->key->ipv6.nd.target), + is_mask); + SW_FLOW_KEY_MEMCPY(match, ipv6.nd.sll, + nd_key->nd_sll, ETH_ALEN, is_mask); + SW_FLOW_KEY_MEMCPY(match, ipv6.nd.tll, + nd_key->nd_tll, ETH_ALEN, is_mask); + attrs &= ~(1 << OVS_KEY_ATTR_ND); + } + + if (attrs != 0) + return -EINVAL; + + return 0; +} + +/** + * ovs_match_from_nlattrs - parses Netlink attributes into a flow key and + * mask. In case the 'mask' is NULL, the flow is treated as exact match + * flow. Otherwise, it is treated as a wildcarded flow, except the mask + * does not include any don't care bit. + * @match: receives the extracted flow match information. 
+ * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute + * sequence. The fields should of the packet that triggered the creation + * of this flow. + * @mask: Optional. Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink + * attribute specifies the mask field of the wildcarded flow. + */ +int ovs_match_from_nlattrs(struct sw_flow_match *match, + const struct nlattr *key, + const struct nlattr *mask) +{ + const struct nlattr *a[OVS_KEY_ATTR_MAX + 1]; + const struct nlattr *encap; + u64 key_attrs = 0; + u64 mask_attrs = 0; + bool encap_valid = false; + int err; + + err = parse_flow_nlattrs(key, a, &key_attrs); + if (err) + return err; + + if ((key_attrs & (1 << OVS_KEY_ATTR_ETHERNET)) && + (key_attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) && + (nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q))) { + __be16 tci; + + if (!((key_attrs & (1 << OVS_KEY_ATTR_VLAN)) && + (key_attrs & (1 << OVS_KEY_ATTR_ENCAP)))) { + OVS_NLERR("Invalid Vlan frame.\n"); return -EINVAL; - swkey->ip.proto = ntohs(arp_key->arp_op); - memcpy(swkey->ipv4.arp.sha, arp_key->arp_sha, ETH_ALEN); - memcpy(swkey->ipv4.arp.tha, arp_key->arp_tha, ETH_ALEN); + } + + key_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE); + tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]); + encap = a[OVS_KEY_ATTR_ENCAP]; + key_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP); + encap_valid = true; + + if (tci & htons(VLAN_TAG_PRESENT)) { + err = parse_flow_nlattrs(encap, a, &key_attrs); + if (err) + return err; + } else if (!tci) { + /* Corner case for truncated 802.1Q header. */ + if (nla_len(encap)) { + OVS_NLERR("Truncated 802.1Q header has non-zero encap attribute.\n"); + return -EINVAL; + } + } else { + OVS_NLERR("Encap attribute is set for a non-VLAN frame.\n"); + return -EINVAL; + } } - if (attrs) + err = ovs_key_from_nlattrs(match, key_attrs, a, false); + if (err) + return err; + + if (mask) { + err = parse_flow_mask_nlattrs(mask, a, &mask_attrs); + if (err) + return err; + + if (mask_attrs & 1ULL << OVS_KEY_ATTR_ENCAP) { + __be16 eth_type = 0; + __be16 tci = 0; + + if (!encap_valid) { + OVS_NLERR("Encap mask attribute is set for non-VLAN frame.\n"); + return -EINVAL; + } + + mask_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP); + if (a[OVS_KEY_ATTR_ETHERTYPE]) + eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]); + + if (eth_type == htons(0xffff)) { + mask_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE); + encap = a[OVS_KEY_ATTR_ENCAP]; + err = parse_flow_mask_nlattrs(encap, a, &mask_attrs); + } else { + OVS_NLERR("VLAN frames must have an exact match on the TPID (mask=%x).\n", + ntohs(eth_type)); + return -EINVAL; + } + + if (a[OVS_KEY_ATTR_VLAN]) + tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]); + + if (!(tci & htons(VLAN_TAG_PRESENT))) { + OVS_NLERR("VLAN tag present bit must have an exact match (tci_mask=%x).\n", ntohs(tci)); + return -EINVAL; + } + } + + err = ovs_key_from_nlattrs(match, mask_attrs, a, true); + if (err) + return err; + } else { + /* Populate exact match flow's key mask. */ + if (match->mask) + ovs_sw_flow_mask_set(match->mask, &match->range, 0xff); + } + + if (!ovs_match_validate(match, key_attrs, mask_attrs)) return -EINVAL; - *key_lenp = key_len; return 0; } @@ -1255,7 +1662,6 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, /** * ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key. * @flow: Receives extracted in_port, priority, tun_key and skb_mark. - * @key_len: Length of key in @flow. Used for calculating flow hash. 
* @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute * sequence. * @@ -1264,102 +1670,100 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, * get the metadata, that is, the parts of the flow key that cannot be * extracted from the packet itself. */ -int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow, int key_len, - const struct nlattr *attr) + +int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow, + const struct nlattr *attr) { struct ovs_key_ipv4_tunnel *tun_key = &flow->key.tun_key; - const struct nlattr *nla; - int rem; + const struct nlattr *a[OVS_KEY_ATTR_MAX + 1]; + u64 attrs = 0; + int err; + struct sw_flow_match match; flow->key.phy.in_port = DP_MAX_PORTS; flow->key.phy.priority = 0; flow->key.phy.skb_mark = 0; memset(tun_key, 0, sizeof(flow->key.tun_key)); - nla_for_each_nested(nla, attr, rem) { - int type = nla_type(nla); - - if (type <= OVS_KEY_ATTR_MAX && ovs_key_lens[type] > 0) { - int err; - - if (nla_len(nla) != ovs_key_lens[type]) - return -EINVAL; - - switch (type) { - case OVS_KEY_ATTR_PRIORITY: - flow->key.phy.priority = nla_get_u32(nla); - break; - - case OVS_KEY_ATTR_TUNNEL: - err = ovs_ipv4_tun_from_nlattr(nla, tun_key); - if (err) - return err; - break; - - case OVS_KEY_ATTR_IN_PORT: - if (nla_get_u32(nla) >= DP_MAX_PORTS) - return -EINVAL; - flow->key.phy.in_port = nla_get_u32(nla); - break; - - case OVS_KEY_ATTR_SKB_MARK: - flow->key.phy.skb_mark = nla_get_u32(nla); - break; - } - } - } - if (rem) + err = parse_flow_nlattrs(attr, a, &attrs); + if (err) return -EINVAL; - flow->hash = ovs_flow_hash(&flow->key, - flow_key_start(&flow->key), key_len); + memset(&match, 0, sizeof(match)); + match.key = &flow->key; + + err = metadata_from_nlattrs(&match, &attrs, a, false); + if (err) + return err; return 0; } -int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) +int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, + const struct sw_flow_key *output, struct sk_buff *skb) { struct ovs_key_ethernet *eth_key; struct nlattr *nla, *encap; + bool is_mask = (swkey != output); - if (swkey->phy.priority && - nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority)) + if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority)) goto nla_put_failure; - if (swkey->tun_key.ipv4_dst && - ovs_ipv4_tun_to_nlattr(skb, &swkey->tun_key)) + if ((swkey->tun_key.ipv4_dst || is_mask) && + ovs_ipv4_tun_to_nlattr(skb, &swkey->tun_key, &output->tun_key)) goto nla_put_failure; - if (swkey->phy.in_port != DP_MAX_PORTS && - nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port)) - goto nla_put_failure; + if (swkey->phy.in_port == DP_MAX_PORTS) { + if (is_mask && (output->phy.in_port == 0xffff)) + if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, 0xffffffff)) + goto nla_put_failure; + } else { + u16 upper_u16; + upper_u16 = !is_mask ? 
0 : 0xffff; - if (swkey->phy.skb_mark && - nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, swkey->phy.skb_mark)) + if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, + (upper_u16 << 16) | output->phy.in_port)) + goto nla_put_failure; + } + + if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark)) goto nla_put_failure; nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key)); if (!nla) goto nla_put_failure; + eth_key = nla_data(nla); - memcpy(eth_key->eth_src, swkey->eth.src, ETH_ALEN); - memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN); + memcpy(eth_key->eth_src, output->eth.src, ETH_ALEN); + memcpy(eth_key->eth_dst, output->eth.dst, ETH_ALEN); if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) { - if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q)) || - nla_put_be16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci)) + __be16 eth_type; + eth_type = !is_mask ? htons(ETH_P_8021Q) : htons(0xffff); + if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) || + nla_put_be16(skb, OVS_KEY_ATTR_VLAN, output->eth.tci)) goto nla_put_failure; encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP); if (!swkey->eth.tci) goto unencap; - } else { + } else encap = NULL; - } - if (swkey->eth.type == htons(ETH_P_802_2)) + if (swkey->eth.type == htons(ETH_P_802_2)) { + /* + * Ethertype 802.2 is represented in the netlink with omitted + * OVS_KEY_ATTR_ETHERTYPE in the flow key attribute, and + * 0xffff in the mask attribute. Ethertype can also + * be wildcarded. + */ + if (is_mask && output->eth.type) + if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, + output->eth.type)) + goto nla_put_failure; goto unencap; + } - if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type)) + if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, output->eth.type)) goto nla_put_failure; if (swkey->eth.type == htons(ETH_P_IP)) { @@ -1369,12 +1773,12 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) if (!nla) goto nla_put_failure; ipv4_key = nla_data(nla); - ipv4_key->ipv4_src = swkey->ipv4.addr.src; - ipv4_key->ipv4_dst = swkey->ipv4.addr.dst; - ipv4_key->ipv4_proto = swkey->ip.proto; - ipv4_key->ipv4_tos = swkey->ip.tos; - ipv4_key->ipv4_ttl = swkey->ip.ttl; - ipv4_key->ipv4_frag = swkey->ip.frag; + ipv4_key->ipv4_src = output->ipv4.addr.src; + ipv4_key->ipv4_dst = output->ipv4.addr.dst; + ipv4_key->ipv4_proto = output->ip.proto; + ipv4_key->ipv4_tos = output->ip.tos; + ipv4_key->ipv4_ttl = output->ip.ttl; + ipv4_key->ipv4_frag = output->ip.frag; } else if (swkey->eth.type == htons(ETH_P_IPV6)) { struct ovs_key_ipv6 *ipv6_key; @@ -1382,15 +1786,15 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) if (!nla) goto nla_put_failure; ipv6_key = nla_data(nla); - memcpy(ipv6_key->ipv6_src, &swkey->ipv6.addr.src, + memcpy(ipv6_key->ipv6_src, &output->ipv6.addr.src, sizeof(ipv6_key->ipv6_src)); - memcpy(ipv6_key->ipv6_dst, &swkey->ipv6.addr.dst, + memcpy(ipv6_key->ipv6_dst, &output->ipv6.addr.dst, sizeof(ipv6_key->ipv6_dst)); - ipv6_key->ipv6_label = swkey->ipv6.label; - ipv6_key->ipv6_proto = swkey->ip.proto; - ipv6_key->ipv6_tclass = swkey->ip.tos; - ipv6_key->ipv6_hlimit = swkey->ip.ttl; - ipv6_key->ipv6_frag = swkey->ip.frag; + ipv6_key->ipv6_label = output->ipv6.label; + ipv6_key->ipv6_proto = output->ip.proto; + ipv6_key->ipv6_tclass = output->ip.tos; + ipv6_key->ipv6_hlimit = output->ip.ttl; + ipv6_key->ipv6_frag = output->ip.frag; } else if (swkey->eth.type == htons(ETH_P_ARP) || swkey->eth.type == htons(ETH_P_RARP)) { struct ovs_key_arp *arp_key; @@ -1400,11 
+1804,11 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) goto nla_put_failure; arp_key = nla_data(nla); memset(arp_key, 0, sizeof(struct ovs_key_arp)); - arp_key->arp_sip = swkey->ipv4.addr.src; - arp_key->arp_tip = swkey->ipv4.addr.dst; - arp_key->arp_op = htons(swkey->ip.proto); - memcpy(arp_key->arp_sha, swkey->ipv4.arp.sha, ETH_ALEN); - memcpy(arp_key->arp_tha, swkey->ipv4.arp.tha, ETH_ALEN); + arp_key->arp_sip = output->ipv4.addr.src; + arp_key->arp_tip = output->ipv4.addr.dst; + arp_key->arp_op = htons(output->ip.proto); + memcpy(arp_key->arp_sha, output->ipv4.arp.sha, ETH_ALEN); + memcpy(arp_key->arp_tha, output->ipv4.arp.tha, ETH_ALEN); } if ((swkey->eth.type == htons(ETH_P_IP) || @@ -1419,11 +1823,11 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) goto nla_put_failure; tcp_key = nla_data(nla); if (swkey->eth.type == htons(ETH_P_IP)) { - tcp_key->tcp_src = swkey->ipv4.tp.src; - tcp_key->tcp_dst = swkey->ipv4.tp.dst; + tcp_key->tcp_src = output->ipv4.tp.src; + tcp_key->tcp_dst = output->ipv4.tp.dst; } else if (swkey->eth.type == htons(ETH_P_IPV6)) { - tcp_key->tcp_src = swkey->ipv6.tp.src; - tcp_key->tcp_dst = swkey->ipv6.tp.dst; + tcp_key->tcp_src = output->ipv6.tp.src; + tcp_key->tcp_dst = output->ipv6.tp.dst; } } else if (swkey->ip.proto == IPPROTO_UDP) { struct ovs_key_udp *udp_key; @@ -1433,11 +1837,11 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) goto nla_put_failure; udp_key = nla_data(nla); if (swkey->eth.type == htons(ETH_P_IP)) { - udp_key->udp_src = swkey->ipv4.tp.src; - udp_key->udp_dst = swkey->ipv4.tp.dst; + udp_key->udp_src = output->ipv4.tp.src; + udp_key->udp_dst = output->ipv4.tp.dst; } else if (swkey->eth.type == htons(ETH_P_IPV6)) { - udp_key->udp_src = swkey->ipv6.tp.src; - udp_key->udp_dst = swkey->ipv6.tp.dst; + udp_key->udp_src = output->ipv6.tp.src; + udp_key->udp_dst = output->ipv6.tp.dst; } } else if (swkey->eth.type == htons(ETH_P_IP) && swkey->ip.proto == IPPROTO_ICMP) { @@ -1447,8 +1851,8 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) if (!nla) goto nla_put_failure; icmp_key = nla_data(nla); - icmp_key->icmp_type = ntohs(swkey->ipv4.tp.src); - icmp_key->icmp_code = ntohs(swkey->ipv4.tp.dst); + icmp_key->icmp_type = ntohs(output->ipv4.tp.src); + icmp_key->icmp_code = ntohs(output->ipv4.tp.dst); } else if (swkey->eth.type == htons(ETH_P_IPV6) && swkey->ip.proto == IPPROTO_ICMPV6) { struct ovs_key_icmpv6 *icmpv6_key; @@ -1458,8 +1862,8 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) if (!nla) goto nla_put_failure; icmpv6_key = nla_data(nla); - icmpv6_key->icmpv6_type = ntohs(swkey->ipv6.tp.src); - icmpv6_key->icmpv6_code = ntohs(swkey->ipv6.tp.dst); + icmpv6_key->icmpv6_type = ntohs(output->ipv6.tp.src); + icmpv6_key->icmpv6_code = ntohs(output->ipv6.tp.dst); if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION || icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) { @@ -1469,10 +1873,10 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) if (!nla) goto nla_put_failure; nd_key = nla_data(nla); - memcpy(nd_key->nd_target, &swkey->ipv6.nd.target, + memcpy(nd_key->nd_target, &output->ipv6.nd.target, sizeof(nd_key->nd_target)); - memcpy(nd_key->nd_sll, swkey->ipv6.nd.sll, ETH_ALEN); - memcpy(nd_key->nd_tll, swkey->ipv6.nd.tll, ETH_ALEN); + memcpy(nd_key->nd_sll, output->ipv6.nd.sll, ETH_ALEN); + memcpy(nd_key->nd_tll, output->ipv6.nd.tll, ETH_ALEN); } } 
} @@ -1504,3 +1908,84 @@ void ovs_flow_exit(void) { kmem_cache_destroy(flow_cache); } + +struct sw_flow_mask *ovs_sw_flow_mask_alloc(void) +{ + struct sw_flow_mask *mask; + + mask = kmalloc(sizeof(*mask), GFP_KERNEL); + if (mask) + mask->ref_count = 0; + + return mask; +} + +void ovs_sw_flow_mask_add_ref(struct sw_flow_mask *mask) +{ + mask->ref_count++; +} + +void ovs_sw_flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred) +{ + if (!mask) + return; + + BUG_ON(!mask->ref_count); + mask->ref_count--; + + if (!mask->ref_count) { + list_del_rcu(&mask->list); + if (deferred) + kfree_rcu(mask, rcu); + else + kfree(mask); + } +} + +static bool ovs_sw_flow_mask_equal(const struct sw_flow_mask *a, + const struct sw_flow_mask *b) +{ + u8 *a_ = (u8 *)&a->key + a->range.start; + u8 *b_ = (u8 *)&b->key + b->range.start; + + return (a->range.end == b->range.end) + && (a->range.start == b->range.start) + && (memcmp(a_, b_, ovs_sw_flow_mask_actual_size(a)) == 0); +} + +struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *tbl, + const struct sw_flow_mask *mask) +{ + struct list_head *ml; + + list_for_each(ml, tbl->mask_list) { + struct sw_flow_mask *m; + m = container_of(ml, struct sw_flow_mask, list); + if (ovs_sw_flow_mask_equal(mask, m)) + return m; + } + + return NULL; +} + +/** + * add a new mask into the mask list. + * The caller needs to make sure that 'mask' is not the same + * as any masks that are already on the list. + */ +void ovs_sw_flow_mask_insert(struct flow_table *tbl, struct sw_flow_mask *mask) +{ + list_add_rcu(&mask->list, tbl->mask_list); +} + +/** + * Set 'range' fields in the mask to the value of 'val'. + */ +static void ovs_sw_flow_mask_set(struct sw_flow_mask *mask, + struct sw_flow_key_range *range, u8 val) +{ + u8 *m = (u8 *)&mask->key + range->start; + + mask->range = *range; + memset(m, val, ovs_sw_flow_mask_size_roundup(mask)); +} diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h index 66ef722..9674e45 100644 --- a/net/openvswitch/flow.h +++ b/net/openvswitch/flow.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2011 Nicira, Inc. + * Copyright (c) 2007-2013 Nicira, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -33,6 +33,8 @@ #include struct sk_buff; +struct sw_flow_mask; +struct flow_table; struct sw_flow_actions { struct rcu_head rcu; @@ -131,6 +133,8 @@ struct sw_flow { u32 hash; struct sw_flow_key key; + struct sw_flow_key unmasked_key; + struct sw_flow_mask *mask; struct sw_flow_actions __rcu *sf_acts; spinlock_t lock; /* Lock for values below. */ @@ -140,6 +144,25 @@ struct sw_flow { u8 tcp_flags; /* Union of seen TCP flags. 
*/ }; +struct sw_flow_key_range { + size_t start; + size_t end; +}; + +static inline u16 ovs_sw_flow_key_range_actual_size(const struct sw_flow_key_range *range) +{ + return range->end - range->start; +} + +struct sw_flow_match { + struct sw_flow_key *key; + struct sw_flow_key_range range; + struct sw_flow_mask *mask; +}; + +void ovs_match_init(struct sw_flow_match *match, + struct sw_flow_key *key, struct sw_flow_mask *mask); + struct arp_eth_header { __be16 ar_hrd; /* format of hardware address */ __be16 ar_pro; /* format of protocol address */ @@ -159,21 +182,21 @@ void ovs_flow_exit(void); struct sw_flow *ovs_flow_alloc(void); void ovs_flow_deferred_free(struct sw_flow *); -void ovs_flow_free(struct sw_flow *flow); +void ovs_flow_free(struct sw_flow *, bool deferred); struct sw_flow_actions *ovs_flow_actions_alloc(int actions_len); void ovs_flow_deferred_free_acts(struct sw_flow_actions *); -int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *, - int *key_lenp); +int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *); void ovs_flow_used(struct sw_flow *, struct sk_buff *); u64 ovs_flow_used_time(unsigned long flow_jiffies); - -int ovs_flow_to_nlattrs(const struct sw_flow_key *, struct sk_buff *); -int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, +int ovs_flow_to_nlattrs(const struct sw_flow_key *, + const struct sw_flow_key *, struct sk_buff *); +int ovs_match_from_nlattrs(struct sw_flow_match *match, + const struct nlattr *, const struct nlattr *); -int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow, int key_len, - const struct nlattr *attr); +int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow, + const struct nlattr *attr); #define MAX_ACTIONS_BUFSIZE (32 * 1024) #define TBL_MIN_BUCKETS 1024 @@ -182,6 +205,7 @@ struct flow_table { struct flex_array *buckets; unsigned int count, n_buckets; struct rcu_head rcu; + struct list_head *mask_list; int node_ver; u32 hash_seed; bool keep_flows; @@ -197,22 +221,56 @@ static inline int ovs_flow_tbl_need_to_expand(struct flow_table *table) return (table->count > table->n_buckets); } -struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table, - struct sw_flow_key *key, int len); -void ovs_flow_tbl_destroy(struct flow_table *table); -void ovs_flow_tbl_deferred_destroy(struct flow_table *table); +struct sw_flow *ovs_flow_lookup(struct flow_table *, + const struct sw_flow_key *); +struct sw_flow *ovs_flow_lookup_unmasked_key(struct flow_table *table, + struct sw_flow_match *match); + +void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred); struct flow_table *ovs_flow_tbl_alloc(int new_size); struct flow_table *ovs_flow_tbl_expand(struct flow_table *table); struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table); -void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow, - struct sw_flow_key *key, int key_len); -void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow); -struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *idx); +void ovs_flow_insert(struct flow_table *table, struct sw_flow *flow); +void ovs_flow_remove(struct flow_table *table, struct sw_flow *flow); + +struct sw_flow *ovs_flow_dump_next(struct flow_table *table, u32 *bucket, u32 *idx); extern const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1]; int ovs_ipv4_tun_from_nlattr(const struct nlattr *attr, - struct ovs_key_ipv4_tunnel *tun_key); + struct sw_flow_match *match, bool is_mask); int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb, - const struct 
ovs_key_ipv4_tunnel *tun_key); + const struct ovs_key_ipv4_tunnel *tun_key, + const struct ovs_key_ipv4_tunnel *output); + +bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow, + const struct sw_flow_key *key, int key_len); + +struct sw_flow_mask { + int ref_count; + struct rcu_head rcu; + struct list_head list; + struct sw_flow_key_range range; + struct sw_flow_key key; +}; + +static inline u16 +ovs_sw_flow_mask_actual_size(const struct sw_flow_mask *mask) +{ + return ovs_sw_flow_key_range_actual_size(&mask->range); +} + +static inline u16 +ovs_sw_flow_mask_size_roundup(const struct sw_flow_mask *mask) +{ + return roundup(ovs_sw_flow_mask_actual_size(mask), sizeof(u32)); +} +struct sw_flow_mask *ovs_sw_flow_mask_alloc(void); +void ovs_sw_flow_mask_add_ref(struct sw_flow_mask *); +void ovs_sw_flow_mask_del_ref(struct sw_flow_mask *, bool deferred); +void ovs_sw_flow_mask_insert(struct flow_table *, struct sw_flow_mask *); +struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *, + const struct sw_flow_mask *); +void ovs_flow_key_mask(struct sw_flow_key *dst, const struct sw_flow_key *src, + const struct sw_flow_mask *mask); #endif /* flow.h */ -- cgit v0.10.2 From 280c571e1a0f553d4f2384f1a8b643b5de26e0ec Mon Sep 17 00:00:00 2001 From: Joe Stringer Date: Tue, 23 Jul 2013 13:37:45 +0900 Subject: net: Add NEXTHDR_SCTP to ipv6.h Signed-off-by: Joe Stringer Signed-off-by: Jesse Gross diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 5fe5649..7bdff04 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -41,6 +41,7 @@ #define NEXTHDR_ICMP 58 /* ICMP for IPv6. */ #define NEXTHDR_NONE 59 /* No next header */ #define NEXTHDR_DEST 60 /* Destination options header. */ +#define NEXTHDR_SCTP 132 /* SCTP message. */ #define NEXTHDR_MOBILITY 135 /* Mobility header. */ #define NEXTHDR_MAX 255 -- cgit v0.10.2 From b8e2fde466f7902fed4ad10bb60c1377b27cbfb7 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Fri, 23 Aug 2013 10:45:07 +0800 Subject: bonding: fix error return code in bond_enslave() Fix to return a negative error code in the add bond vlan ids error handling case instead of 0, as done elsewhere in this function. Introduced by commit 1ff412ad7714f6952f76ffd77f0a7f2f563288a1. (bonding: change the bond's vlan syncing functions with the standard ones) Signed-off-by: Wei Yongjun Acked-by: Nikolay Aleksandrov Signed-off-by: David S. Miller diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 4264a76..7407e65 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1603,7 +1603,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) dev_mc_add(slave_dev, lacpdu_multicast); } - if (vlan_vids_add_by_dev(slave_dev, bond_dev)) { + res = vlan_vids_add_by_dev(slave_dev, bond_dev); + if (res) { pr_err("%s: Error: Couldn't add bond vlan ids to %s\n", bond_dev->name, slave_dev->name); goto err_close; -- cgit v0.10.2 From b4de77ade3fc56e41b978b68d78a351dab28b74e Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 23 Aug 2013 11:15:37 +0300 Subject: ipip: potential race in ip_tunnel_init_net() Eric Dumazet says that my previous fix for an ERR_PTR dereference (ea857f28ab 'ipip: dereferencing an ERR_PTR in ip_tunnel_init_net()') could be racy and suggests the following fix instead. Reported-by: Eric Dumazet Signed-off-by: Dan Carpenter Signed-off-by: David S. 
Miller diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 24549b4..830de3f 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -854,16 +854,14 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, rtnl_lock(); itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms); - rtnl_unlock(); - - if (IS_ERR(itn->fb_tunnel_dev)) - return PTR_ERR(itn->fb_tunnel_dev); /* FB netdevice is special: we have one, and only one per netns. * Allowing to move it to another netns is clearly unsafe. */ - itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL; + if (!IS_ERR(itn->fb_tunnel_dev)) + itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL; + rtnl_unlock(); - return 0; + return PTR_RET(itn->fb_tunnel_dev); } EXPORT_SYMBOL_GPL(ip_tunnel_init_net); -- cgit v0.10.2 From bcdd822007e44b1da1ad5a62f20b75ee7d8da608 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Mon, 26 Aug 2013 15:32:58 +0800 Subject: mac80211_hwsim: fix error return code in init_mac80211_hwsim() Fix to return -ENOMEM in the netdev alloc error handling case instead of 0, as done elsewhere in this function. Signed-off-by: Wei Yongjun Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index a0d2aac..2cd3f54 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2528,8 +2528,10 @@ static int __init init_mac80211_hwsim(void) } hwsim_mon = alloc_netdev(0, "hwsim%d", hwsim_mon_setup); - if (hwsim_mon == NULL) + if (hwsim_mon == NULL) { + err = -ENOMEM; goto failed; + } rtnl_lock(); -- cgit v0.10.2 From a98655387762394371b88cdfb8215884757978ab Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 26 Aug 2013 09:30:32 +0200 Subject: mac80211: fix change_interface queue assignments Jouni reported that with mac80211_hwsim, multicast TX was causing crashes due to invalid vif->cab_queue assignment. It turns out that this is caused by change_interface() getting invoked and not having the vif->type/vif->p2p assigned correctly before calling the queue check (ieee80211_check_queues). Fix this by passing the 'external' interface type to the function and adjusting it accordingly. While at it, also fix the error path in change_interface, it wasn't correctly resetting to the external type but using the internal one instead. Fortunately this affects on hwsim because all other drivers set the vif->type/vif->p2p variables when changing iftype. This shouldn't be needed, but almost all implementations actually do it for their own internal handling. 
Reported-by: Jouni Malinen Signed-off-by: Johannes Berg diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 7ca534b..fcecd63 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -308,12 +308,13 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata, return 0; } -static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata) +static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata, + enum nl80211_iftype iftype) { int n_queues = sdata->local->hw.queues; int i; - if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE) { + if (iftype != NL80211_IFTYPE_P2P_DEVICE) { for (i = 0; i < IEEE80211_NUM_ACS; i++) { if (WARN_ON_ONCE(sdata->vif.hw_queue[i] == IEEE80211_INVAL_HW_QUEUE)) @@ -324,8 +325,9 @@ static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata) } } - if ((sdata->vif.type != NL80211_IFTYPE_AP && - sdata->vif.type != NL80211_IFTYPE_MESH_POINT) || + if ((iftype != NL80211_IFTYPE_AP && + iftype != NL80211_IFTYPE_P2P_GO && + iftype != NL80211_IFTYPE_MESH_POINT) || !(sdata->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)) { sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE; return 0; @@ -408,7 +410,7 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local) return ret; } - ret = ieee80211_check_queues(sdata); + ret = ieee80211_check_queues(sdata, NL80211_IFTYPE_MONITOR); if (ret) { kfree(sdata); return ret; @@ -592,7 +594,8 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) res = drv_add_interface(local, sdata); if (res) goto err_stop; - res = ieee80211_check_queues(sdata); + res = ieee80211_check_queues(sdata, + ieee80211_vif_type_p2p(&sdata->vif)); if (res) goto err_del_interface; } @@ -1389,14 +1392,14 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata, ret = drv_change_interface(local, sdata, internal_type, p2p); if (ret) - type = sdata->vif.type; + type = ieee80211_vif_type_p2p(&sdata->vif); /* * Ignore return value here, there's not much we can do since * the driver changed the interface type internally already. * The warnings will hopefully make driver authors fix it :-) */ - ieee80211_check_queues(sdata); + ieee80211_check_queues(sdata, type); ieee80211_setup_sdata(sdata, type); -- cgit v0.10.2 From 21c6af6b69b609b7934caaccda1b4535dceb402c Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Thu, 22 Aug 2013 20:53:21 +0200 Subject: rt2x00: rt2800lib: add rt2800_hw_beacon_base helper The HW_BEACON_BASE() macro returns the base address of a given beacon, however the returned values are not usable on all chipsets. On devices which have selectable shared memory parts, some beacon may be located in the high part of the shared memory. Instead of extending the already complicated macro, add a new helper function and use that to get the base address of a given beacon. The actual patch contains no functional changes, the helper function will be extended in a further patch to handle different chipsets' requirements. Signed-off-by: Gabor Juhos Acked-by: Helmut Schaa Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index aa6b6b0..38606e2 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -940,6 +940,12 @@ void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi) } EXPORT_SYMBOL_GPL(rt2800_txdone_entry); +static unsigned int rt2800_hw_beacon_base(struct rt2x00_dev *rt2x00dev, + unsigned int index) +{ + return HW_BEACON_BASE(index); +} + void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; @@ -992,7 +998,8 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc) return; } - beacon_base = HW_BEACON_BASE(entry->entry_idx); + beacon_base = rt2800_hw_beacon_base(rt2x00dev, entry->entry_idx); + rt2800_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data, entry->skb->len + padding_len); @@ -1017,7 +1024,7 @@ static inline void rt2800_clear_beacon_register(struct rt2x00_dev *rt2x00dev, const int txwi_desc_size = rt2x00dev->bcn->winfo_size; unsigned int beacon_base; - beacon_base = HW_BEACON_BASE(index); + beacon_base = rt2800_hw_beacon_base(rt2x00dev, index); /* * For the Beacon base registers we only need to clear -- cgit v0.10.2 From 634b80595fef79071d82bc231b7f82c4f808a1e8 Mon Sep 17 00:00:00 2001 From: Gabor Juhos Date: Thu, 22 Aug 2013 20:53:22 +0200 Subject: rt2x00: rt2800lib: don't hardcode beacon offsets The values written into the BCN_OFFSET[01] registers are hardcoded in the rt2800_init_register function. Add a macro and a helper function to derive these values directly from the base address of a given beacon, and use the new function instead of the hardcoded numbers. The patch contains no functional changes. The programmed register values are the same before and after the patch. Signed-off-by: Gabor Juhos Acked-by: Helmut Schaa Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h index e25e5bf..fa33b5e 100644 --- a/drivers/net/wireless/rt2x00/rt2800.h +++ b/drivers/net/wireless/rt2x00/rt2800.h @@ -2024,6 +2024,8 @@ struct mac_iveiv_entry { (((__index) < 6) ? (HW_BEACON_BASE4 + ((__index - 4) * 0x0200)) : \ (HW_BEACON_BASE6 - ((__index - 6) * 0x0200)))) +#define BEACON_BASE_TO_OFFSET(_base) (((_base) - 0x4000) / 64) + /* * BBP registers. * The wordsize of the BBP is 8 bits. 
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 38606e2..20243dc 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -946,6 +946,12 @@ static unsigned int rt2800_hw_beacon_base(struct rt2x00_dev *rt2x00dev, return HW_BEACON_BASE(index); } +static inline u8 rt2800_get_beacon_offset(struct rt2x00_dev *rt2x00dev, + unsigned int index) +{ + return BEACON_BASE_TO_OFFSET(rt2800_hw_beacon_base(rt2x00dev, index)); +} + void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; @@ -4474,17 +4480,25 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) return ret; rt2800_register_read(rt2x00dev, BCN_OFFSET0, ®); - rt2x00_set_field32(®, BCN_OFFSET0_BCN0, 0xe0); /* 0x3800 */ - rt2x00_set_field32(®, BCN_OFFSET0_BCN1, 0xe8); /* 0x3a00 */ - rt2x00_set_field32(®, BCN_OFFSET0_BCN2, 0xf0); /* 0x3c00 */ - rt2x00_set_field32(®, BCN_OFFSET0_BCN3, 0xf8); /* 0x3e00 */ + rt2x00_set_field32(®, BCN_OFFSET0_BCN0, + rt2800_get_beacon_offset(rt2x00dev, 0)); + rt2x00_set_field32(®, BCN_OFFSET0_BCN1, + rt2800_get_beacon_offset(rt2x00dev, 1)); + rt2x00_set_field32(®, BCN_OFFSET0_BCN2, + rt2800_get_beacon_offset(rt2x00dev, 2)); + rt2x00_set_field32(®, BCN_OFFSET0_BCN3, + rt2800_get_beacon_offset(rt2x00dev, 3)); rt2800_register_write(rt2x00dev, BCN_OFFSET0, reg); rt2800_register_read(rt2x00dev, BCN_OFFSET1, ®); - rt2x00_set_field32(®, BCN_OFFSET1_BCN4, 0xc8); /* 0x3200 */ - rt2x00_set_field32(®, BCN_OFFSET1_BCN5, 0xd0); /* 0x3400 */ - rt2x00_set_field32(®, BCN_OFFSET1_BCN6, 0x77); /* 0x1dc0 */ - rt2x00_set_field32(®, BCN_OFFSET1_BCN7, 0x6f); /* 0x1bc0 */ + rt2x00_set_field32(®, BCN_OFFSET1_BCN4, + rt2800_get_beacon_offset(rt2x00dev, 4)); + rt2x00_set_field32(®, BCN_OFFSET1_BCN5, + rt2800_get_beacon_offset(rt2x00dev, 5)); + rt2x00_set_field32(®, BCN_OFFSET1_BCN6, + rt2800_get_beacon_offset(rt2x00dev, 6)); + rt2x00_set_field32(®, BCN_OFFSET1_BCN7, + rt2800_get_beacon_offset(rt2x00dev, 7)); rt2800_register_write(rt2x00dev, BCN_OFFSET1, reg); rt2800_register_write(rt2x00dev, LEGACY_BASIC_RATE, 0x0000013f); -- cgit v0.10.2 From f4a83e578e0011ddcfdbe1c62d0916dadb4802aa Mon Sep 17 00:00:00 2001 From: Hauke Mehrtens Date: Fri, 23 Aug 2013 23:22:29 +0200 Subject: bcma: change max PCI read request size to 128 This PCIe controller does not support a max read request size above 128 bytes. The sold card I tested this controller with used 128 as default value, but some new routers are sold with BCM4331 chips, which have a default max read request size of 512. This device fails at the first DMA reqeust whch is bigger than 126 bytes. This patch changes the max read request size to 128 for every device on the PCIe link. Signed-off-by: Hauke Mehrtens Signed-off-by: John W. Linville diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c index 30629a3..c3d7b03 100644 --- a/drivers/bcma/driver_pci_host.c +++ b/drivers/bcma/driver_pci_host.c @@ -581,6 +581,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_addresses); int bcma_core_pci_plat_dev_init(struct pci_dev *dev) { struct bcma_drv_pci_host *pc_host; + int readrq; if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) { /* This is not a device on the PCI-core bridge. 
*/ @@ -595,6 +596,11 @@ int bcma_core_pci_plat_dev_init(struct pci_dev *dev) dev->irq = bcma_core_irq(pc_host->pdev->core); pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); + readrq = pcie_get_readrq(dev); + if (readrq > 128) { + pr_info("change PCIe max read request size from %i to 128\n", readrq); + pcie_set_readrq(dev, 128); + } return 0; } EXPORT_SYMBOL(bcma_core_pci_plat_dev_init); -- cgit v0.10.2 From cfe51ec1ae427ec0be5a7670eae815ce5eb30e1c Mon Sep 17 00:00:00 2001 From: Hauke Mehrtens Date: Sat, 24 Aug 2013 00:32:30 +0200 Subject: bcma: add method to power up and down the PCIe core by wifi driver The wifi driver should tell the PCIe core that it is now in operation so that some workarounds can be applied and the power state is changed. This should replace the call to bcma_core_pci_extend_L1timer by the brcmsmac driver. Signed-off-by: Hauke Mehrtens Signed-off-by: John W. Linville diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c index cf7a476..6ea817f 100644 --- a/drivers/bcma/driver_pci.c +++ b/drivers/bcma/driver_pci.c @@ -275,3 +275,29 @@ void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend) bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG); } EXPORT_SYMBOL_GPL(bcma_core_pci_extend_L1timer); + +void bcma_core_pci_up(struct bcma_bus *bus) +{ + struct bcma_drv_pci *pc; + + if (bus->hosttype != BCMA_HOSTTYPE_PCI) + return; + + pc = &bus->drv_pci[0]; + + bcma_core_pci_extend_L1timer(pc, true); +} +EXPORT_SYMBOL_GPL(bcma_core_pci_up); + +void bcma_core_pci_down(struct bcma_bus *bus) +{ + struct bcma_drv_pci *pc; + + if (bus->hosttype != BCMA_HOSTTYPE_PCI) + return; + + pc = &bus->drv_pci[0]; + + bcma_core_pci_extend_L1timer(pc, false); +} +EXPORT_SYMBOL_GPL(bcma_core_pci_down); diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h index 424760f..27f9ceb 100644 --- a/include/linux/bcma/bcma_driver_pci.h +++ b/include/linux/bcma/bcma_driver_pci.h @@ -185,6 +185,7 @@ struct pci_dev; #define BCMA_CORE_PCI_RC_CRS_VISIBILITY 0x0001 struct bcma_drv_pci; +struct bcma_bus; #ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE struct bcma_drv_pci_host { @@ -220,6 +221,8 @@ extern void bcma_core_pci_init(struct bcma_drv_pci *pc); extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core, bool enable); extern void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend); +extern void bcma_core_pci_up(struct bcma_bus *bus); +extern void bcma_core_pci_down(struct bcma_bus *bus); extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev); extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev); -- cgit v0.10.2 From aa51e598d04c6acf5477934cd6383f5a17ce9029 Mon Sep 17 00:00:00 2001 From: Hauke Mehrtens Date: Sat, 24 Aug 2013 00:32:31 +0200 Subject: brcmsmac: use bcma PCIe up and down functions replace the calls to bcma_core_pci_extend_L1timer() by calls to the newly introduced bcma_core_pci_ip() and bcma_core_pci_down() Signed-off-by: Hauke Mehrtens Cc: Arend van Spriel Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c index e4fd1ee..5336597 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c @@ -679,27 +679,6 @@ bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode) return mode == BCMA_CLKMODE_FAST; } -void ai_pci_up(struct si_pub *sih) -{ - struct si_info *sii; - - sii = container_of(sih, struct si_info, pub); - - if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI) - bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci[0], true); -} - -/* Unconfigure and/or apply various WARs when going down */ -void ai_pci_down(struct si_pub *sih) -{ - struct si_info *sii; - - sii = container_of(sih, struct si_info, pub); - - if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI) - bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci[0], false); -} - /* Enable BT-COEX & Ex-PA for 4313 */ void ai_epa_4313war(struct si_pub *sih) { diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h index 89562c1..a8a267b 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h +++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h @@ -183,9 +183,6 @@ extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih); extern bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode); extern bool ai_deviceremoved(struct si_pub *sih); -extern void ai_pci_down(struct si_pub *sih); -extern void ai_pci_up(struct si_pub *sih); - /* Enable Ex-PA for 4313 */ extern void ai_epa_4313war(struct si_pub *sih); diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c index c3c6123..4608e0e 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/main.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c @@ -4669,7 +4669,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core, brcms_c_coredisable(wlc_hw); /* Match driver "down" state */ - ai_pci_down(wlc_hw->sih); + bcma_core_pci_down(wlc_hw->d11core->bus); /* turn off pll and xtal to match driver "down" state */ brcms_b_xtal(wlc_hw, OFF); @@ -5012,12 +5012,12 @@ static int brcms_b_up_prep(struct brcms_hardware *wlc_hw) */ if (brcms_b_radio_read_hwdisabled(wlc_hw)) { /* put SB PCI in down state again */ - ai_pci_down(wlc_hw->sih); + bcma_core_pci_down(wlc_hw->d11core->bus); brcms_b_xtal(wlc_hw, OFF); return -ENOMEDIUM; } - ai_pci_up(wlc_hw->sih); + bcma_core_pci_up(wlc_hw->d11core->bus); /* reset the d11 core */ brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS); @@ -5214,7 +5214,7 @@ static int brcms_b_down_finish(struct brcms_hardware *wlc_hw) /* turn off primary xtal and pll */ if (!wlc_hw->noreset) { - ai_pci_down(wlc_hw->sih); + bcma_core_pci_down(wlc_hw->d11core->bus); brcms_b_xtal(wlc_hw, OFF); } } -- cgit v0.10.2 From 780335acc815802dcee63d75f5589d43c3ccb402 Mon Sep 17 00:00:00 2001 From: Hauke Mehrtens Date: Sat, 24 Aug 2013 00:32:32 +0200 Subject: bcma: do not export bcma_core_pci_extend_L1timer() This is not called any more, do not export it. Signed-off-by: Hauke Mehrtens Signed-off-by: John W. 
Linville diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c index 6ea817f..d51d194 100644 --- a/drivers/bcma/driver_pci.c +++ b/drivers/bcma/driver_pci.c @@ -262,7 +262,7 @@ out: } EXPORT_SYMBOL_GPL(bcma_core_pci_irq_ctl); -void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend) +static void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend) { u32 w; @@ -274,7 +274,6 @@ void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend) bcma_pcie_write(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG, w); bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG); } -EXPORT_SYMBOL_GPL(bcma_core_pci_extend_L1timer); void bcma_core_pci_up(struct bcma_bus *bus) { diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h index 27f9ceb..0234955 100644 --- a/include/linux/bcma/bcma_driver_pci.h +++ b/include/linux/bcma/bcma_driver_pci.h @@ -220,7 +220,6 @@ struct bcma_drv_pci { extern void bcma_core_pci_init(struct bcma_drv_pci *pc); extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core, bool enable); -extern void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend); extern void bcma_core_pci_up(struct bcma_bus *bus); extern void bcma_core_pci_down(struct bcma_bus *bus); -- cgit v0.10.2 From 521deea64088bc885a76bd174241eaa3d3a6876f Mon Sep 17 00:00:00 2001 From: Hauke Mehrtens Date: Sat, 24 Aug 2013 00:32:33 +0200 Subject: bcma: add bcma_core_pci_power_save() This enables or disables power saving on the PCIe bus when the wifi is in operation or not. Signed-off-by: Hauke Mehrtens Signed-off-by: John W. Linville diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c index d51d194..c9fd694 100644 --- a/drivers/bcma/driver_pci.c +++ b/drivers/bcma/driver_pci.c @@ -31,7 +31,7 @@ static void bcma_pcie_write(struct bcma_drv_pci *pc, u32 address, u32 data) pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_DATA, data); } -static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u8 phy) +static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u16 phy) { u32 v; int i; @@ -55,7 +55,7 @@ static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u8 phy) } } -static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u8 device, u8 address) +static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u16 device, u8 address) { int max_retries = 10; u16 ret = 0; @@ -98,7 +98,7 @@ static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u8 device, u8 address) return ret; } -static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u8 device, +static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u16 device, u8 address, u16 data) { int max_retries = 10; @@ -137,6 +137,13 @@ static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u8 device, pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0); } +static u16 bcma_pcie_mdio_writeread(struct bcma_drv_pci *pc, u16 device, + u8 address, u16 data) +{ + bcma_pcie_mdio_write(pc, device, address, data); + return bcma_pcie_mdio_read(pc, device, address); +} + /************************************************** * Workarounds. **************************************************/ @@ -203,6 +210,25 @@ static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc) } } +static void bcma_core_pci_power_save(struct bcma_drv_pci *pc, bool up) +{ + u16 data; + + if (pc->core->id.rev >= 15 && pc->core->id.rev <= 20) { + data = up ? 
0x74 : 0x7C; + bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1, + BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7F64); + bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1, + BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data); + } else if (pc->core->id.rev >= 21 && pc->core->id.rev <= 22) { + data = up ? 0x75 : 0x7D; + bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1, + BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7E65); + bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1, + BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data); + } +} + /************************************************** * Init. **************************************************/ @@ -284,6 +310,8 @@ void bcma_core_pci_up(struct bcma_bus *bus) pc = &bus->drv_pci[0]; + bcma_core_pci_power_save(pc, true); + bcma_core_pci_extend_L1timer(pc, true); } EXPORT_SYMBOL_GPL(bcma_core_pci_up); @@ -298,5 +326,7 @@ void bcma_core_pci_down(struct bcma_bus *bus) pc = &bus->drv_pci[0]; bcma_core_pci_extend_L1timer(pc, false); + + bcma_core_pci_power_save(pc, false); } EXPORT_SYMBOL_GPL(bcma_core_pci_down); diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h index 0234955..d66033f 100644 --- a/include/linux/bcma/bcma_driver_pci.h +++ b/include/linux/bcma/bcma_driver_pci.h @@ -181,6 +181,26 @@ struct pci_dev; #define BCMA_CORE_PCI_CFG_DEVCTRL 0xd8 +#define BCMA_CORE_PCI_ + +/* MDIO devices (SERDES modules) */ +#define BCMA_CORE_PCI_MDIO_IEEE0 0x000 +#define BCMA_CORE_PCI_MDIO_IEEE1 0x001 +#define BCMA_CORE_PCI_MDIO_BLK0 0x800 +#define BCMA_CORE_PCI_MDIO_BLK1 0x801 +#define BCMA_CORE_PCI_MDIO_BLK1_MGMT0 0x16 +#define BCMA_CORE_PCI_MDIO_BLK1_MGMT1 0x17 +#define BCMA_CORE_PCI_MDIO_BLK1_MGMT2 0x18 +#define BCMA_CORE_PCI_MDIO_BLK1_MGMT3 0x19 +#define BCMA_CORE_PCI_MDIO_BLK1_MGMT4 0x1A +#define BCMA_CORE_PCI_MDIO_BLK2 0x802 +#define BCMA_CORE_PCI_MDIO_BLK3 0x803 +#define BCMA_CORE_PCI_MDIO_BLK4 0x804 +#define BCMA_CORE_PCI_MDIO_TXPLL 0x808 /* TXPLL register block idx */ +#define BCMA_CORE_PCI_MDIO_TXCTRL0 0x820 +#define BCMA_CORE_PCI_MDIO_SERDESID 0x831 +#define BCMA_CORE_PCI_MDIO_RXCTRL0 0x840 + /* PCIE Root Capability Register bits (Host mode only) */ #define BCMA_CORE_PCI_RC_CRS_VISIBILITY 0x0001 -- cgit v0.10.2 From 50023008f63c162bdb4b688c65d826bad627120e Mon Sep 17 00:00:00 2001 From: Hauke Mehrtens Date: Sat, 24 Aug 2013 00:32:34 +0200 Subject: b43: call PCIe up and down functions Tell the PCIe host core when the wifi is activated. Signed-off-by: Hauke Mehrtens Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 0e933bb..ccd24f0a 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -4645,6 +4645,19 @@ static void b43_wireless_core_exit(struct b43_wldev *dev) b43_maskset32(dev, B43_MMIO_MACCTL, ~B43_MACCTL_PSM_RUN, B43_MACCTL_PSM_JMP0); + switch (dev->dev->bus_type) { +#ifdef CONFIG_B43_BCMA + case B43_BUS_BCMA: + bcma_core_pci_down(dev->dev->bdev->bus); + break; +#endif +#ifdef CONFIG_B43_SSB + case B43_BUS_SSB: + /* TODO */ + break; +#endif + } + b43_dma_free(dev); b43_pio_free(dev); b43_chip_exit(dev); @@ -4684,6 +4697,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev) case B43_BUS_BCMA: bcma_core_pci_irq_ctl(&dev->dev->bdev->bus->drv_pci[0], dev->dev->bdev, true); + bcma_core_pci_up(dev->dev->bdev->bus); break; #endif #ifdef CONFIG_B43_SSB -- cgit v0.10.2 From 027d3307f44c6ddccb2194474a18ff71cbff0ad9 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Fri, 23 Aug 2013 16:48:21 -0700 Subject: mwifiex: fix driver unload problem for usb chipsets We have usb_deregister() call in our rmmod routine. deauth, shutdown etc. commands will be sent to FW later when bus driver calls disconnect handler. This mechanism works fine with SDIO and PCIe interfaces, but there is an issue with USB. USB bus driver returns all URBs submitted for receiving data and command response immediately after usb_deregister() with failure status. Hence we don't send deauth, shutdown etc. command to firmware. The problem is fixed by moving code from disconnect handler to rmmod routine for USB interface. Signed-off-by: Amitkumar Karwar Signed-off-by: Bing Zhao Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c index fca98b5..2472d4b 100644 --- a/drivers/net/wireless/mwifiex/usb.c +++ b/drivers/net/wireless/mwifiex/usb.c @@ -24,9 +24,9 @@ static const char usbdriver_name[] = "usb8797"; -static u8 user_rmmod; static struct mwifiex_if_ops usb_ops; static struct semaphore add_remove_card_sem; +static struct usb_card_rec *usb_card; static struct usb_device_id mwifiex_usb_table[] = { {USB_DEVICE(USB8797_VID, USB8797_PID_1)}, @@ -350,6 +350,7 @@ static int mwifiex_usb_probe(struct usb_interface *intf, card->udev = udev; card->intf = intf; + usb_card = card; pr_debug("info: bcdUSB=%#x Device Class=%#x SubClass=%#x Protocol=%#x\n", udev->descriptor.bcdUSB, udev->descriptor.bDeviceClass, @@ -532,7 +533,6 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf) { struct usb_card_rec *card = usb_get_intfdata(intf); struct mwifiex_adapter *adapter; - int i; if (!card || !card->adapter) { pr_err("%s: card or card->adapter is NULL\n", __func__); @@ -543,27 +543,6 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf) if (!adapter->priv_num) return; - /* In case driver is removed when asynchronous FW downloading is - * in progress - */ - wait_for_completion(&adapter->fw_load); - - if (user_rmmod) { -#ifdef CONFIG_PM - if (adapter->is_suspended) - mwifiex_usb_resume(intf); -#endif - for (i = 0; i < adapter->priv_num; i++) - if ((GET_BSS_ROLE(adapter->priv[i]) == - MWIFIEX_BSS_ROLE_STA) && - adapter->priv[i]->media_connected) - mwifiex_deauthenticate(adapter->priv[i], NULL); - - mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter, - MWIFIEX_BSS_ROLE_ANY), - MWIFIEX_FUNC_SHUTDOWN); - } - mwifiex_usb_free(card); dev_dbg(adapter->dev, "%s: removing card\n", __func__); @@ -1032,8 +1011,29 @@ static void 
mwifiex_usb_cleanup_module(void) if (!down_interruptible(&add_remove_card_sem)) up(&add_remove_card_sem); - /* set the flag as user is removing this module */ - user_rmmod = 1; + if (usb_card) { + struct mwifiex_adapter *adapter = usb_card->adapter; + int i; + + /* In case driver is removed when asynchronous FW downloading is + * in progress + */ + wait_for_completion(&adapter->fw_load); + +#ifdef CONFIG_PM + if (adapter->is_suspended) + mwifiex_usb_resume(usb_card->intf); +#endif + for (i = 0; i < adapter->priv_num; i++) + if ((GET_BSS_ROLE(adapter->priv[i]) == + MWIFIEX_BSS_ROLE_STA) && + adapter->priv[i]->media_connected) + mwifiex_deauthenticate(adapter->priv[i], NULL); + + mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter, + MWIFIEX_BSS_ROLE_ANY), + MWIFIEX_FUNC_SHUTDOWN); + } usb_deregister(&mwifiex_usb_driver); } -- cgit v0.10.2 From 68f95b09c80af7a0b7a27a03227fa802d55f3616 Mon Sep 17 00:00:00 2001 From: Avinash Patil Date: Fri, 23 Aug 2013 16:48:22 -0700 Subject: mwifiex: fix ext_capab IE structure definition EXT_CAPAB_IE format involves IEEE header followed by bytestream of capabilities. Current structure has incorrect member u8 for data; fix it by defining it as u8[0]. Signed-off-by: Avinash Patil Signed-off-by: Bing Zhao Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c index 41e9d25..b579a2e 100644 --- a/drivers/net/wireless/mwifiex/11n.c +++ b/drivers/net/wireless/mwifiex/11n.c @@ -292,6 +292,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv, struct mwifiex_ie_types_extcap *ext_cap; int ret_len = 0; struct ieee80211_supported_band *sband; + struct ieee_types_header *hdr; u8 radio_type; if (!buffer || !*buffer) @@ -388,17 +389,18 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv, } if (bss_desc->bcn_ext_cap) { + hdr = (void *)bss_desc->bcn_ext_cap; ext_cap = (struct mwifiex_ie_types_extcap *) *buffer; memset(ext_cap, 0, sizeof(struct mwifiex_ie_types_extcap)); ext_cap->header.type = cpu_to_le16(WLAN_EID_EXT_CAPABILITY); - ext_cap->header.len = cpu_to_le16(sizeof(ext_cap->ext_cap)); + ext_cap->header.len = cpu_to_le16(hdr->len); - memcpy((u8 *)ext_cap + sizeof(struct mwifiex_ie_types_header), + memcpy((u8 *)ext_cap->ext_capab, bss_desc->bcn_ext_cap + sizeof(struct ieee_types_header), le16_to_cpu(ext_cap->header.len)); - *buffer += sizeof(struct mwifiex_ie_types_extcap); - ret_len += sizeof(struct mwifiex_ie_types_extcap); + *buffer += sizeof(struct mwifiex_ie_types_extcap) + hdr->len; + ret_len += sizeof(struct mwifiex_ie_types_extcap) + hdr->len; } return ret_len; diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h index c9ad1c0..f80f30b 100644 --- a/drivers/net/wireless/mwifiex/fw.h +++ b/drivers/net/wireless/mwifiex/fw.h @@ -1330,7 +1330,7 @@ struct mwifiex_ie_types_2040bssco { struct mwifiex_ie_types_extcap { struct mwifiex_ie_types_header header; - u8 ext_cap; + u8 ext_capab[0]; } __packed; struct host_cmd_ds_mac_reg_access { -- cgit v0.10.2 From 587b36d3642bdc921f4d624a740b6d91f779324b Mon Sep 17 00:00:00 2001 From: Avinash Patil Date: Fri, 23 Aug 2013 16:48:23 -0700 Subject: mwifiex: drop gratuitous ARP frames This patch adds support for dropping gratuitous ARP frames which is requirement for WFA Hotspot2.0. Hotspot2.0 capability is enabled in driver if extended capabilities IE from BSS descriptor has 11u interworking enabled. Signed-off-by: Avinash Patil Signed-off-by: Bing Zhao Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c index b579a2e..0b803c0 100644 --- a/drivers/net/wireless/mwifiex/11n.c +++ b/drivers/net/wireless/mwifiex/11n.c @@ -399,6 +399,12 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv, bss_desc->bcn_ext_cap + sizeof(struct ieee_types_header), le16_to_cpu(ext_cap->header.len)); + if (hdr->len > 3 && + ext_cap->ext_capab[3] & WLAN_EXT_CAPA4_INTERWORKING_ENABLED) + priv->hs2_enabled = true; + else + priv->hs2_enabled = false; + *buffer += sizeof(struct mwifiex_ie_types_extcap) + hdr->len; ret_len += sizeof(struct mwifiex_ie_types_extcap) + hdr->len; } diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index ca149ae..fbad00a 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -1508,6 +1508,7 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev, " reason code %d\n", priv->cfg_bssid, reason_code); memset(priv->cfg_bssid, 0, ETH_ALEN); + priv->hs2_enabled = false; return 0; } diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h index a599347..5c85d78 100644 --- a/drivers/net/wireless/mwifiex/decl.h +++ b/drivers/net/wireless/mwifiex/decl.h @@ -26,6 +26,7 @@ #include #include #include +#include #include @@ -152,4 +153,12 @@ struct mwifiex_types_wmm_info { u8 reserved; struct ieee_types_wmm_ac_parameters ac_params[IEEE80211_NUM_ACS]; } __packed; + +struct mwifiex_arp_eth_header { + struct arphdr hdr; + u8 ar_sha[ETH_ALEN]; + u8 ar_sip[4]; + u8 ar_tha[ETH_ALEN]; + u8 ar_tip[4]; +} __packed; #endif /* !_MWIFIEX_DECL_H_ */ diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c index e021a58..6499117 100644 --- a/drivers/net/wireless/mwifiex/init.c +++ b/drivers/net/wireless/mwifiex/init.c @@ -136,6 +136,7 @@ int mwifiex_init_priv(struct mwifiex_private *priv) priv->csa_chan = 0; priv->csa_expire_time = 0; priv->del_list_idx = 0; + priv->hs2_enabled = false; return mwifiex_add_bss_prio_tbl(priv); } diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index d2e5ccd..7d4d137 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h @@ -516,6 +516,7 @@ struct mwifiex_private { u8 csa_chan; unsigned long csa_expire_time; u8 del_list_idx; + bool hs2_enabled; }; enum mwifiex_ba_status { diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c index b5c1095..bb22664 100644 --- a/drivers/net/wireless/mwifiex/sta_rx.c +++ b/drivers/net/wireless/mwifiex/sta_rx.c @@ -17,6 +17,8 @@ * this warranty disclaimer. */ +#include +#include #include "decl.h" #include "ioctl.h" #include "util.h" @@ -25,6 +27,46 @@ #include "11n_aggr.h" #include "11n_rxreorder.h" +/* This function checks if a frame is IPv4 ARP or IPv6 Neighbour advertisement + * frame. If frame has both source and destination mac address as same, this + * function drops such gratuitous frames. 
+ */ +static bool +mwifiex_discard_gratuitous_arp(struct mwifiex_private *priv, + struct sk_buff *skb) +{ + const struct mwifiex_arp_eth_header *arp; + struct ethhdr *eth_hdr; + struct ipv6hdr *ipv6; + struct icmp6hdr *icmpv6; + + eth_hdr = (struct ethhdr *)skb->data; + switch (ntohs(eth_hdr->h_proto)) { + case ETH_P_ARP: + arp = (void *)(skb->data + sizeof(struct ethhdr)); + if (arp->hdr.ar_op == htons(ARPOP_REPLY) || + arp->hdr.ar_op == htons(ARPOP_REQUEST)) { + if (!memcmp(arp->ar_sip, arp->ar_tip, 4)) + return true; + } + break; + case ETH_P_IPV6: + ipv6 = (void *)(skb->data + sizeof(struct ethhdr)); + icmpv6 = (void *)(skb->data + sizeof(struct ethhdr) + + sizeof(struct ipv6hdr)); + if (NDISC_NEIGHBOUR_ADVERTISEMENT == icmpv6->icmp6_type) { + if (!memcmp(&ipv6->saddr, &ipv6->daddr, + sizeof(struct in6_addr))) + return true; + } + break; + default: + break; + } + + return false; +} + /* * This function processes the received packet and forwards it * to kernel/upper layer. @@ -90,6 +132,13 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv, either the reconstructed EthII frame or the 802.2/llc/snap frame */ skb_pull(skb, hdr_chop); + if (priv->hs2_enabled && + mwifiex_discard_gratuitous_arp(priv, skb)) { + dev_dbg(priv->adapter->dev, "Bypassed Gratuitous ARP\n"); + dev_kfree_skb_any(skb); + return 0; + } + priv->rxpd_rate = local_rx_pd->rx_rate; priv->rxpd_htinfo = local_rx_pd->ht_info; -- cgit v0.10.2 From b380a43b52bee70f2e31ed573d33191efd82f5ae Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Sun, 25 Aug 2013 14:43:09 +0530 Subject: ath9k: Fix ASPM for AR9462 If the L1 entrance latency is not calibrated properly in the EEPROM in WB222 boards, there could be problems in connectivity. Check and correct the calibrated value if it doesn't match the optimal value for WB222, 4us. Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c index 738aa7e..582cddd 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c @@ -745,6 +745,20 @@ static void ar9003_hw_init_mode_gain_regs(struct ath_hw *ah) static void ar9003_hw_configpcipowersave(struct ath_hw *ah, bool power_off) { + /* + * Increase L1 Entry Latency. Some WB222 boards don't have + * this change in eeprom/OTP. + * + */ + if (AR_SREV_9462(ah)) { + u32 val = ah->config.aspm_l1_fix; + if ((val & 0xff000000) == 0x17000000) { + val &= 0x00ffffff; + val |= 0x27000000; + REG_WRITE(ah, 0x570c, val); + } + } + /* Nothing to do on restore for 11N */ if (!power_off /* !restore */) { /* set bit 19 to allow forcing of pcie core into L1 state */ diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index fa543a6..69a907b 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -311,6 +311,7 @@ struct ath9k_ops_config { u16 ani_poll_interval; /* ANI poll interval in ms */ /* Platform specific config */ + u32 aspm_l1_fix; u32 xlna_gpio; u32 ant_ctrl_comm2g_switch_enable; bool xatten_margin_cfg; diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index 3280798..e7996a6 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -314,6 +314,22 @@ static void ath_pci_aspm_init(struct ath_common *common) return; } + /* + * 0x70c - Ack Frequency Register. + * + * Bits 27:29 - DEFAULT_L1_ENTRANCE_LATENCY. 
+ * + * 000 : 1 us + * 001 : 2 us + * 010 : 4 us + * 011 : 8 us + * 100 : 16 us + * 101 : 32 us + * 110/111 : 64 us + */ + if (AR_SREV_9462(ah)) + pci_read_config_dword(pdev, 0x70c, &ah->config.aspm_l1_fix); + pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &aspm); if (aspm & (PCI_EXP_LNKCTL_ASPM_L0S | PCI_EXP_LNKCTL_ASPM_L1)) { ah->aspm_enabled = true; -- cgit v0.10.2 From 8aada63cc408874916a19341ba514f941096e424 Mon Sep 17 00:00:00 2001 From: Djalal Harouni Date: Sun, 25 Aug 2013 11:50:49 +0100 Subject: ath5k: debugfs: NULL-terminate strings Avoid processing garbage data by NULL terminating the strings. Signed-off-by: Djalal Harouni Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c index 9d00dab..b8d031a 100644 --- a/drivers/net/wireless/ath/ath5k/debug.c +++ b/drivers/net/wireless/ath/ath5k/debug.c @@ -245,9 +245,11 @@ static ssize_t write_file_beacon(struct file *file, struct ath5k_hw *ah = file->private_data; char buf[20]; - if (copy_from_user(buf, userbuf, min(count, sizeof(buf)))) + count = min_t(size_t, count, sizeof(buf) - 1); + if (copy_from_user(buf, userbuf, count)) return -EFAULT; + buf[count] = '\0'; if (strncmp(buf, "disable", 7) == 0) { AR5K_REG_DISABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE); pr_info("debugfs disable beacons\n"); @@ -345,9 +347,11 @@ static ssize_t write_file_debug(struct file *file, unsigned int i; char buf[20]; - if (copy_from_user(buf, userbuf, min(count, sizeof(buf)))) + count = min_t(size_t, count, sizeof(buf) - 1); + if (copy_from_user(buf, userbuf, count)) return -EFAULT; + buf[count] = '\0'; for (i = 0; i < ARRAY_SIZE(dbg_info); i++) { if (strncmp(buf, dbg_info[i].name, strlen(dbg_info[i].name)) == 0) { @@ -448,9 +452,11 @@ static ssize_t write_file_antenna(struct file *file, unsigned int i; char buf[20]; - if (copy_from_user(buf, userbuf, min(count, sizeof(buf)))) + count = min_t(size_t, count, sizeof(buf) - 1); + if (copy_from_user(buf, userbuf, count)) return -EFAULT; + buf[count] = '\0'; if (strncmp(buf, "diversity", 9) == 0) { ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT); pr_info("debug: enable diversity\n"); @@ -619,9 +625,11 @@ static ssize_t write_file_frameerrors(struct file *file, struct ath5k_statistics *st = &ah->stats; char buf[20]; - if (copy_from_user(buf, userbuf, min(count, sizeof(buf)))) + count = min_t(size_t, count, sizeof(buf) - 1); + if (copy_from_user(buf, userbuf, count)) return -EFAULT; + buf[count] = '\0'; if (strncmp(buf, "clear", 5) == 0) { st->rxerr_crc = 0; st->rxerr_phy = 0; @@ -766,9 +774,11 @@ static ssize_t write_file_ani(struct file *file, struct ath5k_hw *ah = file->private_data; char buf[20]; - if (copy_from_user(buf, userbuf, min(count, sizeof(buf)))) + count = min_t(size_t, count, sizeof(buf) - 1); + if (copy_from_user(buf, userbuf, count)) return -EFAULT; + buf[count] = '\0'; if (strncmp(buf, "sens-low", 8) == 0) { ath5k_ani_init(ah, ATH5K_ANI_MODE_MANUAL_HIGH); } else if (strncmp(buf, "sens-high", 9) == 0) { @@ -862,9 +872,11 @@ static ssize_t write_file_queue(struct file *file, struct ath5k_hw *ah = file->private_data; char buf[20]; - if (copy_from_user(buf, userbuf, min(count, sizeof(buf)))) + count = min_t(size_t, count, sizeof(buf) - 1); + if (copy_from_user(buf, userbuf, count)) return -EFAULT; + buf[count] = '\0'; if (strncmp(buf, "start", 5) == 0) ieee80211_wake_queues(ah->hw); else if (strncmp(buf, "stop", 4) == 0) -- cgit v0.10.2 From d1ae25a0174938f03e28dee8f3269a826fc1bec5 Mon Sep 17 00:00:00 2001 From: Sujith 
Manoharan Date: Sun, 25 Aug 2013 16:30:40 +0530 Subject: ath9k: Fix ASPM workaround usage The PCIE Workaround register (AR_WA/0x4004) is used to handle various hardware quirks. For AR9002 chips, AR_WA_D3_L1_DISABLE is used to prevent the HW from automatically entering L1 state when D3 is enforced. AR_WA_D3_L1_DISABLE has to be enabled for a few AR9280 based cards, mark them based on their PCI subdevice/subvendor IDs and enforce it in ar9002_hw_configpcipowersave(). Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c index 8dc2d08..fb61b08 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c @@ -269,13 +269,12 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah, if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE) val |= AR_WA_D3_L1_DISABLE; } else { - if (((AR_SREV_9285(ah) || - AR_SREV_9271(ah) || - AR_SREV_9287(ah)) && - (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) || - (AR_SREV_9280(ah) && - (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) { - val |= AR_WA_D3_L1_DISABLE; + if (AR_SREV_9285(ah) || AR_SREV_9271(ah) || AR_SREV_9287(ah)) { + if (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE) + val |= AR_WA_D3_L1_DISABLE; + } else if (AR_SREV_9280(ah)) { + if (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE) + val |= AR_WA_D3_L1_DISABLE; } } @@ -297,24 +296,18 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah, } else { if (ah->config.pcie_waen) { val = ah->config.pcie_waen; - if (!power_off) - val &= (~AR_WA_D3_L1_DISABLE); + val &= (~AR_WA_D3_L1_DISABLE); } else { - if (AR_SREV_9285(ah) || - AR_SREV_9271(ah) || - AR_SREV_9287(ah)) { + if (AR_SREV_9285(ah) || AR_SREV_9271(ah) || AR_SREV_9287(ah)) { val = AR9285_WA_DEFAULT; - if (!power_off) - val &= (~AR_WA_D3_L1_DISABLE); - } - else if (AR_SREV_9280(ah)) { + val &= (~AR_WA_D3_L1_DISABLE); + } else if (AR_SREV_9280(ah)) { /* * For AR9280 chips, bit 22 of 0x4004 * needs to be set. */ val = AR9280_WA_DEFAULT; - if (!power_off) - val &= (~AR_WA_D3_L1_DISABLE); + val &= (~AR_WA_D3_L1_DISABLE); } else { val = AR_WA_DEFAULT; } diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c index 582cddd..608bb48 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c @@ -763,12 +763,7 @@ static void ar9003_hw_configpcipowersave(struct ath_hw *ah, if (!power_off /* !restore */) { /* set bit 19 to allow forcing of pcie core into L1 state */ REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA); - - /* Several PCIe massages to ensure proper behaviour */ - if (ah->config.pcie_waen) - REG_WRITE(ah, AR_WA, ah->config.pcie_waen); - else - REG_WRITE(ah, AR_WA, ah->WARegVal); + REG_WRITE(ah, AR_WA, ah->WARegVal); } /* diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 8519e75..2ee35f6 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -631,6 +631,7 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs); #define ATH9K_PCI_CUS217 0x0004 #define ATH9K_PCI_WOW 0x0008 #define ATH9K_PCI_BT_ANT_DIV 0x0010 +#define ATH9K_PCI_D3_L1_WAR 0x0020 /* * Default cache line size, in bytes. 
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index b3a6891..2670bf6 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -450,7 +450,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah) ah->config.ack_6mb = 0x0; ah->config.cwm_ignore_extcca = 0; ah->config.pcie_clock_req = 0; - ah->config.pcie_waen = 0; ah->config.analog_shiftreg = 1; for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 19b46c7..c9f787d 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -551,6 +551,11 @@ static void ath9k_init_platform(struct ath_softc *sc) pCap->hw_caps |= ATH9K_HW_CAP_BT_ANT_DIV; ath_info(common, "Set BT/WLAN RX diversity capability\n"); } + + if (sc->driver_data & ATH9K_PCI_D3_L1_WAR) { + ah->config.pcie_waen = 0x0040473b; + ath_info(common, "Enable WAR for ASPM D3/L1\n"); + } } static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob, diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index e7996a6..d089a7c 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -30,6 +30,52 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = { { PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */ { PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */ + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x002A, + PCI_VENDOR_ID_AZWAVE, + 0x1C71), + .driver_data = ATH9K_PCI_D3_L1_WAR }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x002A, + PCI_VENDOR_ID_FOXCONN, + 0xE01F), + .driver_data = ATH9K_PCI_D3_L1_WAR }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x002A, + 0x11AD, /* LITEON */ + 0x6632), + .driver_data = ATH9K_PCI_D3_L1_WAR }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x002A, + 0x11AD, /* LITEON */ + 0x6642), + .driver_data = ATH9K_PCI_D3_L1_WAR }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x002A, + PCI_VENDOR_ID_QMI, + 0x0306), + .driver_data = ATH9K_PCI_D3_L1_WAR }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x002A, + 0x185F, /* WNC */ + 0x309D), + .driver_data = ATH9K_PCI_D3_L1_WAR }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x002A, + 0x10CF, /* Fujitsu */ + 0x147C), + .driver_data = ATH9K_PCI_D3_L1_WAR }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x002A, + 0x10CF, /* Fujitsu */ + 0x147D), + .driver_data = ATH9K_PCI_D3_L1_WAR }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x002A, + 0x10CF, /* Fujitsu */ + 0x1536), + .driver_data = ATH9K_PCI_D3_L1_WAR }, + /* AR9285 card for Asus */ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, 0x002B, -- cgit v0.10.2 From d4c04ba141d4c5981aebc0aa0ecadc0f16f99387 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Mon, 26 Aug 2013 11:47:22 +0530 Subject: ath9k: Fix TX poll work locking There is no need to call ath_txq_unlock_complete() in the TX poll routine - frame completion is not done here, so use ath_txq_unlock(). Signed-off-by: Sujith Manoharan Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c index fff5d3c..2f831db 100644 --- a/drivers/net/wireless/ath/ath9k/link.c +++ b/drivers/net/wireless/ath/ath9k/link.c @@ -41,7 +41,7 @@ void ath_tx_complete_poll_work(struct work_struct *work) txq->axq_tx_inprogress = true; } } - ath_txq_unlock_complete(sc, txq); + ath_txq_unlock(sc, txq); } if (needreset) { -- cgit v0.10.2 From cfcd926ea2bc46aaca4e9ca3f0a3b8ac35b9d691 Mon Sep 17 00:00:00 2001 From: Tobias Waldekranz Date: Mon, 26 Aug 2013 09:18:06 +0200 Subject: mwifiex: add missing endian conversions Fixes multiple locations where a little endian host is assumed during ser/des of messages sent to/received from the chip. Signed-off-by: Tobias Waldekranz Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c index 8f9f542..f69d7e0 100644 --- a/drivers/net/wireless/mwifiex/11n_aggr.c +++ b/drivers/net/wireless/mwifiex/11n_aggr.c @@ -69,7 +69,7 @@ mwifiex_11n_form_amsdu_pkt(struct sk_buff *skb_aggr, memcpy(&tx_header->eth803_hdr, skb_src->data, dt_offset); /* Copy SNAP header */ - snap.snap_type = *(u16 *) ((u8 *)skb_src->data + dt_offset); + snap.snap_type = le16_to_cpu(*(__le16 *) ((u8 *)skb_src->data + dt_offset)); dt_offset += sizeof(u16); memcpy(&tx_header->rfc1042_hdr, &snap, sizeof(struct rfc_1042_hdr)); diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index 7d4d137..1d72f13 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h @@ -1026,7 +1026,7 @@ mwifiex_netdev_get_priv(struct net_device *dev) */ static inline bool mwifiex_is_skb_mgmt_frame(struct sk_buff *skb) { - return (*(u32 *)skb->data == PKT_TYPE_MGMT); + return (le32_to_cpu(*(__le32 *)skb->data) == PKT_TYPE_MGMT); } /* This function retrieves channel closed for operation by Channel diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c index 0e2070f..1576104 100644 --- a/drivers/net/wireless/mwifiex/sdio.c +++ b/drivers/net/wireless/mwifiex/sdio.c @@ -1062,7 +1062,7 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter, case MWIFIEX_TYPE_EVENT: dev_dbg(adapter->dev, "info: --- Rx: Event ---\n"); - adapter->event_cause = *(u32 *) skb->data; + adapter->event_cause = le32_to_cpu(*(__le32 *) skb->data); if ((skb->len > 0) && (skb->len < MAX_EVENT_SIZE)) memcpy(adapter->event_body, @@ -1207,8 +1207,8 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, for (pind = 0; pind < card->mpa_rx.pkt_cnt; pind++) { /* get curr PKT len & type */ - pkt_len = *(u16 *) &curr_ptr[0]; - pkt_type = *(u16 *) &curr_ptr[2]; + pkt_len = le16_to_cpu(*(__le16 *) &curr_ptr[0]); + pkt_type = le16_to_cpu(*(__le16 *) &curr_ptr[2]); /* copy pkt to deaggr buf */ skb_deaggr = card->mpa_rx.skb_arr[pind]; diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c index 6a814eb..58a6013 100644 --- a/drivers/net/wireless/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c @@ -280,7 +280,7 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv, tlv_buf = ((u8 *)rate_cfg) + sizeof(struct host_cmd_ds_tx_rate_cfg); - tlv_buf_len = *(u16 *) (tlv_buf + sizeof(u16)); + tlv_buf_len = le16_to_cpu(*(__le16 *) (tlv_buf + sizeof(u16))); while (tlv_buf && tlv_buf_len > 0) { tlv = (*tlv_buf); -- cgit v0.10.2 From 58fe431342fdcf53fe93c4482351a9aa46762a11 Mon Sep 17 00:00:00 2001 From: Wei 
Yongjun Date: Mon, 26 Aug 2013 15:32:01 +0800 Subject: zd1201: fix error return code Fix to return -ENOMEM in the memory alloc error handling case instead of 0, as done elsewhere in this function. Signed-off-by: Wei Yongjun Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c index 4941f20..73aa738 100644 --- a/drivers/net/wireless/zd1201.c +++ b/drivers/net/wireless/zd1201.c @@ -75,8 +75,10 @@ static int zd1201_fw_upload(struct usb_device *dev, int apfw) len = fw_entry->size; buf = kmalloc(1024, GFP_ATOMIC); - if (!buf) + if (!buf) { + err = -ENOMEM; goto exit; + } while (len > 0) { int translen = (len > 1024) ? 1024 : len; @@ -1762,8 +1764,10 @@ static int zd1201_probe(struct usb_interface *interface, zd->endp_out2 = 2; zd->rx_urb = usb_alloc_urb(0, GFP_KERNEL); zd->tx_urb = usb_alloc_urb(0, GFP_KERNEL); - if (!zd->rx_urb || !zd->tx_urb) + if (!zd->rx_urb || !zd->tx_urb) { + err = -ENOMEM; goto err_zd; + } mdelay(100); err = zd1201_drvr_start(zd); -- cgit v0.10.2 From e5614a91be86b4b2d56f68c036fd7e81473b4016 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Mon, 26 Aug 2013 13:23:43 +0530 Subject: ath9k: Fix DEBUG_FS dependency for ath9k Reported-by: Johannes Berg Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig index c91bc61..7944c25 100644 --- a/drivers/net/wireless/ath/ath9k/Kconfig +++ b/drivers/net/wireless/ath/ath9k/Kconfig @@ -56,7 +56,7 @@ config ATH9K_AHB config ATH9K_DEBUGFS bool "Atheros ath9k debugging" - depends on ATH9K + depends on ATH9K && DEBUG_FS select MAC80211_DEBUGFS select RELAY ---help--- -- cgit v0.10.2 From 93ba2a856f75aead35a043f169a49ce398afe737 Mon Sep 17 00:00:00 2001 From: Andrea Merello Date: Mon, 26 Aug 2013 13:53:30 +0200 Subject: Update e-mail address for Andrea Merello (resubmit) A lot of files contain reference to my old e-mail address. Now I'm going not to read mail from it anymore, so update it with my current address everywhere. Signed-off-by: Andrea Merello Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c index 91a04e2..fc207b2 100644 --- a/drivers/net/wireless/rtl818x/rtl8180/dev.c +++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c @@ -3,10 +3,10 @@ * Linux device driver for RTL8180 / RTL8185 * * Copyright 2007 Michael Wu - * Copyright 2007 Andrea Merello + * Copyright 2007 Andrea Merello * * Based on the r8180 driver, which is: - * Copyright 2004-2005 Andrea Merello , et al. + * Copyright 2004-2005 Andrea Merello , et al. * * Thanks to Realtek for their support! 
* @@ -32,7 +32,7 @@ #include "grf5101.h" MODULE_AUTHOR("Michael Wu "); -MODULE_AUTHOR("Andrea Merello "); +MODULE_AUTHOR("Andrea Merello "); MODULE_DESCRIPTION("RTL8180 / RTL8185 PCI wireless driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/rtl818x/rtl8180/grf5101.c b/drivers/net/wireless/rtl818x/rtl8180/grf5101.c index 077ff92..dc845693 100644 --- a/drivers/net/wireless/rtl818x/rtl8180/grf5101.c +++ b/drivers/net/wireless/rtl818x/rtl8180/grf5101.c @@ -2,7 +2,7 @@ /* * Radio tuning for GCT GRF5101 on RTL8180 * - * Copyright 2007 Andrea Merello + * Copyright 2007 Andrea Merello * * Code from the BSD driver and the rtl8181 project have been * very useful to understand certain things diff --git a/drivers/net/wireless/rtl818x/rtl8180/grf5101.h b/drivers/net/wireless/rtl818x/rtl8180/grf5101.h index 7664711..4d80a27 100644 --- a/drivers/net/wireless/rtl818x/rtl8180/grf5101.h +++ b/drivers/net/wireless/rtl818x/rtl8180/grf5101.h @@ -4,7 +4,7 @@ /* * Radio tuning for GCT GRF5101 on RTL8180 * - * Copyright 2007 Andrea Merello + * Copyright 2007 Andrea Merello * * Code from the BSD driver and the rtl8181 project have been * very useful to understand certain things diff --git a/drivers/net/wireless/rtl818x/rtl8180/max2820.c b/drivers/net/wireless/rtl818x/rtl8180/max2820.c index 4715000..a63c443 100644 --- a/drivers/net/wireless/rtl818x/rtl8180/max2820.c +++ b/drivers/net/wireless/rtl818x/rtl8180/max2820.c @@ -1,7 +1,7 @@ /* * Radio tuning for Maxim max2820 on RTL8180 * - * Copyright 2007 Andrea Merello + * Copyright 2007 Andrea Merello * * Code from the BSD driver and the rtl8181 project have been * very useful to understand certain things diff --git a/drivers/net/wireless/rtl818x/rtl8180/max2820.h b/drivers/net/wireless/rtl818x/rtl8180/max2820.h index 61cf6d1..8e982b7 100644 --- a/drivers/net/wireless/rtl818x/rtl8180/max2820.h +++ b/drivers/net/wireless/rtl818x/rtl8180/max2820.h @@ -4,7 +4,7 @@ /* * Radio tuning for Maxim max2820 on RTL8180 * - * Copyright 2007 Andrea Merello + * Copyright 2007 Andrea Merello * * Code from the BSD driver and the rtl8181 project have been * very useful to understand certain things diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c b/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c index cc2a541..ee638d0 100644 --- a/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c +++ b/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c @@ -3,10 +3,10 @@ * Radio tuning for RTL8225 on RTL8180 * * Copyright 2007 Michael Wu - * Copyright 2007 Andrea Merello + * Copyright 2007 Andrea Merello * * Based on the r8180 driver, which is: - * Copyright 2005 Andrea Merello , et al. + * Copyright 2005 Andrea Merello , et al. * * Thanks to Realtek for their support! 
* diff --git a/drivers/net/wireless/rtl818x/rtl8180/sa2400.c b/drivers/net/wireless/rtl818x/rtl8180/sa2400.c index b3ec40f..7614d9c 100644 --- a/drivers/net/wireless/rtl818x/rtl8180/sa2400.c +++ b/drivers/net/wireless/rtl818x/rtl8180/sa2400.c @@ -2,7 +2,7 @@ /* * Radio tuning for Philips SA2400 on RTL8180 * - * Copyright 2007 Andrea Merello + * Copyright 2007 Andrea Merello * * Code from the BSD driver and the rtl8181 project have been * very useful to understand certain things diff --git a/drivers/net/wireless/rtl818x/rtl8180/sa2400.h b/drivers/net/wireless/rtl818x/rtl8180/sa2400.h index a4aaa0d..fb0093f 100644 --- a/drivers/net/wireless/rtl818x/rtl8180/sa2400.h +++ b/drivers/net/wireless/rtl818x/rtl8180/sa2400.h @@ -4,7 +4,7 @@ /* * Radio tuning for Philips SA2400 on RTL8180 * - * Copyright 2007 Andrea Merello + * Copyright 2007 Andrea Merello * * Code from the BSD driver and the rtl8181 project have been * very useful to understand certain things diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c index f49220e..841fb9d 100644 --- a/drivers/net/wireless/rtl818x/rtl8187/dev.c +++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c @@ -2,10 +2,10 @@ * Linux device driver for RTL8187 * * Copyright 2007 Michael Wu - * Copyright 2007 Andrea Merello + * Copyright 2007 Andrea Merello * * Based on the r8187 driver, which is: - * Copyright 2005 Andrea Merello , et al. + * Copyright 2005 Andrea Merello , et al. * * The driver was extended to the RTL8187B in 2008 by: * Herton Ronaldo Krzesinski @@ -37,7 +37,7 @@ #include "rfkill.h" MODULE_AUTHOR("Michael Wu "); -MODULE_AUTHOR("Andrea Merello "); +MODULE_AUTHOR("Andrea Merello "); MODULE_AUTHOR("Herton Ronaldo Krzesinski "); MODULE_AUTHOR("Hin-Tak Leung "); MODULE_AUTHOR("Larry Finger "); diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h index e19a20a..56aee06 100644 --- a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h +++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h @@ -2,10 +2,10 @@ * Definitions for RTL8187 hardware * * Copyright 2007 Michael Wu - * Copyright 2007 Andrea Merello + * Copyright 2007 Andrea Merello * * Based on the r8187 driver, which is: - * Copyright 2005 Andrea Merello , et al. + * Copyright 2005 Andrea Merello , et al. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c b/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c index f0bf35f..a26193a 100644 --- a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c +++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c @@ -2,10 +2,10 @@ * Radio tuning for RTL8225 on RTL8187 * * Copyright 2007 Michael Wu - * Copyright 2007 Andrea Merello + * Copyright 2007 Andrea Merello * * Based on the r8187 driver, which is: - * Copyright 2005 Andrea Merello , et al. + * Copyright 2005 Andrea Merello , et al. * * Magic delays, register offsets, and phy value tables below are * taken from the original r8187 driver sources. 
Thanks to Realtek diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8225.h index 20c5b6e..141afb0 100644 --- a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.h +++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8225.h @@ -2,10 +2,10 @@ * Radio tuning definitions for RTL8225 on RTL8187 * * Copyright 2007 Michael Wu - * Copyright 2007 Andrea Merello + * Copyright 2007 Andrea Merello * * Based on the r8187 driver, which is: - * Copyright 2005 Andrea Merello , et al. + * Copyright 2005 Andrea Merello , et al. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/drivers/net/wireless/rtl818x/rtl818x.h b/drivers/net/wireless/rtl818x/rtl818x.h index 1615f63..ce23dfd 100644 --- a/drivers/net/wireless/rtl818x/rtl818x.h +++ b/drivers/net/wireless/rtl818x/rtl818x.h @@ -2,10 +2,10 @@ * Definitions for RTL818x hardware * * Copyright 2007 Michael Wu - * Copyright 2007 Andrea Merello + * Copyright 2007 Andrea Merello * * Based on the r8187 driver, which is: - * Copyright 2005 Andrea Merello , et al. + * Copyright 2005 Andrea Merello , et al. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211.h b/drivers/staging/rtl8187se/ieee80211/ieee80211.h index 8fc9f58..7f01549 100644 --- a/drivers/staging/rtl8187se/ieee80211/ieee80211.h +++ b/drivers/staging/rtl8187se/ieee80211/ieee80211.h @@ -14,7 +14,7 @@ * Copyright (c) 2004, Intel Corporation * * Modified for Realtek's wi-fi cards by Andrea Merello - * + * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c index d5df0d6..10b2210 100644 --- a/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c +++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c @@ -14,7 +14,7 @@ ****************************************************************************** Few modifications for Realtek's Wi-Fi drivers by - Andrea Merello + Andrea Merello A special thanks goes to Realtek for their support ! diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c index 00f9af0..b65db54 100644 --- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c +++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c @@ -1,5 +1,5 @@ /* IEEE 802.11 SoftMAC layer - * Copyright (c) 2005 Andrea Merello + * Copyright (c) 2005 Andrea Merello * * Mostly extracted from the rtl8180-sa2400 driver for the * in-kernel generic ieee802.11 stack. diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c index d9add53..e528206 100644 --- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c +++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c @@ -1,5 +1,5 @@ /* IEEE 802.11 SoftMAC layer - * Copyright (c) 2005 Andrea Merello + * Copyright (c) 2005 Andrea Merello * * Mostly extracted from the rtl8180-sa2400 driver for the * in-kernel generic ieee802.11 stack. 
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c index 89ed86e..b346653 100644 --- a/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c +++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c @@ -25,7 +25,7 @@ ****************************************************************************** Few modifications for Realtek's Wi-Fi drivers by - Andrea Merello + Andrea Merello A special thanks goes to Realtek for their support ! diff --git a/drivers/staging/rtl8187se/r8180.h b/drivers/staging/rtl8187se/r8180.h index edacc80..d052f4a 100644 --- a/drivers/staging/rtl8187se/r8180.h +++ b/drivers/staging/rtl8187se/r8180.h @@ -1,6 +1,6 @@ /* This is part of rtl8180 OpenSource driver. - Copyright (C) Andrea Merello 2004-2005 + Copyright (C) Andrea Merello 2004-2005 Released under the terms of GPL (General Public Licence) Parts of this driver are based on the GPL part of the diff --git a/drivers/staging/rtl8187se/r8180_93cx6.h b/drivers/staging/rtl8187se/r8180_93cx6.h index 79e7391..b52b5b0 100644 --- a/drivers/staging/rtl8187se/r8180_93cx6.h +++ b/drivers/staging/rtl8187se/r8180_93cx6.h @@ -1,6 +1,6 @@ /* This is part of rtl8180 OpenSource driver - Copyright (C) Andrea Merello 2004-2005 + Copyright (C) Andrea Merello 2004-2005 Released under the terms of GPL (General Public Licence) Parts of this driver are based on the GPL part of the official realtek driver diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c index ca69155..fd2bfead 100644 --- a/drivers/staging/rtl8187se/r8180_core.c +++ b/drivers/staging/rtl8187se/r8180_core.c @@ -1,6 +1,6 @@ /* This is part of rtl818x pci OpenSource driver - v 0.1 - Copyright (C) Andrea Merello 2004-2005 + Copyright (C) Andrea Merello 2004-2005 Released under the terms of GPL (General Public License) Parts of this driver are based on the GPL part of the official @@ -70,7 +70,7 @@ static int hwwep; MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, rtl8180_pci_id_tbl); -MODULE_AUTHOR("Andrea Merello "); +MODULE_AUTHOR("Andrea Merello "); MODULE_DESCRIPTION("Linux driver for Realtek RTL8187SE WiFi cards"); module_param_string(ifname, ifname, sizeof(ifname), S_IRUGO|S_IWUSR); diff --git a/drivers/staging/rtl8187se/r8180_hw.h b/drivers/staging/rtl8187se/r8180_hw.h index 5339381..92c05af 100644 --- a/drivers/staging/rtl8187se/r8180_hw.h +++ b/drivers/staging/rtl8187se/r8180_hw.h @@ -1,6 +1,6 @@ /* This is part of rtl8180 OpenSource driver. - Copyright (C) Andrea Merello 2004-2005 + Copyright (C) Andrea Merello 2004-2005 Released under the terms of GPL (General Public Licence) Parts of this driver are based on the GPL part of the diff --git a/drivers/staging/rtl8187se/r8180_rtl8225.h b/drivers/staging/rtl8187se/r8180_rtl8225.h index c6f2128..c94ca07 100644 --- a/drivers/staging/rtl8187se/r8180_rtl8225.h +++ b/drivers/staging/rtl8187se/r8180_rtl8225.h @@ -1,7 +1,7 @@ /* This is part of the rtl8180-sa2400 driver released under the GPL (See file COPYING for details). - Copyright (c) 2005 Andrea Merello + Copyright (c) 2005 Andrea Merello This files contains programming code for the rtl8225 radio frontend. diff --git a/drivers/staging/rtl8187se/r8180_rtl8225z2.c b/drivers/staging/rtl8187se/r8180_rtl8225z2.c index c592f79..9ae96b7 100644 --- a/drivers/staging/rtl8187se/r8180_rtl8225z2.c +++ b/drivers/staging/rtl8187se/r8180_rtl8225z2.c @@ -1,7 +1,7 @@ /* * This is part of the rtl8180-sa2400 driver * released under the GPL (See file COPYING for details). 
- * Copyright (c) 2005 Andrea Merello + * Copyright (c) 2005 Andrea Merello * * This files contains programming code for the rtl8225 * radio frontend. diff --git a/drivers/staging/rtl8187se/r8180_wx.c b/drivers/staging/rtl8187se/r8180_wx.c index 156b758..dab7875 100644 --- a/drivers/staging/rtl8187se/r8180_wx.c +++ b/drivers/staging/rtl8187se/r8180_wx.c @@ -2,7 +2,7 @@ This file contains wireless extension handlers. This is part of rtl8180 OpenSource driver. - Copyright (C) Andrea Merello 2004-2005 + Copyright (C) Andrea Merello 2004-2005 Released under the terms of GPL (General Public Licence) Parts of this driver are based on the GPL part diff --git a/drivers/staging/rtl8187se/r8180_wx.h b/drivers/staging/rtl8187se/r8180_wx.h index 4081914..d471520 100644 --- a/drivers/staging/rtl8187se/r8180_wx.h +++ b/drivers/staging/rtl8187se/r8180_wx.h @@ -1,6 +1,6 @@ /* This is part of rtl8180 OpenSource driver - v 0.3 - Copyright (C) Andrea Merello 2004 + Copyright (C) Andrea Merello 2004 Released under the terms of GPL (General Public Licence) Parts of this driver are based on the GPL part of the official realtek driver diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c index 50c7bb7..74fbd70 100644 --- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c +++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c @@ -2,7 +2,7 @@ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved. * * Based on the r8180 driver, which is: - * Copyright 2004-2005 Andrea Merello , et al. + * Copyright 2004-2005 Andrea Merello , et al. * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h index b9b3b52..dbe0e1c 100644 --- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h +++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h @@ -2,7 +2,7 @@ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved. * * Based on the r8180 driver, which is: - * Copyright 2004-2005 Andrea Merello , et al. + * Copyright 2004-2005 Andrea Merello , et al. * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c index baf3b63..fa5603a 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c @@ -2,7 +2,7 @@ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved. * * Based on the r8180 driver, which is: - * Copyright 2004-2005 Andrea Merello , et al. + * Copyright 2004-2005 Andrea Merello , et al. * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h index fa607f9..7d075d3 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h @@ -2,7 +2,7 @@ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved. * * Based on the r8180 driver, which is: - * Copyright 2004-2005 Andrea Merello , et al. + * Copyright 2004-2005 Andrea Merello , et al. 
* This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c index 2b6c61c..8d45c8d 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c @@ -2,7 +2,7 @@ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved. * * Based on the r8180 driver, which is: - * Copyright 2004-2005 Andrea Merello , et al. + * Copyright 2004-2005 Andrea Merello , et al. * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h index 87d4d34..9de1dc3 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h @@ -2,7 +2,7 @@ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved. * * Based on the r8180 driver, which is: - * Copyright 2004-2005 Andrea Merello , et al. + * Copyright 2004-2005 Andrea Merello , et al. * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c index c1ccff4..a6778e0 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c @@ -2,7 +2,7 @@ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved. * * Based on the r8180 driver, which is: - * Copyright 2004-2005 Andrea Merello , et al. + * Copyright 2004-2005 Andrea Merello , et al. * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h index 9452e16..adea2b4 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h @@ -2,7 +2,7 @@ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved. * * Based on the r8180 driver, which is: - * Copyright 2004-2005 Andrea Merello , et al. + * Copyright 2004-2005 Andrea Merello , et al. * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c index 0cfb3ec..529ea54 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c @@ -2,7 +2,7 @@ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved. * * Based on the r8180 driver, which is: - * Copyright 2004-2005 Andrea Merello , et al. + * Copyright 2004-2005 Andrea Merello , et al. * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. 
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c index 5abbee3..2ad92ee 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c @@ -2,7 +2,7 @@ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved. * * Based on the r8180 driver, which is: - * Copyright 2004-2005 Andrea Merello , et al. + * Copyright 2004-2005 Andrea Merello , et al. * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h index 28c7da6..356aec4 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h @@ -2,7 +2,7 @@ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved. * * Based on the r8180 driver, which is: - * Copyright 2004-2005 Andrea Merello , et al. + * Copyright 2004-2005 Andrea Merello , et al. * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c index c9a7c56..a8c2ade 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c @@ -2,7 +2,7 @@ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved. * * Based on the r8180 driver, which is: - * Copyright 2004-2005 Andrea Merello , et al. + * Copyright 2004-2005 Andrea Merello , et al. * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h index df79d6c..962f2e5 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h @@ -2,7 +2,7 @@ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved. * * Based on the r8180 driver, which is: - * Copyright 2004-2005 Andrea Merello , et al. + * Copyright 2004-2005 Andrea Merello , et al. * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h index 3485ef1..05ef49f 100644 --- a/drivers/staging/rtl8192e/rtllib.h +++ b/drivers/staging/rtl8192e/rtllib.h @@ -14,7 +14,7 @@ * Copyright (c) 2004, Intel Corporation * * Modified for Realtek's wi-fi cards by Andrea Merello - * + * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/drivers/staging/rtl8192e/rtllib_debug.h b/drivers/staging/rtl8192e/rtllib_debug.h index 2bfc115..c59f67b 100644 --- a/drivers/staging/rtl8192e/rtllib_debug.h +++ b/drivers/staging/rtl8192e/rtllib_debug.h @@ -2,7 +2,7 @@ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved. * * Based on the r8180 driver, which is: - * Copyright 2004-2005 Andrea Merello , et al. + * Copyright 2004-2005 Andrea Merello , et al. 
* This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c index e75364e..96aa3a2 100644 --- a/drivers/staging/rtl8192e/rtllib_rx.c +++ b/drivers/staging/rtl8192e/rtllib_rx.c @@ -14,7 +14,7 @@ ****************************************************************************** Few modifications for Realtek's Wi-Fi drivers by - Andrea Merello + Andrea Merello A special thanks goes to Realtek for their support ! diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c index aefffac..0cbf6f5 100644 --- a/drivers/staging/rtl8192e/rtllib_softmac.c +++ b/drivers/staging/rtl8192e/rtllib_softmac.c @@ -1,5 +1,5 @@ /* IEEE 802.11 SoftMAC layer - * Copyright (c) 2005 Andrea Merello + * Copyright (c) 2005 Andrea Merello * * Mostly extracted from the rtl8180-sa2400 driver for the * in-kernel generic ieee802.11 stack. diff --git a/drivers/staging/rtl8192e/rtllib_softmac_wx.c b/drivers/staging/rtl8192e/rtllib_softmac_wx.c index 740cf85..e6af8cf 100644 --- a/drivers/staging/rtl8192e/rtllib_softmac_wx.c +++ b/drivers/staging/rtl8192e/rtllib_softmac_wx.c @@ -1,5 +1,5 @@ /* IEEE 802.11 SoftMAC layer - * Copyright (c) 2005 Andrea Merello + * Copyright (c) 2005 Andrea Merello * * Mostly extracted from the rtl8180-sa2400 driver for the * in-kernel generic ieee802.11 stack. diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c index 759d7c7..1cc6a9d 100644 --- a/drivers/staging/rtl8192e/rtllib_tx.c +++ b/drivers/staging/rtl8192e/rtllib_tx.c @@ -25,7 +25,7 @@ ****************************************************************************** Few modifications for Realtek's Wi-Fi drivers by - Andrea Merello + Andrea Merello A special thanks goes to Realtek for their support ! diff --git a/drivers/staging/rtl8192u/authors b/drivers/staging/rtl8192u/authors index b08bbae..0fab112 100644 --- a/drivers/staging/rtl8192u/authors +++ b/drivers/staging/rtl8192u/authors @@ -1 +1 @@ -Andrea Merello +Andrea Merello diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h index c9f3bb3..bc64f05 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h @@ -14,7 +14,7 @@ * Copyright (c) 2004, Intel Corporation * * Modified for Realtek's wi-fi cards by Andrea Merello - * + * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c index a6b1840..59900bf 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c @@ -14,7 +14,7 @@ ****************************************************************************** Few modifications for Realtek's Wi-Fi drivers by - Andrea Merello + Andrea Merello A special thanks goes to Realtek for their support ! 
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c index 8a0075d..5fd6969 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c @@ -1,5 +1,5 @@ /* IEEE 802.11 SoftMAC layer - * Copyright (c) 2005 Andrea Merello + * Copyright (c) 2005 Andrea Merello * * Mostly extracted from the rtl8180-sa2400 driver for the * in-kernel generic ieee802.11 stack. diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c index 60746b8..7b7d929 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c @@ -1,5 +1,5 @@ /* IEEE 802.11 SoftMAC layer - * Copyright (c) 2005 Andrea Merello + * Copyright (c) 2005 Andrea Merello * * Mostly extracted from the rtl8180-sa2400 driver for the * in-kernel generic ieee802.11 stack. diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c index 9955042..a7bcc64f 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c @@ -25,7 +25,7 @@ ****************************************************************************** Few modifications for Realtek's Wi-Fi drivers by - Andrea Merello + Andrea Merello A special thanks goes to Realtek for their support ! diff --git a/drivers/staging/rtl8192u/r8180_93cx6.c b/drivers/staging/rtl8192u/r8180_93cx6.c index d219998..c61729b 100644 --- a/drivers/staging/rtl8192u/r8180_93cx6.c +++ b/drivers/staging/rtl8192u/r8180_93cx6.c @@ -3,7 +3,7 @@ memory is addressed by 16 bits words. This is part of rtl8180 OpenSource driver. - Copyright (C) Andrea Merello 2004 + Copyright (C) Andrea Merello 2004 Released under the terms of GPL (General Public Licence) Parts of this driver are based on the GPL part of the diff --git a/drivers/staging/rtl8192u/r8180_93cx6.h b/drivers/staging/rtl8192u/r8180_93cx6.h index 5cea51e..ee55dbf 100644 --- a/drivers/staging/rtl8192u/r8180_93cx6.h +++ b/drivers/staging/rtl8192u/r8180_93cx6.h @@ -1,6 +1,6 @@ /* This is part of rtl8187 OpenSource driver - Copyright (C) Andrea Merello 2004-2005 + Copyright (C) Andrea Merello 2004-2005 Released under the terms of GPL (General Public Licence) Parts of this driver are based on the GPL part of the official realtek driver diff --git a/drivers/staging/rtl8192u/r8180_pm.c b/drivers/staging/rtl8192u/r8180_pm.c index 0c58d0e..999968d 100644 --- a/drivers/staging/rtl8192u/r8180_pm.c +++ b/drivers/staging/rtl8192u/r8180_pm.c @@ -5,7 +5,7 @@ does not do anything useful. This is part of rtl8180 OpenSource driver. - Copyright (C) Andrea Merello 2004 + Copyright (C) Andrea Merello 2004 Released under the terms of GPL (General Public Licence) */ diff --git a/drivers/staging/rtl8192u/r8180_pm.h b/drivers/staging/rtl8192u/r8180_pm.h index 52d6fba..4be63da 100644 --- a/drivers/staging/rtl8192u/r8180_pm.h +++ b/drivers/staging/rtl8192u/r8180_pm.h @@ -5,7 +5,7 @@ does not do anything useful. This is part of rtl8180 OpenSource driver. 
- Copyright (C) Andrea Merello 2004 + Copyright (C) Andrea Merello 2004 Released under the terms of GPL (General Public Licence) */ diff --git a/drivers/staging/rtl8192u/r8190_rtl8256.h b/drivers/staging/rtl8192u/r8190_rtl8256.h index b64dd66..592e780 100644 --- a/drivers/staging/rtl8192u/r8190_rtl8256.h +++ b/drivers/staging/rtl8192u/r8190_rtl8256.h @@ -1,7 +1,7 @@ /* This is part of the rtl8180-sa2400 driver released under the GPL (See file COPYING for details). - Copyright (c) 2005 Andrea Merello + Copyright (c) 2005 Andrea Merello This files contains programming code for the rtl8256 radio frontend. diff --git a/drivers/staging/rtl8192u/r8192U.h b/drivers/staging/rtl8192u/r8192U.h index 338e7bc..b484ee1 100644 --- a/drivers/staging/rtl8192u/r8192U.h +++ b/drivers/staging/rtl8192u/r8192U.h @@ -1,6 +1,6 @@ /* * This is part of rtl8187 OpenSource driver. - * Copyright (C) Andrea Merello 2004-2005 + * Copyright (C) Andrea Merello 2004-2005 * Released under the terms of GPL (General Public Licence) * * Parts of this driver are based on the GPL part of the diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c index 14c14c2..cd0946d 100644 --- a/drivers/staging/rtl8192u/r8192U_core.c +++ b/drivers/staging/rtl8192u/r8192U_core.c @@ -3,7 +3,7 @@ * Linux device driver for RTL8192U * * Based on the r8187 driver, which is: - * Copyright 2004-2005 Andrea Merello , et al. + * Copyright 2004-2005 Andrea Merello , et al. * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. diff --git a/drivers/staging/rtl8192u/r8192U_hw.h b/drivers/staging/rtl8192u/r8192U_hw.h index 7e612aa..dd07a73 100644 --- a/drivers/staging/rtl8192u/r8192U_hw.h +++ b/drivers/staging/rtl8192u/r8192U_hw.h @@ -1,6 +1,6 @@ /* This is part of rtl8187 OpenSource driver. - Copyright (C) Andrea Merello 2004-2005 + Copyright (C) Andrea Merello 2004-2005 Released under the terms of GPL (General Public Licence) Parts of this driver are based on the GPL part of the diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c index 3e25763..61f6620 100644 --- a/drivers/staging/rtl8192u/r8192U_wx.c +++ b/drivers/staging/rtl8192u/r8192U_wx.c @@ -2,7 +2,7 @@ This file contains wireless extension handlers. This is part of rtl8180 OpenSource driver. - Copyright (C) Andrea Merello 2004-2005 + Copyright (C) Andrea Merello 2004-2005 Released under the terms of GPL (General Public Licence) Parts of this driver are based on the GPL part diff --git a/drivers/staging/rtl8192u/r8192U_wx.h b/drivers/staging/rtl8192u/r8192U_wx.h index 9f6b105..ae7a617 100644 --- a/drivers/staging/rtl8192u/r8192U_wx.h +++ b/drivers/staging/rtl8192u/r8192U_wx.h @@ -1,6 +1,6 @@ /* This is part of rtl8180 OpenSource driver - v 0.3 - Copyright (C) Andrea Merello 2004 + Copyright (C) Andrea Merello 2004 Released under the terms of GPL (General Public Licence) Parts of this driver are based on the GPL part of the official realtek driver -- cgit v0.10.2 From 6e956da2027c767859128b9bfef085cf2a8e233b Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Mon, 26 Aug 2013 15:18:53 +0200 Subject: rt2800: fix wrong TX power compensation We should not do temperature compensation on devices without EXTERNAL_TX_ALC bit set (called DynamicTxAgcControl on vendor driver). 
Such devices can have totally bogus TSSI parameters on the EEPROM, but still threaded by us as valid and result doing wrong TX power calculations. This fix inability to connect to AP on slightly longer distance on some Ralink chips/devices. Reported-and-tested-by: Fabien ADAM Cc: stable@vger.kernel.org Signed-off-by: Stanislaw Gruszka Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 20243dc..660d7d8 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -3408,6 +3408,13 @@ static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev) int i; /* + * First check if temperature compensation is supported. + */ + rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); + if (!rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_EXTERNAL_TX_ALC)) + return 0; + + /* * Read TSSI boundaries for temperature compensation from * the EEPROM. * -- cgit v0.10.2 From 148cbb53ace32f584d208764c7f7e6aa8edb970c Mon Sep 17 00:00:00 2001 From: Boris BREZILLON Date: Thu, 22 Aug 2013 17:57:28 +0200 Subject: net/cadence/macb: add support for dt phy definition The macb driver only handle PHY description through platform_data (macb_platform_data). Thus, when using dt you cannot define phy properties like phy address or phy irq pin. This patch makes use of the of_mdiobus_register to add support for phy device definition using dt. A fallback to the autoscan procedure is added in case there is no phy devices defined in dt. Signed-off-by: Boris BREZILLON Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index e866608..fe06ab0 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -314,6 +315,7 @@ static int macb_mii_probe(struct net_device *dev) int macb_mii_init(struct macb *bp) { struct macb_platform_data *pdata; + struct device_node *np; int err = -ENXIO, i; /* Enable management port */ @@ -335,21 +337,46 @@ int macb_mii_init(struct macb *bp) bp->mii_bus->parent = &bp->dev->dev; pdata = bp->pdev->dev.platform_data; - if (pdata) - bp->mii_bus->phy_mask = pdata->phy_mask; - bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); if (!bp->mii_bus->irq) { err = -ENOMEM; goto err_out_free_mdiobus; } - for (i = 0; i < PHY_MAX_ADDR; i++) - bp->mii_bus->irq[i] = PHY_POLL; - dev_set_drvdata(&bp->dev->dev, bp->mii_bus); - if (mdiobus_register(bp->mii_bus)) + np = bp->pdev->dev.of_node; + if (np) { + /* try dt phy registration */ + err = of_mdiobus_register(bp->mii_bus, np); + + /* fallback to standard phy registration if no phy were + found during dt phy registration */ + if (!err && !phy_find_first(bp->mii_bus)) { + for (i = 0; i < PHY_MAX_ADDR; i++) { + struct phy_device *phydev; + + phydev = mdiobus_scan(bp->mii_bus, i); + if (IS_ERR(phydev)) { + err = PTR_ERR(phydev); + break; + } + } + + if (err) + goto err_out_unregister_bus; + } + } else { + for (i = 0; i < PHY_MAX_ADDR; i++) + bp->mii_bus->irq[i] = PHY_POLL; + + if (pdata) + bp->mii_bus->phy_mask = pdata->phy_mask; + + err = mdiobus_register(bp->mii_bus); + } + + if (err) goto err_out_free_mdio_irq; if (macb_mii_probe(bp->dev) != 0) { -- cgit v0.10.2 From 8c038e7e14b1c5f156745e3c4df0a3aa46173dd9 Mon Sep 17 00:00:00 2001 From: Boris BREZILLON Date: Thu, 22 Aug 2013 17:58:29 +0200 Subject: ARM: at91/dt: define phy available on sama5d3 mother board This 
patch describe the phy used on atmel sama5d3 mother board: - phy address - phy interrupt pin Signed-off-by: Boris BREZILLON Signed-off-by: David S. Miller diff --git a/arch/arm/boot/dts/sama5d3xmb.dtsi b/arch/arm/boot/dts/sama5d3xmb.dtsi index 8a9e05d..e9521d5 100644 --- a/arch/arm/boot/dts/sama5d3xmb.dtsi +++ b/arch/arm/boot/dts/sama5d3xmb.dtsi @@ -81,6 +81,14 @@ macb1: ethernet@f802c000 { phy-mode = "rmii"; + + #address-cells = <1>; + #size-cells = <0>; + phy0: ethernet-phy@0 { + interrupt-parent = <&pioE>; + interrupts = <30 IRQ_TYPE_EDGE_FALLING>; + reg = <1>; + }; }; pinctrl@fffff200 { -- cgit v0.10.2 From a175a723301a8a4a9fedf9ce5b8ca586e7a97b40 Mon Sep 17 00:00:00 2001 From: Joe Stringer Date: Thu, 22 Aug 2013 12:30:48 -0700 Subject: openvswitch: Add SCTP support This patch adds support for rewriting SCTP src,dst ports similar to the functionality already available for TCP/UDP. Rewriting SCTP ports is expensive due to double-recalculation of the SCTP checksums; this is performed to ensure that packets traversing OVS with invalid checksums will continue to the destination with any checksum corruption intact. Reviewed-by: Simon Horman Signed-off-by: Joe Stringer Signed-off-by: Ben Pfaff Signed-off-by: Jesse Gross diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index de1fa5d..a74d375 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -259,6 +259,7 @@ enum ovs_key_attr { OVS_KEY_ATTR_ND, /* struct ovs_key_nd */ OVS_KEY_ATTR_SKB_MARK, /* u32 skb mark */ OVS_KEY_ATTR_TUNNEL, /* Nested set of ovs_tunnel attributes */ + OVS_KEY_ATTR_SCTP, /* struct ovs_key_sctp */ #ifdef __KERNEL__ OVS_KEY_ATTR_IPV4_TUNNEL, /* struct ovs_key_ipv4_tunnel */ @@ -333,6 +334,11 @@ struct ovs_key_udp { __be16 udp_dst; }; +struct ovs_key_sctp { + __be16 sctp_src; + __be16 sctp_dst; +}; + struct ovs_key_icmp { __u8 icmp_type; __u8 icmp_code; diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig index bed30e6..6ecf491 100644 --- a/net/openvswitch/Kconfig +++ b/net/openvswitch/Kconfig @@ -4,6 +4,7 @@ config OPENVSWITCH tristate "Open vSwitch" + select LIBCRC32C ---help--- Open vSwitch is a multilayer Ethernet switch targeted at virtualized environments. In addition to supporting a variety of features diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 1f68022..65cfaa8 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -31,6 +32,7 @@ #include #include #include +#include #include "datapath.h" #include "vport.h" @@ -352,6 +354,39 @@ static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key) return 0; } +static int set_sctp(struct sk_buff *skb, + const struct ovs_key_sctp *sctp_port_key) +{ + struct sctphdr *sh; + int err; + unsigned int sctphoff = skb_transport_offset(skb); + + err = make_writable(skb, sctphoff + sizeof(struct sctphdr)); + if (unlikely(err)) + return err; + + sh = sctp_hdr(skb); + if (sctp_port_key->sctp_src != sh->source || + sctp_port_key->sctp_dst != sh->dest) { + __le32 old_correct_csum, new_csum, old_csum; + + old_csum = sh->checksum; + old_correct_csum = sctp_compute_cksum(skb, sctphoff); + + sh->source = sctp_port_key->sctp_src; + sh->dest = sctp_port_key->sctp_dst; + + new_csum = sctp_compute_cksum(skb, sctphoff); + + /* Carry any checksum errors through. 
*/ + sh->checksum = old_csum ^ old_correct_csum ^ new_csum; + + skb->rxhash = 0; + } + + return 0; +} + static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port) { struct vport *vport; @@ -461,6 +496,10 @@ static int execute_set_action(struct sk_buff *skb, case OVS_KEY_ATTR_UDP: err = set_udp(skb, nla_data(nested_attr)); break; + + case OVS_KEY_ATTR_SCTP: + err = set_sctp(skb, nla_data(nested_attr)); + break; } return err; diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index d29cd9a..2aa13bd 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -712,6 +712,12 @@ static int validate_set(const struct nlattr *a, return validate_tp_port(flow_key); + case OVS_KEY_ATTR_SCTP: + if (flow_key->ip.proto != IPPROTO_SCTP) + return -EINVAL; + + return validate_tp_port(flow_key); + default: return -EINVAL; } diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 1fceb96..2b47855 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -129,6 +130,7 @@ static bool ovs_match_validate(const struct sw_flow_match *match, | (1 << OVS_KEY_ATTR_IPV6) | (1 << OVS_KEY_ATTR_TCP) | (1 << OVS_KEY_ATTR_UDP) + | (1 << OVS_KEY_ATTR_SCTP) | (1 << OVS_KEY_ATTR_ICMP) | (1 << OVS_KEY_ATTR_ICMPV6) | (1 << OVS_KEY_ATTR_ARP) @@ -159,6 +161,12 @@ static bool ovs_match_validate(const struct sw_flow_match *match, mask_allowed |= 1 << OVS_KEY_ATTR_UDP; } + if (match->key->ip.proto == IPPROTO_SCTP) { + key_expected |= 1 << OVS_KEY_ATTR_SCTP; + if (match->mask && (match->mask->key.ip.proto == 0xff)) + mask_allowed |= 1 << OVS_KEY_ATTR_SCTP; + } + if (match->key->ip.proto == IPPROTO_TCP) { key_expected |= 1 << OVS_KEY_ATTR_TCP; if (match->mask && (match->mask->key.ip.proto == 0xff)) @@ -185,6 +193,12 @@ static bool ovs_match_validate(const struct sw_flow_match *match, mask_allowed |= 1 << OVS_KEY_ATTR_UDP; } + if (match->key->ip.proto == IPPROTO_SCTP) { + key_expected |= 1 << OVS_KEY_ATTR_SCTP; + if (match->mask && (match->mask->key.ip.proto == 0xff)) + mask_allowed |= 1 << OVS_KEY_ATTR_SCTP; + } + if (match->key->ip.proto == IPPROTO_TCP) { key_expected |= 1 << OVS_KEY_ATTR_TCP; if (match->mask && (match->mask->key.ip.proto == 0xff)) @@ -280,6 +294,12 @@ static bool udphdr_ok(struct sk_buff *skb) sizeof(struct udphdr)); } +static bool sctphdr_ok(struct sk_buff *skb) +{ + return pskb_may_pull(skb, skb_transport_offset(skb) + + sizeof(struct sctphdr)); +} + static bool icmphdr_ok(struct sk_buff *skb) { return pskb_may_pull(skb, skb_transport_offset(skb) + @@ -891,6 +911,12 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key) key->ipv4.tp.src = udp->source; key->ipv4.tp.dst = udp->dest; } + } else if (key->ip.proto == IPPROTO_SCTP) { + if (sctphdr_ok(skb)) { + struct sctphdr *sctp = sctp_hdr(skb); + key->ipv4.tp.src = sctp->source; + key->ipv4.tp.dst = sctp->dest; + } } else if (key->ip.proto == IPPROTO_ICMP) { if (icmphdr_ok(skb)) { struct icmphdr *icmp = icmp_hdr(skb); @@ -953,6 +979,12 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key) key->ipv6.tp.src = udp->source; key->ipv6.tp.dst = udp->dest; } + } else if (key->ip.proto == NEXTHDR_SCTP) { + if (sctphdr_ok(skb)) { + struct sctphdr *sctp = sctp_hdr(skb); + key->ipv6.tp.src = sctp->source; + key->ipv6.tp.dst = sctp->dest; + } } else if (key->ip.proto == NEXTHDR_ICMP) { if (icmp6hdr_ok(skb)) { error = parse_icmpv6(skb, key, nh_len); @@ -1087,6 +1119,7 @@ 
const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = { [OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6), [OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp), [OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp), + [OVS_KEY_ATTR_SCTP] = sizeof(struct ovs_key_sctp), [OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp), [OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6), [OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp), @@ -1500,6 +1533,24 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs, attrs &= ~(1 << OVS_KEY_ATTR_UDP); } + if (attrs & (1 << OVS_KEY_ATTR_SCTP)) { + const struct ovs_key_sctp *sctp_key; + + sctp_key = nla_data(a[OVS_KEY_ATTR_SCTP]); + if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) { + SW_FLOW_KEY_PUT(match, ipv4.tp.src, + sctp_key->sctp_src, is_mask); + SW_FLOW_KEY_PUT(match, ipv4.tp.dst, + sctp_key->sctp_dst, is_mask); + } else { + SW_FLOW_KEY_PUT(match, ipv6.tp.src, + sctp_key->sctp_src, is_mask); + SW_FLOW_KEY_PUT(match, ipv6.tp.dst, + sctp_key->sctp_dst, is_mask); + } + attrs &= ~(1 << OVS_KEY_ATTR_SCTP); + } + if (attrs & (1 << OVS_KEY_ATTR_ICMP)) { const struct ovs_key_icmp *icmp_key; @@ -1843,6 +1894,20 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, udp_key->udp_src = output->ipv6.tp.src; udp_key->udp_dst = output->ipv6.tp.dst; } + } else if (swkey->ip.proto == IPPROTO_SCTP) { + struct ovs_key_sctp *sctp_key; + + nla = nla_reserve(skb, OVS_KEY_ATTR_SCTP, sizeof(*sctp_key)); + if (!nla) + goto nla_put_failure; + sctp_key = nla_data(nla); + if (swkey->eth.type == htons(ETH_P_IP)) { + sctp_key->sctp_src = swkey->ipv4.tp.src; + sctp_key->sctp_dst = swkey->ipv4.tp.dst; + } else if (swkey->eth.type == htons(ETH_P_IPV6)) { + sctp_key->sctp_src = swkey->ipv6.tp.src; + sctp_key->sctp_dst = swkey->ipv6.tp.dst; + } } else if (swkey->eth.type == htons(ETH_P_IP) && swkey->ip.proto == IPPROTO_ICMP) { struct ovs_key_icmp *icmp_key; diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h index 9674e45..d08dcf7 100644 --- a/net/openvswitch/flow.h +++ b/net/openvswitch/flow.h @@ -99,8 +99,8 @@ struct sw_flow_key { } addr; union { struct { - __be16 src; /* TCP/UDP source port. */ - __be16 dst; /* TCP/UDP destination port. */ + __be16 src; /* TCP/UDP/SCTP source port. */ + __be16 dst; /* TCP/UDP/SCTP destination port. */ } tp; struct { u8 sha[ETH_ALEN]; /* ARP source hardware address. */ @@ -115,8 +115,8 @@ struct sw_flow_key { } addr; __be32 label; /* IPv6 flow label. */ struct { - __be16 src; /* TCP/UDP source port. */ - __be16 dst; /* TCP/UDP destination port. */ + __be16 src; /* TCP/UDP/SCTP source port. */ + __be16 dst; /* TCP/UDP/SCTP destination port. */ } tp; struct { struct in6_addr target; /* ND target address. */ -- cgit v0.10.2 From 02237373b1c61a09a4db329545e39cffc48910d5 Mon Sep 17 00:00:00 2001 From: Andy Zhou Date: Thu, 22 Aug 2013 12:12:57 -0700 Subject: openvswitch: Rename key_len to key_end Key_end is a better name describing the ending boundary than key_len. Rename those variables to make it less confusing. 
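(For context on the rename: key_start and key_end are byte offsets bounding the significant portion of struct sw_flow_key, not a byte count. The sketch below illustrates that semantic, mirroring ovs_flow_hash() and __cmp_key() from the hunks that follow; it is not part of the patch, and the helper names are invented for illustration.)

#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hash only the meaningful slice [key_start, key_end) of the flow key. */
static u32 range_hash(const struct sw_flow_key *key, int key_start, int key_end)
{
	return jhash2((const u32 *)((const u8 *)key + key_start),
		      DIV_ROUND_UP(key_end - key_start, sizeof(u32)), 0);
}

/* Compare two flow keys over the same byte range. */
static bool range_equal(const struct sw_flow_key *a,
			const struct sw_flow_key *b,
			int key_start, int key_end)
{
	return !memcmp((const u8 *)a + key_start,
		       (const u8 *)b + key_start,
		       key_end - key_start);
}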
Signed-off-by: Andy Zhou Signed-off-by: Jesse Gross diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 2b47855..80bcb96 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -997,10 +997,11 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key) return 0; } -static u32 ovs_flow_hash(const struct sw_flow_key *key, int key_start, int key_len) +static u32 ovs_flow_hash(const struct sw_flow_key *key, int key_start, + int key_end) { return jhash2((u32 *)((u8 *)key + key_start), - DIV_ROUND_UP(key_len - key_start, sizeof(u32)), 0); + DIV_ROUND_UP(key_end - key_start, sizeof(u32)), 0); } static int flow_key_start(const struct sw_flow_key *key) @@ -1012,31 +1013,31 @@ static int flow_key_start(const struct sw_flow_key *key) } static bool __cmp_key(const struct sw_flow_key *key1, - const struct sw_flow_key *key2, int key_start, int key_len) + const struct sw_flow_key *key2, int key_start, int key_end) { return !memcmp((u8 *)key1 + key_start, - (u8 *)key2 + key_start, (key_len - key_start)); + (u8 *)key2 + key_start, (key_end - key_start)); } static bool __flow_cmp_key(const struct sw_flow *flow, - const struct sw_flow_key *key, int key_start, int key_len) + const struct sw_flow_key *key, int key_start, int key_end) { - return __cmp_key(&flow->key, key, key_start, key_len); + return __cmp_key(&flow->key, key, key_start, key_end); } static bool __flow_cmp_unmasked_key(const struct sw_flow *flow, - const struct sw_flow_key *key, int key_start, int key_len) + const struct sw_flow_key *key, int key_start, int key_end) { - return __cmp_key(&flow->unmasked_key, key, key_start, key_len); + return __cmp_key(&flow->unmasked_key, key, key_start, key_end); } bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow, - const struct sw_flow_key *key, int key_len) + const struct sw_flow_key *key, int key_end) { int key_start; key_start = flow_key_start(key); - return __flow_cmp_unmasked_key(flow, key, key_start, key_len); + return __flow_cmp_unmasked_key(flow, key, key_start, key_end); } @@ -1044,11 +1045,11 @@ struct sw_flow *ovs_flow_lookup_unmasked_key(struct flow_table *table, struct sw_flow_match *match) { struct sw_flow_key *unmasked = match->key; - int key_len = match->range.end; + int key_end = match->range.end; struct sw_flow *flow; flow = ovs_flow_lookup(table, unmasked); - if (flow && (!ovs_flow_cmp_unmasked_key(flow, unmasked, key_len))) + if (flow && (!ovs_flow_cmp_unmasked_key(flow, unmasked, key_end))) flow = NULL; return flow; @@ -1061,16 +1062,16 @@ static struct sw_flow *ovs_masked_flow_lookup(struct flow_table *table, struct sw_flow *flow; struct hlist_head *head; int key_start = mask->range.start; - int key_len = mask->range.end; + int key_end = mask->range.end; u32 hash; struct sw_flow_key masked_key; ovs_flow_key_mask(&masked_key, flow_key, mask); - hash = ovs_flow_hash(&masked_key, key_start, key_len); + hash = ovs_flow_hash(&masked_key, key_start, key_end); head = find_bucket(table, hash); hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) { if (flow->mask == mask && - __flow_cmp_key(flow, &masked_key, key_start, key_len)) + __flow_cmp_key(flow, &masked_key, key_start, key_end)) return flow; } return NULL; diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h index d08dcf7..e793051 100644 --- a/net/openvswitch/flow.h +++ b/net/openvswitch/flow.h @@ -243,7 +243,7 @@ int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb, const struct ovs_key_ipv4_tunnel *output); bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow, 
- const struct sw_flow_key *key, int key_len); + const struct sw_flow_key *key, int key_end); struct sw_flow_mask { int ref_count; -- cgit v0.10.2 From 14d385b9905920cc0136721316c185c45ee6e26c Mon Sep 17 00:00:00 2001 From: Sucheta Chakraborty Date: Fri, 23 Aug 2013 13:38:25 -0400 Subject: qlcnic: dcb: Query adapter DCB capabilities. o Query adapter DCB capabilities and populate local data structures with relevant information. o Add QLCNIC_DCB to Kconfig for enabling/disabling DCB. Signed-off-by: Sucheta Chakraborty Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig index 0e17972..f59e6be 100644 --- a/drivers/net/ethernet/qlogic/Kconfig +++ b/drivers/net/ethernet/qlogic/Kconfig @@ -45,6 +45,17 @@ config QLCNIC_SRIOV This allows for virtual function acceleration in virtualized environments. +config QLCNIC_DCB + bool "QLOGIC QLCNIC 82XX and 83XX family DCB Support" + depends on QLCNIC && DCB + default y + ---help--- + This configuration parameter enables DCB support in QLE83XX + and QLE82XX Converged Ethernet devices. This allows for DCB + get operations support through rtNetlink interface. Only CEE + mode of DCB is supported. PG and PFC values are related only + to Tx. + config QLGE tristate "QLogic QLGE 10Gb Ethernet Driver Support" depends on PCI diff --git a/drivers/net/ethernet/qlogic/qlcnic/Makefile b/drivers/net/ethernet/qlogic/qlcnic/Makefile index 4b1fb3f..a848d29 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/Makefile +++ b/drivers/net/ethernet/qlogic/qlcnic/Makefile @@ -11,3 +11,5 @@ qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \ qlcnic_minidump.o qlcnic_sriov_common.o qlcnic-$(CONFIG_QLCNIC_SRIOV) += qlcnic_sriov_pf.o + +qlcnic-$(CONFIG_QLCNIC_DCB) += qlcnic_dcb.o diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 3f03856..2196279 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -34,6 +34,7 @@ #include "qlcnic_hdr.h" #include "qlcnic_hw.h" #include "qlcnic_83xx_hw.h" +#include "qlcnic_dcb.h" #define _QLCNIC_LINUX_MAJOR 5 #define _QLCNIC_LINUX_MINOR 3 @@ -959,6 +960,7 @@ struct qlcnic_ipaddr { #define __QLCNIC_SRIOV_CAPABLE 11 #define __QLCNIC_MBX_POLL_ENABLE 12 #define __QLCNIC_DIAG_MODE 13 +#define __QLCNIC_DCB_STATE 14 #define QLCNIC_INTERRUPT_TEST 1 #define QLCNIC_LOOPBACK_TEST 2 @@ -1062,6 +1064,7 @@ struct qlcnic_adapter { struct delayed_work fw_work; struct delayed_work idc_aen_work; struct delayed_work mbx_poll_work; + struct qlcnic_dcb *dcb; struct qlcnic_filter_hash fhash; struct qlcnic_filter_hash rx_fhash; @@ -2092,4 +2095,51 @@ static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter) return status; } + +static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_adapter *adapter) +{ + struct qlcnic_dcb *dcb = adapter->dcb; + + if (dcb && dcb->ops->get_hw_capability) + return dcb->ops->get_hw_capability(adapter); + + return 0; +} + +static inline void qlcnic_dcb_free(struct qlcnic_adapter *adapter) +{ + struct qlcnic_dcb *dcb = adapter->dcb; + + if (dcb && dcb->ops->free) + dcb->ops->free(adapter); +} + +static inline int qlcnic_dcb_attach(struct qlcnic_adapter *adapter) +{ + struct qlcnic_dcb *dcb = adapter->dcb; + + if (dcb && dcb->ops->attach) + return dcb->ops->attach(adapter); + + return 0; +} + +static inline int +qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter, char *buf) +{ + struct qlcnic_dcb *dcb = adapter->dcb; + + if (dcb && 
dcb->ops->query_hw_capability) + return dcb->ops->query_hw_capability(adapter, buf); + + return 0; +} + +static inline void qlcnic_dcb_get_info(struct qlcnic_adapter *adapter) +{ + struct qlcnic_dcb *dcb = adapter->dcb; + + if (dcb && dcb->ops->get_info) + dcb->ops->get_info(adapter); +} #endif /* __QLCNIC_H_ */ diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 6c059f9..d6d1b10 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -67,6 +67,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = { {QLCNIC_CMD_ADD_RCV_RINGS, 130, 26}, {QLCNIC_CMD_CONFIG_VPORT, 4, 4}, {QLCNIC_CMD_BC_EVENT_SETUP, 2, 1}, + {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2}, }; const u32 qlcnic_83xx_ext_reg_tbl[] = { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index fb0ef36..a969ac2 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -635,6 +635,8 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter) if (adapter->portnum == 0) qlcnic_set_drv_version(adapter); + + qlcnic_dcb_get_info(adapter); qlcnic_83xx_idc_attach_driver(adapter); return 0; @@ -2228,6 +2230,9 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac) if (err) goto disable_mbx_intr; + if (adapter->dcb && qlcnic_dcb_attach(adapter)) + qlcnic_clear_dcb_ops(adapter); + /* Periodically monitor device status */ qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work); return 0; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c index d4f0e95..4af3784 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c @@ -39,6 +39,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = { {QLCNIC_CMD_82XX_SET_DRV_VER, 4, 1}, {QLCNIC_CMD_GET_LED_STATUS, 4, 2}, {QLCNIC_CMD_MQ_TX_CONFIG_INTR, 2, 3}, + {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2}, }; static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c new file mode 100644 index 0000000..121e492 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c @@ -0,0 +1,213 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. 
+ */ + +#include "qlcnic.h" + +#define QLC_DCB_MAX_TC 0x8 + +#define QLC_DCB_TSA_SUPPORT(V) (V & 0x1) +#define QLC_DCB_ETS_SUPPORT(V) ((V >> 1) & 0x1) +#define QLC_DCB_VERSION_SUPPORT(V) ((V >> 2) & 0xf) +#define QLC_DCB_MAX_NUM_TC(V) ((V >> 20) & 0xf) +#define QLC_DCB_MAX_NUM_ETS_TC(V) ((V >> 24) & 0xf) +#define QLC_DCB_MAX_NUM_PFC_TC(V) ((V >> 28) & 0xf) + +static void __qlcnic_dcb_free(struct qlcnic_adapter *); +static int __qlcnic_dcb_attach(struct qlcnic_adapter *); +static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *, char *); +static void __qlcnic_dcb_get_info(struct qlcnic_adapter *); + +static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *); + +static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *); + +struct qlcnic_dcb_capability { + bool tsa_capability; + bool ets_capability; + u8 max_num_tc; + u8 max_ets_tc; + u8 max_pfc_tc; + u8 dcb_capability; +}; + +struct qlcnic_dcb_cfg { + struct qlcnic_dcb_capability capability; + u32 version; +}; + +static struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = { + .free = __qlcnic_dcb_free, + .attach = __qlcnic_dcb_attach, + .query_hw_capability = __qlcnic_dcb_query_hw_capability, + .get_info = __qlcnic_dcb_get_info, + + .get_hw_capability = qlcnic_83xx_dcb_get_hw_capability, +}; + +static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = { + .free = __qlcnic_dcb_free, + .attach = __qlcnic_dcb_attach, + .query_hw_capability = __qlcnic_dcb_query_hw_capability, + .get_info = __qlcnic_dcb_get_info, + + .get_hw_capability = qlcnic_82xx_dcb_get_hw_capability, +}; + +void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter) +{ + if (qlcnic_82xx_check(adapter)) + adapter->dcb->ops = &qlcnic_82xx_dcb_ops; + else if (qlcnic_83xx_check(adapter)) + adapter->dcb->ops = &qlcnic_83xx_dcb_ops; +} + +int __qlcnic_register_dcb(struct qlcnic_adapter *adapter) +{ + struct qlcnic_dcb *dcb; + + dcb = kzalloc(sizeof(struct qlcnic_dcb), GFP_ATOMIC); + if (!dcb) + return -ENOMEM; + + adapter->dcb = dcb; + qlcnic_set_dcb_ops(adapter); + + return 0; +} + +static void __qlcnic_dcb_free(struct qlcnic_adapter *adapter) +{ + struct qlcnic_dcb *dcb = adapter->dcb; + + if (!dcb) + return; + + kfree(dcb->cfg); + dcb->cfg = NULL; + kfree(dcb); + adapter->dcb = NULL; +} + +static void __qlcnic_dcb_get_info(struct qlcnic_adapter *adapter) +{ + qlcnic_dcb_get_hw_capability(adapter); +} + +static int __qlcnic_dcb_attach(struct qlcnic_adapter *adapter) +{ + struct qlcnic_dcb *dcb = adapter->dcb; + + dcb->cfg = kzalloc(sizeof(struct qlcnic_dcb_cfg), GFP_ATOMIC); + if (!dcb->cfg) + return -ENOMEM; + + qlcnic_dcb_get_info(adapter); + + return 0; +} + +static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter, + char *buf) +{ + struct qlcnic_cmd_args cmd; + u32 mbx_out; + int err; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_CAP); + if (err) + return err; + + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) { + dev_err(&adapter->pdev->dev, + "Failed to query DCBX capability, err %d\n", err); + } else { + mbx_out = cmd.rsp.arg[1]; + if (buf) + memcpy(buf, &mbx_out, sizeof(u32)); + } + + qlcnic_free_mbx_args(&cmd); + + return err; +} + +static int __qlcnic_dcb_get_capability(struct qlcnic_adapter *adapter, u32 *val) +{ + struct qlcnic_dcb_capability *cap = &adapter->dcb->cfg->capability; + u32 mbx_out; + int err; + + memset(cap, 0, sizeof(struct qlcnic_dcb_capability)); + + err = qlcnic_dcb_query_hw_capability(adapter, (char *)val); + if (err) + return err; + + mbx_out = *val; + if (QLC_DCB_TSA_SUPPORT(mbx_out)) + 
cap->tsa_capability = true; + + if (QLC_DCB_ETS_SUPPORT(mbx_out)) + cap->ets_capability = true; + + cap->max_num_tc = QLC_DCB_MAX_NUM_TC(mbx_out); + cap->max_ets_tc = QLC_DCB_MAX_NUM_ETS_TC(mbx_out); + cap->max_pfc_tc = QLC_DCB_MAX_NUM_PFC_TC(mbx_out); + + if (cap->max_num_tc > QLC_DCB_MAX_TC || + cap->max_ets_tc > cap->max_num_tc || + cap->max_pfc_tc > cap->max_num_tc) { + dev_err(&adapter->pdev->dev, "Invalid DCB configuration\n"); + return -EINVAL; + } + + return err; +} + +static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter) +{ + struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg; + struct qlcnic_dcb_capability *cap; + u32 mbx_out; + int err; + + err = __qlcnic_dcb_get_capability(adapter, &mbx_out); + if (err) + return err; + + cap = &cfg->capability; + cap->dcb_capability = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_LLD_MANAGED; + + if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability) + set_bit(__QLCNIC_DCB_STATE, &adapter->state); + + return err; +} + +static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter) +{ + struct qlcnic_dcb_capability *cap = &adapter->dcb->cfg->capability; + u32 mbx_out; + int err; + + err = __qlcnic_dcb_get_capability(adapter, &mbx_out); + if (err) + return err; + + if (mbx_out & BIT_2) + cap->dcb_capability = DCB_CAP_DCBX_VER_CEE; + if (mbx_out & BIT_3) + cap->dcb_capability |= DCB_CAP_DCBX_VER_IEEE; + if (cap->dcb_capability) + cap->dcb_capability |= DCB_CAP_DCBX_LLD_MANAGED; + + if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability) + set_bit(__QLCNIC_DCB_STATE, &adapter->state); + + return err; +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h new file mode 100644 index 0000000..45dc1fa --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h @@ -0,0 +1,32 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. 
+ */ + +#ifndef __QLCNIC_DCBX_H +#define __QLCNIC_DCBX_H + +void qlcnic_clear_dcb_ops(struct qlcnic_adapter *); + +#ifdef CONFIG_QLCNIC_DCB +int __qlcnic_register_dcb(struct qlcnic_adapter *); +#else +static inline int __qlcnic_register_dcb(struct qlcnic_adapter *adapter) +{ return 0; } +#endif + +struct qlcnic_dcb_ops { + void (*free) (struct qlcnic_adapter *); + int (*attach) (struct qlcnic_adapter *); + int (*query_hw_capability) (struct qlcnic_adapter *, char *); + int (*get_hw_capability) (struct qlcnic_adapter *); + void (*get_info) (struct qlcnic_adapter *); +}; + +struct qlcnic_dcb { + struct qlcnic_dcb_ops *ops; + struct qlcnic_dcb_cfg *cfg; +}; +#endif diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h index 786366c..243018b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h @@ -85,6 +85,7 @@ enum qlcnic_regs { #define QLCNIC_CMD_GET_TEMP_HDR 0x30 #define QLCNIC_CMD_BC_EVENT_SETUP 0x31 #define QLCNIC_CMD_CONFIG_VPORT 0x32 +#define QLCNIC_CMD_DCB_QUERY_CAP 0x34 #define QLCNIC_CMD_GET_MAC_STATS 0x37 #define QLCNIC_CMD_82XX_SET_DRV_VER 0x38 #define QLCNIC_CMD_MQ_TX_CONFIG_INTR 0x39 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 8321d1a..343c6a0 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -2121,6 +2121,17 @@ void qlcnic_set_drv_version(struct qlcnic_adapter *adapter) qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd); } +static int qlcnic_register_dcb(struct qlcnic_adapter *adapter) +{ + return __qlcnic_register_dcb(adapter); +} + +void qlcnic_clear_dcb_ops(struct qlcnic_adapter *adapter) +{ + kfree(adapter->dcb); + adapter->dcb = NULL; +} + static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -2217,6 +2228,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_LIST_HEAD(&adapter->mac_list); + qlcnic_register_dcb(adapter); + if (qlcnic_82xx_check(adapter)) { qlcnic_check_vf(adapter, ent); adapter->portnum = adapter->ahw->pci_func; @@ -2245,6 +2258,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_free_hw; adapter->flags |= QLCNIC_NEED_FLR; + + if (adapter->dcb && qlcnic_dcb_attach(adapter)) + qlcnic_clear_dcb_ops(adapter); + } else if (qlcnic_83xx_check(adapter)) { adapter->max_drv_tx_rings = 1; qlcnic_83xx_check_vf(adapter, ent); @@ -2369,6 +2386,8 @@ static void qlcnic_remove(struct pci_dev *pdev) qlcnic_cancel_idc_work(adapter); ahw = adapter->ahw; + qlcnic_dcb_free(adapter); + unregister_netdev(netdev); qlcnic_sriov_cleanup(adapter); @@ -2411,6 +2430,7 @@ static void qlcnic_remove(struct pci_dev *pdev) destroy_workqueue(adapter->qlcnic_wq); adapter->qlcnic_wq = NULL; } + qlcnic_free_adapter_resources(adapter); kfree(ahw); free_netdev(netdev); @@ -3228,6 +3248,8 @@ qlcnic_attach_work(struct work_struct *work) return; } attach: + qlcnic_dcb_get_info(adapter); + if (netif_running(netdev)) { if (qlcnic_up(adapter, netdev)) goto done; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 2f79ec5..26f9aa6 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -538,6 +538,9 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter, if (err) goto 
err_out_send_channel_term; + if (adapter->dcb && qlcnic_dcb_attach(adapter)) + qlcnic_clear_dcb_ops(adapter); + err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac); if (err) goto err_out_send_channel_term; @@ -545,6 +548,7 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter, pci_set_drvdata(adapter->pdev, adapter); dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n", adapter->netdev->name); + qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state, adapter->ahw->idc.delay); return 0; @@ -1577,6 +1581,8 @@ static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter) if (err) goto err_out_term_channel; + qlcnic_dcb_get_info(adapter); + return 0; err_out_term_channel: diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c index eb49cd6..b154048 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c @@ -1284,6 +1284,7 @@ static const int qlcnic_pf_passthru_supp_cmds[] = { QLCNIC_CMD_GET_STATISTICS, QLCNIC_CMD_GET_PORT_CONFIG, QLCNIC_CMD_GET_LINK_STATUS, + QLCNIC_CMD_DCB_QUERY_CAP, }; static const struct qlcnic_sriov_cmd_handler qlcnic_pf_bc_cmd_hdlr[] = { -- cgit v0.10.2 From fb859ed6916faeae6b44027d2e0738836a11e8c1 Mon Sep 17 00:00:00 2001 From: Sucheta Chakraborty Date: Fri, 23 Aug 2013 13:38:26 -0400 Subject: qlcnic: dcb: Get DCB parameters from the adapter. o Populate driver data structures with local, operational, and peer DCB parameters. Signed-off-by: Sucheta Chakraborty Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 2196279..fc2972c 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -2142,4 +2142,25 @@ static inline void qlcnic_dcb_get_info(struct qlcnic_adapter *adapter) if (dcb && dcb->ops->get_info) dcb->ops->get_info(adapter); } + +static inline int +qlcnic_dcb_query_cee_param(struct qlcnic_adapter *adapter, char *buf, u8 type) +{ + struct qlcnic_dcb *dcb = adapter->dcb; + + if (dcb && dcb->ops->query_cee_param) + return dcb->ops->query_cee_param(adapter, buf, type); + + return 0; +} + +static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_adapter *adapter) +{ + struct qlcnic_dcb *dcb = adapter->dcb; + + if (dcb && dcb->ops->get_cee_cfg) + return dcb->ops->get_cee_cfg(adapter); + + return 0; +} #endif /* __QLCNIC_H_ */ diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index d6d1b10..9b27ed8 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -68,6 +68,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = { {QLCNIC_CMD_CONFIG_VPORT, 4, 4}, {QLCNIC_CMD_BC_EVENT_SETUP, 2, 1}, {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2}, + {QLCNIC_CMD_DCB_QUERY_PARAM, 2, 50}, }; const u32 qlcnic_83xx_ext_reg_tbl[] = { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c index 4af3784..bf3b17e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c @@ -40,6 +40,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = { {QLCNIC_CMD_GET_LED_STATUS, 4, 2}, {QLCNIC_CMD_MQ_TX_CONFIG_INTR, 2, 3}, {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2}, + {QLCNIC_CMD_DCB_QUERY_PARAM, 4, 1}, }; static inline u32 
qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c index 121e492..e43866f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c @@ -5,9 +5,14 @@ * See LICENSE.qlcnic for copyright and licensing details. */ +#include #include "qlcnic.h" +#define QLC_DCB_NUM_PARAM 3 + +#define QLC_DCB_FW_VER 0x2 #define QLC_DCB_MAX_TC 0x8 +#define QLC_DCB_MAX_APP 0x8 #define QLC_DCB_TSA_SUPPORT(V) (V & 0x1) #define QLC_DCB_ETS_SUPPORT(V) ((V >> 1) & 0x1) @@ -15,6 +20,29 @@ #define QLC_DCB_MAX_NUM_TC(V) ((V >> 20) & 0xf) #define QLC_DCB_MAX_NUM_ETS_TC(V) ((V >> 24) & 0xf) #define QLC_DCB_MAX_NUM_PFC_TC(V) ((V >> 28) & 0xf) +#define QLC_DCB_GET_TC_PRIO(X, P) ((X >> (P * 3)) & 0x7) +#define QLC_DCB_GET_PGID_PRIO(X, P) ((X >> (P * 8)) & 0xff) +#define QLC_DCB_GET_BWPER_PG(X, P) ((X >> (P * 8)) & 0xff) +#define QLC_DCB_GET_TSA_PG(X, P) ((X >> (P * 8)) & 0xff) +#define QLC_DCB_GET_PFC_PRIO(X, P) (((X >> 24) >> P) & 0x1) +#define QLC_DCB_GET_PROTO_ID_APP(X) ((X >> 8) & 0xffff) +#define QLC_DCB_GET_SELECTOR_APP(X) (X & 0xff) + +#define QLC_DCB_LOCAL_PARAM_FWID 0x3 +#define QLC_DCB_OPER_PARAM_FWID 0x1 +#define QLC_DCB_PEER_PARAM_FWID 0x2 + +#define QLC_83XX_DCB_GET_NUMAPP(X) ((X >> 2) & 0xf) +#define QLC_83XX_DCB_TSA_VALID(X) (X & 0x1) +#define QLC_83XX_DCB_PFC_VALID(X) ((X >> 1) & 0x1) +#define QLC_83XX_DCB_GET_PRIOMAP_APP(X) (X >> 24) + +#define QLC_82XX_DCB_GET_NUMAPP(X) ((X >> 12) & 0xf) +#define QLC_82XX_DCB_TSA_VALID(X) ((X >> 4) & 0x1) +#define QLC_82XX_DCB_PFC_VALID(X) ((X >> 5) & 0x1) +#define QLC_82XX_DCB_GET_PRIOVAL_APP(X) ((X >> 24) & 0x7) +#define QLC_82XX_DCB_GET_PRIOMAP_APP(X) (1 << X) +#define QLC_82XX_DCB_PRIO_TC_MAP (0x76543210) static void __qlcnic_dcb_free(struct qlcnic_adapter *); static int __qlcnic_dcb_attach(struct qlcnic_adapter *); @@ -22,8 +50,12 @@ static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *, char *); static void __qlcnic_dcb_get_info(struct qlcnic_adapter *); static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *); +static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *, char *, u8); +static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_adapter *); static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *); +static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_adapter *, char *, u8); +static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_adapter *); struct qlcnic_dcb_capability { bool tsa_capability; @@ -34,6 +66,28 @@ struct qlcnic_dcb_capability { u8 dcb_capability; }; +struct qlcnic_dcb_param { + u32 hdr_prio_pfc_map[2]; + u32 prio_pg_map[2]; + u32 pg_bw_map[2]; + u32 pg_tsa_map[2]; + u32 app[QLC_DCB_MAX_APP]; +}; + +struct qlcnic_dcb_mbx_params { + /* 1st local, 2nd operational 3rd remote */ + struct qlcnic_dcb_param type[3]; + u32 prio_tc_map; +}; + +struct qlcnic_82xx_dcb_param_mbx_le { + __le32 hdr_prio_pfc_map[2]; + __le32 prio_pg_map[2]; + __le32 pg_bw_map[2]; + __le32 pg_tsa_map[2]; + __le32 app[QLC_DCB_MAX_APP]; +}; + struct qlcnic_dcb_cfg { struct qlcnic_dcb_capability capability; u32 version; @@ -46,6 +100,8 @@ static struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = { .get_info = __qlcnic_dcb_get_info, .get_hw_capability = qlcnic_83xx_dcb_get_hw_capability, + .query_cee_param = qlcnic_83xx_dcb_query_cee_param, + .get_cee_cfg = qlcnic_83xx_dcb_get_cee_cfg, }; static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = { @@ -55,8 +111,18 @@ static 
struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = { .get_info = __qlcnic_dcb_get_info, .get_hw_capability = qlcnic_82xx_dcb_get_hw_capability, + .query_cee_param = qlcnic_82xx_dcb_query_cee_param, + .get_cee_cfg = qlcnic_82xx_dcb_get_cee_cfg, }; +static u8 qlcnic_dcb_get_num_app(struct qlcnic_adapter *adapter, u32 val) +{ + if (qlcnic_82xx_check(adapter)) + return QLC_82XX_DCB_GET_NUMAPP(val); + else + return QLC_83XX_DCB_GET_NUMAPP(val); +} + void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter) { if (qlcnic_82xx_check(adapter)) @@ -88,6 +154,8 @@ static void __qlcnic_dcb_free(struct qlcnic_adapter *adapter) kfree(dcb->cfg); dcb->cfg = NULL; + kfree(dcb->param); + dcb->param = NULL; kfree(dcb); adapter->dcb = NULL; } @@ -95,19 +163,32 @@ static void __qlcnic_dcb_free(struct qlcnic_adapter *adapter) static void __qlcnic_dcb_get_info(struct qlcnic_adapter *adapter) { qlcnic_dcb_get_hw_capability(adapter); + qlcnic_dcb_get_cee_cfg(adapter); } static int __qlcnic_dcb_attach(struct qlcnic_adapter *adapter) { struct qlcnic_dcb *dcb = adapter->dcb; + int err = 0; dcb->cfg = kzalloc(sizeof(struct qlcnic_dcb_cfg), GFP_ATOMIC); if (!dcb->cfg) return -ENOMEM; + dcb->param = kzalloc(sizeof(struct qlcnic_dcb_mbx_params), GFP_ATOMIC); + if (!dcb->param) { + err = -ENOMEM; + goto out_free_cfg; + } + qlcnic_dcb_get_info(adapter); return 0; +out_free_cfg: + kfree(dcb->cfg); + dcb->cfg = NULL; + + return err; } static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter, @@ -189,6 +270,104 @@ static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter) return err; } +static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *adapter, + char *buf, u8 type) +{ + u16 size = sizeof(struct qlcnic_82xx_dcb_param_mbx_le); + struct qlcnic_82xx_dcb_param_mbx_le *prsp_le; + struct device *dev = &adapter->pdev->dev; + dma_addr_t cardrsp_phys_addr; + struct qlcnic_dcb_param rsp; + struct qlcnic_cmd_args cmd; + u64 phys_addr; + void *addr; + int err, i; + + switch (type) { + case QLC_DCB_LOCAL_PARAM_FWID: + case QLC_DCB_OPER_PARAM_FWID: + case QLC_DCB_PEER_PARAM_FWID: + break; + default: + dev_err(dev, "Invalid parameter type %d\n", type); + return -EINVAL; + } + + addr = dma_alloc_coherent(&adapter->pdev->dev, size, &cardrsp_phys_addr, + GFP_KERNEL); + if (addr == NULL) + return -ENOMEM; + + prsp_le = addr; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_PARAM); + if (err) + goto out_free_rsp; + + phys_addr = cardrsp_phys_addr; + cmd.req.arg[1] = size | (type << 16); + cmd.req.arg[2] = MSD(phys_addr); + cmd.req.arg[3] = LSD(phys_addr); + + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) { + dev_err(dev, "Failed to query DCBX parameter, err %d\n", err); + goto out; + } + + memset(&rsp, 0, sizeof(struct qlcnic_dcb_param)); + rsp.hdr_prio_pfc_map[0] = le32_to_cpu(prsp_le->hdr_prio_pfc_map[0]); + rsp.hdr_prio_pfc_map[1] = le32_to_cpu(prsp_le->hdr_prio_pfc_map[1]); + rsp.prio_pg_map[0] = le32_to_cpu(prsp_le->prio_pg_map[0]); + rsp.prio_pg_map[1] = le32_to_cpu(prsp_le->prio_pg_map[1]); + rsp.pg_bw_map[0] = le32_to_cpu(prsp_le->pg_bw_map[0]); + rsp.pg_bw_map[1] = le32_to_cpu(prsp_le->pg_bw_map[1]); + rsp.pg_tsa_map[0] = le32_to_cpu(prsp_le->pg_tsa_map[0]); + rsp.pg_tsa_map[1] = le32_to_cpu(prsp_le->pg_tsa_map[1]); + + for (i = 0; i < QLC_DCB_MAX_APP; i++) + rsp.app[i] = le32_to_cpu(prsp_le->app[i]); + + if (buf) + memcpy(buf, &rsp, size); +out: + qlcnic_free_mbx_args(&cmd); + +out_free_rsp: + dma_free_coherent(&adapter->pdev->dev, size, addr, cardrsp_phys_addr); + + 
return err; +} + +static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_adapter *adapter) +{ + struct qlcnic_dcb_mbx_params *mbx; + int err; + + mbx = adapter->dcb->param; + if (!mbx) + return 0; + + err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[0], + QLC_DCB_LOCAL_PARAM_FWID); + if (err) + return err; + + err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[1], + QLC_DCB_OPER_PARAM_FWID); + if (err) + return err; + + err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[2], + QLC_DCB_PEER_PARAM_FWID); + if (err) + return err; + + mbx->prio_tc_map = QLC_82XX_DCB_PRIO_TC_MAP; + + return err; +} + static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter) { struct qlcnic_dcb_capability *cap = &adapter->dcb->cfg->capability; @@ -211,3 +390,72 @@ static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter) return err; } + +static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_adapter *adapter, + char *buf, u8 idx) +{ + struct qlcnic_dcb_mbx_params mbx_out; + int err, i, j, k, max_app, size; + struct qlcnic_dcb_param *each; + struct qlcnic_cmd_args cmd; + u32 val; + char *p; + + size = 0; + memset(&mbx_out, 0, sizeof(struct qlcnic_dcb_mbx_params)); + memset(buf, 0, sizeof(struct qlcnic_dcb_mbx_params)); + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_PARAM); + if (err) + return err; + + cmd.req.arg[0] |= QLC_DCB_FW_VER << 29; + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) { + dev_err(&adapter->pdev->dev, + "Failed to query DCBX param, err %d\n", err); + goto out; + } + + mbx_out.prio_tc_map = cmd.rsp.arg[1]; + p = memcpy(buf, &mbx_out, sizeof(u32)); + k = 2; + p += sizeof(u32); + + for (j = 0; j < QLC_DCB_NUM_PARAM; j++) { + each = &mbx_out.type[j]; + + each->hdr_prio_pfc_map[0] = cmd.rsp.arg[k++]; + each->hdr_prio_pfc_map[1] = cmd.rsp.arg[k++]; + each->prio_pg_map[0] = cmd.rsp.arg[k++]; + each->prio_pg_map[1] = cmd.rsp.arg[k++]; + each->pg_bw_map[0] = cmd.rsp.arg[k++]; + each->pg_bw_map[1] = cmd.rsp.arg[k++]; + each->pg_tsa_map[0] = cmd.rsp.arg[k++]; + each->pg_tsa_map[1] = cmd.rsp.arg[k++]; + val = each->hdr_prio_pfc_map[0]; + + max_app = qlcnic_dcb_get_num_app(adapter, val); + for (i = 0; i < max_app; i++) + each->app[i] = cmd.rsp.arg[i + k]; + + size = 16 * sizeof(u32); + memcpy(p, &each->hdr_prio_pfc_map[0], size); + p += size; + if (j == 0) + k = 18; + else + k = 34; + } +out: + qlcnic_free_mbx_args(&cmd); + + return err; +} + +static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_adapter *adapter) +{ + struct qlcnic_dcb *dcb = adapter->dcb; + + return qlcnic_dcb_query_cee_param(adapter, (char *)dcb->param, 0); +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h index 45dc1fa..d1775d7 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h @@ -23,10 +23,13 @@ struct qlcnic_dcb_ops { int (*query_hw_capability) (struct qlcnic_adapter *, char *); int (*get_hw_capability) (struct qlcnic_adapter *); void (*get_info) (struct qlcnic_adapter *); + int (*query_cee_param) (struct qlcnic_adapter *, char *, u8); + int (*get_cee_cfg) (struct qlcnic_adapter *); }; struct qlcnic_dcb { - struct qlcnic_dcb_ops *ops; - struct qlcnic_dcb_cfg *cfg; + struct qlcnic_dcb_mbx_params *param; + struct qlcnic_dcb_ops *ops; + struct qlcnic_dcb_cfg *cfg; }; #endif diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h index 243018b..d2276b8 100644 --- 
a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h @@ -86,6 +86,7 @@ enum qlcnic_regs { #define QLCNIC_CMD_BC_EVENT_SETUP 0x31 #define QLCNIC_CMD_CONFIG_VPORT 0x32 #define QLCNIC_CMD_DCB_QUERY_CAP 0x34 +#define QLCNIC_CMD_DCB_QUERY_PARAM 0x35 #define QLCNIC_CMD_GET_MAC_STATS 0x37 #define QLCNIC_CMD_82XX_SET_DRV_VER 0x38 #define QLCNIC_CMD_MQ_TX_CONFIG_INTR 0x39 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c index b154048..3c0e02a 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c @@ -1285,6 +1285,7 @@ static const int qlcnic_pf_passthru_supp_cmds[] = { QLCNIC_CMD_GET_PORT_CONFIG, QLCNIC_CMD_GET_LINK_STATUS, QLCNIC_CMD_DCB_QUERY_CAP, + QLCNIC_CMD_DCB_QUERY_PARAM, }; static const struct qlcnic_sriov_cmd_handler qlcnic_pf_bc_cmd_hdlr[] = { -- cgit v0.10.2 From 2d8ebcab86051f2cd7f207edb513995348b78213 Mon Sep 17 00:00:00 2001 From: Sucheta Chakraborty Date: Fri, 23 Aug 2013 13:38:27 -0400 Subject: qlcnic: dcb: Register DCB AEN handler. o Adapter sends Asynchronous Event Notifications to the driver when there are changes in the switch or adapter DCBX configuration. AEN handler updates the driver DCBX parameters. Signed-off-by: Sucheta Chakraborty Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index fc2972c..1e868ee 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -816,6 +816,7 @@ struct qlcnic_mac_list_s { #define QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK 0x8f #define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 0x8D +#define QLCNIC_C2H_OPCODE_GET_DCB_AEN 0x90 #define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */ #define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */ @@ -961,6 +962,7 @@ struct qlcnic_ipaddr { #define __QLCNIC_MBX_POLL_ENABLE 12 #define __QLCNIC_DIAG_MODE 13 #define __QLCNIC_DCB_STATE 14 +#define __QLCNIC_DCB_IN_AEN 15 #define QLCNIC_INTERRUPT_TEST 1 #define QLCNIC_LOOPBACK_TEST 2 @@ -2163,4 +2165,22 @@ static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_adapter *adapter) return 0; } + +static inline void +qlcnic_dcb_register_aen(struct qlcnic_adapter *adapter, u8 flag) +{ + struct qlcnic_dcb *dcb = adapter->dcb; + + if (dcb && dcb->ops->register_aen) + dcb->ops->register_aen(adapter, flag); +} + +static inline void qlcnic_dcb_handle_aen(struct qlcnic_adapter *adapter, + void *msg) +{ + struct qlcnic_dcb *dcb = adapter->dcb; + + if (dcb && dcb->ops->handle_aen) + dcb->ops->handle_aen(adapter, msg); +} #endif /* __QLCNIC_H_ */ diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 9b27ed8..8fce1d3 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -897,6 +897,9 @@ void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter) dev_info(&adapter->pdev->dev, "SFP Removed AEN:0x%x.\n", QLCNIC_MBX_RSP(event[0])); break; + case QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT: + qlcnic_dcb_handle_aen(adapter, (void *)&event[1]); + break; default: dev_dbg(&adapter->pdev->dev, "Unsupported AEN:0x%x.\n", QLCNIC_MBX_RSP(event[0])); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c index e43866f..3477818 100644 --- 
a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c @@ -10,6 +10,7 @@ #define QLC_DCB_NUM_PARAM 3 +#define QLC_DCB_AEN_BIT 0x2 #define QLC_DCB_FW_VER 0x2 #define QLC_DCB_MAX_TC 0x8 #define QLC_DCB_MAX_APP 0x8 @@ -44,6 +45,8 @@ #define QLC_82XX_DCB_GET_PRIOMAP_APP(X) (1 << X) #define QLC_82XX_DCB_PRIO_TC_MAP (0x76543210) +static void qlcnic_dcb_aen_work(struct work_struct *); + static void __qlcnic_dcb_free(struct qlcnic_adapter *); static int __qlcnic_dcb_attach(struct qlcnic_adapter *); static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *, char *); @@ -52,10 +55,13 @@ static void __qlcnic_dcb_get_info(struct qlcnic_adapter *); static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *); static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *, char *, u8); static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_adapter *); +static void qlcnic_82xx_dcb_handle_aen(struct qlcnic_adapter *, void *); static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *); static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_adapter *, char *, u8); static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_adapter *); +static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *, bool); +static void qlcnic_83xx_dcb_handle_aen(struct qlcnic_adapter *, void *); struct qlcnic_dcb_capability { bool tsa_capability; @@ -102,6 +108,8 @@ static struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = { .get_hw_capability = qlcnic_83xx_dcb_get_hw_capability, .query_cee_param = qlcnic_83xx_dcb_query_cee_param, .get_cee_cfg = qlcnic_83xx_dcb_get_cee_cfg, + .register_aen = qlcnic_83xx_dcb_register_aen, + .handle_aen = qlcnic_83xx_dcb_handle_aen, }; static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = { @@ -113,6 +121,7 @@ static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = { .get_hw_capability = qlcnic_82xx_dcb_get_hw_capability, .query_cee_param = qlcnic_82xx_dcb_query_cee_param, .get_cee_cfg = qlcnic_82xx_dcb_get_cee_cfg, + .handle_aen = qlcnic_82xx_dcb_handle_aen, }; static u8 qlcnic_dcb_get_num_app(struct qlcnic_adapter *adapter, u32 val) @@ -140,6 +149,7 @@ int __qlcnic_register_dcb(struct qlcnic_adapter *adapter) return -ENOMEM; adapter->dcb = dcb; + dcb->adapter = adapter; qlcnic_set_dcb_ops(adapter); return 0; @@ -152,6 +162,18 @@ static void __qlcnic_dcb_free(struct qlcnic_adapter *adapter) if (!dcb) return; + qlcnic_dcb_register_aen(adapter, 0); + + while (test_bit(__QLCNIC_DCB_IN_AEN, &adapter->state)) + usleep_range(10000, 11000); + + cancel_delayed_work_sync(&dcb->aen_work); + + if (dcb->wq) { + destroy_workqueue(dcb->wq); + dcb->wq = NULL; + } + kfree(dcb->cfg); dcb->cfg = NULL; kfree(dcb->param); @@ -164,6 +186,7 @@ static void __qlcnic_dcb_get_info(struct qlcnic_adapter *adapter) { qlcnic_dcb_get_hw_capability(adapter); qlcnic_dcb_get_cee_cfg(adapter); + qlcnic_dcb_register_aen(adapter, 1); } static int __qlcnic_dcb_attach(struct qlcnic_adapter *adapter) @@ -171,9 +194,20 @@ static int __qlcnic_dcb_attach(struct qlcnic_adapter *adapter) struct qlcnic_dcb *dcb = adapter->dcb; int err = 0; + INIT_DELAYED_WORK(&dcb->aen_work, qlcnic_dcb_aen_work); + + dcb->wq = create_singlethread_workqueue("qlcnic-dcb"); + if (!dcb->wq) { + dev_err(&adapter->pdev->dev, + "DCB workqueue allocation failed. 
DCB will be disabled\n"); + return -1; + } + dcb->cfg = kzalloc(sizeof(struct qlcnic_dcb_cfg), GFP_ATOMIC); - if (!dcb->cfg) - return -ENOMEM; + if (!dcb->cfg) { + err = -ENOMEM; + goto out_free_wq; + } dcb->param = kzalloc(sizeof(struct qlcnic_dcb_mbx_params), GFP_ATOMIC); if (!dcb->param) { @@ -188,6 +222,10 @@ out_free_cfg: kfree(dcb->cfg); dcb->cfg = NULL; +out_free_wq: + destroy_workqueue(dcb->wq); + dcb->wq = NULL; + return err; } @@ -368,6 +406,29 @@ static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_adapter *adapter) return err; } +static void qlcnic_dcb_aen_work(struct work_struct *work) +{ + struct qlcnic_adapter *adapter; + struct qlcnic_dcb *dcb; + + dcb = container_of(work, struct qlcnic_dcb, aen_work.work); + adapter = dcb->adapter; + + qlcnic_dcb_get_cee_cfg(adapter); + clear_bit(__QLCNIC_DCB_IN_AEN, &adapter->state); +} + +static void qlcnic_82xx_dcb_handle_aen(struct qlcnic_adapter *adapter, + void *data) +{ + struct qlcnic_dcb *dcb = adapter->dcb; + + if (test_and_set_bit(__QLCNIC_DCB_IN_AEN, &adapter->state)) + return; + + queue_delayed_work(dcb->wq, &dcb->aen_work, 0); +} + static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter) { struct qlcnic_dcb_capability *cap = &adapter->dcb->cfg->capability; @@ -459,3 +520,43 @@ static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_adapter *adapter) return qlcnic_dcb_query_cee_param(adapter, (char *)dcb->param, 0); } + +static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *adapter, + bool flag) +{ + u8 val = (flag ? QLCNIC_CMD_INIT_NIC_FUNC : QLCNIC_CMD_STOP_NIC_FUNC); + struct qlcnic_cmd_args cmd; + int err; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, val); + if (err) + return err; + + cmd.req.arg[1] = QLC_DCB_AEN_BIT; + + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) + dev_err(&adapter->pdev->dev, "Failed to %s DCBX AEN, err %d\n", + (flag ? 
"register" : "unregister"), err); + + qlcnic_free_mbx_args(&cmd); + + return err; +} + +static void qlcnic_83xx_dcb_handle_aen(struct qlcnic_adapter *adapter, + void *data) +{ + struct qlcnic_dcb *dcb = adapter->dcb; + u32 *val = data; + + if (test_and_set_bit(__QLCNIC_DCB_IN_AEN, &adapter->state)) + return; + + if (*val & BIT_8) + set_bit(__QLCNIC_DCB_STATE, &adapter->state); + else + clear_bit(__QLCNIC_DCB_STATE, &adapter->state); + + queue_delayed_work(dcb->wq, &dcb->aen_work, 0); +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h index d1775d7..6961dac 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h @@ -25,10 +25,15 @@ struct qlcnic_dcb_ops { void (*get_info) (struct qlcnic_adapter *); int (*query_cee_param) (struct qlcnic_adapter *, char *, u8); int (*get_cee_cfg) (struct qlcnic_adapter *); + int (*register_aen) (struct qlcnic_adapter *, bool); + void (*handle_aen) (struct qlcnic_adapter *, void *); }; struct qlcnic_dcb { struct qlcnic_dcb_mbx_params *param; + struct qlcnic_adapter *adapter; + struct delayed_work aen_work; + struct workqueue_struct *wq; struct qlcnic_dcb_ops *ops; struct qlcnic_dcb_cfg *cfg; }; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h index d2276b8..272c356 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h @@ -125,6 +125,7 @@ enum qlcnic_regs { #define QLCNIC_MBX_COMP_EVENT 0x8100 #define QLCNIC_MBX_REQUEST_EVENT 0x8101 #define QLCNIC_MBX_TIME_EXTEND_EVENT 0x8102 +#define QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT 0x8110 #define QLCNIC_MBX_SFP_INSERT_EVENT 0x8130 #define QLCNIC_MBX_SFP_REMOVE_EVENT 0x8131 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 89f6dff..8d06f88 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -1010,6 +1010,9 @@ static void qlcnic_handle_fw_message(int desc_cnt, int index, break; } break; + case QLCNIC_C2H_OPCODE_GET_DCB_AEN: + qlcnic_dcb_handle_aen(adapter, (void *)&msg); + break; default: break; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c index 3c0e02a..2d6faf0 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c @@ -1286,6 +1286,8 @@ static const int qlcnic_pf_passthru_supp_cmds[] = { QLCNIC_CMD_GET_LINK_STATUS, QLCNIC_CMD_DCB_QUERY_CAP, QLCNIC_CMD_DCB_QUERY_PARAM, + QLCNIC_CMD_INIT_NIC_FUNC, + QLCNIC_CMD_STOP_NIC_FUNC, }; static const struct qlcnic_sriov_cmd_handler qlcnic_pf_bc_cmd_hdlr[] = { -- cgit v0.10.2 From 48365e4852759c4a3710490b0d647aac1321e8c9 Mon Sep 17 00:00:00 2001 From: Sucheta Chakraborty Date: Fri, 23 Aug 2013 13:38:28 -0400 Subject: qlcnic: dcb: Add support for CEE Netlink interface. o Adapter and driver supports only CEE dcbnl ops. Only GET callbacks within dcbnl ops are supported currently. Signed-off-by: Sucheta Chakraborty Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 1e868ee..3364924 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -2183,4 +2183,12 @@ static inline void qlcnic_dcb_handle_aen(struct qlcnic_adapter *adapter, if (dcb && dcb->ops->handle_aen) dcb->ops->handle_aen(adapter, msg); } + +static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_adapter *adapter) +{ + struct qlcnic_dcb *dcb = adapter->dcb; + + if (dcb && dcb->ops->init_dcbnl_ops) + dcb->ops->init_dcbnl_ops(adapter); +} #endif /* __QLCNIC_H_ */ diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c index 3477818..2e10e79 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c @@ -9,11 +9,18 @@ #include "qlcnic.h" #define QLC_DCB_NUM_PARAM 3 +#define QLC_DCB_LOCAL_IDX 0 +#define QLC_DCB_OPER_IDX 1 +#define QLC_DCB_PEER_IDX 2 + +#define QLC_DCB_GET_MAP(V) (1 << V) #define QLC_DCB_AEN_BIT 0x2 #define QLC_DCB_FW_VER 0x2 #define QLC_DCB_MAX_TC 0x8 #define QLC_DCB_MAX_APP 0x8 +#define QLC_DCB_MAX_PRIO QLC_DCB_MAX_TC +#define QLC_DCB_MAX_PG QLC_DCB_MAX_TC #define QLC_DCB_TSA_SUPPORT(V) (V & 0x1) #define QLC_DCB_ETS_SUPPORT(V) ((V >> 1) & 0x1) @@ -45,8 +52,12 @@ #define QLC_82XX_DCB_GET_PRIOMAP_APP(X) (1 << X) #define QLC_82XX_DCB_PRIO_TC_MAP (0x76543210) +static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops; + static void qlcnic_dcb_aen_work(struct work_struct *); +static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *); +static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_adapter *); static void __qlcnic_dcb_free(struct qlcnic_adapter *); static int __qlcnic_dcb_attach(struct qlcnic_adapter *); static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *, char *); @@ -94,12 +105,72 @@ struct qlcnic_82xx_dcb_param_mbx_le { __le32 app[QLC_DCB_MAX_APP]; }; +enum qlcnic_dcb_selector { + QLC_SELECTOR_DEF = 0x0, + QLC_SELECTOR_ETHER, + QLC_SELECTOR_TCP, + QLC_SELECTOR_UDP, +}; + +enum qlcnic_dcb_prio_type { + QLC_PRIO_NONE = 0, + QLC_PRIO_GROUP, + QLC_PRIO_LINK, +}; + +enum qlcnic_dcb_pfc_type { + QLC_PFC_DISABLED = 0, + QLC_PFC_FULL, + QLC_PFC_TX, + QLC_PFC_RX +}; + +struct qlcnic_dcb_prio_cfg { + bool valid; + enum qlcnic_dcb_pfc_type pfc_type; +}; + +struct qlcnic_dcb_pg_cfg { + bool valid; + u8 total_bw_percent; /* of Link/ port BW */ + u8 prio_count; + u8 tsa_type; +}; + +struct qlcnic_dcb_tc_cfg { + bool valid; + struct qlcnic_dcb_prio_cfg prio_cfg[QLC_DCB_MAX_PRIO]; + enum qlcnic_dcb_prio_type prio_type; /* always prio_link */ + u8 link_percent; /* % of link bandwidth */ + u8 bwg_percent; /* % of BWG's bandwidth */ + u8 up_tc_map; + u8 pgid; +}; + +struct qlcnic_dcb_app { + bool valid; + enum qlcnic_dcb_selector selector; + u16 protocol; + u8 priority; +}; + +struct qlcnic_dcb_cee { + struct qlcnic_dcb_tc_cfg tc_cfg[QLC_DCB_MAX_TC]; + struct qlcnic_dcb_pg_cfg pg_cfg[QLC_DCB_MAX_PG]; + struct qlcnic_dcb_app app[QLC_DCB_MAX_APP]; + bool tc_param_valid; + bool pfc_mode_enable; +}; + struct qlcnic_dcb_cfg { + /* 0 - local, 1 - operational, 2 - remote */ + struct qlcnic_dcb_cee type[QLC_DCB_NUM_PARAM]; struct qlcnic_dcb_capability capability; u32 version; }; static struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = { + .init_dcbnl_ops = __qlcnic_init_dcbnl_ops, .free = __qlcnic_dcb_free, .attach = __qlcnic_dcb_attach, .query_hw_capability = __qlcnic_dcb_query_hw_capability, @@ -113,6 
+184,7 @@ static struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = { }; static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = { + .init_dcbnl_ops = __qlcnic_init_dcbnl_ops, .free = __qlcnic_dcb_free, .attach = __qlcnic_dcb_attach, .query_hw_capability = __qlcnic_dcb_query_hw_capability, @@ -132,6 +204,50 @@ static u8 qlcnic_dcb_get_num_app(struct qlcnic_adapter *adapter, u32 val) return QLC_83XX_DCB_GET_NUMAPP(val); } +static inline u8 qlcnic_dcb_pfc_hdr_valid(struct qlcnic_adapter *adapter, + u32 val) +{ + if (qlcnic_82xx_check(adapter)) + return QLC_82XX_DCB_PFC_VALID(val); + else + return QLC_83XX_DCB_PFC_VALID(val); +} + +static inline u8 qlcnic_dcb_tsa_hdr_valid(struct qlcnic_adapter *adapter, + u32 val) +{ + if (qlcnic_82xx_check(adapter)) + return QLC_82XX_DCB_TSA_VALID(val); + else + return QLC_83XX_DCB_TSA_VALID(val); +} + +static inline u8 qlcnic_dcb_get_prio_map_app(struct qlcnic_adapter *adapter, + u32 val) +{ + if (qlcnic_82xx_check(adapter)) + return QLC_82XX_DCB_GET_PRIOMAP_APP(val); + else + return QLC_83XX_DCB_GET_PRIOMAP_APP(val); +} + +static int qlcnic_dcb_prio_count(u8 up_tc_map) +{ + int j; + + for (j = 0; j < QLC_DCB_MAX_TC; j++) + if (up_tc_map & QLC_DCB_GET_MAP(j)) + break; + + return j; +} + +static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_adapter *adapter) +{ + if (test_bit(__QLCNIC_DCB_STATE, &adapter->state)) + adapter->netdev->dcbnl_ops = &qlcnic_dcbnl_ops; +} + void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter) { if (qlcnic_82xx_check(adapter)) @@ -403,6 +519,8 @@ static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_adapter *adapter) mbx->prio_tc_map = QLC_82XX_DCB_PRIO_TC_MAP; + qlcnic_dcb_data_cee_param_map(adapter); + return err; } @@ -517,8 +635,15 @@ out: static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_adapter *adapter) { struct qlcnic_dcb *dcb = adapter->dcb; + int err; - return qlcnic_dcb_query_cee_param(adapter, (char *)dcb->param, 0); + err = qlcnic_dcb_query_cee_param(adapter, (char *)dcb->param, 0); + if (err) + return err; + + qlcnic_dcb_data_cee_param_map(adapter); + + return err; } static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *adapter, @@ -560,3 +685,495 @@ static void qlcnic_83xx_dcb_handle_aen(struct qlcnic_adapter *adapter, queue_delayed_work(dcb->wq, &dcb->aen_work, 0); } + +static void qlcnic_dcb_fill_cee_tc_params(struct qlcnic_dcb_mbx_params *mbx, + struct qlcnic_dcb_param *each, + struct qlcnic_dcb_cee *type) +{ + struct qlcnic_dcb_tc_cfg *tc_cfg; + u8 i, tc, pgid; + + for (i = 0; i < QLC_DCB_MAX_PRIO; i++) { + tc = QLC_DCB_GET_TC_PRIO(mbx->prio_tc_map, i); + tc_cfg = &type->tc_cfg[tc]; + tc_cfg->valid = true; + tc_cfg->up_tc_map |= QLC_DCB_GET_MAP(i); + + if (QLC_DCB_GET_PFC_PRIO(each->hdr_prio_pfc_map[1], i) && + type->pfc_mode_enable) { + tc_cfg->prio_cfg[i].valid = true; + tc_cfg->prio_cfg[i].pfc_type = QLC_PFC_FULL; + } + + if (i < 4) + pgid = QLC_DCB_GET_PGID_PRIO(each->prio_pg_map[0], i); + else + pgid = QLC_DCB_GET_PGID_PRIO(each->prio_pg_map[1], i); + + tc_cfg->pgid = pgid; + + tc_cfg->prio_type = QLC_PRIO_LINK; + type->pg_cfg[tc_cfg->pgid].prio_count++; + } +} + +static void qlcnic_dcb_fill_cee_pg_params(struct qlcnic_dcb_param *each, + struct qlcnic_dcb_cee *type) +{ + struct qlcnic_dcb_pg_cfg *pg_cfg; + u8 i, tsa, bw_per; + + for (i = 0; i < QLC_DCB_MAX_PG; i++) { + pg_cfg = &type->pg_cfg[i]; + pg_cfg->valid = true; + + if (i < 4) { + bw_per = QLC_DCB_GET_BWPER_PG(each->pg_bw_map[0], i); + tsa = QLC_DCB_GET_TSA_PG(each->pg_tsa_map[0], i); + } else { + bw_per = 
QLC_DCB_GET_BWPER_PG(each->pg_bw_map[1], i); + tsa = QLC_DCB_GET_TSA_PG(each->pg_tsa_map[1], i); + } + + pg_cfg->total_bw_percent = bw_per; + pg_cfg->tsa_type = tsa; + } +} + +static void +qlcnic_dcb_fill_cee_app_params(struct qlcnic_adapter *adapter, u8 idx, + struct qlcnic_dcb_param *each, + struct qlcnic_dcb_cee *type) +{ + struct qlcnic_dcb_app *app; + u8 i, num_app, map, cnt; + struct dcb_app new_app; + + num_app = qlcnic_dcb_get_num_app(adapter, each->hdr_prio_pfc_map[0]); + for (i = 0; i < num_app; i++) { + app = &type->app[i]; + app->valid = true; + + /* Only for CEE (-1) */ + app->selector = QLC_DCB_GET_SELECTOR_APP(each->app[i]) - 1; + new_app.selector = app->selector; + app->protocol = QLC_DCB_GET_PROTO_ID_APP(each->app[i]); + new_app.protocol = app->protocol; + map = qlcnic_dcb_get_prio_map_app(adapter, each->app[i]); + cnt = qlcnic_dcb_prio_count(map); + + if (cnt >= QLC_DCB_MAX_TC) + cnt = 0; + + app->priority = cnt; + new_app.priority = cnt; + + if (idx == QLC_DCB_OPER_IDX && adapter->netdev->dcbnl_ops) + dcb_setapp(adapter->netdev, &new_app); + } +} + +static void qlcnic_dcb_map_cee_params(struct qlcnic_adapter *adapter, u8 idx) +{ + struct qlcnic_dcb_mbx_params *mbx = adapter->dcb->param; + struct qlcnic_dcb_param *each = &mbx->type[idx]; + struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg; + struct qlcnic_dcb_cee *type = &cfg->type[idx]; + + type->tc_param_valid = false; + type->pfc_mode_enable = false; + memset(type->tc_cfg, 0, + sizeof(struct qlcnic_dcb_tc_cfg) * QLC_DCB_MAX_TC); + memset(type->pg_cfg, 0, + sizeof(struct qlcnic_dcb_pg_cfg) * QLC_DCB_MAX_TC); + + if (qlcnic_dcb_pfc_hdr_valid(adapter, each->hdr_prio_pfc_map[0]) && + cfg->capability.max_pfc_tc) + type->pfc_mode_enable = true; + + if (qlcnic_dcb_tsa_hdr_valid(adapter, each->hdr_prio_pfc_map[0]) && + cfg->capability.max_ets_tc) + type->tc_param_valid = true; + + qlcnic_dcb_fill_cee_tc_params(mbx, each, type); + qlcnic_dcb_fill_cee_pg_params(each, type); + qlcnic_dcb_fill_cee_app_params(adapter, idx, each, type); +} + +static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *adapter) +{ + int i; + + for (i = 0; i < QLC_DCB_NUM_PARAM; i++) + qlcnic_dcb_map_cee_params(adapter, i); + + dcbnl_cee_notify(adapter->netdev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0); +} + +static u8 qlcnic_dcb_get_state(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + + return test_bit(__QLCNIC_DCB_STATE, &adapter->state); +} + +static void qlcnic_dcb_get_perm_hw_addr(struct net_device *netdev, u8 *addr) +{ + memcpy(addr, netdev->dev_addr, netdev->addr_len); +} + +static void +qlcnic_dcb_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 *prio, + u8 *pgid, u8 *bw_per, u8 *up_tc_map) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_dcb_tc_cfg *tc_cfg, *temp; + struct qlcnic_dcb_cee *type; + u8 i, cnt, pg; + + type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX]; + *prio = *pgid = *bw_per = *up_tc_map = 0; + + if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) || + !type->tc_param_valid) + return; + + if (tc < 0 || (tc > QLC_DCB_MAX_TC)) + return; + + tc_cfg = &type->tc_cfg[tc]; + if (!tc_cfg->valid) + return; + + *pgid = tc_cfg->pgid; + *prio = tc_cfg->prio_type; + *up_tc_map = tc_cfg->up_tc_map; + pg = *pgid; + + for (i = 0, cnt = 0; i < QLC_DCB_MAX_TC; i++) { + temp = &type->tc_cfg[i]; + if (temp->valid && (pg == temp->pgid)) + cnt++; + } + + tc_cfg->bwg_percent = (100 / cnt); + *bw_per = tc_cfg->bwg_percent; +} + +static void qlcnic_dcb_get_pg_bwg_cfg_tx(struct 
net_device *netdev, int pgid, + u8 *bw_pct) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_dcb_pg_cfg *pgcfg; + struct qlcnic_dcb_cee *type; + + *bw_pct = 0; + type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX]; + + if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) || + !type->tc_param_valid) + return; + + if (pgid < 0 || pgid > QLC_DCB_MAX_PG) + return; + + pgcfg = &type->pg_cfg[pgid]; + if (!pgcfg->valid) + return; + + *bw_pct = pgcfg->total_bw_percent; +} + +static void qlcnic_dcb_get_pfc_cfg(struct net_device *netdev, int prio, + u8 *setting) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_dcb_tc_cfg *tc_cfg; + u8 val = QLC_DCB_GET_MAP(prio); + struct qlcnic_dcb_cee *type; + u8 i; + + *setting = 0; + type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX]; + + if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) || + !type->pfc_mode_enable) + return; + + for (i = 0; i < QLC_DCB_MAX_TC; i++) { + tc_cfg = &type->tc_cfg[i]; + if (!tc_cfg->valid) + continue; + + if ((val & tc_cfg->up_tc_map) && (tc_cfg->prio_cfg[prio].valid)) + *setting = tc_cfg->prio_cfg[prio].pfc_type; + } +} + +static u8 qlcnic_dcb_get_capability(struct net_device *netdev, int capid, + u8 *cap) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + + if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state)) + return 0; + + switch (capid) { + case DCB_CAP_ATTR_PG: + case DCB_CAP_ATTR_UP2TC: + case DCB_CAP_ATTR_PFC: + case DCB_CAP_ATTR_GSP: + *cap = true; + break; + case DCB_CAP_ATTR_PG_TCS: + case DCB_CAP_ATTR_PFC_TCS: + *cap = 0x80; /* 8 priorities for PGs */ + break; + case DCB_CAP_ATTR_DCBX: + *cap = adapter->dcb->cfg->capability.dcb_capability; + break; + default: + *cap = false; + } + + return 0; +} + +static int qlcnic_dcb_get_num_tcs(struct net_device *netdev, int attr, u8 *num) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg; + + if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state)) + return -EINVAL; + + switch (attr) { + case DCB_NUMTCS_ATTR_PG: + *num = cfg->capability.max_ets_tc; + return 0; + case DCB_NUMTCS_ATTR_PFC: + *num = cfg->capability.max_pfc_tc; + return 0; + default: + return -EINVAL; + } +} + +static u8 qlcnic_dcb_get_app(struct net_device *netdev, u8 idtype, u16 id) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct dcb_app app = { + .selector = idtype, + .protocol = id, + }; + + if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state)) + return 0; + + return dcb_getapp(netdev, &app); +} + +static u8 qlcnic_dcb_get_pfc_state(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_dcb *dcb = adapter->dcb; + + if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state)) + return 0; + + return dcb->cfg->type[QLC_DCB_OPER_IDX].pfc_mode_enable; +} + +static u8 qlcnic_dcb_get_dcbx(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg; + + if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state)) + return 0; + + return cfg->capability.dcb_capability; +} + +static u8 qlcnic_dcb_get_feat_cfg(struct net_device *netdev, int fid, u8 *flag) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_dcb_cee *type; + + if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state)) + return 1; + + type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX]; + *flag = 0; + + switch (fid) { + case DCB_FEATCFG_ATTR_PG: + if (type->tc_param_valid) + *flag |= DCB_FEATCFG_ENABLE; + else + *flag |= 
DCB_FEATCFG_ERROR; + break; + case DCB_FEATCFG_ATTR_PFC: + if (type->pfc_mode_enable) { + if (type->tc_cfg[0].prio_cfg[0].pfc_type) + *flag |= DCB_FEATCFG_ENABLE; + } else { + *flag |= DCB_FEATCFG_ERROR; + } + break; + case DCB_FEATCFG_ATTR_APP: + *flag |= DCB_FEATCFG_ENABLE; + break; + default: + netdev_err(netdev, "Invalid Feature ID %d\n", fid); + return 1; + } + + return 0; +} + +static inline void +qlcnic_dcb_get_pg_tc_cfg_rx(struct net_device *netdev, int prio, u8 *prio_type, + u8 *pgid, u8 *bw_pct, u8 *up_map) +{ + *prio_type = *pgid = *bw_pct = *up_map = 0; +} + +static inline void +qlcnic_dcb_get_pg_bwg_cfg_rx(struct net_device *netdev, int pgid, u8 *bw_pct) +{ + *bw_pct = 0; +} + +static int qlcnic_dcb_peer_app_info(struct net_device *netdev, + struct dcb_peer_app_info *info, + u16 *app_count) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_dcb_cee *peer; + int i; + + *app_count = 0; + + if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state)) + return 0; + + peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX]; + + for (i = 0; i < QLC_DCB_MAX_APP; i++) { + if (peer->app[i].valid) + (*app_count)++; + } + + return 0; +} + +static int qlcnic_dcb_peer_app_table(struct net_device *netdev, + struct dcb_app *table) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_dcb_cee *peer; + struct qlcnic_dcb_app *app; + int i, j; + + if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state)) + return 0; + + peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX]; + + for (i = 0, j = 0; i < QLC_DCB_MAX_APP; i++) { + app = &peer->app[i]; + if (!app->valid) + continue; + + table[j].selector = app->selector; + table[j].priority = app->priority; + table[j++].protocol = app->protocol; + } + + return 0; +} + +static int qlcnic_dcb_cee_peer_get_pg(struct net_device *netdev, + struct cee_pg *pg) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_dcb_cee *peer; + u8 i, j, k, map; + + if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state)) + return 0; + + peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX]; + + for (i = 0, j = 0; i < QLC_DCB_MAX_PG; i++) { + if (!peer->pg_cfg[i].valid) + continue; + + pg->pg_bw[j] = peer->pg_cfg[i].total_bw_percent; + + for (k = 0; k < QLC_DCB_MAX_TC; k++) { + if (peer->tc_cfg[i].valid && + (peer->tc_cfg[i].pgid == i)) { + map = peer->tc_cfg[i].up_tc_map; + pg->prio_pg[j++] = map; + break; + } + } + } + + return 0; +} + +static int qlcnic_dcb_cee_peer_get_pfc(struct net_device *netdev, + struct cee_pfc *pfc) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg; + struct qlcnic_dcb_tc_cfg *tc; + struct qlcnic_dcb_cee *peer; + u8 i, setting, prio; + + pfc->pfc_en = 0; + + if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state)) + return 0; + + peer = &cfg->type[QLC_DCB_PEER_IDX]; + + for (i = 0; i < QLC_DCB_MAX_TC; i++) { + tc = &peer->tc_cfg[i]; + prio = qlcnic_dcb_prio_count(tc->up_tc_map); + + setting = 0; + qlcnic_dcb_get_pfc_cfg(netdev, prio, &setting); + if (setting) + pfc->pfc_en |= QLC_DCB_GET_MAP(i); + } + + pfc->tcs_supported = cfg->capability.max_pfc_tc; + + return 0; +} + +static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops = { + .getstate = qlcnic_dcb_get_state, + .getpermhwaddr = qlcnic_dcb_get_perm_hw_addr, + .getpgtccfgtx = qlcnic_dcb_get_pg_tc_cfg_tx, + .getpgbwgcfgtx = qlcnic_dcb_get_pg_bwg_cfg_tx, + .getpfccfg = qlcnic_dcb_get_pfc_cfg, + .getcap = qlcnic_dcb_get_capability, + .getnumtcs = qlcnic_dcb_get_num_tcs, + .getapp = qlcnic_dcb_get_app, + .getpfcstate 
= qlcnic_dcb_get_pfc_state, + .getdcbx = qlcnic_dcb_get_dcbx, + .getfeatcfg = qlcnic_dcb_get_feat_cfg, + + .getpgtccfgrx = qlcnic_dcb_get_pg_tc_cfg_rx, + .getpgbwgcfgrx = qlcnic_dcb_get_pg_bwg_cfg_rx, + + .peer_getappinfo = qlcnic_dcb_peer_app_info, + .peer_getapptable = qlcnic_dcb_peer_app_table, + .cee_peer_getpg = qlcnic_dcb_cee_peer_get_pg, + .cee_peer_getpfc = qlcnic_dcb_cee_peer_get_pfc, +}; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h index 6961dac..b87ce9f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h @@ -18,6 +18,7 @@ static inline int __qlcnic_register_dcb(struct qlcnic_adapter *adapter) #endif struct qlcnic_dcb_ops { + void (*init_dcbnl_ops) (struct qlcnic_adapter *); void (*free) (struct qlcnic_adapter *); int (*attach) (struct qlcnic_adapter *); int (*query_hw_capability) (struct qlcnic_adapter *, char *); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 343c6a0..df96f66 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -2028,6 +2028,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, return err; } + qlcnic_dcb_init_dcbnl_ops(adapter); + return 0; } -- cgit v0.10.2 From abd939d2fde990ddf45d5a61efcb3511f059805f Mon Sep 17 00:00:00 2001 From: Sucheta Chakraborty Date: Fri, 23 Aug 2013 13:38:29 -0400 Subject: qlcnic: Update version to 5.3.49. Signed-off-by: Sucheta Chakraborty Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 3364924..156a78e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -38,8 +38,8 @@ #define _QLCNIC_LINUX_MAJOR 5 #define _QLCNIC_LINUX_MINOR 3 -#define _QLCNIC_LINUX_SUBVERSION 48 -#define QLCNIC_LINUX_VERSIONID "5.3.48" +#define _QLCNIC_LINUX_SUBVERSION 49 +#define QLCNIC_LINUX_VERSIONID "5.3.49" #define QLCNIC_DRV_IDC_VER 0x01 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) -- cgit v0.10.2 From b1dcdc68b1f4cc77f603d7507f7a14f1f4864d41 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 23 Aug 2013 16:16:33 +0200 Subject: net: tcp_probe: allow more advanced ingress filtering by mark Currently, the tcp_probe snooper can either filter packets by a given port (handed to the module via module parameter e.g. port=80) or lets all TCP traffic pass (port=0, default). When a port is specified, the port number is tested against the sk's source/destination port. Thus, if one of them matches, the information will be further processed for the log. As this is quite limited, allow for more advanced filtering possibilities which can facilitate debugging/analysis with the help of the tcp_probe snooper. Therefore, similarly as added to BPF machine in commit 7e75f93e ("pkt_sched: ingress socket filter by mark"), add the possibility to use skb->mark as a filter. If the mark is not being used otherwise, this allows ingress filtering by flow (e.g. in order to track updates from only a single flow, or a subset of all flows for a given port) and other things such as dynamic logging and reconfiguration without removing/re-inserting the tcp_probe module, etc. Simple example: insmod net/ipv4/tcp_probe.ko fwmark=8888 full=1 ... 
iptables -A INPUT -i eth4 -t mangle -p tcp --dport 22 \ --sport 60952 -j MARK --set-mark 8888 [... sampling interval ...] iptables -D INPUT -i eth4 -t mangle -p tcp --dport 22 \ --sport 60952 -j MARK --set-mark 8888 The current option to filter by a given port is still being preserved. A similar approach could be done for the sctp_probe module as a follow-up. Signed-off-by: Daniel Borkmann Signed-off-by: David S. Miller diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c index 301a3ef..622a437 100644 --- a/net/ipv4/tcp_probe.c +++ b/net/ipv4/tcp_probe.c @@ -46,6 +46,10 @@ static unsigned int bufsize __read_mostly = 4096; MODULE_PARM_DESC(bufsize, "Log buffer size in packets (4096)"); module_param(bufsize, uint, 0); +static unsigned int fwmark __read_mostly = 0; +MODULE_PARM_DESC(fwmark, "skb mark to match (0=no mark)"); +module_param(fwmark, uint, 0); + static int full __read_mostly; MODULE_PARM_DESC(full, "Full log (1=every ack packet received, 0=only cwnd changes)"); module_param(full, int, 0); @@ -124,9 +128,11 @@ static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb, const struct tcp_sock *tp = tcp_sk(sk); const struct inet_sock *inet = inet_sk(sk); - /* Only update if port matches */ - if ((port == 0 || ntohs(inet->inet_dport) == port || - ntohs(inet->inet_sport) == port) && + /* Only update if port or skb mark matches */ + if (((port == 0 && fwmark == 0) || + ntohs(inet->inet_dport) == port || + ntohs(inet->inet_sport) == port || + (fwmark > 0 && skb->mark == fwmark)) && (full || tp->snd_cwnd != tcp_probe.lastcwnd)) { spin_lock(&tcp_probe.lock); @@ -284,7 +290,8 @@ static __init int tcpprobe_init(void) if (ret) goto err1; - pr_info("probe registered (port=%d) bufsize=%u\n", port, bufsize); + pr_info("probe registered (port=%d/fwmark=%u) bufsize=%u\n", + port, fwmark, bufsize); return 0; err1: remove_proc_entry(procname, init_net.proc_net); -- cgit v0.10.2 From f2f781a759e0471e440668c7c90c03696b73b65e Mon Sep 17 00:00:00 2001 From: Sathya Perla Date: Tue, 27 Aug 2013 16:57:30 +0530 Subject: be2net: use EQ_CREATEv2 for SH-R EQ_CREATEv2 explicitly returns the msix-index associated with a EQ. For SH-R this is needed if EQs need to be deleted and re-created without resetting a function. Signed-off-by: Sathya Perla Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 11c815d..7c5f9f2 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -189,6 +189,7 @@ struct be_eq_obj { u32 cur_eqd; /* in usecs */ u8 idx; /* array index */ + u8 msix_idx; u16 tx_budget; u16 spurious_intr; struct napi_struct napi; diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 85923e2..e69835c 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -678,31 +678,6 @@ static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, } } -/* Converts interrupt delay in microseconds to multiplier value */ -static u32 eq_delay_to_mult(u32 usec_delay) -{ -#define MAX_INTR_RATE 651042 - const u32 round = 10; - u32 multiplier; - - if (usec_delay == 0) - multiplier = 0; - else { - u32 interrupt_rate = 1000000 / usec_delay; - /* Max delay, corresponding to the lowest interrupt rate */ - if (interrupt_rate == 0) - multiplier = 1023; - else { - multiplier = (MAX_INTR_RATE - interrupt_rate) * round; - multiplier /= interrupt_rate; - /* Round the multiplier to the closest value.*/ - multiplier = (multiplier + round/2) / round; - multiplier = min(multiplier, (u32)1023); - } - } - return multiplier; -} - static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter) { struct be_dma_mem *mbox_mem = &adapter->mbox_mem; @@ -790,13 +765,12 @@ int be_cmd_fw_clean(struct be_adapter *adapter) return status; } -int be_cmd_eq_create(struct be_adapter *adapter, - struct be_queue_info *eq, int eq_delay) +int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo) { struct be_mcc_wrb *wrb; struct be_cmd_req_eq_create *req; - struct be_dma_mem *q_mem = &eq->dma_mem; - int status; + struct be_dma_mem *q_mem = &eqo->q.dma_mem; + int status, ver = 0; if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; @@ -807,15 +781,18 @@ int be_cmd_eq_create(struct be_adapter *adapter, be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL); + /* Support for EQ_CREATEv2 available only SH-R onwards */ + if (!(BEx_chip(adapter) || lancer_chip(adapter))) + ver = 2; + + req->hdr.version = ver; req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1); /* 4byte eqe*/ AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0); AMAP_SET_BITS(struct amap_eq_context, count, req->context, - __ilog2_u32(eq->len/256)); - AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context, - eq_delay_to_mult(eq_delay)); + __ilog2_u32(eqo->q.len / 256)); be_dws_cpu_to_le(req->context, sizeof(req->context)); be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); @@ -823,8 +800,10 @@ int be_cmd_eq_create(struct be_adapter *adapter, status = be_mbox_notify_wait(adapter); if (!status) { struct be_cmd_resp_eq_create *resp = embedded_payload(wrb); - eq->id = le16_to_cpu(resp->eq_id); - eq->created = true; + eqo->q.id = le16_to_cpu(resp->eq_id); + eqo->msix_idx = + (ver == 2) ? 
le16_to_cpu(resp->msix_idx) : eqo->idx; + eqo->q.created = true; } mutex_unlock(&adapter->mbox_lock); diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 6237192..880c85a 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -307,7 +307,7 @@ struct be_cmd_req_eq_create { struct be_cmd_resp_eq_create { struct be_cmd_resp_hdr resp_hdr; u16 eq_id; /* sword */ - u16 rsvd0; /* sword */ + u16 msix_idx; /* available only in v2 */ } __packed; /******************** Mac query ***************************/ @@ -1851,8 +1851,7 @@ extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, u32 *if_handle, u32 domain); extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle, u32 domain); -extern int be_cmd_eq_create(struct be_adapter *adapter, - struct be_queue_info *eq, int eq_delay); +extern int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo); extern int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq, struct be_queue_info *eq, bool no_delay, int num_cqe_dma_coalesce); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 08f6417..6f040d8 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1939,7 +1939,7 @@ static int be_evt_queues_create(struct be_adapter *adapter) if (rc) return rc; - rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd); + rc = be_cmd_eq_create(adapter, eqo); if (rc) return rc; } @@ -2443,7 +2443,7 @@ done: static inline int be_msix_vec_get(struct be_adapter *adapter, struct be_eq_obj *eqo) { - return adapter->msix_entries[eqo->idx].vector; + return adapter->msix_entries[eqo->msix_idx].vector; } static int be_msix_register(struct be_adapter *adapter) -- cgit v0.10.2 From 150d58c7094ee02e5e3e876e288a95e254c5e830 Mon Sep 17 00:00:00 2001 From: Vasundhara Volam Date: Tue, 27 Aug 2013 16:57:31 +0530 Subject: be2net: Fixup profile management routines 1) Parse PCIe descriptor for max-VFs supported by HW 2) Cleanup NIC descriptor parsing in get_func/profile_config() routines 3) Use common struct definitions for v0 and v1 versions of GET_FUNC_CONFIG Signed-off-by: Vasundhara Volam Signed-off-by: Sathya Perla Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index e69835c..bc6f958 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -3066,25 +3066,40 @@ err: return status; } -static struct be_nic_resource_desc *be_get_nic_desc(u8 *buf, u32 desc_count, - u32 max_buf_size) +static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count) { - struct be_nic_resource_desc *desc = (struct be_nic_resource_desc *)buf; + struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf; int i; for (i = 0; i < desc_count; i++) { - desc->desc_len = desc->desc_len ? : RESOURCE_DESC_SIZE; - if (((void *)desc + desc->desc_len) > - (void *)(buf + max_buf_size)) - return NULL; + if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 || + hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) + return (struct be_nic_res_desc *)hdr; - if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_V0 || - desc->desc_type == NIC_RESOURCE_DESC_TYPE_V1) - return desc; - - desc = (void *)desc + desc->desc_len; + hdr->desc_len = hdr->desc_len ? 
: RESOURCE_DESC_SIZE_V0; + hdr = (void *)hdr + hdr->desc_len; } + return NULL; +} + +static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf, + u32 desc_count) +{ + struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf; + struct be_pcie_res_desc *pcie; + int i; + + for (i = 0; i < desc_count; i++) { + if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 || + hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) { + pcie = (struct be_pcie_res_desc *)hdr; + if (pcie->pf_num == devfn) + return pcie; + } + hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0; + hdr = (void *)hdr + hdr->desc_len; + } return NULL; } @@ -3128,10 +3143,9 @@ int be_cmd_get_func_config(struct be_adapter *adapter) if (!status) { struct be_cmd_resp_get_func_config *resp = cmd.va; u32 desc_count = le32_to_cpu(resp->desc_count); - struct be_nic_resource_desc *desc; + struct be_nic_res_desc *desc; - desc = be_get_nic_desc(resp->func_param, desc_count, - sizeof(resp->func_param)); + desc = be_get_nic_desc(resp->func_param, desc_count); if (!desc) { status = -EINVAL; goto err; @@ -3223,51 +3237,51 @@ err: int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags, u16 *txq_count, u8 domain) { + struct be_cmd_resp_get_profile_config *resp; + struct be_pcie_res_desc *pcie; + struct be_nic_res_desc *nic; struct be_queue_info *mccq = &adapter->mcc_obj.q; struct be_dma_mem cmd; + u32 desc_count; int status; memset(&cmd, 0, sizeof(struct be_dma_mem)); - if (!lancer_chip(adapter)) - cmd.size = sizeof(struct be_cmd_resp_get_profile_config_v1); - else - cmd.size = sizeof(struct be_cmd_resp_get_profile_config); - cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, - &cmd.dma); - if (!cmd.va) { - dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); + cmd.size = sizeof(struct be_cmd_resp_get_profile_config); + cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); + if (!cmd.va) return -ENOMEM; - } if (!mccq->created) status = be_cmd_get_profile_config_mbox(adapter, domain, &cmd); else status = be_cmd_get_profile_config_mccq(adapter, domain, &cmd); - if (!status) { - struct be_cmd_resp_get_profile_config *resp = cmd.va; - u32 desc_count = le32_to_cpu(resp->desc_count); - struct be_nic_resource_desc *desc; + if (status) + goto err; - desc = be_get_nic_desc(resp->func_param, desc_count, - sizeof(resp->func_param)); + resp = cmd.va; + desc_count = le32_to_cpu(resp->desc_count); - if (!desc) { - status = -EINVAL; - goto err; - } + pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param, + desc_count); + if (pcie) + adapter->dev_num_vfs = le16_to_cpu(pcie->num_vfs); + + nic = be_get_nic_desc(resp->func_param, desc_count); + if (nic) { if (cap_flags) - *cap_flags = le32_to_cpu(desc->cap_flags); + *cap_flags = le32_to_cpu(nic->cap_flags); if (txq_count) - *txq_count = le32_to_cpu(desc->txq_count); + *txq_count = le16_to_cpu(nic->txq_count); } err: if (cmd.va) - pci_free_consistent(adapter->pdev, cmd.size, - cmd.va, cmd.dma); + pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); return status; } -/* Uses sync mcc */ +/* Currently only Lancer uses this command and it supports version 0 only + * Uses sync mcc + */ int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, u8 domain) { @@ -3288,12 +3302,10 @@ int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req), wrb, NULL); - req->hdr.domain = domain; req->desc_count = cpu_to_le32(1); - - req->nic_desc.desc_type = 
NIC_RESOURCE_DESC_TYPE_V0; - req->nic_desc.desc_len = RESOURCE_DESC_SIZE; + req->nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0; + req->nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0; req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV); req->nic_desc.pf_num = adapter->pf_number; req->nic_desc.vf_num = domain; diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 880c85a..8ebed17 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -1718,11 +1718,13 @@ struct be_cmd_req_set_ext_fat_caps { struct be_fat_conf_params set_params; }; -#define RESOURCE_DESC_SIZE 88 +#define RESOURCE_DESC_SIZE_V0 72 +#define RESOURCE_DESC_SIZE_V1 88 +#define PCIE_RESOURCE_DESC_TYPE_V0 0x40 #define NIC_RESOURCE_DESC_TYPE_V0 0x41 +#define PCIE_RESOURCE_DESC_TYPE_V1 0x50 #define NIC_RESOURCE_DESC_TYPE_V1 0x51 -#define MAX_RESOURCE_DESC 4 -#define MAX_RESOURCE_DESC_V1 32 +#define MAX_RESOURCE_DESC 264 /* QOS unit number */ #define QUN 4 @@ -1731,9 +1733,30 @@ struct be_cmd_req_set_ext_fat_caps { /* No save */ #define NOSV 7 -struct be_nic_resource_desc { +struct be_res_desc_hdr { u8 desc_type; u8 desc_len; +} __packed; + +struct be_pcie_res_desc { + struct be_res_desc_hdr hdr; + u8 rsvd0; + u8 flags; + u16 rsvd1; + u8 pf_num; + u8 rsvd2; + u32 rsvd3; + u8 sriov_state; + u8 pf_state; + u8 pf_type; + u8 rsvd4; + u16 num_vfs; + u16 rsvd5; + u32 rsvd6[17]; +} __packed; + +struct be_nic_res_desc { + struct be_res_desc_hdr hdr; u8 rsvd1; u8 flags; u8 vf_num; @@ -1762,7 +1785,7 @@ struct be_nic_resource_desc { u8 wol_param; u16 rsvd7; u32 rsvd8[3]; -}; +} __packed; struct be_cmd_req_get_func_config { struct be_cmd_req_hdr hdr; @@ -1771,7 +1794,7 @@ struct be_cmd_req_get_func_config { struct be_cmd_resp_get_func_config { struct be_cmd_resp_hdr hdr; u32 desc_count; - u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE]; + u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE_V1]; }; #define ACTIVE_PROFILE_TYPE 0x2 @@ -1783,26 +1806,20 @@ struct be_cmd_req_get_profile_config { }; struct be_cmd_resp_get_profile_config { - struct be_cmd_req_hdr hdr; - u32 desc_count; - u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE]; -}; - -struct be_cmd_resp_get_profile_config_v1 { - struct be_cmd_req_hdr hdr; + struct be_cmd_resp_hdr hdr; u32 desc_count; - u8 func_param[MAX_RESOURCE_DESC_V1 * RESOURCE_DESC_SIZE]; + u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE_V1]; }; struct be_cmd_req_set_profile_config { struct be_cmd_req_hdr hdr; u32 rsvd; u32 desc_count; - struct be_nic_resource_desc nic_desc; + struct be_nic_res_desc nic_desc; }; struct be_cmd_resp_set_profile_config { - struct be_cmd_req_hdr hdr; + struct be_cmd_resp_hdr hdr; }; struct be_cmd_enable_disable_vf { -- cgit v0.10.2 From 92bf14abf7a064936b5e6baab30661da86a86e52 Mon Sep 17 00:00:00 2001 From: Sathya Perla Date: Tue, 27 Aug 2013 16:57:32 +0530 Subject: be2net: refactor be_get_resources() code 1) use be_resources{} struct to query/store HW resource limits 2) The HW queue/resource limits for BE2/BE3 chips are mostly called out in driver as constants. Code to handle this is scattered across various places in be_setup(). Consolidate this code into BEx_get_resources(). For Lancer-R, Skyhawk-R, these limits are queried from FW. Signed-off-by: Sathya Perla Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 7c5f9f2..8b41635 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -101,12 +101,15 @@ static inline char *nic_name(struct pci_dev *pdev) #define BE3_MAX_RSS_QS 8 #define BE2_MAX_RSS_QS 4 +#define BE3_MAX_TX_QS 8 #define MAX_RSS_QS BE3_MAX_RSS_QS #define MAX_RX_QS (MAX_RSS_QS + 1) /* RSS qs + 1 def Rx */ +#define MAX_EVT_QS MAX_RSS_QS #define MAX_TX_QS 8 #define MAX_ROCE_EQS 5 #define MAX_MSIX_VECTORS (MAX_RSS_QS + MAX_ROCE_EQS) /* RSS qs + RoCE */ +#define MIN_MSIX_VECTORS 1 #define BE_TX_BUDGET 256 #define BE_NAPI_WEIGHT 64 #define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */ @@ -353,6 +356,18 @@ struct phy_info { u32 supported; }; +struct be_resources { + u16 max_vfs; /* Total VFs "really" supported by FW/HW */ + u16 max_mcast_mac; + u16 max_tx_qs; + u16 max_rss_qs; + u16 max_rx_qs; + u16 max_uc_mac; /* Max UC MACs programmable */ + u16 max_vlans; /* Number of vlans supported */ + u16 max_evt_qs; + u32 if_cap_flags; +}; + struct be_adapter { struct pci_dev *pdev; struct net_device *netdev; @@ -370,18 +385,19 @@ struct be_adapter { spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */ spinlock_t mcc_cq_lock; - u32 num_msix_vec; - u32 num_evt_qs; - struct be_eq_obj eq_obj[MAX_MSIX_VECTORS]; + u16 cfg_num_qs; /* configured via set-channels */ + u16 num_evt_qs; + u16 num_msix_vec; + struct be_eq_obj eq_obj[MAX_EVT_QS]; struct msix_entry msix_entries[MAX_MSIX_VECTORS]; bool isr_registered; /* TX Rings */ - u32 num_tx_qs; + u16 num_tx_qs; struct be_tx_obj tx_obj[MAX_TX_QS]; /* Rx rings */ - u32 num_rx_qs; + u16 num_rx_qs; struct be_rx_obj rx_obj[MAX_RX_QS]; u32 big_page_size; /* Compounded page size shared by rx wrbs */ @@ -431,8 +447,8 @@ struct be_adapter { u32 flash_status; struct completion flash_compl; - u32 num_vfs; /* Number of VFs provisioned by PF driver */ - u32 dev_num_vfs; /* Number of VFs supported by HW */ + struct be_resources res; /* resources available for the func */ + u16 num_vfs; /* Number of VFs provisioned by PF */ u8 virtfn; struct be_vf_cfg *vf_cfg; bool be3_native; @@ -447,21 +463,13 @@ struct be_adapter { u16 qnq_vid; u32 msg_enable; int be_get_temp_freq; - u16 max_mcast_mac; - u16 max_tx_queues; - u16 max_rss_queues; - u16 max_rx_queues; - u16 max_pmac_cnt; - u16 max_vlans; - u16 max_event_queues; - u32 if_cap_flags; u8 pf_number; u64 rss_flags; }; #define be_physfn(adapter) (!adapter->virtfn) #define sriov_enabled(adapter) (adapter->num_vfs > 0) -#define sriov_want(adapter) (adapter->dev_num_vfs && num_vfs && \ +#define sriov_want(adapter) (be_max_vfs(adapter) && num_vfs && \ be_physfn(adapter)) #define for_all_vfs(adapter, vf_cfg, i) \ for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \ @@ -470,6 +478,26 @@ struct be_adapter { #define ON 1 #define OFF 0 +#define be_max_vlans(adapter) (adapter->res.max_vlans) +#define be_max_uc(adapter) (adapter->res.max_uc_mac) +#define be_max_mc(adapter) (adapter->res.max_mcast_mac) +#define be_max_vfs(adapter) (adapter->res.max_vfs) +#define be_max_rss(adapter) (adapter->res.max_rss_qs) +#define be_max_txqs(adapter) (adapter->res.max_tx_qs) +#define be_max_prio_txqs(adapter) (adapter->res.max_prio_tx_qs) +#define be_max_rxqs(adapter) (adapter->res.max_rx_qs) +#define be_max_eqs(adapter) (adapter->res.max_evt_qs) +#define be_if_cap_flags(adapter) (adapter->res.if_cap_flags) + +static inline u16 be_max_qs(struct be_adapter *adapter) +{ + /* If 
no RSS, need atleast the one def RXQ */ + u16 num = max_t(u16, be_max_rss(adapter), 1); + + num = min(num, be_max_eqs(adapter)); + return min_t(u16, num, num_online_cpus()); +} + #define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3 || \ adapter->pdev->device == OC_DEVICE_ID4) diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index bc6f958..5458a43 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1776,8 +1776,7 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) */ req->if_flags_mask |= cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS & - adapter->if_cap_flags); - + be_if_cap_flags(adapter)); req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev)); netdev_for_each_mc_addr(ha, adapter->netdev) memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN); @@ -3103,8 +3102,26 @@ static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf, return NULL; } +static void be_copy_nic_desc(struct be_resources *res, + struct be_nic_res_desc *desc) +{ + res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count); + res->max_vlans = le16_to_cpu(desc->vlan_count); + res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count); + res->max_tx_qs = le16_to_cpu(desc->txq_count); + res->max_rss_qs = le16_to_cpu(desc->rssq_count); + res->max_rx_qs = le16_to_cpu(desc->rq_count); + res->max_evt_qs = le16_to_cpu(desc->eq_count); + /* Clear flags that driver is not interested in */ + res->if_cap_flags = le32_to_cpu(desc->cap_flags) & + BE_IF_CAP_FLAGS_WANT; + /* Need 1 RXQ as the default RXQ */ + if (res->max_rss_qs && res->max_rss_qs == res->max_rx_qs) + res->max_rss_qs -= 1; +} + /* Uses Mbox */ -int be_cmd_get_func_config(struct be_adapter *adapter) +int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res) { struct be_mcc_wrb *wrb; struct be_cmd_req_get_func_config *req; @@ -3152,18 +3169,7 @@ int be_cmd_get_func_config(struct be_adapter *adapter) } adapter->pf_number = desc->pf_num; - adapter->max_pmac_cnt = le16_to_cpu(desc->unicast_mac_count); - adapter->max_vlans = le16_to_cpu(desc->vlan_count); - adapter->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count); - adapter->max_tx_queues = le16_to_cpu(desc->txq_count); - adapter->max_rss_queues = le16_to_cpu(desc->rssq_count); - adapter->max_rx_queues = le16_to_cpu(desc->rq_count); - - adapter->max_event_queues = le16_to_cpu(desc->eq_count); - adapter->if_cap_flags = le32_to_cpu(desc->cap_flags); - - /* Clear flags that driver is not interested in */ - adapter->if_cap_flags &= BE_IF_CAP_FLAGS_WANT; + be_copy_nic_desc(res, desc); } err: mutex_unlock(&adapter->mbox_lock); @@ -3234,8 +3240,8 @@ err: } /* Uses sync mcc, if MCCQ is already created otherwise mbox */ -int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags, - u16 *txq_count, u8 domain) +int be_cmd_get_profile_config(struct be_adapter *adapter, + struct be_resources *res, u8 domain) { struct be_cmd_resp_get_profile_config *resp; struct be_pcie_res_desc *pcie; @@ -3264,15 +3270,12 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags, pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param, desc_count); if (pcie) - adapter->dev_num_vfs = le16_to_cpu(pcie->num_vfs); + res->max_vfs = le16_to_cpu(pcie->num_vfs); nic = be_get_nic_desc(resp->func_param, desc_count); - if (nic) { - if (cap_flags) - *cap_flags = le32_to_cpu(nic->cap_flags); - if (txq_count) - *txq_count = le16_to_cpu(nic->txq_count); - } 
+ if (nic) + be_copy_nic_desc(res, nic); + err: if (cmd.va) pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 8ebed17..52f3d4c 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -1980,10 +1980,10 @@ extern int lancer_initiate_dump(struct be_adapter *adapter); extern bool dump_present(struct be_adapter *adapter); extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter); extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name); -extern int be_cmd_get_func_config(struct be_adapter *adapter); -extern int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags, - u16 *txq_count, u8 domain); - +int be_cmd_get_func_config(struct be_adapter *adapter, + struct be_resources *res); +int be_cmd_get_profile_config(struct be_adapter *adapter, + struct be_resources *res, u8 domain); extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, u8 domain); extern int be_cmd_get_if_id(struct be_adapter *adapter, diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 6f040d8..d34ea98 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1001,7 +1001,7 @@ static int be_vid_config(struct be_adapter *adapter) if (adapter->promiscuous) return 0; - if (adapter->vlans_added > adapter->max_vlans) + if (adapter->vlans_added > be_max_vlans(adapter)) goto set_vlan_promisc; /* Construct VLAN Table to give to HW */ @@ -1042,7 +1042,7 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid) goto ret; adapter->vlan_tag[vid] = 1; - if (adapter->vlans_added <= (adapter->max_vlans + 1)) + if (adapter->vlans_added <= (be_max_vlans(adapter) + 1)) status = be_vid_config(adapter); if (!status) @@ -1068,7 +1068,7 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid) goto ret; adapter->vlan_tag[vid] = 0; - if (adapter->vlans_added <= adapter->max_vlans) + if (adapter->vlans_added <= be_max_vlans(adapter)) status = be_vid_config(adapter); if (!status) @@ -1101,7 +1101,7 @@ static void be_set_rx_mode(struct net_device *netdev) /* Enable multicast promisc if num configured exceeds what we support */ if (netdev->flags & IFF_ALLMULTI || - netdev_mc_count(netdev) > adapter->max_mcast_mac) { + netdev_mc_count(netdev) > be_max_mc(adapter)) { be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON); goto done; } @@ -1115,7 +1115,7 @@ static void be_set_rx_mode(struct net_device *netdev) adapter->pmac_id[i], 0); } - if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) { + if (netdev_uc_count(netdev) > be_max_uc(adapter)) { be_cmd_rx_filter(adapter, IFF_PROMISC, ON); adapter->promiscuous = true; goto done; @@ -1924,7 +1924,8 @@ static int be_evt_queues_create(struct be_adapter *adapter) struct be_eq_obj *eqo; int i, rc; - adapter->num_evt_qs = num_irqs(adapter); + adapter->num_evt_qs = min_t(u16, num_irqs(adapter), + adapter->cfg_num_qs); for_all_evt_queues(adapter, eqo, i) { eqo->adapter = adapter; @@ -2013,25 +2014,13 @@ static void be_tx_queues_destroy(struct be_adapter *adapter) } } -static int be_num_txqs_want(struct be_adapter *adapter) -{ - if ((!lancer_chip(adapter) && sriov_want(adapter)) || - be_is_mc(adapter) || - (!lancer_chip(adapter) && !be_physfn(adapter)) || - BE2_chip(adapter)) - return 1; - else - return adapter->max_tx_queues; -} - static int 
be_tx_cqs_create(struct be_adapter *adapter) { struct be_queue_info *cq, *eq; - int status; struct be_tx_obj *txo; - u8 i; + int status, i; - adapter->num_tx_qs = be_num_txqs_want(adapter); + adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter)); if (adapter->num_tx_qs != MAX_TX_QS) { rtnl_lock(); netif_set_real_num_tx_queues(adapter->netdev, @@ -2098,11 +2087,15 @@ static int be_rx_cqs_create(struct be_adapter *adapter) struct be_rx_obj *rxo; int rc, i; - /* We'll create as many RSS rings as there are irqs. - * But when there's only one irq there's no use creating RSS rings + /* We can create as many RSS rings as there are EQs. */ + adapter->num_rx_qs = adapter->num_evt_qs; + + /* We'll use RSS only if atleast 2 RSS rings are supported. + * When RSS is used, we'll need a default RXQ for non-IP traffic. */ - adapter->num_rx_qs = (num_irqs(adapter) > 1) ? - num_irqs(adapter) + 1 : 1; + if (adapter->num_rx_qs > 1) + adapter->num_rx_qs++; + if (adapter->num_rx_qs != MAX_RX_QS) { rtnl_lock(); netif_set_real_num_rx_queues(adapter->netdev, @@ -2375,35 +2368,20 @@ static void be_msix_disable(struct be_adapter *adapter) } } -static uint be_num_rss_want(struct be_adapter *adapter) -{ - u32 num = 0; - - if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) && - (lancer_chip(adapter) || - (!sriov_want(adapter) && be_physfn(adapter)))) { - num = adapter->max_rss_queues; - num = min_t(u32, num, (u32)netif_get_num_default_rss_queues()); - } - return num; -} - static int be_msix_enable(struct be_adapter *adapter) { -#define BE_MIN_MSIX_VECTORS 1 - int i, status, num_vec, num_roce_vec = 0; + int i, status, num_vec; struct device *dev = &adapter->pdev->dev; - /* If RSS queues are not used, need a vec for default RX Q */ - num_vec = min(be_num_rss_want(adapter), num_online_cpus()); - if (be_roce_supported(adapter)) { - num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS, - (num_online_cpus() + 1)); - num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS); - num_vec += num_roce_vec; - num_vec = min(num_vec, MAX_MSIX_VECTORS); - } - num_vec = max(num_vec, BE_MIN_MSIX_VECTORS); + /* If RoCE is supported, program the max number of NIC vectors that + * may be configured via set-channels, along with vectors needed for + * RoCe. Else, just program the number we'll use initially. 
+ */ + if (be_roce_supported(adapter)) + num_vec = min_t(int, 2 * be_max_eqs(adapter), + 2 * num_online_cpus()); + else + num_vec = adapter->cfg_num_qs; for (i = 0; i < num_vec; i++) adapter->msix_entries[i].entry = i; @@ -2411,7 +2389,7 @@ static int be_msix_enable(struct be_adapter *adapter) status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec); if (status == 0) { goto done; - } else if (status >= BE_MIN_MSIX_VECTORS) { + } else if (status >= MIN_MSIX_VECTORS) { num_vec = status; status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec); @@ -2420,23 +2398,22 @@ static int be_msix_enable(struct be_adapter *adapter) } dev_warn(dev, "MSIx enable failed\n"); + /* INTx is not supported in VFs, so fail probe if enable_msix fails */ if (!be_physfn(adapter)) return status; return 0; done: - if (be_roce_supported(adapter)) { - if (num_vec > num_roce_vec) { - adapter->num_msix_vec = num_vec - num_roce_vec; - adapter->num_msix_roce_vec = - num_vec - adapter->num_msix_vec; - } else { - adapter->num_msix_vec = num_vec; - adapter->num_msix_roce_vec = 0; - } - } else - adapter->num_msix_vec = num_vec; - dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec); + if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) { + adapter->num_msix_roce_vec = num_vec / 2; + dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n", + adapter->num_msix_roce_vec); + } + + adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec; + + dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n", + adapter->num_msix_vec); return 0; } @@ -2829,6 +2806,7 @@ static int be_clear(struct be_adapter *adapter) static int be_vfs_if_create(struct be_adapter *adapter) { + struct be_resources res = {0}; struct be_vf_cfg *vf_cfg; u32 cap_flags, en_flags, vf; int status; @@ -2837,9 +2815,12 @@ static int be_vfs_if_create(struct be_adapter *adapter) BE_IF_FLAGS_MULTICAST; for_all_vfs(adapter, vf_cfg, vf) { - if (!BE3_chip(adapter)) - be_cmd_get_profile_config(adapter, &cap_flags, - NULL, vf + 1); + if (!BE3_chip(adapter)) { + status = be_cmd_get_profile_config(adapter, &res, + vf + 1); + if (!status) + cap_flags = res.if_cap_flags; + } /* If a FW profile exists, then cap_flags are updated */ en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED | @@ -2885,10 +2866,10 @@ static int be_vf_setup(struct be_adapter *adapter) dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs); adapter->num_vfs = old_vfs; } else { - if (num_vfs > adapter->dev_num_vfs) + if (num_vfs > be_max_vfs(adapter)) dev_info(dev, "Device supports %d VFs and not %d\n", - adapter->dev_num_vfs, num_vfs); - adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs); + be_max_vfs(adapter), num_vfs); + adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter)); if (!adapter->num_vfs) return 0; } @@ -2967,6 +2948,51 @@ err: return status; } +/* On BE2/BE3 FW does not suggest the supported limits */ +static void BEx_get_resources(struct be_adapter *adapter, + struct be_resources *res) +{ + struct pci_dev *pdev = adapter->pdev; + bool use_sriov = false; + + if (BE3_chip(adapter) && be_physfn(adapter)) { + int max_vfs; + + max_vfs = pci_sriov_get_totalvfs(pdev); + res->max_vfs = max_vfs > 0 ? 
min(MAX_VFS, max_vfs) : 0; + use_sriov = res->max_vfs && num_vfs; + } + + if (be_physfn(adapter)) + res->max_uc_mac = BE_UC_PMAC_COUNT; + else + res->max_uc_mac = BE_VF_UC_PMAC_COUNT; + + if (adapter->function_mode & FLEX10_MODE) + res->max_vlans = BE_NUM_VLANS_SUPPORTED/8; + else + res->max_vlans = BE_NUM_VLANS_SUPPORTED; + res->max_mcast_mac = BE_MAX_MC; + + if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) || + !be_physfn(adapter)) + res->max_tx_qs = 1; + else + res->max_tx_qs = BE3_MAX_TX_QS; + + if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) && + !use_sriov && be_physfn(adapter)) + res->max_rss_qs = (adapter->be3_native) ? + BE3_MAX_RSS_QS : BE2_MAX_RSS_QS; + res->max_rx_qs = res->max_rss_qs + 1; + + res->max_evt_qs = be_physfn(adapter) ? MAX_EVT_QS : 1; + + res->if_cap_flags = BE_IF_CAP_FLAGS_WANT; + if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS)) + res->if_cap_flags &= ~BE_IF_FLAGS_RSS; +} + static void be_setup_init(struct be_adapter *adapter) { adapter->vlan_prio_bmap = 0xff; @@ -2980,76 +3006,56 @@ static void be_setup_init(struct be_adapter *adapter) adapter->cmd_privileges = MIN_PRIVILEGES; } -static void be_get_resources(struct be_adapter *adapter) +static int be_get_resources(struct be_adapter *adapter) { - u16 dev_num_vfs; - int pos, status; - bool profile_present = false; - u16 txq_count = 0; + struct device *dev = &adapter->pdev->dev; + struct be_resources res = {0}; + int status; - if (!BEx_chip(adapter)) { - status = be_cmd_get_func_config(adapter); - if (!status) - profile_present = true; - } else if (BE3_chip(adapter) && be_physfn(adapter)) { - be_cmd_get_profile_config(adapter, NULL, &txq_count, 0); + if (BEx_chip(adapter)) { + BEx_get_resources(adapter, &res); + adapter->res = res; } - if (profile_present) { - adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues, - MAX_TX_QS); - adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues, - BE3_MAX_RSS_QS); - adapter->max_event_queues = min_t(u16, - adapter->max_event_queues, - BE3_MAX_RSS_QS); - - if (adapter->max_rss_queues && - adapter->max_rss_queues == adapter->max_rx_queues) - adapter->max_rss_queues -= 1; - - if (adapter->max_event_queues < adapter->max_rss_queues) - adapter->max_rss_queues = adapter->max_event_queues; - - } else { - if (be_physfn(adapter)) - adapter->max_pmac_cnt = BE_UC_PMAC_COUNT; - else - adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT; + /* For BE3 only check if FW suggests a different max-txqs value */ + if (BE3_chip(adapter)) { + status = be_cmd_get_profile_config(adapter, &res, 0); + if (!status && res.max_tx_qs) + adapter->res.max_tx_qs = + min(adapter->res.max_tx_qs, res.max_tx_qs); + } - if (adapter->function_mode & FLEX10_MODE) - adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8; - else - adapter->max_vlans = BE_NUM_VLANS_SUPPORTED; + /* For Lancer, SH etc read per-function resource limits from FW. + * GET_FUNC_CONFIG returns per function guaranteed limits. + * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits + */ + if (!BEx_chip(adapter)) { + status = be_cmd_get_func_config(adapter, &res); + if (status) + return status; - adapter->max_mcast_mac = BE_MAX_MC; - adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS; - adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues, - MAX_TX_QS); - adapter->max_rss_queues = (adapter->be3_native) ? 
- BE3_MAX_RSS_QS : BE2_MAX_RSS_QS; - adapter->max_event_queues = BE3_MAX_RSS_QS; + /* If RoCE may be enabled stash away half the EQs for RoCE */ + if (be_roce_supported(adapter)) + res.max_evt_qs /= 2; + adapter->res = res; - adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED | - BE_IF_FLAGS_BROADCAST | - BE_IF_FLAGS_MULTICAST | - BE_IF_FLAGS_PASS_L3L4_ERRORS | - BE_IF_FLAGS_MCAST_PROMISCUOUS | - BE_IF_FLAGS_VLAN_PROMISCUOUS | - BE_IF_FLAGS_PROMISCUOUS; + if (be_physfn(adapter)) { + status = be_cmd_get_profile_config(adapter, &res, 0); + if (status) + return status; + adapter->res.max_vfs = res.max_vfs; + } - if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) - adapter->if_cap_flags |= BE_IF_FLAGS_RSS; + dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n", + be_max_txqs(adapter), be_max_rxqs(adapter), + be_max_rss(adapter), be_max_eqs(adapter), + be_max_vfs(adapter)); + dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n", + be_max_uc(adapter), be_max_mc(adapter), + be_max_vlans(adapter)); } - pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV); - if (pos) { - pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF, - &dev_num_vfs); - if (BE3_chip(adapter)) - dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS); - adapter->dev_num_vfs = dev_num_vfs; - } + return 0; } /* Routine to query per function resource limits */ @@ -3062,20 +3068,22 @@ static int be_get_config(struct be_adapter *adapter) &adapter->function_caps, &adapter->asic_rev); if (status) - goto err; + return status; - be_get_resources(adapter); + status = be_get_resources(adapter); + if (status) + return status; /* primary mac needs 1 pmac entry */ - adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1, - sizeof(u32), GFP_KERNEL); - if (!adapter->pmac_id) { - status = -ENOMEM; - goto err; - } + adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32), + GFP_KERNEL); + if (!adapter->pmac_id) + return -ENOMEM; -err: - return status; + /* Sanitize cfg_num_qs based on HW and platform limits */ + adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter)); + + return 0; } static int be_mac_setup(struct be_adapter *adapter) @@ -3151,8 +3159,8 @@ static int be_setup(struct be_adapter *adapter) BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS; if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) en_flags |= BE_IF_FLAGS_RSS; - en_flags = en_flags & adapter->if_cap_flags; - status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags, + en_flags = en_flags & be_if_cap_flags(adapter); + status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags, &adapter->if_handle, 0); if (status != 0) goto err; @@ -3178,8 +3186,8 @@ static int be_setup(struct be_adapter *adapter) be_cmd_set_flow_control(adapter, adapter->tx_fc, adapter->rx_fc); - if (be_physfn(adapter)) { - if (adapter->dev_num_vfs) + if (be_physfn(adapter) && num_vfs) { + if (be_max_vfs(adapter)) be_vf_setup(adapter); else dev_warn(dev, "device doesn't support SRIOV\n"); @@ -4045,6 +4053,7 @@ static int be_get_initial_config(struct be_adapter *adapter) level = be_get_fw_log_level(adapter); adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? 
NETIF_MSG_HW : 0; + adapter->cfg_num_qs = netif_get_num_default_rss_queues(); return 0; } diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c index 645e846..9cd5415 100644 --- a/drivers/net/ethernet/emulex/benet/be_roce.c +++ b/drivers/net/ethernet/emulex/benet/be_roce.c @@ -60,7 +60,7 @@ static void _be_roce_dev_add(struct be_adapter *adapter) */ num_vec = adapter->num_msix_vec + adapter->num_msix_roce_vec; dev_info.intr_mode = BE_INTERRUPT_MODE_MSIX; - dev_info.msix.num_vectors = min(num_vec, MAX_ROCE_MSIX_VECTORS); + dev_info.msix.num_vectors = min(num_vec, MAX_MSIX_VECTORS); /* provide start index of the vector, * so in case of linear usage, * it can use the base as starting point. diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h index 2765729..2cd1129 100644 --- a/drivers/net/ethernet/emulex/benet/be_roce.h +++ b/drivers/net/ethernet/emulex/benet/be_roce.h @@ -29,7 +29,7 @@ enum be_interrupt_mode { BE_INTERRUPT_MODE_MSI = 2, }; -#define MAX_ROCE_MSIX_VECTORS 16 +#define MAX_MSIX_VECTORS 32 struct be_dev_info { u8 __iomem *db; u64 unmapped_db; @@ -45,7 +45,7 @@ struct be_dev_info { struct { int num_vectors; int start_vector; - u32 vector_list[MAX_ROCE_MSIX_VECTORS]; + u32 vector_list[MAX_MSIX_VECTORS]; } msix; }; -- cgit v0.10.2 From bea5098848925351ed6fc84dc84c88b2765237f7 Mon Sep 17 00:00:00 2001 From: Sathya Perla Date: Tue, 27 Aug 2013 16:57:33 +0530 Subject: be2net: Fix be_cmd_if_create() to use MBOX if MCCQ is not created Currently the IF_CREATE FW cmd is issued only *after* MCCQ is created as it was coded to only use MCCQ. By fixing this, cmd_if_create() can be called before MCCQ is created and the same routine for VF provisioning can be called after. This allows for consolidating all the queue create routines by moving the be_cmd_if_create() call above all queue create calls in be_setup(). Signed-off-by: Sathya Perla Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 5458a43..70d5db0 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -633,6 +633,12 @@ static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb) return &wrb->payload.sgl[0]; } +static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, + unsigned long addr) +{ + wrb->tag0 = addr & 0xFFFFFFFF; + wrb->tag1 = upper_32_bits(addr); +} /* Don't touch the hdr after it's prepared */ /* mem will be NULL for embedded commands */ @@ -641,17 +647,12 @@ static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr, struct be_mcc_wrb *wrb, struct be_dma_mem *mem) { struct be_sge *sge; - unsigned long addr = (unsigned long)req_hdr; - u64 req_addr = addr; req_hdr->opcode = opcode; req_hdr->subsystem = subsystem; req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); req_hdr->version = 0; - - wrb->tag0 = req_addr & 0xFFFFFFFF; - wrb->tag1 = upper_32_bits(req_addr); - + fill_wrb_tags(wrb, (ulong) req_hdr); wrb->payload_length = cmd_len; if (mem) { wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) << @@ -705,6 +706,78 @@ static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter) return wrb; } +static bool use_mcc(struct be_adapter *adapter) +{ + return adapter->mcc_obj.q.created; +} + +/* Must be used only in process context */ +static int be_cmd_lock(struct be_adapter *adapter) +{ + if (use_mcc(adapter)) { + spin_lock_bh(&adapter->mcc_lock); + return 0; + } else { + return mutex_lock_interruptible(&adapter->mbox_lock); + } +} + +/* Must be used only in process context */ +static void be_cmd_unlock(struct be_adapter *adapter) +{ + if (use_mcc(adapter)) + spin_unlock_bh(&adapter->mcc_lock); + else + return mutex_unlock(&adapter->mbox_lock); +} + +static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter, + struct be_mcc_wrb *wrb) +{ + struct be_mcc_wrb *dest_wrb; + + if (use_mcc(adapter)) { + dest_wrb = wrb_from_mccq(adapter); + if (!dest_wrb) + return NULL; + } else { + dest_wrb = wrb_from_mbox(adapter); + } + + memcpy(dest_wrb, wrb, sizeof(*wrb)); + if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK)) + fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb)); + + return dest_wrb; +} + +/* Must be used only in process context */ +static int be_cmd_notify_wait(struct be_adapter *adapter, + struct be_mcc_wrb *wrb) +{ + struct be_mcc_wrb *dest_wrb; + int status; + + status = be_cmd_lock(adapter); + if (status) + return status; + + dest_wrb = be_cmd_copy(adapter, wrb); + if (!dest_wrb) + return -EBUSY; + + if (use_mcc(adapter)) + status = be_mcc_notify_wait(adapter); + else + status = be_mbox_notify_wait(adapter); + + if (!status) + memcpy(wrb, dest_wrb, sizeof(*wrb)); + + be_cmd_unlock(adapter); + return status; +} + /* Tell fw we're about to start firing cmds by writing a * special pattern across the wrb hdr; uses mbox */ @@ -1290,44 +1363,32 @@ err: } /* Create an rx filtering policy configuration on an i/f - * Uses MCCQ + * Will use MBOX only if MCCQ has not been created. 
*/ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, u32 *if_handle, u32 domain) { - struct be_mcc_wrb *wrb; + struct be_mcc_wrb wrb = {0}; struct be_cmd_req_if_create *req; int status; - spin_lock_bh(&adapter->mcc_lock); - - wrb = wrb_from_mccq(adapter); - if (!wrb) { - status = -EBUSY; - goto err; - } - req = embedded_payload(wrb); - + req = embedded_payload(&wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, - OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), wrb, NULL); + OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), &wrb, NULL); req->hdr.domain = domain; req->capability_flags = cpu_to_le32(cap_flags); req->enable_flags = cpu_to_le32(en_flags); - req->pmac_invalid = true; - status = be_mcc_notify_wait(adapter); + status = be_cmd_notify_wait(adapter, &wrb); if (!status) { - struct be_cmd_resp_if_create *resp = embedded_payload(wrb); + struct be_cmd_resp_if_create *resp = embedded_payload(&wrb); *if_handle = le32_to_cpu(resp->interface_id); /* Hack to retrieve VF's pmac-id on BE3 */ if (BE3_chip(adapter) && !be_physfn(adapter)) adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id); } - -err: - spin_unlock_bh(&adapter->mcc_lock); return status; } -- cgit v0.10.2 From 7707133ceb38f590729d0165099555928630af1c Mon Sep 17 00:00:00 2001 From: Sathya Perla Date: Tue, 27 Aug 2013 16:57:34 +0530 Subject: be2net: refactor be_setup() to consolidate queue creation routines 1) Move be_cmd_if_create() above queue create routines to allow TXQ creation (that requires if_handle) to be clubbed with TX-CQ creation. 2) Consolidate all queue create routines into be_setup_queues() Signed-off-by: Sathya Perla Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 70d5db0..52c9085 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1182,25 +1182,16 @@ int be_cmd_mccq_create(struct be_adapter *adapter, int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo) { - struct be_mcc_wrb *wrb; + struct be_mcc_wrb wrb = {0}; struct be_cmd_req_eth_tx_create *req; struct be_queue_info *txq = &txo->q; struct be_queue_info *cq = &txo->cq; struct be_dma_mem *q_mem = &txq->dma_mem; int status, ver = 0; - spin_lock_bh(&adapter->mcc_lock); - - wrb = wrb_from_mccq(adapter); - if (!wrb) { - status = -EBUSY; - goto err; - } - - req = embedded_payload(wrb); - + req = embedded_payload(&wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, - OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL); + OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL); if (lancer_chip(adapter)) { req->hdr.version = 1; @@ -1218,12 +1209,11 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo) req->cq_id = cpu_to_le16(cq->id); req->queue_size = be_encoded_q_len(txq->len); be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); - ver = req->hdr.version; - status = be_mcc_notify_wait(adapter); + status = be_cmd_notify_wait(adapter, &wrb); if (!status) { - struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb); + struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb); txq->id = le16_to_cpu(resp->cid); if (ver == 2) txo->db_offset = le32_to_cpu(resp->db_offset); @@ -1232,9 +1222,6 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo) txq->created = true; } -err: - spin_unlock_bh(&adapter->mcc_lock); - return status; } diff --git a/drivers/net/ethernet/emulex/benet/be_main.c 
b/drivers/net/ethernet/emulex/benet/be_main.c index d34ea98..b08459d 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -2014,7 +2014,7 @@ static void be_tx_queues_destroy(struct be_adapter *adapter) } } -static int be_tx_cqs_create(struct be_adapter *adapter) +static int be_tx_qs_create(struct be_adapter *adapter) { struct be_queue_info *cq, *eq; struct be_tx_obj *txo; @@ -2042,16 +2042,7 @@ static int be_tx_cqs_create(struct be_adapter *adapter) status = be_cmd_cq_create(adapter, cq, eq, false, 3); if (status) return status; - } - return 0; -} -static int be_tx_qs_create(struct be_adapter *adapter) -{ - struct be_tx_obj *txo; - int i, status; - - for_all_tx_queues(adapter, txo, i) { status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN, sizeof(struct be_eth_wrb)); if (status) @@ -2772,6 +2763,14 @@ done: adapter->num_vfs = 0; } +static void be_clear_queues(struct be_adapter *adapter) +{ + be_mcc_queues_destroy(adapter); + be_rx_cqs_destroy(adapter); + be_tx_queues_destroy(adapter); + be_evt_queues_destroy(adapter); +} + static int be_clear(struct be_adapter *adapter) { int i; @@ -2792,10 +2791,7 @@ static int be_clear(struct be_adapter *adapter) be_cmd_if_destroy(adapter, adapter->if_handle, 0); - be_mcc_queues_destroy(adapter); - be_rx_cqs_destroy(adapter); - be_tx_queues_destroy(adapter); - be_evt_queues_destroy(adapter); + be_clear_queues(adapter); kfree(adapter->pmac_id); adapter->pmac_id = NULL; @@ -3112,64 +3108,73 @@ static int be_mac_setup(struct be_adapter *adapter) return 0; } -static int be_setup(struct be_adapter *adapter) +static int be_setup_queues(struct be_adapter *adapter) { - struct device *dev = &adapter->pdev->dev; - u32 en_flags; - u32 tx_fc, rx_fc; int status; - be_setup_init(adapter); - - if (!lancer_chip(adapter)) - be_cmd_req_native_mode(adapter); - - status = be_get_config(adapter); + status = be_evt_queues_create(adapter); if (status) goto err; - status = be_msix_enable(adapter); + status = be_tx_qs_create(adapter); if (status) goto err; - status = be_evt_queues_create(adapter); + status = be_rx_cqs_create(adapter); if (status) goto err; - status = be_tx_cqs_create(adapter); + status = be_mcc_queues_create(adapter); if (status) goto err; - status = be_rx_cqs_create(adapter); + return 0; +err: + dev_err(&adapter->pdev->dev, "queue_setup failed\n"); + return status; +} + +static int be_setup(struct be_adapter *adapter) +{ + struct device *dev = &adapter->pdev->dev; + u32 tx_fc, rx_fc, en_flags; + int status; + + be_setup_init(adapter); + + if (!lancer_chip(adapter)) + be_cmd_req_native_mode(adapter); + + status = be_get_config(adapter); if (status) goto err; - status = be_mcc_queues_create(adapter); + status = be_msix_enable(adapter); if (status) goto err; - be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0); - /* In UMC mode FW does not return right privileges. - * Override with correct privilege equivalent to PF. 
- */ - if (be_is_mc(adapter)) - adapter->cmd_privileges = MAX_PRIVILEGES; - en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | - BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS; + BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS; if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) en_flags |= BE_IF_FLAGS_RSS; en_flags = en_flags & be_if_cap_flags(adapter); status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags, &adapter->if_handle, 0); - if (status != 0) + if (status) goto err; - status = be_mac_setup(adapter); + status = be_setup_queues(adapter); if (status) goto err; - status = be_tx_qs_create(adapter); + be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0); + /* In UMC mode FW does not return right privileges. + * Override with correct privilege equivalent to PF. + */ + if (be_is_mc(adapter)) + adapter->cmd_privileges = MAX_PRIVILEGES; + + status = be_mac_setup(adapter); if (status) goto err; -- cgit v0.10.2 From 68d7bdcb4c00ec799ad949f4b4c50985539710e2 Mon Sep 17 00:00:00 2001 From: Sathya Perla Date: Tue, 27 Aug 2013 16:57:35 +0530 Subject: be2net: implement ethtool set/get_channel hooks Support is provided only for combined channels. When SR-IOV is not enabled, BE3 supports upto 16 channels and Lancer-R/SH-R support upto 32 channels. Signed-off-by: Sathya Perla Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 8b41635..ace5050 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -99,16 +99,17 @@ static inline char *nic_name(struct pci_dev *pdev) #define MCC_Q_LEN 128 /* total size not to exceed 8 pages */ #define MCC_CQ_LEN 256 -#define BE3_MAX_RSS_QS 8 #define BE2_MAX_RSS_QS 4 -#define BE3_MAX_TX_QS 8 -#define MAX_RSS_QS BE3_MAX_RSS_QS -#define MAX_RX_QS (MAX_RSS_QS + 1) /* RSS qs + 1 def Rx */ -#define MAX_EVT_QS MAX_RSS_QS +#define BE3_MAX_RSS_QS 16 +#define BE3_MAX_TX_QS 16 +#define BE3_MAX_EVT_QS 16 + +#define MAX_RX_QS 32 +#define MAX_EVT_QS 32 +#define MAX_TX_QS 32 -#define MAX_TX_QS 8 #define MAX_ROCE_EQS 5 -#define MAX_MSIX_VECTORS (MAX_RSS_QS + MAX_ROCE_EQS) /* RSS qs + RoCE */ +#define MAX_MSIX_VECTORS 32 #define MIN_MSIX_VECTORS 1 #define BE_TX_BUDGET 256 #define BE_NAPI_WEIGHT 64 @@ -701,6 +702,8 @@ extern int be_load_fw(struct be_adapter *adapter, u8 *func); extern bool be_is_wol_supported(struct be_adapter *adapter); extern bool be_pause_supported(struct be_adapter *adapter); extern u32 be_get_fw_log_level(struct be_adapter *adapter); +int be_update_queues(struct be_adapter *adapter); +int be_poll(struct napi_struct *napi, int budget); /* * internal function to initialize-cleanup roce device. 
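The set/get_channel hooks added in the be_ethtool.c diff below are driven from user space through the standard ETHTOOL_GCHANNELS/ETHTOOL_SCHANNELS ioctls (the path exercised by "ethtool -l" / "ethtool -L"). A minimal userspace sketch of that path follows; it is purely illustrative and not part of the patch, and the interface name "eth0" and the requested count of 8 combined channels are assumptions:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	/* "eth0" and the count of 8 are assumptions for illustration only */
	struct ethtool_channels ch = { .cmd = ETHTOOL_SCHANNELS };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	/* Only the "combined" count is honoured by this driver */
	ch.combined_count = 8;

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ch;

	/* In the kernel this lands in be_set_channels(), which stores
	 * cfg_num_qs and calls be_update_queues() to recreate the queues.
	 */
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SCHANNELS");

	close(fd);
	return 0;
}

The equivalent command-line invocation would be "ethtool -L eth0 combined 8"; afterwards "ethtool -l eth0" reports the active and maximum combined counts via be_get_channels().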
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 4f8c941..b440a1f 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -1119,6 +1119,29 @@ static int be_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) return status; } +static void be_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + ch->combined_count = adapter->num_evt_qs; + ch->max_combined = be_max_qs(adapter); +} + +static int be_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + if (ch->rx_count || ch->tx_count || ch->other_count || + !ch->combined_count || ch->combined_count > be_max_qs(adapter)) + return -EINVAL; + + adapter->cfg_num_qs = ch->combined_count; + + return be_update_queues(adapter); +} + const struct ethtool_ops be_ethtool_ops = { .get_settings = be_get_settings, .get_drvinfo = be_get_drvinfo, @@ -1145,4 +1168,6 @@ const struct ethtool_ops be_ethtool_ops = { .self_test = be_self_test, .get_rxnfc = be_get_rxnfc, .set_rxnfc = be_set_rxnfc, + .get_channels = be_get_channels, + .set_channels = be_set_channels }; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index b08459d..50116f8 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1913,6 +1913,7 @@ static void be_evt_queues_destroy(struct be_adapter *adapter) if (eqo->q.created) { be_eq_clean(eqo); be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ); + netif_napi_del(&eqo->napi); } be_queue_free(adapter, &eqo->q); } @@ -1928,6 +1929,8 @@ static int be_evt_queues_create(struct be_adapter *adapter) adapter->cfg_num_qs); for_all_evt_queues(adapter, eqo, i) { + netif_napi_add(adapter->netdev, &eqo->napi, be_poll, + BE_NAPI_WEIGHT); eqo->adapter = adapter; eqo->tx_budget = BE_TX_BUDGET; eqo->idx = i; @@ -2021,12 +2024,6 @@ static int be_tx_qs_create(struct be_adapter *adapter) int status, i; adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter)); - if (adapter->num_tx_qs != MAX_TX_QS) { - rtnl_lock(); - netif_set_real_num_tx_queues(adapter->netdev, - adapter->num_tx_qs); - rtnl_unlock(); - } for_all_tx_queues(adapter, txo, i) { cq = &txo->cq; @@ -2087,13 +2084,6 @@ static int be_rx_cqs_create(struct be_adapter *adapter) if (adapter->num_rx_qs > 1) adapter->num_rx_qs++; - if (adapter->num_rx_qs != MAX_RX_QS) { - rtnl_lock(); - netif_set_real_num_rx_queues(adapter->netdev, - adapter->num_rx_qs); - rtnl_unlock(); - } - adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE; for_all_rx_queues(adapter, rxo, i) { rxo->adapter = adapter; @@ -2244,7 +2234,7 @@ static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo, return (work_done < budget); /* Done */ } -static int be_poll(struct napi_struct *napi, int budget) +int be_poll(struct napi_struct *napi, int budget) { struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi); struct be_adapter *adapter = eqo->adapter; @@ -2356,6 +2346,7 @@ static void be_msix_disable(struct be_adapter *adapter) if (msix_enabled(adapter)) { pci_disable_msix(adapter->pdev); adapter->num_msix_vec = 0; + adapter->num_msix_roce_vec = 0; } } @@ -2771,14 +2762,19 @@ static void be_clear_queues(struct be_adapter *adapter) be_evt_queues_destroy(adapter); } -static int be_clear(struct be_adapter *adapter) 
+static void be_cancel_worker(struct be_adapter *adapter) { - int i; - if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) { cancel_delayed_work_sync(&adapter->work); adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED; } +} + +static int be_clear(struct be_adapter *adapter) +{ + int i; + + be_cancel_worker(adapter); if (sriov_enabled(adapter)) be_vf_clear(adapter); @@ -2982,7 +2978,7 @@ static void BEx_get_resources(struct be_adapter *adapter, BE3_MAX_RSS_QS : BE2_MAX_RSS_QS; res->max_rx_qs = res->max_rss_qs + 1; - res->max_evt_qs = be_physfn(adapter) ? MAX_EVT_QS : 1; + res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1; res->if_cap_flags = BE_IF_CAP_FLAGS_WANT; if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS)) @@ -3108,8 +3104,15 @@ static int be_mac_setup(struct be_adapter *adapter) return 0; } +static void be_schedule_worker(struct be_adapter *adapter) +{ + schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); + adapter->flags |= BE_FLAGS_WORKER_SCHEDULED; +} + static int be_setup_queues(struct be_adapter *adapter) { + struct net_device *netdev = adapter->netdev; int status; status = be_evt_queues_create(adapter); @@ -3128,12 +3131,56 @@ static int be_setup_queues(struct be_adapter *adapter) if (status) goto err; + status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs); + if (status) + goto err; + + status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs); + if (status) + goto err; + return 0; err: dev_err(&adapter->pdev->dev, "queue_setup failed\n"); return status; } +int be_update_queues(struct be_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int status; + + if (netif_running(netdev)) + be_close(netdev); + + be_cancel_worker(adapter); + + /* If any vectors have been shared with RoCE we cannot re-program + * the MSIx table. 
+ */ + if (!adapter->num_msix_roce_vec) + be_msix_disable(adapter); + + be_clear_queues(adapter); + + if (!msix_enabled(adapter)) { + status = be_msix_enable(adapter); + if (status) + return status; + } + + status = be_setup_queues(adapter); + if (status) + return status; + + be_schedule_worker(adapter); + + if (netif_running(netdev)) + status = be_open(netdev); + + return status; +} + static int be_setup(struct be_adapter *adapter) { struct device *dev = &adapter->pdev->dev; @@ -3163,7 +3210,10 @@ static int be_setup(struct be_adapter *adapter) if (status) goto err; + /* Updating real_num_tx/rx_queues() requires rtnl_lock() */ + rtnl_lock(); status = be_setup_queues(adapter); + rtnl_unlock(); if (status) goto err; @@ -3202,8 +3252,7 @@ static int be_setup(struct be_adapter *adapter) if (!status && be_pause_supported(adapter)) adapter->phy.fc_autoneg = 1; - schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); - adapter->flags |= BE_FLAGS_WORKER_SCHEDULED; + be_schedule_worker(adapter); return 0; err: be_clear(adapter); @@ -3769,8 +3818,6 @@ static const struct net_device_ops be_netdev_ops = { static void be_netdev_init(struct net_device *netdev) { struct be_adapter *adapter = netdev_priv(netdev); - struct be_eq_obj *eqo; - int i; netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | @@ -3793,9 +3840,6 @@ static void be_netdev_init(struct net_device *netdev) netdev->netdev_ops = &be_netdev_ops; SET_ETHTOOL_OPS(netdev, &be_ethtool_ops); - - for_all_evt_queues(adapter, eqo, i) - netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT); } static void be_unmap_pci_bars(struct be_adapter *adapter) -- cgit v0.10.2 From b0eb57cb97e7837ebb746404c2c58c6f536f23fa Mon Sep 17 00:00:00 2001 From: Andy King Date: Fri, 23 Aug 2013 09:33:49 -0700 Subject: VMXNET3: Add support for virtual IOMMU This patch adds support for virtual IOMMU to the vmxnet3 module. We switch to DMA consistent mappings for anything we pass to the device. There were a few places where we already did this, but using pci_blah(); these have been fixed to use dma_blah(), along with all new occurrences where we've replaced kmalloc() and friends. Also fix two small bugs: 1) use after free of rq->buf_info in vmxnet3_rq_destroy() 2) a cpu_to_le32() that should have been a cpu_to_le64() Acked-by: George Zhang Acked-by: Aditya Sarwade Signed-off-by: Andy King Signed-off-by: David S. 
Miller diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 55a62ca..7e2788c 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -313,10 +313,10 @@ vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi, struct pci_dev *pdev) { if (tbi->map_type == VMXNET3_MAP_SINGLE) - pci_unmap_single(pdev, tbi->dma_addr, tbi->len, + dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len, PCI_DMA_TODEVICE); else if (tbi->map_type == VMXNET3_MAP_PAGE) - pci_unmap_page(pdev, tbi->dma_addr, tbi->len, + dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len, PCI_DMA_TODEVICE); else BUG_ON(tbi->map_type != VMXNET3_MAP_NONE); @@ -429,25 +429,29 @@ vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) { if (tq->tx_ring.base) { - pci_free_consistent(adapter->pdev, tq->tx_ring.size * - sizeof(struct Vmxnet3_TxDesc), - tq->tx_ring.base, tq->tx_ring.basePA); + dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size * + sizeof(struct Vmxnet3_TxDesc), + tq->tx_ring.base, tq->tx_ring.basePA); tq->tx_ring.base = NULL; } if (tq->data_ring.base) { - pci_free_consistent(adapter->pdev, tq->data_ring.size * - sizeof(struct Vmxnet3_TxDataDesc), - tq->data_ring.base, tq->data_ring.basePA); + dma_free_coherent(&adapter->pdev->dev, tq->data_ring.size * + sizeof(struct Vmxnet3_TxDataDesc), + tq->data_ring.base, tq->data_ring.basePA); tq->data_ring.base = NULL; } if (tq->comp_ring.base) { - pci_free_consistent(adapter->pdev, tq->comp_ring.size * - sizeof(struct Vmxnet3_TxCompDesc), - tq->comp_ring.base, tq->comp_ring.basePA); + dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size * + sizeof(struct Vmxnet3_TxCompDesc), + tq->comp_ring.base, tq->comp_ring.basePA); tq->comp_ring.base = NULL; } - kfree(tq->buf_info); - tq->buf_info = NULL; + if (tq->buf_info) { + dma_free_coherent(&adapter->pdev->dev, + tq->tx_ring.size * sizeof(tq->buf_info[0]), + tq->buf_info, tq->buf_info_pa); + tq->buf_info = NULL; + } } @@ -496,37 +500,38 @@ static int vmxnet3_tq_create(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) { + size_t sz; + BUG_ON(tq->tx_ring.base || tq->data_ring.base || tq->comp_ring.base || tq->buf_info); - tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size - * sizeof(struct Vmxnet3_TxDesc), - &tq->tx_ring.basePA); + tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev, + tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc), + &tq->tx_ring.basePA, GFP_KERNEL); if (!tq->tx_ring.base) { netdev_err(adapter->netdev, "failed to allocate tx ring\n"); goto err; } - tq->data_ring.base = pci_alloc_consistent(adapter->pdev, - tq->data_ring.size * - sizeof(struct Vmxnet3_TxDataDesc), - &tq->data_ring.basePA); + tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev, + tq->data_ring.size * sizeof(struct Vmxnet3_TxDataDesc), + &tq->data_ring.basePA, GFP_KERNEL); if (!tq->data_ring.base) { netdev_err(adapter->netdev, "failed to allocate data ring\n"); goto err; } - tq->comp_ring.base = pci_alloc_consistent(adapter->pdev, - tq->comp_ring.size * - sizeof(struct Vmxnet3_TxCompDesc), - &tq->comp_ring.basePA); + tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, + tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc), + &tq->comp_ring.basePA, GFP_KERNEL); if (!tq->comp_ring.base) { netdev_err(adapter->netdev, "failed to allocate tx comp ring\n"); goto err; } - tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]), - GFP_KERNEL); + sz = tq->tx_ring.size * sizeof(tq->buf_info[0]); + tq->buf_info 
= dma_zalloc_coherent(&adapter->pdev->dev, sz, + &tq->buf_info_pa, GFP_KERNEL); if (!tq->buf_info) goto err; @@ -578,7 +583,8 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, break; } - rbi->dma_addr = pci_map_single(adapter->pdev, + rbi->dma_addr = dma_map_single( + &adapter->pdev->dev, rbi->skb->data, rbi->len, PCI_DMA_FROMDEVICE); } else { @@ -595,7 +601,8 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, rq->stats.rx_buf_alloc_failure++; break; } - rbi->dma_addr = pci_map_page(adapter->pdev, + rbi->dma_addr = dma_map_page( + &adapter->pdev->dev, rbi->page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE); } else { @@ -705,7 +712,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, tbi = tq->buf_info + tq->tx_ring.next2fill; tbi->map_type = VMXNET3_MAP_SINGLE; - tbi->dma_addr = pci_map_single(adapter->pdev, + tbi->dma_addr = dma_map_single(&adapter->pdev->dev, skb->data + buf_offset, buf_size, PCI_DMA_TODEVICE); @@ -1221,7 +1228,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, goto rcd_done; } - pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len, + dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr, + rbi->len, PCI_DMA_FROMDEVICE); #ifdef VMXNET3_RSS @@ -1233,7 +1241,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, /* Immediate refill */ rbi->skb = new_skb; - rbi->dma_addr = pci_map_single(adapter->pdev, + rbi->dma_addr = dma_map_single(&adapter->pdev->dev, rbi->skb->data, rbi->len, PCI_DMA_FROMDEVICE); rxd->addr = cpu_to_le64(rbi->dma_addr); @@ -1267,7 +1275,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, } if (rcd->len) { - pci_unmap_page(adapter->pdev, + dma_unmap_page(&adapter->pdev->dev, rbi->dma_addr, rbi->len, PCI_DMA_FROMDEVICE); @@ -1276,7 +1284,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, /* Immediate refill */ rbi->page = new_page; - rbi->dma_addr = pci_map_page(adapter->pdev, rbi->page, + rbi->dma_addr = dma_map_page(&adapter->pdev->dev, + rbi->page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE); rxd->addr = cpu_to_le64(rbi->dma_addr); @@ -1352,13 +1361,13 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq, if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD && rq->buf_info[ring_idx][i].skb) { - pci_unmap_single(adapter->pdev, rxd->addr, + dma_unmap_single(&adapter->pdev->dev, rxd->addr, rxd->len, PCI_DMA_FROMDEVICE); dev_kfree_skb(rq->buf_info[ring_idx][i].skb); rq->buf_info[ring_idx][i].skb = NULL; } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY && rq->buf_info[ring_idx][i].page) { - pci_unmap_page(adapter->pdev, rxd->addr, + dma_unmap_page(&adapter->pdev->dev, rxd->addr, rxd->len, PCI_DMA_FROMDEVICE); put_page(rq->buf_info[ring_idx][i].page); rq->buf_info[ring_idx][i].page = NULL; @@ -1400,25 +1409,31 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, } - kfree(rq->buf_info[0]); - for (i = 0; i < 2; i++) { if (rq->rx_ring[i].base) { - pci_free_consistent(adapter->pdev, rq->rx_ring[i].size - * sizeof(struct Vmxnet3_RxDesc), - rq->rx_ring[i].base, - rq->rx_ring[i].basePA); + dma_free_coherent(&adapter->pdev->dev, + rq->rx_ring[i].size + * sizeof(struct Vmxnet3_RxDesc), + rq->rx_ring[i].base, + rq->rx_ring[i].basePA); rq->rx_ring[i].base = NULL; } rq->buf_info[i] = NULL; } if (rq->comp_ring.base) { - pci_free_consistent(adapter->pdev, rq->comp_ring.size * - sizeof(struct Vmxnet3_RxCompDesc), - rq->comp_ring.base, rq->comp_ring.basePA); + dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size + * sizeof(struct Vmxnet3_RxCompDesc), + rq->comp_ring.base, rq->comp_ring.basePA); rq->comp_ring.base = 
NULL; } + + if (rq->buf_info[0]) { + size_t sz = sizeof(struct vmxnet3_rx_buf_info) * + (rq->rx_ring[0].size + rq->rx_ring[1].size); + dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0], + rq->buf_info_pa); + } } @@ -1503,8 +1518,10 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) for (i = 0; i < 2; i++) { sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc); - rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz, - &rq->rx_ring[i].basePA); + rq->rx_ring[i].base = dma_alloc_coherent( + &adapter->pdev->dev, sz, + &rq->rx_ring[i].basePA, + GFP_KERNEL); if (!rq->rx_ring[i].base) { netdev_err(adapter->netdev, "failed to allocate rx ring %d\n", i); @@ -1513,8 +1530,9 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) } sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc); - rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz, - &rq->comp_ring.basePA); + rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz, + &rq->comp_ring.basePA, + GFP_KERNEL); if (!rq->comp_ring.base) { netdev_err(adapter->netdev, "failed to allocate rx comp ring\n"); goto err; @@ -1522,7 +1540,8 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size + rq->rx_ring[1].size); - bi = kzalloc(sz, GFP_KERNEL); + bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa, + GFP_KERNEL); if (!bi) goto err; @@ -2005,6 +2024,7 @@ vmxnet3_set_mc(struct net_device *netdev) struct Vmxnet3_RxFilterConf *rxConf = &adapter->shared->devRead.rxFilterConf; u8 *new_table = NULL; + dma_addr_t new_table_pa = 0; u32 new_mode = VMXNET3_RXM_UCAST; if (netdev->flags & IFF_PROMISC) { @@ -2028,8 +2048,12 @@ vmxnet3_set_mc(struct net_device *netdev) new_mode |= VMXNET3_RXM_MCAST; rxConf->mfTableLen = cpu_to_le16( netdev_mc_count(netdev) * ETH_ALEN); - rxConf->mfTablePA = cpu_to_le64(virt_to_phys( - new_table)); + new_table_pa = dma_map_single( + &adapter->pdev->dev, + new_table, + rxConf->mfTableLen, + PCI_DMA_TODEVICE); + rxConf->mfTablePA = cpu_to_le64(new_table_pa); } else { netdev_info(netdev, "failed to copy mcast list" ", setting ALL_MULTI\n"); @@ -2056,7 +2080,11 @@ vmxnet3_set_mc(struct net_device *netdev) VMXNET3_CMD_UPDATE_MAC_FILTERS); spin_unlock_irqrestore(&adapter->cmd_lock, flags); - kfree(new_table); + if (new_table) { + dma_unmap_single(&adapter->pdev->dev, new_table_pa, + rxConf->mfTableLen, PCI_DMA_TODEVICE); + kfree(new_table); + } } void @@ -2096,7 +2124,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1); devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1); - devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter)); + devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa); devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter)); /* set up feature flags */ @@ -2125,7 +2153,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA); tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA); tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA); - tqc->ddPA = cpu_to_le64(virt_to_phys(tq->buf_info)); + tqc->ddPA = cpu_to_le64(tq->buf_info_pa); tqc->txRingSize = cpu_to_le32(tq->tx_ring.size); tqc->dataRingSize = cpu_to_le32(tq->data_ring.size); tqc->compRingSize = cpu_to_le32(tq->comp_ring.size); @@ -2143,8 +2171,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) 
rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA); rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA); rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA); - rqc->ddPA = cpu_to_le64(virt_to_phys( - rq->buf_info)); + rqc->ddPA = cpu_to_le64(rq->buf_info_pa); rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size); rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size); rqc->compRingSize = cpu_to_le32(rq->comp_ring.size); @@ -2184,8 +2211,9 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) i, adapter->num_rx_queues); devRead->rssConfDesc.confVer = 1; - devRead->rssConfDesc.confLen = sizeof(*rssConf); - devRead->rssConfDesc.confPA = virt_to_phys(rssConf); + devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf)); + devRead->rssConfDesc.confPA = + cpu_to_le64(adapter->rss_conf_pa); } #endif /* VMXNET3_RSS */ @@ -2948,9 +2976,13 @@ vmxnet3_probe_device(struct pci_dev *pdev, adapter->pdev = pdev; spin_lock_init(&adapter->cmd_lock); - adapter->shared = pci_alloc_consistent(adapter->pdev, - sizeof(struct Vmxnet3_DriverShared), - &adapter->shared_pa); + adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter, + sizeof(struct vmxnet3_adapter), + PCI_DMA_TODEVICE); + adapter->shared = dma_alloc_coherent( + &adapter->pdev->dev, + sizeof(struct Vmxnet3_DriverShared), + &adapter->shared_pa, GFP_KERNEL); if (!adapter->shared) { dev_err(&pdev->dev, "Failed to allocate memory\n"); err = -ENOMEM; @@ -2963,8 +2995,9 @@ vmxnet3_probe_device(struct pci_dev *pdev, size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues; - adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size, - &adapter->queue_desc_pa); + adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size, + &adapter->queue_desc_pa, + GFP_KERNEL); if (!adapter->tqd_start) { dev_err(&pdev->dev, "Failed to allocate memory\n"); @@ -2974,7 +3007,10 @@ vmxnet3_probe_device(struct pci_dev *pdev, adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start + adapter->num_tx_queues); - adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL); + adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev, + sizeof(struct Vmxnet3_PMConf), + &adapter->pm_conf_pa, + GFP_KERNEL); if (adapter->pm_conf == NULL) { err = -ENOMEM; goto err_alloc_pm; @@ -2982,7 +3018,10 @@ vmxnet3_probe_device(struct pci_dev *pdev, #ifdef VMXNET3_RSS - adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL); + adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev, + sizeof(struct UPT1_RSSConf), + &adapter->rss_conf_pa, + GFP_KERNEL); if (adapter->rss_conf == NULL) { err = -ENOMEM; goto err_alloc_rss; @@ -3077,17 +3116,22 @@ err_ver: vmxnet3_free_pci_resources(adapter); err_alloc_pci: #ifdef VMXNET3_RSS - kfree(adapter->rss_conf); + dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf), + adapter->rss_conf, adapter->rss_conf_pa); err_alloc_rss: #endif - kfree(adapter->pm_conf); + dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf), + adapter->pm_conf, adapter->pm_conf_pa); err_alloc_pm: - pci_free_consistent(adapter->pdev, size, adapter->tqd_start, - adapter->queue_desc_pa); + dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start, + adapter->queue_desc_pa); err_alloc_queue_desc: - pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared), - adapter->shared, adapter->shared_pa); + dma_free_coherent(&adapter->pdev->dev, + sizeof(struct Vmxnet3_DriverShared), + 
adapter->shared, adapter->shared_pa); err_alloc_shared: + dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa, + sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE); pci_set_drvdata(pdev, NULL); free_netdev(netdev); return err; @@ -3118,16 +3162,21 @@ vmxnet3_remove_device(struct pci_dev *pdev) vmxnet3_free_intr_resources(adapter); vmxnet3_free_pci_resources(adapter); #ifdef VMXNET3_RSS - kfree(adapter->rss_conf); + dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf), + adapter->rss_conf, adapter->rss_conf_pa); #endif - kfree(adapter->pm_conf); + dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf), + adapter->pm_conf, adapter->pm_conf_pa); size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues; - pci_free_consistent(adapter->pdev, size, adapter->tqd_start, - adapter->queue_desc_pa); - pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared), - adapter->shared, adapter->shared_pa); + dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start, + adapter->queue_desc_pa); + dma_free_coherent(&adapter->pdev->dev, + sizeof(struct Vmxnet3_DriverShared), + adapter->shared, adapter->shared_pa); + dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa, + sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE); free_netdev(netdev); } @@ -3227,8 +3276,8 @@ skip_arp: adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1); adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof( *pmConf)); - adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys( - pmConf)); + adapter->shared->devRead.pmConfDesc.confPA = + cpu_to_le64(adapter->pm_conf_pa); spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, @@ -3265,8 +3314,8 @@ vmxnet3_resume(struct device *device) adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1); adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof( *pmConf)); - adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys( - pmConf)); + adapter->shared->devRead.pmConfDesc.confPA = + cpu_to_le64(adapter->pm_conf_pa); netif_device_attach(netdev); pci_set_power_state(pdev, PCI_D0); diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 3541814..a03f358 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -70,10 +70,10 @@ /* * Version numbers */ -#define VMXNET3_DRIVER_VERSION_STRING "1.1.30.0-k" +#define VMXNET3_DRIVER_VERSION_STRING "1.2.0.0-k" /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ -#define VMXNET3_DRIVER_VERSION_NUM 0x01011E00 +#define VMXNET3_DRIVER_VERSION_NUM 0x01020000 #if defined(CONFIG_PCI_MSI) /* RSS only makes sense if MSI-X is supported. 
*/ @@ -229,6 +229,7 @@ struct vmxnet3_tx_queue { spinlock_t tx_lock; struct vmxnet3_cmd_ring tx_ring; struct vmxnet3_tx_buf_info *buf_info; + dma_addr_t buf_info_pa; struct vmxnet3_tx_data_ring data_ring; struct vmxnet3_comp_ring comp_ring; struct Vmxnet3_TxQueueCtrl *shared; @@ -277,6 +278,7 @@ struct vmxnet3_rx_queue { u32 qid; /* rqID in RCD for buffer from 1st ring */ u32 qid2; /* rqID in RCD for buffer from 2nd ring */ struct vmxnet3_rx_buf_info *buf_info[2]; + dma_addr_t buf_info_pa; struct Vmxnet3_RxQueueCtrl *shared; struct vmxnet3_rq_driver_stats stats; } __attribute__((__aligned__(SMP_CACHE_BYTES))); @@ -353,6 +355,10 @@ struct vmxnet3_adapter { unsigned long state; /* VMXNET3_STATE_BIT_xxx */ int share_intr; + + dma_addr_t adapter_pa; + dma_addr_t pm_conf_pa; + dma_addr_t rss_conf_pa; }; #define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \ -- cgit v0.10.2 From e9d198403b4b8e30ac5ed0fe4bf05b273c8af0bd Mon Sep 17 00:00:00 2001 From: Rasesh Mody Date: Fri, 23 Aug 2013 14:31:30 -0700 Subject: bna: firmware update to 3.2.1.1 This patch updates the firmware to address the thermal notification issue Signed-off-by: Rasesh Mody Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h index c37f706..43405f6 100644 --- a/drivers/net/ethernet/brocade/bna/cna.h +++ b/drivers/net/ethernet/brocade/bna/cna.h @@ -37,8 +37,8 @@ extern char bfa_version[]; -#define CNA_FW_FILE_CT "ctfw-3.2.1.0.bin" -#define CNA_FW_FILE_CT2 "ct2fw-3.2.1.0.bin" +#define CNA_FW_FILE_CT "ctfw-3.2.1.1.bin" +#define CNA_FW_FILE_CT2 "ct2fw-3.2.1.1.bin" #define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */ #pragma pack(1) -- cgit v0.10.2 From d7064f4c192c19958c72b13cd9556815bedc0432 Mon Sep 17 00:00:00 2001 From: Jeff Kirsher Date: Fri, 23 Aug 2013 17:19:23 -0700 Subject: Documentation/networking/: Update Intel wired LAN driver documentation Updates the documentation to the Intel wired LAN drivers. Signed-off-by: Jeff Kirsher Tested-by: Aaron Brown Tested-by: Phil Schmitt Signed-off-by: David S. Miller diff --git a/Documentation/networking/e100.txt b/Documentation/networking/e100.txt index fcb6c71c..13a3212 100644 --- a/Documentation/networking/e100.txt +++ b/Documentation/networking/e100.txt @@ -1,7 +1,7 @@ Linux* Base Driver for the Intel(R) PRO/100 Family of Adapters ============================================================== -November 15, 2005 +March 15, 2011 Contents ======== @@ -122,7 +122,7 @@ Additional Configurations NOTE: This setting is not saved across reboots. - Ethtool + ethtool ------- The driver utilizes the ethtool interface for driver configuration and diff --git a/Documentation/networking/e1000.txt b/Documentation/networking/e1000.txt index 71ca958..437b209 100644 --- a/Documentation/networking/e1000.txt +++ b/Documentation/networking/e1000.txt @@ -1,8 +1,8 @@ -Linux* Base Driver for the Intel(R) PRO/1000 Family of Adapters -=============================================================== +Linux* Base Driver for Intel(R) Ethernet Network Connection +=========================================================== Intel Gigabit Linux driver. -Copyright(c) 1999 - 2010 Intel Corporation. +Copyright(c) 1999 - 2013 Intel Corporation. Contents ======== @@ -420,15 +420,15 @@ Additional Configurations - The maximum MTU setting for Jumbo Frames is 16110. This value coincides with the maximum Jumbo Frames size of 16128. - - Using Jumbo Frames at 10 or 100 Mbps may result in poor performance or - loss of link. 
+ - Using Jumbo frames at 10 or 100 Mbps is not supported and may result in + poor performance or loss of link. - Adapters based on the Intel(R) 82542 and 82573V/E controller do not support Jumbo Frames. These correspond to the following product names: Intel(R) PRO/1000 Gigabit Server Adapter Intel(R) PRO/1000 PM Network Connection - Ethtool + ethtool ------- The driver utilizes the ethtool interface for driver configuration and diagnostics, as well as displaying statistical information. The ethtool diff --git a/Documentation/networking/e1000e.txt b/Documentation/networking/e1000e.txt index 97b5ba9..ad2d9f3 100644 --- a/Documentation/networking/e1000e.txt +++ b/Documentation/networking/e1000e.txt @@ -1,8 +1,8 @@ -Linux* Driver for Intel(R) Network Connection -============================================= +Linux* Driver for Intel(R) Ethernet Network Connection +====================================================== Intel Gigabit Linux driver. -Copyright(c) 1999 - 2010 Intel Corporation. +Copyright(c) 1999 - 2013 Intel Corporation. Contents ======== @@ -259,13 +259,16 @@ Additional Configurations - The maximum MTU setting for Jumbo Frames is 9216. This value coincides with the maximum Jumbo Frames size of 9234 bytes. - - Using Jumbo Frames at 10 or 100 Mbps is not supported and may result in + - Using Jumbo frames at 10 or 100 Mbps is not supported and may result in poor performance or loss of link. - Some adapters limit Jumbo Frames sized packets to a maximum of 4096 bytes and some adapters do not support Jumbo Frames. - Ethtool + - Jumbo Frames cannot be configured on an 82579-based Network device, if + MACSec is enabled on the system. + + ethtool ------- The driver utilizes the ethtool interface for driver configuration and diagnostics, as well as displaying statistical information. We @@ -273,6 +276,9 @@ Additional Configurations http://ftp.kernel.org/pub/software/network/ethtool/ + NOTE: When validating enable/disable tests on some parts (82578, for example) + you need to add a few seconds between tests when working with ethtool. + Speed and Duplex ---------------- Speed and Duplex are configured through the ethtool* utility. For diff --git a/Documentation/networking/igb.txt b/Documentation/networking/igb.txt index 9a2a037..4ebbd65 100644 --- a/Documentation/networking/igb.txt +++ b/Documentation/networking/igb.txt @@ -1,8 +1,8 @@ -Linux* Base Driver for Intel(R) Network Connection -================================================== +Linux* Base Driver for Intel(R) Ethernet Network Connection +=========================================================== Intel Gigabit Linux driver. -Copyright(c) 1999 - 2010 Intel Corporation. +Copyright(c) 1999 - 2013 Intel Corporation. Contents ======== @@ -36,6 +36,53 @@ Default Value: 0 This parameter adds support for SR-IOV. It causes the driver to spawn up to max_vfs worth of virtual function. +QueuePairs +---------- +Valid Range: 0-1 +Default Value: 1 (TX and RX will be paired onto one interrupt vector) + +If set to 0, when MSI-X is enabled, the TX and RX will attempt to occupy +separate vectors. + +This option can be overridden to 1 if there are not sufficient interrupts +available. This can occur if any combination of RSS, VMDQ, and max_vfs +results in more than 4 queues being used. + +Node +---- +Valid Range: 0-n +Default Value: -1 (off) + + 0 - n: where n is the number of the NUMA node that should be used to + allocate memory for this adapter port. 
+ -1: uses the driver default of allocating memory on whichever processor is + running insmod/modprobe. + + The Node parameter will allow you to pick which NUMA node you want to have + the adapter allocate memory from. All driver structures, in-memory queues, + and receive buffers will be allocated on the node specified. This parameter + is only useful when interrupt affinity is specified, otherwise some portion + of the time the interrupt could run on a different core than the memory is + allocated on, causing slower memory access and impacting throughput, CPU, or + both. + +EEE +--- +Valid Range: 0-1 +Default Value: 1 (enabled) + + A link between two EEE-compliant devices will result in periodic bursts of + data followed by long periods where in the link is in an idle state. This Low + Power Idle (LPI) state is supported in both 1Gbps and 100Mbps link speeds. + NOTE: EEE support requires autonegotiation. + +DMAC +---- +Valid Range: 0-1 +Default Value: 1 (enabled) + Enables or disables DMA Coalescing feature. + + Additional Configurations ========================= @@ -55,10 +102,10 @@ Additional Configurations - The maximum MTU setting for Jumbo Frames is 9216. This value coincides with the maximum Jumbo Frames size of 9234 bytes. - - Using Jumbo Frames at 10 or 100 Mbps may result in poor performance or - loss of link. + - Using Jumbo frames at 10 or 100 Mbps is not supported and may result in + poor performance or loss of link. - Ethtool + ethtool ------- The driver utilizes the ethtool interface for driver configuration and diagnostics, as well as displaying statistical information. The latest @@ -106,6 +153,14 @@ Additional Configurations Where n=the VF that attempted to do the spoofing. + Setting MAC Address, VLAN and Rate Limit Using IProute2 Tool + ------------------------------------------------------------ + You can set a MAC address of a Virtual Function (VF), a default VLAN and the + rate limit using the IProute2 tool. Download the latest version of the + iproute2 tool from Sourceforge if your version does not have all the + features you require. + + Support ======= diff --git a/Documentation/networking/igbvf.txt b/Documentation/networking/igbvf.txt index cbfe4ee..40db17a 100644 --- a/Documentation/networking/igbvf.txt +++ b/Documentation/networking/igbvf.txt @@ -1,8 +1,8 @@ -Linux* Base Driver for Intel(R) Network Connection -================================================== +Linux* Base Driver for Intel(R) Ethernet Network Connection +=========================================================== Intel Gigabit Linux driver. -Copyright(c) 1999 - 2010 Intel Corporation. +Copyright(c) 1999 - 2013 Intel Corporation. Contents ======== @@ -55,7 +55,7 @@ networking link on the left to search for your adapter: Additional Configurations ========================= - Ethtool + ethtool ------- The driver utilizes the ethtool interface for driver configuration and diagnostics, as well as displaying statistical information. 
The ethtool diff --git a/Documentation/networking/ixgb.txt b/Documentation/networking/ixgb.txt index d75a1f9..1e0c045 100644 --- a/Documentation/networking/ixgb.txt +++ b/Documentation/networking/ixgb.txt @@ -1,7 +1,7 @@ -Linux Base Driver for 10 Gigabit Intel(R) Network Connection -============================================================= +Linux Base Driver for 10 Gigabit Intel(R) Ethernet Network Connection +===================================================================== -October 9, 2007 +March 14, 2011 Contents @@ -274,9 +274,9 @@ Additional Configurations ------------------------------------------------- Configuring a network driver to load properly when the system is started is distribution dependent. Typically, the configuration process involves adding - an alias line to files in /etc/modprobe.d/ as well as editing other system - startup scripts and/or configuration files. Many popular Linux distributions - ship with tools to make these changes for you. To learn the proper way to + an alias line to /etc/modprobe.conf as well as editing other system startup + scripts and/or configuration files. Many popular Linux distributions ship + with tools to make these changes for you. To learn the proper way to configure a network device for your system, refer to your distribution documentation. If during this process you are asked for the driver or module name, the name for the Linux Base Driver for the Intel 10GbE Family of @@ -306,7 +306,7 @@ Additional Configurations with the maximum Jumbo Frames size of 16128. - Ethtool + ethtool ------- The driver utilizes the ethtool interface for driver configuration and diagnostics, as well as displaying statistical information. The ethtool diff --git a/Documentation/networking/ixgbe.txt b/Documentation/networking/ixgbe.txt index af77ed3..96ccceb 100644 --- a/Documentation/networking/ixgbe.txt +++ b/Documentation/networking/ixgbe.txt @@ -1,8 +1,9 @@ -Linux Base Driver for 10 Gigabit PCI Express Intel(R) Network Connection -======================================================================== +Linux* Base Driver for the Intel(R) Ethernet 10 Gigabit PCI Express Family of +Adapters +============================================================================= -Intel Gigabit Linux driver. -Copyright(c) 1999 - 2010 Intel Corporation. +Intel 10 Gigabit Linux driver. +Copyright(c) 1999 - 2013 Intel Corporation. Contents ======== @@ -16,8 +17,8 @@ Contents Identifying Your Adapter ======================== -The driver in this release is compatible with 82598 and 82599-based Intel -Network Connections. +The driver in this release is compatible with 82598, 82599 and X540-based +Intel Network Connections. For more information on how to identify your adapter, go to the Adapter & Driver ID Guide at: @@ -72,7 +73,7 @@ cables that comply with SFF-8431 v4.1 and SFF-8472 v10.4 specifications. Laser turns off for SFP+ when ifconfig down ------------------------------------------- "ifconfig down" turns off the laser for 82599-based SFP+ fiber adapters. -"ifconfig up" turns on the later. +"ifconfig up" turns on the laser. 82598-BASED ADAPTERS @@ -118,6 +119,93 @@ NOTE: For 82598 backplane cards entering 1 gig mode, flow control default behavior is changed to off. Flow control in 1 gig mode on these devices can lead to Tx hangs. +Intel(R) Ethernet Flow Director +------------------------------- +Supports advanced filters that direct receive packets by their flows to +different queues. Enables tight control on routing a flow in the platform. 
+Matches flows and CPU cores for flow affinity. Supports multiple parameters +for flexible flow classification and load balancing. + +Flow director is enabled only if the kernel is multiple TX queue capable. + +An included script (set_irq_affinity.sh) automates setting the IRQ to CPU +affinity. + +You can verify that the driver is using Flow Director by looking at the counter +in ethtool: fdir_miss and fdir_match. + +Other ethtool Commands: +To enable Flow Director + ethtool -K ethX ntuple on +To add a filter + Use -U switch. e.g., ethtool -U ethX flow-type tcp4 src-ip 0x178000a + action 1 +To see the list of filters currently present: + ethtool -u ethX + +Perfect Filter: Perfect filter is an interface to load the filter table that +funnels all flow into queue_0 unless an alternative queue is specified using +"action". In that case, any flow that matches the filter criteria will be +directed to the appropriate queue. + +If the queue is defined as -1, filter will drop matching packets. + +To account for filter matches and misses, there are two stats in ethtool: +fdir_match and fdir_miss. In addition, rx_queue_N_packets shows the number of +packets processed by the Nth queue. + +NOTE: Receive Packet Steering (RPS) and Receive Flow Steering (RFS) are not +compatible with Flow Director. IF Flow Director is enabled, these will be +disabled. + +The following three parameters impact Flow Director. + +FdirMode +-------- +Valid Range: 0-2 (0=off, 1=ATR, 2=Perfect filter mode) +Default Value: 1 + + Flow Director filtering modes. + +FdirPballoc +----------- +Valid Range: 0-2 (0=64k, 1=128k, 2=256k) +Default Value: 0 + + Flow Director allocated packet buffer size. + +AtrSampleRate +-------------- +Valid Range: 1-100 +Default Value: 20 + + Software ATR Tx packet sample rate. For example, when set to 20, every 20th + packet, looks to see if the packet will create a new flow. + +Node +---- +Valid Range: 0-n +Default Value: 1 (off) + + 0 - n: where n is the number of NUMA nodes (i.e. 0 - 3) currently online in + your system + 1: turns this option off + + The Node parameter will allow you to pick which NUMA node you want to have + the adapter allocate memory on. + +max_vfs +------- +Valid Range: 1-63 +Default Value: 0 + + If the value is greater than 0 it will also force the VMDq parameter to be 1 + or more. + + This parameter adds support for SR-IOV. It causes the driver to spawn up to + max_vfs worth of virtual function. + + Additional Configurations ========================= @@ -221,9 +309,10 @@ http://www.redhat.com/promo/summit/2008/downloads/pdf/Thursday/Mark_Wagner.pdf Known Issues ============ - Enabling SR-IOV in a 32-bit Microsoft* Windows* Server 2008 Guest OS using - Intel (R) 82576-based GbE or Intel (R) 82599-based 10GbE controller under KVM - ----------------------------------------------------------------------------- + Enabling SR-IOV in a 32-bit or 64-bit Microsoft* Windows* Server 2008/R2 + Guest OS using Intel (R) 82576-based GbE or Intel (R) 82599-based 10GbE + controller under KVM + ------------------------------------------------------------------------ KVM Hypervisor/VMM supports direct assignment of a PCIe device to a VM. This includes traditional PCIe devices, as well as SR-IOV-capable devices using Intel 82576-based and 82599-based controllers. 
diff --git a/Documentation/networking/ixgbevf.txt b/Documentation/networking/ixgbevf.txt index 5a91a41..53d8d2a 100644 --- a/Documentation/networking/ixgbevf.txt +++ b/Documentation/networking/ixgbevf.txt @@ -1,8 +1,8 @@ -Linux* Base Driver for Intel(R) Network Connection -================================================== +Linux* Base Driver for Intel(R) Ethernet Network Connection +=========================================================== Intel Gigabit Linux driver. -Copyright(c) 1999 - 2010 Intel Corporation. +Copyright(c) 1999 - 2013 Intel Corporation. Contents ======== -- cgit v0.10.2 From 35fdb94b453bc69b7bc74b717f1e03d41d4bcdba Mon Sep 17 00:00:00 2001 From: Steven La Date: Fri, 23 Aug 2013 17:19:37 -0700 Subject: e1000e: balance semaphore put/get for 82573 Steven (cc-ed) noticed an imbalance in semaphore put/get for 82573-based NICs. Don't we need something like the following (untested) patch? Signed-off-by: Steven La Acked-by: Arthur Kepner Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index 104fcec..8fed74e 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -1011,6 +1011,11 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) /* Must release MDIO ownership and mutex after MAC reset. */ switch (hw->mac.type) { + case e1000_82573: + /* Release mutex only if the hw semaphore is acquired */ + if (!ret_val) + e1000_put_hw_semaphore_82573(hw); + break; case e1000_82574: case e1000_82583: /* Release mutex only if the hw semaphore is acquired */ -- cgit v0.10.2 From 5828cd9a68873df1340b420371c02c47647878fb Mon Sep 17 00:00:00 2001 From: Andy Zhou Date: Tue, 27 Aug 2013 13:02:21 -0700 Subject: openvswitch: optimize flow compare and mask functions Make sure the sw_flow_key structure and valid mask boundaries are always machine word aligned. Optimize the flow compare and mask operations using machine word size operations. This patch improves throughput on average by 15% when CPU is the bottleneck of forwarding packets. This patch is inspired by ideas and code from a patch submitted by Peter Klausler titled "replace memcmp() with specialized comparator". However, The original patch only optimizes for architectures support unaligned machine word access. This patch optimizes for all architectures. 
Signed-off-by: Andy Zhou Signed-off-by: Jesse Gross diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 80bcb96..ad1aeeb 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -54,8 +54,8 @@ static void update_range__(struct sw_flow_match *match, size_t offset, size_t size, bool is_mask) { struct sw_flow_key_range *range = NULL; - size_t start = offset; - size_t end = offset + size; + size_t start = rounddown(offset, sizeof(long)); + size_t end = roundup(offset + size, sizeof(long)); if (!is_mask) range = &match->range; @@ -102,6 +102,11 @@ static void update_range__(struct sw_flow_match *match, } \ } while (0) +static u16 range_n_bytes(const struct sw_flow_key_range *range) +{ + return range->end - range->start; +} + void ovs_match_init(struct sw_flow_match *match, struct sw_flow_key *key, struct sw_flow_mask *mask) @@ -370,16 +375,17 @@ static bool icmp6hdr_ok(struct sk_buff *skb) void ovs_flow_key_mask(struct sw_flow_key *dst, const struct sw_flow_key *src, const struct sw_flow_mask *mask) { - u8 *m = (u8 *)&mask->key + mask->range.start; - u8 *s = (u8 *)src + mask->range.start; - u8 *d = (u8 *)dst + mask->range.start; + const long *m = (long *)((u8 *)&mask->key + mask->range.start); + const long *s = (long *)((u8 *)src + mask->range.start); + long *d = (long *)((u8 *)dst + mask->range.start); int i; - memset(dst, 0, sizeof(*dst)); - for (i = 0; i < ovs_sw_flow_mask_size_roundup(mask); i++) { - *d = *s & *m; - d++, s++, m++; - } + /* The memory outside of the 'mask->range' are not set since + * further operations on 'dst' only uses contents within + * 'mask->range'. + */ + for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long)) + *d++ = *s++ & *m++; } #define TCP_FLAGS_OFFSET 13 @@ -1000,8 +1006,13 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key) static u32 ovs_flow_hash(const struct sw_flow_key *key, int key_start, int key_end) { - return jhash2((u32 *)((u8 *)key + key_start), - DIV_ROUND_UP(key_end - key_start, sizeof(u32)), 0); + u32 *hash_key = (u32 *)((u8 *)key + key_start); + int hash_u32s = (key_end - key_start) >> 2; + + /* Make sure number of hash bytes are multiple of u32. 
*/ + BUILD_BUG_ON(sizeof(long) % sizeof(u32)); + + return jhash2(hash_key, hash_u32s, 0); } static int flow_key_start(const struct sw_flow_key *key) @@ -1009,17 +1020,25 @@ static int flow_key_start(const struct sw_flow_key *key) if (key->tun_key.ipv4_dst) return 0; else - return offsetof(struct sw_flow_key, phy); + return rounddown(offsetof(struct sw_flow_key, phy), + sizeof(long)); } static bool __cmp_key(const struct sw_flow_key *key1, const struct sw_flow_key *key2, int key_start, int key_end) { - return !memcmp((u8 *)key1 + key_start, - (u8 *)key2 + key_start, (key_end - key_start)); + const long *cp1 = (long *)((u8 *)key1 + key_start); + const long *cp2 = (long *)((u8 *)key2 + key_start); + long diffs = 0; + int i; + + for (i = key_start; i < key_end; i += sizeof(long)) + diffs |= *cp1++ ^ *cp2++; + + return diffs == 0; } -static bool __flow_cmp_key(const struct sw_flow *flow, +static bool __flow_cmp_masked_key(const struct sw_flow *flow, const struct sw_flow_key *key, int key_start, int key_end) { return __cmp_key(&flow->key, key, key_start, key_end); @@ -1056,7 +1075,7 @@ struct sw_flow *ovs_flow_lookup_unmasked_key(struct flow_table *table, } static struct sw_flow *ovs_masked_flow_lookup(struct flow_table *table, - const struct sw_flow_key *flow_key, + const struct sw_flow_key *unmasked, struct sw_flow_mask *mask) { struct sw_flow *flow; @@ -1066,12 +1085,13 @@ static struct sw_flow *ovs_masked_flow_lookup(struct flow_table *table, u32 hash; struct sw_flow_key masked_key; - ovs_flow_key_mask(&masked_key, flow_key, mask); + ovs_flow_key_mask(&masked_key, unmasked, mask); hash = ovs_flow_hash(&masked_key, key_start, key_end); head = find_bucket(table, hash); hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) { if (flow->mask == mask && - __flow_cmp_key(flow, &masked_key, key_start, key_end)) + __flow_cmp_masked_key(flow, &masked_key, + key_start, key_end)) return flow; } return NULL; @@ -1961,6 +1981,8 @@ nla_put_failure: * Returns zero if successful or a negative error code. 
*/ int ovs_flow_init(void) { + BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long)); + flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0, 0, NULL); if (flow_cache == NULL) @@ -2016,7 +2038,7 @@ static bool ovs_sw_flow_mask_equal(const struct sw_flow_mask *a, return (a->range.end == b->range.end) && (a->range.start == b->range.start) - && (memcmp(a_, b_, ovs_sw_flow_mask_actual_size(a)) == 0); + && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0); } struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *tbl, @@ -2053,5 +2075,5 @@ static void ovs_sw_flow_mask_set(struct sw_flow_mask *mask, u8 *m = (u8 *)&mask->key + range->start; mask->range = *range; - memset(m, val, ovs_sw_flow_mask_size_roundup(mask)); + memset(m, val, range_n_bytes(range)); } diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h index e793051..b65f885 100644 --- a/net/openvswitch/flow.h +++ b/net/openvswitch/flow.h @@ -125,7 +125,7 @@ struct sw_flow_key { } nd; } ipv6; }; -}; +} __aligned(__alignof__(long)); struct sw_flow { struct rcu_head rcu; @@ -149,11 +149,6 @@ struct sw_flow_key_range { size_t end; }; -static inline u16 ovs_sw_flow_key_range_actual_size(const struct sw_flow_key_range *range) -{ - return range->end - range->start; -} - struct sw_flow_match { struct sw_flow_key *key; struct sw_flow_key_range range; @@ -253,18 +248,6 @@ struct sw_flow_mask { struct sw_flow_key key; }; -static inline u16 -ovs_sw_flow_mask_actual_size(const struct sw_flow_mask *mask) -{ - return ovs_sw_flow_key_range_actual_size(&mask->range); -} - -static inline u16 -ovs_sw_flow_mask_size_roundup(const struct sw_flow_mask *mask) -{ - return roundup(ovs_sw_flow_mask_actual_size(mask), sizeof(u32)); -} - struct sw_flow_mask *ovs_sw_flow_mask_alloc(void); void ovs_sw_flow_mask_add_ref(struct sw_flow_mask *); void ovs_sw_flow_mask_del_ref(struct sw_flow_mask *, bool deferred); -- cgit v0.10.2 From 45a3fd55acc8989ff93d469e57b123cd3702a948 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Wed, 28 Nov 2012 04:38:14 +0000 Subject: sfc: Move MTD operations into efx_nic_type Merge the per-NIC-type MTD probe selection and struct efx_mtd_ops into struct efx_nic_type. Move the implementations into the appropriate source files. Several NVRAM functions are now only called from MTD operations which are now implemented in the same file (falcon.c or mcdi.c). There is no need for them to be extern, or to be defined at all if CONFIG_SFC_MTD is not enabled, so move them into the #ifdef CONFIG_SFC_MTD sections in those files. Most of the SPI-related definitions are also only used in falcon.c, so move them there. Put the remainder of spi.h into nic.h (which previously included it). 
Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 9e35738..3bbc047 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -204,7 +204,12 @@ extern void efx_port_dummy_op_void(struct efx_nic *efx); /* MTD */ #ifdef CONFIG_SFC_MTD -extern int efx_mtd_probe(struct efx_nic *efx); +extern int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts, + size_t n_parts, size_t sizeof_part); +static inline int efx_mtd_probe(struct efx_nic *efx) +{ + return efx->type->mtd_probe(efx); +} extern void efx_mtd_rename(struct efx_nic *efx); extern void efx_mtd_remove(struct efx_nic *efx); #else diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index 304131b..3c99b6c 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -19,7 +19,6 @@ #include "net_driver.h" #include "bitfield.h" #include "efx.h" -#include "spi.h" #include "nic.h" #include "farch_regs.h" #include "io.h" @@ -159,11 +158,49 @@ /************************************************************************** * - * Non-volatile configuration + * Basic SPI command set and bit definitions + * + *************************************************************************/ + +#define SPI_WRSR 0x01 /* Write status register */ +#define SPI_WRITE 0x02 /* Write data to memory array */ +#define SPI_READ 0x03 /* Read data from memory array */ +#define SPI_WRDI 0x04 /* Reset write enable latch */ +#define SPI_RDSR 0x05 /* Read status register */ +#define SPI_WREN 0x06 /* Set write enable latch */ +#define SPI_SST_EWSR 0x50 /* SST: Enable write to status register */ + +#define SPI_STATUS_WPEN 0x80 /* Write-protect pin enabled */ +#define SPI_STATUS_BP2 0x10 /* Block protection bit 2 */ +#define SPI_STATUS_BP1 0x08 /* Block protection bit 1 */ +#define SPI_STATUS_BP0 0x04 /* Block protection bit 0 */ +#define SPI_STATUS_WEN 0x02 /* State of the write enable latch */ +#define SPI_STATUS_NRDY 0x01 /* Device busy flag */ + +/************************************************************************** + * + * Non-volatile memory layout * ************************************************************************** */ +/* SFC4000 flash is partitioned into: + * 0-0x400 chip and board config (see struct falcon_nvconfig) + * 0x400-0x8000 unused (or may contain VPD if EEPROM not present) + * 0x8000-end boot code (mapped to PCI expansion ROM) + * SFC4000 small EEPROM (size < 0x400) is used for VPD only. + * SFC4000 large EEPROM (size >= 0x400) is partitioned into: + * 0-0x400 chip and board config + * configurable VPD + * 0x800-0x1800 boot config + * Aside from the chip and board config, all of these are optional and may + * be absent or truncated depending on the devices used. 
+ */ +#define FALCON_NVCONFIG_END 0x400U +#define FALCON_FLASH_BOOTCODE_START 0x8000U +#define FALCON_EEPROM_BOOTCONFIG_START 0x800U +#define FALCON_EEPROM_BOOTCONFIG_END 0x1800U + /* Board configuration v2 (v1 is obsolete; later versions are compatible) */ struct falcon_nvconfig_board_v2 { __le16 nports; @@ -434,9 +471,10 @@ static int falcon_spi_wait(struct efx_nic *efx) } } -int falcon_spi_cmd(struct efx_nic *efx, const struct falcon_spi_device *spi, - unsigned int command, int address, - const void *in, void *out, size_t len) +static int +falcon_spi_cmd(struct efx_nic *efx, const struct falcon_spi_device *spi, + unsigned int command, int address, + const void *in, void *out, size_t len) { bool addressed = (address >= 0); bool reading = (out != NULL); @@ -490,13 +528,6 @@ int falcon_spi_cmd(struct efx_nic *efx, const struct falcon_spi_device *spi, return 0; } -static size_t -falcon_spi_write_limit(const struct falcon_spi_device *spi, size_t start) -{ - return min(FALCON_SPI_MAX_LEN, - (spi->block_size - (start & (spi->block_size - 1)))); -} - static inline u8 falcon_spi_munge_command(const struct falcon_spi_device *spi, const u8 command, const unsigned int address) @@ -504,34 +535,9 @@ falcon_spi_munge_command(const struct falcon_spi_device *spi, return command | (((address >> 8) & spi->munge_address) << 3); } -/* Wait up to 10 ms for buffered write completion */ -int -falcon_spi_wait_write(struct efx_nic *efx, const struct falcon_spi_device *spi) -{ - unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100); - u8 status; - int rc; - - for (;;) { - rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL, - &status, sizeof(status)); - if (rc) - return rc; - if (!(status & SPI_STATUS_NRDY)) - return 0; - if (time_after_eq(jiffies, timeout)) { - netif_err(efx, hw, efx->net_dev, - "SPI write timeout on device %d" - " last status=0x%02x\n", - spi->device_id, status); - return -ETIMEDOUT; - } - schedule_timeout_uninterruptible(1); - } -} - -int falcon_spi_read(struct efx_nic *efx, const struct falcon_spi_device *spi, - loff_t start, size_t len, size_t *retlen, u8 *buffer) +static int +falcon_spi_read(struct efx_nic *efx, const struct falcon_spi_device *spi, + loff_t start, size_t len, size_t *retlen, u8 *buffer) { size_t block_len, pos = 0; unsigned int command; @@ -560,7 +566,51 @@ int falcon_spi_read(struct efx_nic *efx, const struct falcon_spi_device *spi, return rc; } -int +#ifdef CONFIG_SFC_MTD + +struct falcon_mtd_partition { + struct efx_mtd_partition common; + const struct falcon_spi_device *spi; + size_t offset; +}; + +#define to_falcon_mtd_partition(mtd) \ + container_of(mtd, struct falcon_mtd_partition, common.mtd) + +static size_t +falcon_spi_write_limit(const struct falcon_spi_device *spi, size_t start) +{ + return min(FALCON_SPI_MAX_LEN, + (spi->block_size - (start & (spi->block_size - 1)))); +} + +/* Wait up to 10 ms for buffered write completion */ +static int +falcon_spi_wait_write(struct efx_nic *efx, const struct falcon_spi_device *spi) +{ + unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100); + u8 status; + int rc; + + for (;;) { + rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL, + &status, sizeof(status)); + if (rc) + return rc; + if (!(status & SPI_STATUS_NRDY)) + return 0; + if (time_after_eq(jiffies, timeout)) { + netif_err(efx, hw, efx->net_dev, + "SPI write timeout on device %d" + " last status=0x%02x\n", + spi->device_id, status); + return -ETIMEDOUT; + } + schedule_timeout_uninterruptible(1); + } +} + +static int falcon_spi_write(struct efx_nic *efx, const 
struct falcon_spi_device *spi, loff_t start, size_t len, size_t *retlen, const u8 *buffer) { @@ -609,6 +659,238 @@ falcon_spi_write(struct efx_nic *efx, const struct falcon_spi_device *spi, return rc; } +static int +falcon_spi_slow_wait(struct falcon_mtd_partition *part, bool uninterruptible) +{ + const struct falcon_spi_device *spi = part->spi; + struct efx_nic *efx = part->common.mtd.priv; + u8 status; + int rc, i; + + /* Wait up to 4s for flash/EEPROM to finish a slow operation. */ + for (i = 0; i < 40; i++) { + __set_current_state(uninterruptible ? + TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE); + schedule_timeout(HZ / 10); + rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL, + &status, sizeof(status)); + if (rc) + return rc; + if (!(status & SPI_STATUS_NRDY)) + return 0; + if (signal_pending(current)) + return -EINTR; + } + pr_err("%s: timed out waiting for %s\n", + part->common.name, part->common.dev_type_name); + return -ETIMEDOUT; +} + +static int +falcon_spi_unlock(struct efx_nic *efx, const struct falcon_spi_device *spi) +{ + const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 | + SPI_STATUS_BP0); + u8 status; + int rc; + + rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL, + &status, sizeof(status)); + if (rc) + return rc; + + if (!(status & unlock_mask)) + return 0; /* already unlocked */ + + rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0); + if (rc) + return rc; + rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0); + if (rc) + return rc; + + status &= ~unlock_mask; + rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status, + NULL, sizeof(status)); + if (rc) + return rc; + rc = falcon_spi_wait_write(efx, spi); + if (rc) + return rc; + + return 0; +} + +#define FALCON_SPI_VERIFY_BUF_LEN 16 + +static int +falcon_spi_erase(struct falcon_mtd_partition *part, loff_t start, size_t len) +{ + const struct falcon_spi_device *spi = part->spi; + struct efx_nic *efx = part->common.mtd.priv; + unsigned pos, block_len; + u8 empty[FALCON_SPI_VERIFY_BUF_LEN]; + u8 buffer[FALCON_SPI_VERIFY_BUF_LEN]; + int rc; + + if (len != spi->erase_size) + return -EINVAL; + + if (spi->erase_command == 0) + return -EOPNOTSUPP; + + rc = falcon_spi_unlock(efx, spi); + if (rc) + return rc; + rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0); + if (rc) + return rc; + rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL, + NULL, 0); + if (rc) + return rc; + rc = falcon_spi_slow_wait(part, false); + + /* Verify the entire region has been wiped */ + memset(empty, 0xff, sizeof(empty)); + for (pos = 0; pos < len; pos += block_len) { + block_len = min(len - pos, sizeof(buffer)); + rc = falcon_spi_read(efx, spi, start + pos, block_len, + NULL, buffer); + if (rc) + return rc; + if (memcmp(empty, buffer, block_len)) + return -EIO; + + /* Avoid locking up the system */ + cond_resched(); + if (signal_pending(current)) + return -EINTR; + } + + return rc; +} + +static void falcon_mtd_rename(struct efx_mtd_partition *part) +{ + struct efx_nic *efx = part->mtd.priv; + + snprintf(part->name, sizeof(part->name), "%s %s", + efx->name, part->type_name); +} + +static int falcon_mtd_read(struct mtd_info *mtd, loff_t start, + size_t len, size_t *retlen, u8 *buffer) +{ + struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd); + struct efx_nic *efx = mtd->priv; + struct falcon_nic_data *nic_data = efx->nic_data; + int rc; + + rc = mutex_lock_interruptible(&nic_data->spi_lock); + if (rc) + return rc; + rc = falcon_spi_read(efx, part->spi, part->offset + start, + len, retlen, buffer); + 
mutex_unlock(&nic_data->spi_lock); + return rc; +} + +static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len) +{ + struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd); + struct efx_nic *efx = mtd->priv; + struct falcon_nic_data *nic_data = efx->nic_data; + int rc; + + rc = mutex_lock_interruptible(&nic_data->spi_lock); + if (rc) + return rc; + rc = falcon_spi_erase(part, part->offset + start, len); + mutex_unlock(&nic_data->spi_lock); + return rc; +} + +static int falcon_mtd_write(struct mtd_info *mtd, loff_t start, + size_t len, size_t *retlen, const u8 *buffer) +{ + struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd); + struct efx_nic *efx = mtd->priv; + struct falcon_nic_data *nic_data = efx->nic_data; + int rc; + + rc = mutex_lock_interruptible(&nic_data->spi_lock); + if (rc) + return rc; + rc = falcon_spi_write(efx, part->spi, part->offset + start, + len, retlen, buffer); + mutex_unlock(&nic_data->spi_lock); + return rc; +} + +static int falcon_mtd_sync(struct mtd_info *mtd) +{ + struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd); + struct efx_nic *efx = mtd->priv; + struct falcon_nic_data *nic_data = efx->nic_data; + int rc; + + mutex_lock(&nic_data->spi_lock); + rc = falcon_spi_slow_wait(part, true); + mutex_unlock(&nic_data->spi_lock); + return rc; +} + +static int falcon_mtd_probe(struct efx_nic *efx) +{ + struct falcon_nic_data *nic_data = efx->nic_data; + struct falcon_mtd_partition *parts; + struct falcon_spi_device *spi; + size_t n_parts; + int rc = -ENODEV; + + ASSERT_RTNL(); + + /* Allocate space for maximum number of partitions */ + parts = kcalloc(2, sizeof(*parts), GFP_KERNEL); + n_parts = 0; + + spi = &nic_data->spi_flash; + if (falcon_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) { + parts[n_parts].spi = spi; + parts[n_parts].offset = FALCON_FLASH_BOOTCODE_START; + parts[n_parts].common.dev_type_name = "flash"; + parts[n_parts].common.type_name = "sfc_flash_bootrom"; + parts[n_parts].common.mtd.type = MTD_NORFLASH; + parts[n_parts].common.mtd.flags = MTD_CAP_NORFLASH; + parts[n_parts].common.mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START; + parts[n_parts].common.mtd.erasesize = spi->erase_size; + n_parts++; + } + + spi = &nic_data->spi_eeprom; + if (falcon_spi_present(spi) && spi->size > FALCON_EEPROM_BOOTCONFIG_START) { + parts[n_parts].spi = spi; + parts[n_parts].offset = FALCON_EEPROM_BOOTCONFIG_START; + parts[n_parts].common.dev_type_name = "EEPROM"; + parts[n_parts].common.type_name = "sfc_bootconfig"; + parts[n_parts].common.mtd.type = MTD_RAM; + parts[n_parts].common.mtd.flags = MTD_CAP_RAM; + parts[n_parts].common.mtd.size = + min(spi->size, FALCON_EEPROM_BOOTCONFIG_END) - + FALCON_EEPROM_BOOTCONFIG_START; + parts[n_parts].common.mtd.erasesize = spi->erase_size; + n_parts++; + } + + rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); + if (rc) + kfree(parts); + return rc; +} + +#endif /* CONFIG_SFC_MTD */ + /************************************************************************** * * XMAC operations @@ -2417,6 +2699,15 @@ const struct efx_nic_type falcon_a1_nic_type = { .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit, .filter_get_rx_ids = efx_farch_filter_get_rx_ids, +#ifdef CONFIG_SFC_MTD + .mtd_probe = falcon_mtd_probe, + .mtd_rename = falcon_mtd_rename, + .mtd_read = falcon_mtd_read, + .mtd_erase = falcon_mtd_erase, + .mtd_write = falcon_mtd_write, + .mtd_sync = falcon_mtd_sync, +#endif + .revision = EFX_REV_FALCON_A1, .txd_ptr_tbl_base = 
FR_AA_TX_DESC_PTR_TBL_KER, .rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER, @@ -2500,6 +2791,14 @@ const struct efx_nic_type falcon_b0_nic_type = { .filter_rfs_insert = efx_farch_filter_rfs_insert, .filter_rfs_expire_one = efx_farch_filter_rfs_expire_one, #endif +#ifdef CONFIG_SFC_MTD + .mtd_probe = falcon_mtd_probe, + .mtd_rename = falcon_mtd_rename, + .mtd_read = falcon_mtd_read, + .mtd_erase = falcon_mtd_erase, + .mtd_write = falcon_mtd_write, + .mtd_sync = falcon_mtd_sync, +#endif .revision = EFX_REV_FALCON_B0, .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 1c8bf81..67b07de 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -811,125 +811,6 @@ fail: return rc; } -int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type) -{ - MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN); - int rc; - - MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type); - - BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0); - - rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf), - NULL, 0, NULL); - if (rc) - goto fail; - - return 0; - -fail: - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); - return rc; -} - -int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, - loff_t offset, u8 *buffer, size_t length) -{ - MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN); - MCDI_DECLARE_BUF(outbuf, - MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)); - size_t outlen; - int rc; - - MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type); - MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset); - MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length); - - rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), &outlen); - if (rc) - goto fail; - - memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length); - return 0; - -fail: - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); - return rc; -} - -int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, - loff_t offset, const u8 *buffer, size_t length) -{ - MCDI_DECLARE_BUF(inbuf, - MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)); - int rc; - - MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type); - MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset); - MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length); - memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length); - - BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0); - - rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, - ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4), - NULL, 0, NULL); - if (rc) - goto fail; - - return 0; - -fail: - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); - return rc; -} - -int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, - loff_t offset, size_t length) -{ - MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN); - int rc; - - MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type); - MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset); - MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length); - - BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0); - - rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf), - NULL, 0, NULL); - if (rc) - goto fail; - - return 0; - -fail: - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); - return rc; -} - -int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type) -{ - MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN); - int rc; - - 
MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type); - - BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0); - - rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf), - NULL, 0, NULL); - if (rc) - goto fail; - - return 0; - -fail: - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); - return rc; -} - static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type) { MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN); @@ -1272,3 +1153,236 @@ fail: return rc; } +#ifdef CONFIG_SFC_MTD + +#define EFX_MCDI_NVRAM_LEN_MAX 128 + +static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN); + int rc; + + MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type); + + BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf), + NULL, 0, NULL); + if (rc) + goto fail; + + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + +static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, + loff_t offset, u8 *buffer, size_t length) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN); + MCDI_DECLARE_BUF(outbuf, + MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type); + MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset); + MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length); + + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + + memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length); + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + +static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, + loff_t offset, const u8 *buffer, size_t length) +{ + MCDI_DECLARE_BUF(inbuf, + MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)); + int rc; + + MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type); + MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset); + MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length); + memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length); + + BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, + ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4), + NULL, 0, NULL); + if (rc) + goto fail; + + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + +static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, + loff_t offset, size_t length) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN); + int rc; + + MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type); + MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset); + MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length); + + BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf), + NULL, 0, NULL); + if (rc) + goto fail; + + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + +static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN); + int rc; + + MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type); + + BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, 
sizeof(inbuf), + NULL, 0, NULL); + if (rc) + goto fail; + + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + +int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, + size_t len, size_t *retlen, u8 *buffer) +{ + struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); + struct efx_nic *efx = mtd->priv; + loff_t offset = start; + loff_t end = min_t(loff_t, start + len, mtd->size); + size_t chunk; + int rc = 0; + + while (offset < end) { + chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX); + rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset, + buffer, chunk); + if (rc) + goto out; + offset += chunk; + buffer += chunk; + } +out: + *retlen = offset - start; + return rc; +} + +int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len) +{ + struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); + struct efx_nic *efx = mtd->priv; + loff_t offset = start & ~((loff_t)(mtd->erasesize - 1)); + loff_t end = min_t(loff_t, start + len, mtd->size); + size_t chunk = part->common.mtd.erasesize; + int rc = 0; + + if (!part->updating) { + rc = efx_mcdi_nvram_update_start(efx, part->nvram_type); + if (rc) + goto out; + part->updating = true; + } + + /* The MCDI interface can in fact do multiple erase blocks at once; + * but erasing may be slow, so we make multiple calls here to avoid + * tripping the MCDI RPC timeout. */ + while (offset < end) { + rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset, + chunk); + if (rc) + goto out; + offset += chunk; + } +out: + return rc; +} + +int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, + size_t len, size_t *retlen, const u8 *buffer) +{ + struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); + struct efx_nic *efx = mtd->priv; + loff_t offset = start; + loff_t end = min_t(loff_t, start + len, mtd->size); + size_t chunk; + int rc = 0; + + if (!part->updating) { + rc = efx_mcdi_nvram_update_start(efx, part->nvram_type); + if (rc) + goto out; + part->updating = true; + } + + while (offset < end) { + chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX); + rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset, + buffer, chunk); + if (rc) + goto out; + offset += chunk; + buffer += chunk; + } +out: + *retlen = offset - start; + return rc; +} + +int efx_mcdi_mtd_sync(struct mtd_info *mtd) +{ + struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); + struct efx_nic *efx = mtd->priv; + int rc = 0; + + if (part->updating) { + part->updating = false; + rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type); + } + + return rc; +} + +void efx_mcdi_mtd_rename(struct efx_mtd_partition *part) +{ + struct efx_mcdi_mtd_partition *mcdi_part = + container_of(part, struct efx_mcdi_mtd_partition, common); + struct efx_nic *efx = part->mtd.priv; + + snprintf(part->name, sizeof(part->name), "%s %s:%02x", + efx->name, part->type_name, mcdi_part->fw_subtype); +} + +#endif /* CONFIG_SFC_MTD */ diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index a465cc1..5f67ac3 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -65,6 +65,16 @@ struct efx_mcdi_mon { unsigned int n_attrs; }; +struct efx_mcdi_mtd_partition { + struct efx_mtd_partition common; + bool updating; + u8 nvram_type; + u16 fw_subtype; +}; + +#define to_efx_mcdi_mtd_partition(mtd) \ + container_of(mtd, struct efx_mcdi_mtd_partition, common.mtd) + /** * struct efx_mcdi_data - extra state for NICs that 
implement MCDI * @iface: Interface/protocol state @@ -250,18 +260,6 @@ extern int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out); extern int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, size_t *size_out, size_t *erase_size_out, bool *protected_out); -extern int efx_mcdi_nvram_update_start(struct efx_nic *efx, - unsigned int type); -extern int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, - loff_t offset, u8 *buffer, size_t length); -extern int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, - loff_t offset, const u8 *buffer, - size_t length); -#define EFX_MCDI_NVRAM_LEN_MAX 128 -extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, - loff_t offset, size_t length); -extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx, - unsigned int type); extern int efx_mcdi_nvram_test_all(struct efx_nic *efx); extern int efx_mcdi_handle_assertion(struct efx_nic *efx); extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); @@ -291,4 +289,14 @@ static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; } static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {} #endif +#ifdef CONFIG_SFC_MTD +extern int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, + size_t len, size_t *retlen, u8 *buffer); +extern int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len); +extern int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, + size_t len, size_t *retlen, const u8 *buffer); +extern int efx_mcdi_mtd_sync(struct mtd_info *mtd); +extern void efx_mcdi_mtd_rename(struct efx_mtd_partition *part); +#endif + #endif /* EFX_MCDI_H */ diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c index f5819c7..8be9a69 100644 --- a/drivers/net/ethernet/sfc/mtd.c +++ b/drivers/net/ethernet/sfc/mtd.c @@ -8,167 +8,17 @@ * by the Free Software Foundation, incorporated herein by reference. */ -#include #include #include -#include #include #include #include "net_driver.h" -#include "spi.h" #include "efx.h" -#include "nic.h" -#include "mcdi.h" -#include "mcdi_pcol.h" - -#define FALCON_SPI_VERIFY_BUF_LEN 16 - -struct efx_mtd_partition { - struct list_head node; - struct mtd_info mtd; - const char *dev_type_name; - const char *type_name; - char name[IFNAMSIZ + 20]; -}; - -struct falcon_mtd_partition { - struct efx_mtd_partition common; - const struct falcon_spi_device *spi; - size_t offset; -}; - -struct efx_mtd_ops { - void (*rename)(struct efx_mtd_partition *part); - int (*read)(struct mtd_info *mtd, loff_t start, size_t len, - size_t *retlen, u8 *buffer); - int (*erase)(struct mtd_info *mtd, loff_t start, size_t len); - int (*write)(struct mtd_info *mtd, loff_t start, size_t len, - size_t *retlen, const u8 *buffer); - int (*sync)(struct mtd_info *mtd); -}; #define to_efx_mtd_partition(mtd) \ container_of(mtd, struct efx_mtd_partition, mtd) -#define to_falcon_mtd_partition(mtd) \ - container_of(mtd, struct falcon_mtd_partition, common.mtd) - -static int falcon_mtd_probe(struct efx_nic *efx); -static int siena_mtd_probe(struct efx_nic *efx); - -/* SPI utilities */ - -static int -falcon_spi_slow_wait(struct falcon_mtd_partition *part, bool uninterruptible) -{ - const struct falcon_spi_device *spi = part->spi; - struct efx_nic *efx = part->common.mtd.priv; - u8 status; - int rc, i; - - /* Wait up to 4s for flash/EEPROM to finish a slow operation. */ - for (i = 0; i < 40; i++) { - __set_current_state(uninterruptible ? 
- TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE); - schedule_timeout(HZ / 10); - rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL, - &status, sizeof(status)); - if (rc) - return rc; - if (!(status & SPI_STATUS_NRDY)) - return 0; - if (signal_pending(current)) - return -EINTR; - } - pr_err("%s: timed out waiting for %s\n", - part->common.name, part->common.dev_type_name); - return -ETIMEDOUT; -} - -static int -falcon_spi_unlock(struct efx_nic *efx, const struct falcon_spi_device *spi) -{ - const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 | - SPI_STATUS_BP0); - u8 status; - int rc; - - rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL, - &status, sizeof(status)); - if (rc) - return rc; - - if (!(status & unlock_mask)) - return 0; /* already unlocked */ - - rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0); - if (rc) - return rc; - rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0); - if (rc) - return rc; - - status &= ~unlock_mask; - rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status, - NULL, sizeof(status)); - if (rc) - return rc; - rc = falcon_spi_wait_write(efx, spi); - if (rc) - return rc; - - return 0; -} - -static int -falcon_spi_erase(struct falcon_mtd_partition *part, loff_t start, size_t len) -{ - const struct falcon_spi_device *spi = part->spi; - struct efx_nic *efx = part->common.mtd.priv; - unsigned pos, block_len; - u8 empty[FALCON_SPI_VERIFY_BUF_LEN]; - u8 buffer[FALCON_SPI_VERIFY_BUF_LEN]; - int rc; - - if (len != spi->erase_size) - return -EINVAL; - - if (spi->erase_command == 0) - return -EOPNOTSUPP; - - rc = falcon_spi_unlock(efx, spi); - if (rc) - return rc; - rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0); - if (rc) - return rc; - rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL, - NULL, 0); - if (rc) - return rc; - rc = falcon_spi_slow_wait(part, false); - - /* Verify the entire region has been wiped */ - memset(empty, 0xff, sizeof(empty)); - for (pos = 0; pos < len; pos += block_len) { - block_len = min(len - pos, sizeof(buffer)); - rc = falcon_spi_read(efx, spi, start + pos, block_len, - NULL, buffer); - if (rc) - return rc; - if (memcmp(empty, buffer, block_len)) - return -EIO; - - /* Avoid locking up the system */ - cond_resched(); - if (signal_pending(current)) - return -EINTR; - } - - return rc; -} - /* MTD interface */ static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase) @@ -176,7 +26,7 @@ static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase) struct efx_nic *efx = mtd->priv; int rc; - rc = efx->mtd_ops->erase(mtd, erase->addr, erase->len); + rc = efx->type->mtd_erase(mtd, erase->addr, erase->len); if (rc == 0) { erase->state = MTD_ERASE_DONE; } else { @@ -193,7 +43,7 @@ static void efx_mtd_sync(struct mtd_info *mtd) struct efx_nic *efx = mtd->priv; int rc; - rc = efx->mtd_ops->sync(mtd); + rc = efx->type->mtd_sync(mtd); if (rc) pr_err("%s: %s sync failed (%d)\n", part->name, part->dev_type_name, rc); @@ -213,15 +63,8 @@ static void efx_mtd_remove_partition(struct efx_mtd_partition *part) list_del(&part->node); } -static void efx_mtd_rename_partition(struct efx_mtd_partition *part) -{ - struct efx_nic *efx = part->mtd.priv; - - efx->mtd_ops->rename(part); -} - -static int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts, - size_t n_parts, size_t sizeof_part) +int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts, + size_t n_parts, size_t sizeof_part) { struct efx_mtd_partition *part; size_t i; @@ -236,11 +79,11 @@ static int efx_mtd_add(struct 
efx_nic *efx, struct efx_mtd_partition *parts, part->mtd.priv = efx; part->mtd.name = part->name; part->mtd._erase = efx_mtd_erase; - part->mtd._read = efx->mtd_ops->read; - part->mtd._write = efx->mtd_ops->write; + part->mtd._read = efx->type->mtd_read; + part->mtd._write = efx->type->mtd_write; part->mtd._sync = efx_mtd_sync; - efx_mtd_rename_partition(part); + efx->type->mtd_rename(part); if (mtd_device_register(&part->mtd, NULL, 0)) goto fail; @@ -286,396 +129,5 @@ void efx_mtd_rename(struct efx_nic *efx) ASSERT_RTNL(); list_for_each_entry(part, &efx->mtd_list, node) - efx_mtd_rename_partition(part); -} - -int efx_mtd_probe(struct efx_nic *efx) -{ - if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) - return siena_mtd_probe(efx); - else - return falcon_mtd_probe(efx); -} - -/* Implementation of MTD operations for Falcon */ - -static void falcon_mtd_rename(struct efx_mtd_partition *part) -{ - struct efx_nic *efx = part->mtd.priv; - - snprintf(part->name, sizeof(part->name), "%s %s", - efx->name, part->type_name); -} - -static int falcon_mtd_read(struct mtd_info *mtd, loff_t start, - size_t len, size_t *retlen, u8 *buffer) -{ - struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd); - struct efx_nic *efx = mtd->priv; - struct falcon_nic_data *nic_data = efx->nic_data; - int rc; - - rc = mutex_lock_interruptible(&nic_data->spi_lock); - if (rc) - return rc; - rc = falcon_spi_read(efx, part->spi, part->offset + start, - len, retlen, buffer); - mutex_unlock(&nic_data->spi_lock); - return rc; -} - -static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len) -{ - struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd); - struct efx_nic *efx = mtd->priv; - struct falcon_nic_data *nic_data = efx->nic_data; - int rc; - - rc = mutex_lock_interruptible(&nic_data->spi_lock); - if (rc) - return rc; - rc = falcon_spi_erase(part, part->offset + start, len); - mutex_unlock(&nic_data->spi_lock); - return rc; -} - -static int falcon_mtd_write(struct mtd_info *mtd, loff_t start, - size_t len, size_t *retlen, const u8 *buffer) -{ - struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd); - struct efx_nic *efx = mtd->priv; - struct falcon_nic_data *nic_data = efx->nic_data; - int rc; - - rc = mutex_lock_interruptible(&nic_data->spi_lock); - if (rc) - return rc; - rc = falcon_spi_write(efx, part->spi, part->offset + start, - len, retlen, buffer); - mutex_unlock(&nic_data->spi_lock); - return rc; -} - -static int falcon_mtd_sync(struct mtd_info *mtd) -{ - struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd); - struct efx_nic *efx = mtd->priv; - struct falcon_nic_data *nic_data = efx->nic_data; - int rc; - - mutex_lock(&nic_data->spi_lock); - rc = falcon_spi_slow_wait(part, true); - mutex_unlock(&nic_data->spi_lock); - return rc; -} - -static const struct efx_mtd_ops falcon_mtd_ops = { - .rename = falcon_mtd_rename, - .read = falcon_mtd_read, - .erase = falcon_mtd_erase, - .write = falcon_mtd_write, - .sync = falcon_mtd_sync, -}; - -static int falcon_mtd_probe(struct efx_nic *efx) -{ - struct falcon_nic_data *nic_data = efx->nic_data; - struct falcon_mtd_partition *parts; - struct falcon_spi_device *spi; - size_t n_parts; - int rc = -ENODEV; - - ASSERT_RTNL(); - - efx->mtd_ops = &falcon_mtd_ops; - - /* Allocate space for maximum number of partitions */ - parts = kcalloc(2, sizeof(*parts), GFP_KERNEL); - n_parts = 0; - - spi = &nic_data->spi_flash; - if (falcon_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) { - parts[n_parts].spi = spi; - 
parts[n_parts].offset = FALCON_FLASH_BOOTCODE_START; - parts[n_parts].common.dev_type_name = "flash"; - parts[n_parts].common.type_name = "sfc_flash_bootrom"; - parts[n_parts].common.mtd.type = MTD_NORFLASH; - parts[n_parts].common.mtd.flags = MTD_CAP_NORFLASH; - parts[n_parts].common.mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START; - parts[n_parts].common.mtd.erasesize = spi->erase_size; - n_parts++; - } - - spi = &nic_data->spi_eeprom; - if (falcon_spi_present(spi) && spi->size > FALCON_EEPROM_BOOTCONFIG_START) { - parts[n_parts].spi = spi; - parts[n_parts].offset = FALCON_EEPROM_BOOTCONFIG_START; - parts[n_parts].common.dev_type_name = "EEPROM"; - parts[n_parts].common.type_name = "sfc_bootconfig"; - parts[n_parts].common.mtd.type = MTD_RAM; - parts[n_parts].common.mtd.flags = MTD_CAP_RAM; - parts[n_parts].common.mtd.size = - min(spi->size, FALCON_EEPROM_BOOTCONFIG_END) - - FALCON_EEPROM_BOOTCONFIG_START; - parts[n_parts].common.mtd.erasesize = spi->erase_size; - n_parts++; - } - - rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); - if (rc) - kfree(parts); - return rc; -} - -/* Implementation of MTD operations for Siena */ - -struct efx_mcdi_mtd_partition { - struct efx_mtd_partition common; - bool updating; - u8 nvram_type; - u16 fw_subtype; -}; - -#define to_efx_mcdi_mtd_partition(mtd) \ - container_of(mtd, struct efx_mcdi_mtd_partition, common.mtd) - -static void siena_mtd_rename(struct efx_mtd_partition *part) -{ - struct efx_mcdi_mtd_partition *mcdi_part = - container_of(part, struct efx_mcdi_mtd_partition, common); - struct efx_nic *efx = part->mtd.priv; - - snprintf(part->name, sizeof(part->name), "%s %s:%02x", - efx->name, part->type_name, mcdi_part->fw_subtype); -} - -static int siena_mtd_read(struct mtd_info *mtd, loff_t start, - size_t len, size_t *retlen, u8 *buffer) -{ - struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); - struct efx_nic *efx = mtd->priv; - loff_t offset = start; - loff_t end = min_t(loff_t, start + len, mtd->size); - size_t chunk; - int rc = 0; - - while (offset < end) { - chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX); - rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset, - buffer, chunk); - if (rc) - goto out; - offset += chunk; - buffer += chunk; - } -out: - *retlen = offset - start; - return rc; -} - -static int siena_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len) -{ - struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); - struct efx_nic *efx = mtd->priv; - loff_t offset = start & ~((loff_t)(mtd->erasesize - 1)); - loff_t end = min_t(loff_t, start + len, mtd->size); - size_t chunk = part->common.mtd.erasesize; - int rc = 0; - - if (!part->updating) { - rc = efx_mcdi_nvram_update_start(efx, part->nvram_type); - if (rc) - goto out; - part->updating = true; - } - - /* The MCDI interface can in fact do multiple erase blocks at once; - * but erasing may be slow, so we make multiple calls here to avoid - * tripping the MCDI RPC timeout. 
*/ - while (offset < end) { - rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset, - chunk); - if (rc) - goto out; - offset += chunk; - } -out: - return rc; -} - -static int siena_mtd_write(struct mtd_info *mtd, loff_t start, - size_t len, size_t *retlen, const u8 *buffer) -{ - struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); - struct efx_nic *efx = mtd->priv; - loff_t offset = start; - loff_t end = min_t(loff_t, start + len, mtd->size); - size_t chunk; - int rc = 0; - - if (!part->updating) { - rc = efx_mcdi_nvram_update_start(efx, part->nvram_type); - if (rc) - goto out; - part->updating = true; - } - - while (offset < end) { - chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX); - rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset, - buffer, chunk); - if (rc) - goto out; - offset += chunk; - buffer += chunk; - } -out: - *retlen = offset - start; - return rc; -} - -static int siena_mtd_sync(struct mtd_info *mtd) -{ - struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); - struct efx_nic *efx = mtd->priv; - int rc = 0; - - if (part->updating) { - part->updating = false; - rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type); - } - - return rc; + efx->type->mtd_rename(part); } - -static const struct efx_mtd_ops siena_mtd_ops = { - .rename = siena_mtd_rename, - .read = siena_mtd_read, - .erase = siena_mtd_erase, - .write = siena_mtd_write, - .sync = siena_mtd_sync, -}; - -struct siena_nvram_type_info { - int port; - const char *name; -}; - -static const struct siena_nvram_type_info siena_nvram_types[] = { - [MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" }, - [MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" }, - [MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" }, - [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0] = { 0, "sfc_static_cfg" }, - [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1] = { 1, "sfc_static_cfg" }, - [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0] = { 0, "sfc_dynamic_cfg" }, - [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1] = { 1, "sfc_dynamic_cfg" }, - [MC_CMD_NVRAM_TYPE_EXP_ROM] = { 0, "sfc_exp_rom" }, - [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0] = { 0, "sfc_exp_rom_cfg" }, - [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" }, - [MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" }, - [MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" }, - [MC_CMD_NVRAM_TYPE_FPGA] = { 0, "sfc_fpga" }, -}; - -static int siena_mtd_probe_partition(struct efx_nic *efx, - struct efx_mcdi_mtd_partition *part, - unsigned int type) -{ - const struct siena_nvram_type_info *info; - size_t size, erase_size; - bool protected; - int rc; - - if (type >= ARRAY_SIZE(siena_nvram_types) || - siena_nvram_types[type].name == NULL) - return -ENODEV; - - info = &siena_nvram_types[type]; - - if (info->port != efx_port_num(efx)) - return -ENODEV; - - rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected); - if (rc) - return rc; - if (protected) - return -ENODEV; /* hide it */ - - part->nvram_type = type; - part->common.dev_type_name = "Siena NVRAM manager"; - part->common.type_name = info->name; - - part->common.mtd.type = MTD_NORFLASH; - part->common.mtd.flags = MTD_CAP_NORFLASH; - part->common.mtd.size = size; - part->common.mtd.erasesize = erase_size; - - return 0; -} - -static int siena_mtd_get_fw_subtypes(struct efx_nic *efx, - struct efx_mcdi_mtd_partition *parts, - size_t n_parts) -{ - uint16_t fw_subtype_list[ - MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM]; - size_t i; - int rc; - - rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL); - if 
(rc) - return rc; - - for (i = 0; i < n_parts; i++) - parts[i].fw_subtype = fw_subtype_list[parts[i].nvram_type]; - - return 0; -} - -static int siena_mtd_probe(struct efx_nic *efx) -{ - struct efx_mcdi_mtd_partition *parts; - u32 nvram_types; - unsigned int type; - size_t n_parts; - int rc; - - ASSERT_RTNL(); - - efx->mtd_ops = &siena_mtd_ops; - - rc = efx_mcdi_nvram_types(efx, &nvram_types); - if (rc) - return rc; - - parts = kcalloc(hweight32(nvram_types), sizeof(*parts), GFP_KERNEL); - if (!parts) - return -ENOMEM; - - type = 0; - n_parts = 0; - - while (nvram_types != 0) { - if (nvram_types & 1) { - rc = siena_mtd_probe_partition(efx, &parts[n_parts], - type); - if (rc == 0) - n_parts++; - else if (rc != -ENODEV) - goto fail; - } - type++; - nvram_types >>= 1; - } - - rc = siena_mtd_get_fw_subtypes(efx, parts, n_parts); - if (rc) - goto fail; - - rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); -fail: - if (rc) - kfree(parts); - return rc; -} - diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 958ede1..fa881cd 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -27,6 +27,7 @@ #include #include #include +#include #include "enum.h" #include "bitfield.h" @@ -868,7 +869,6 @@ struct efx_nic { struct delayed_work selftest_work; #ifdef CONFIG_SFC_MTD - const struct efx_mtd_ops *mtd_ops; struct list_head mtd_list; #endif @@ -954,6 +954,14 @@ static inline unsigned int efx_port_num(struct efx_nic *efx) return efx->port_num; } +struct efx_mtd_partition { + struct list_head node; + struct mtd_info mtd; + const char *dev_type_name; + const char *type_name; + char name[IFNAMSIZ + 20]; +}; + /** * struct efx_nic_type - Efx device type definition * @mem_map_size: Get memory BAR mapped size @@ -1047,6 +1055,15 @@ static inline unsigned int efx_port_num(struct efx_nic *efx) * @filter_rfs_expire_one: Consider expiring a filter inserted for RFS. * This must check whether the specified table entry is used by RFS * and that rps_may_expire_flow() returns true for it. + * @mtd_probe: Probe and add MTD partitions associated with this net device, + * using efx_mtd_add() + * @mtd_rename: Set an MTD partition name using the net device name + * @mtd_read: Read from an MTD partition + * @mtd_erase: Erase part of an MTD partition + * @mtd_write: Write to an MTD partition + * @mtd_sync: Wait for write-back to complete on MTD partition. This + * also notifies the driver that a writer has finished using this + * partition. 
* @revision: Hardware architecture revision * @txd_ptr_tbl_base: TX descriptor ring base address * @rxd_ptr_tbl_base: RX descriptor ring base address @@ -1150,6 +1167,16 @@ struct efx_nic_type { bool (*filter_rfs_expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index); #endif +#ifdef CONFIG_SFC_MTD + int (*mtd_probe)(struct efx_nic *efx); + void (*mtd_rename)(struct efx_mtd_partition *part); + int (*mtd_read)(struct mtd_info *mtd, loff_t start, size_t len, + size_t *retlen, u8 *buffer); + int (*mtd_erase)(struct mtd_info *mtd, loff_t start, size_t len); + int (*mtd_write)(struct mtd_info *mtd, loff_t start, size_t len, + size_t *retlen, const u8 *buffer); + int (*mtd_sync)(struct mtd_info *mtd); +#endif int revision; unsigned int txd_ptr_tbl_base; diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index b90dc8a..2b84aeb 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -16,7 +16,6 @@ #include "net_driver.h" #include "efx.h" #include "mcdi.h" -#include "spi.h" /* * Falcon hardware control @@ -164,6 +163,38 @@ struct falcon_board { }; /** + * struct falcon_spi_device - a Falcon SPI (Serial Peripheral Interface) device + * @device_id: Controller's id for the device + * @size: Size (in bytes) + * @addr_len: Number of address bytes in read/write commands + * @munge_address: Flag whether addresses should be munged. + * Some devices with 9-bit addresses (e.g. AT25040A EEPROM) + * use bit 3 of the command byte as address bit A8, rather + * than having a two-byte address. If this flag is set, then + * commands should be munged in this way. + * @erase_command: Erase command (or 0 if sector erase not needed). + * @erase_size: Erase sector size (in bytes) + * Erase commands affect sectors with this size and alignment. + * This must be a power of two. + * @block_size: Write block size (in bytes). + * Write commands are limited to blocks with this size and alignment. 
+ */ +struct falcon_spi_device { + int device_id; + unsigned int size; + unsigned int addr_len; + unsigned int munge_address:1; + u8 erase_command; + unsigned int erase_size; + unsigned int block_size; +}; + +static inline bool falcon_spi_present(const struct falcon_spi_device *spi) +{ + return spi->size != 0; +} + +/** * struct falcon_nic_data - Falcon NIC state * @pci_dev2: Secondary function of Falcon A * @board: Board state and functions diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 1be81e4..11b4739 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -18,7 +18,6 @@ #include "bitfield.h" #include "efx.h" #include "nic.h" -#include "spi.h" #include "farch_regs.h" #include "io.h" #include "phy.h" @@ -674,6 +673,138 @@ static int siena_mcdi_poll_reboot(struct efx_nic *efx) /************************************************************************** * + * MTD + * + ************************************************************************** + */ + +#ifdef CONFIG_SFC_MTD + +struct siena_nvram_type_info { + int port; + const char *name; +}; + +static const struct siena_nvram_type_info siena_nvram_types[] = { + [MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" }, + [MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" }, + [MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" }, + [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0] = { 0, "sfc_static_cfg" }, + [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1] = { 1, "sfc_static_cfg" }, + [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0] = { 0, "sfc_dynamic_cfg" }, + [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1] = { 1, "sfc_dynamic_cfg" }, + [MC_CMD_NVRAM_TYPE_EXP_ROM] = { 0, "sfc_exp_rom" }, + [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0] = { 0, "sfc_exp_rom_cfg" }, + [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" }, + [MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" }, + [MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" }, + [MC_CMD_NVRAM_TYPE_FPGA] = { 0, "sfc_fpga" }, +}; + +static int siena_mtd_probe_partition(struct efx_nic *efx, + struct efx_mcdi_mtd_partition *part, + unsigned int type) +{ + const struct siena_nvram_type_info *info; + size_t size, erase_size; + bool protected; + int rc; + + if (type >= ARRAY_SIZE(siena_nvram_types) || + siena_nvram_types[type].name == NULL) + return -ENODEV; + + info = &siena_nvram_types[type]; + + if (info->port != efx_port_num(efx)) + return -ENODEV; + + rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected); + if (rc) + return rc; + if (protected) + return -ENODEV; /* hide it */ + + part->nvram_type = type; + part->common.dev_type_name = "Siena NVRAM manager"; + part->common.type_name = info->name; + + part->common.mtd.type = MTD_NORFLASH; + part->common.mtd.flags = MTD_CAP_NORFLASH; + part->common.mtd.size = size; + part->common.mtd.erasesize = erase_size; + + return 0; +} + +static int siena_mtd_get_fw_subtypes(struct efx_nic *efx, + struct efx_mcdi_mtd_partition *parts, + size_t n_parts) +{ + uint16_t fw_subtype_list[ + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM]; + size_t i; + int rc; + + rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL); + if (rc) + return rc; + + for (i = 0; i < n_parts; i++) + parts[i].fw_subtype = fw_subtype_list[parts[i].nvram_type]; + + return 0; +} + +static int siena_mtd_probe(struct efx_nic *efx) +{ + struct efx_mcdi_mtd_partition *parts; + u32 nvram_types; + unsigned int type; + size_t n_parts; + int rc; + + ASSERT_RTNL(); + + rc = efx_mcdi_nvram_types(efx, &nvram_types); + if (rc) + return rc; + + 
parts = kcalloc(hweight32(nvram_types), sizeof(*parts), GFP_KERNEL); + if (!parts) + return -ENOMEM; + + type = 0; + n_parts = 0; + + while (nvram_types != 0) { + if (nvram_types & 1) { + rc = siena_mtd_probe_partition(efx, &parts[n_parts], + type); + if (rc == 0) + n_parts++; + else if (rc != -ENODEV) + goto fail; + } + type++; + nvram_types >>= 1; + } + + rc = siena_mtd_get_fw_subtypes(efx, parts, n_parts); + if (rc) + goto fail; + + rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); +fail: + if (rc) + kfree(parts); + return rc; +} + +#endif /* CONFIG_SFC_MTD */ + +/************************************************************************** + * * Revision-dependent attributes used by efx.c and nic.c * ************************************************************************** @@ -753,6 +884,14 @@ const struct efx_nic_type siena_a0_nic_type = { .filter_rfs_insert = efx_farch_filter_rfs_insert, .filter_rfs_expire_one = efx_farch_filter_rfs_expire_one, #endif +#ifdef CONFIG_SFC_MTD + .mtd_probe = siena_mtd_probe, + .mtd_rename = efx_mcdi_mtd_rename, + .mtd_read = efx_mcdi_mtd_read, + .mtd_erase = efx_mcdi_mtd_erase, + .mtd_write = efx_mcdi_mtd_write, + .mtd_sync = efx_mcdi_mtd_sync, +#endif .revision = EFX_REV_SIENA_A0, .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, diff --git a/drivers/net/ethernet/sfc/spi.h b/drivers/net/ethernet/sfc/spi.h deleted file mode 100644 index ee951feb..0000000 --- a/drivers/net/ethernet/sfc/spi.h +++ /dev/null @@ -1,99 +0,0 @@ -/**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards - * Copyright 2005 Fen Systems Ltd. - * Copyright 2006-2010 Solarflare Communications Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation, incorporated herein by reference. - */ - -#ifndef EFX_SPI_H -#define EFX_SPI_H - -#include "net_driver.h" - -/************************************************************************** - * - * Basic SPI command set and bit definitions - * - *************************************************************************/ - -#define SPI_WRSR 0x01 /* Write status register */ -#define SPI_WRITE 0x02 /* Write data to memory array */ -#define SPI_READ 0x03 /* Read data from memory array */ -#define SPI_WRDI 0x04 /* Reset write enable latch */ -#define SPI_RDSR 0x05 /* Read status register */ -#define SPI_WREN 0x06 /* Set write enable latch */ -#define SPI_SST_EWSR 0x50 /* SST: Enable write to status register */ - -#define SPI_STATUS_WPEN 0x80 /* Write-protect pin enabled */ -#define SPI_STATUS_BP2 0x10 /* Block protection bit 2 */ -#define SPI_STATUS_BP1 0x08 /* Block protection bit 1 */ -#define SPI_STATUS_BP0 0x04 /* Block protection bit 0 */ -#define SPI_STATUS_WEN 0x02 /* State of the write enable latch */ -#define SPI_STATUS_NRDY 0x01 /* Device busy flag */ - -/** - * struct falcon_spi_device - a Falcon SPI (Serial Peripheral Interface) device - * @device_id: Controller's id for the device - * @size: Size (in bytes) - * @addr_len: Number of address bytes in read/write commands - * @munge_address: Flag whether addresses should be munged. - * Some devices with 9-bit addresses (e.g. AT25040A EEPROM) - * use bit 3 of the command byte as address bit A8, rather - * than having a two-byte address. If this flag is set, then - * commands should be munged in this way. 
- * @erase_command: Erase command (or 0 if sector erase not needed). - * @erase_size: Erase sector size (in bytes) - * Erase commands affect sectors with this size and alignment. - * This must be a power of two. - * @block_size: Write block size (in bytes). - * Write commands are limited to blocks with this size and alignment. - */ -struct falcon_spi_device { - int device_id; - unsigned int size; - unsigned int addr_len; - unsigned int munge_address:1; - u8 erase_command; - unsigned int erase_size; - unsigned int block_size; -}; - -static inline bool falcon_spi_present(const struct falcon_spi_device *spi) -{ - return spi->size != 0; -} - -int falcon_spi_cmd(struct efx_nic *efx, - const struct falcon_spi_device *spi, unsigned int command, - int address, const void *in, void *out, size_t len); -int falcon_spi_wait_write(struct efx_nic *efx, - const struct falcon_spi_device *spi); -int falcon_spi_read(struct efx_nic *efx, - const struct falcon_spi_device *spi, loff_t start, - size_t len, size_t *retlen, u8 *buffer); -int falcon_spi_write(struct efx_nic *efx, - const struct falcon_spi_device *spi, loff_t start, - size_t len, size_t *retlen, const u8 *buffer); - -/* - * SFC4000 flash is partitioned into: - * 0-0x400 chip and board config (see falcon_hwdefs.h) - * 0x400-0x8000 unused (or may contain VPD if EEPROM not present) - * 0x8000-end boot code (mapped to PCI expansion ROM) - * SFC4000 small EEPROM (size < 0x400) is used for VPD only. - * SFC4000 large EEPROM (size >= 0x400) is partitioned into: - * 0-0x400 chip and board config - * configurable VPD - * 0x800-0x1800 boot config - * Aside from the chip and board config, all of these are optional and may - * be absent or truncated depending on the devices used. - */ -#define FALCON_NVCONFIG_END 0x400U -#define FALCON_FLASH_BOOTCODE_START 0x8000U -#define FALCON_EEPROM_BOOTCONFIG_START 0x800U -#define FALCON_EEPROM_BOOTCONFIG_END 0x1800U - -#endif /* EFX_SPI_H */ -- cgit v0.10.2 From e51361249b84de8c2e5dc505bfd91e9ccb07bf0d Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Fri, 14 Dec 2012 21:52:56 +0000 Subject: sfc: Remove more left-overs from Falcon GMAC support We only ever used the XMAC (10G link speed) in production. 
Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 58ae28b..55c3826 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -205,8 +205,6 @@ static int efx_ethtool_get_settings(struct net_device *net_dev, efx->phy_op->get_settings(efx, ecmd); mutex_unlock(&efx->mac_lock); - /* GMAC does not support 1000Mbps HD */ - ecmd->supported &= ~SUPPORTED_1000baseT_Half; /* Both MACs support pause frames (bidirectional and respond-only) */ ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index 3c99b6c..76a9e0d 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -131,8 +131,8 @@ #define XgDmaDone_offset 0xD4 #define XgDmaDone_WIDTH 32 -#define FALCON_STATS_NOT_DONE 0x00000000 -#define FALCON_STATS_DONE 0xffffffff +#define FALCON_XMAC_STATS_DMA_FLAG(efx) \ + (*(u32 *)((efx)->stats_buffer.addr + XgDmaDone_offset)) #define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset) #define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH) @@ -1398,10 +1398,7 @@ static void falcon_stats_request(struct efx_nic *efx) WARN_ON(nic_data->stats_pending); WARN_ON(nic_data->stats_disable_count); - if (nic_data->stats_dma_done == NULL) - return; /* no mac selected */ - - *nic_data->stats_dma_done = FALCON_STATS_NOT_DONE; + FALCON_XMAC_STATS_DMA_FLAG(efx) = 0; nic_data->stats_pending = true; wmb(); /* ensure done flag is clear */ @@ -1423,7 +1420,7 @@ static void falcon_stats_complete(struct efx_nic *efx) return; nic_data->stats_pending = false; - if (*nic_data->stats_dma_done == FALCON_STATS_DONE) { + if (FALCON_XMAC_STATS_DMA_FLAG(efx)) { rmb(); /* read the done flag before the stats */ falcon_update_stats_xmac(efx); } else { @@ -1706,7 +1703,6 @@ static int falcon_probe_port(struct efx_nic *efx) (u64)efx->stats_buffer.dma_addr, efx->stats_buffer.addr, (u64)virt_to_phys(efx->stats_buffer.addr)); - nic_data->stats_dma_done = efx->stats_buffer.addr + XgDmaDone_offset; return 0; } @@ -2554,8 +2550,7 @@ static void falcon_update_nic_stats(struct efx_nic *efx) efx->n_rx_nodesc_drop_cnt += EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT); - if (nic_data->stats_pending && - *nic_data->stats_dma_done == FALCON_STATS_DONE) { + if (nic_data->stats_pending && FALCON_XMAC_STATS_DMA_FLAG(efx)) { nic_data->stats_pending = false; rmb(); /* read the done flag before the stats */ falcon_update_stats_xmac(efx); @@ -2588,7 +2583,7 @@ void falcon_stop_nic_stats(struct efx_nic *efx) /* Wait enough time for the most recent transfer to * complete. */ for (i = 0; i < 4 && nic_data->stats_pending; i++) { - if (*nic_data->stats_dma_done == FALCON_STATS_DONE) + if (FALCON_XMAC_STATS_DMA_FLAG(efx)) break; msleep(1); } diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 2b84aeb..c3e0f1f 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -117,9 +117,6 @@ enum { (1 << LOOPBACK_XGXS) | \ (1 << LOOPBACK_XAUI)) -#define FALCON_GMAC_LOOPBACKS \ - (1 << LOOPBACK_GMAC) - /* Alignment of PCIe DMA boundaries (4KB) */ #define EFX_PAGE_SIZE 4096 /* Size and alignment of buffer table entries (same) */ @@ -201,7 +198,6 @@ static inline bool falcon_spi_present(const struct falcon_spi_device *spi) * @stats_disable_count: Nest count for disabling statistics fetches * @stats_pending: Is there a pending DMA of MAC statistics. 
* @stats_timer: A timer for regularly fetching MAC statistics. - * @stats_dma_done: Pointer to the flag which indicates DMA completion. * @spi_flash: SPI flash device * @spi_eeprom: SPI EEPROM device * @spi_lock: SPI bus lock @@ -214,7 +210,6 @@ struct falcon_nic_data { unsigned int stats_disable_count; bool stats_pending; struct timer_list stats_timer; - u32 *stats_dma_done; struct falcon_spi_device spi_flash; struct falcon_spi_device spi_eeprom; struct mutex spi_lock; -- cgit v0.10.2 From b681e57c38d81eee45f8bd7465d2a2af872800e3 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Fri, 14 Dec 2012 22:18:55 +0000 Subject: sfc: Remove driver-local struct ethtool_string It's not really helpful to pretend ethtool string arrays are structured. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 55c3826..86b4391 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -19,10 +19,6 @@ #include "filter.h" #include "nic.h" -struct ethtool_string { - char name[ETH_GSTRING_LEN]; -}; - struct efx_ethtool_stat { const char *name; enum { @@ -289,12 +285,11 @@ static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable) * * Fill in an individual self-test entry. */ -static void efx_fill_test(unsigned int test_index, - struct ethtool_string *strings, u64 *data, +static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data, int *test, const char *unit_format, int unit_id, const char *test_format, const char *test_id) { - struct ethtool_string unit_str, test_str; + char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN]; /* Fill data value, if applicable */ if (data) @@ -303,15 +298,14 @@ static void efx_fill_test(unsigned int test_index, /* Fill string, if applicable */ if (strings) { if (strchr(unit_format, '%')) - snprintf(unit_str.name, sizeof(unit_str.name), + snprintf(unit_str, sizeof(unit_str), unit_format, unit_id); else - strcpy(unit_str.name, unit_format); - snprintf(test_str.name, sizeof(test_str.name), - test_format, test_id); - snprintf(strings[test_index].name, - sizeof(strings[test_index].name), - "%-6s %-24s", unit_str.name, test_str.name); + strcpy(unit_str, unit_format); + snprintf(test_str, sizeof(test_str), test_format, test_id); + snprintf(strings + test_index * ETH_GSTRING_LEN, + ETH_GSTRING_LEN, + "%-6s %-24s", unit_str, test_str); } } @@ -334,7 +328,7 @@ static int efx_fill_loopback_test(struct efx_nic *efx, struct efx_loopback_self_tests *lb_tests, enum efx_loopback_mode mode, unsigned int test_index, - struct ethtool_string *strings, u64 *data) + u8 *strings, u64 *data) { struct efx_channel *channel = efx_get_channel(efx, efx->tx_channel_offset); @@ -371,8 +365,7 @@ static int efx_fill_loopback_test(struct efx_nic *efx, */ static int efx_ethtool_fill_self_tests(struct efx_nic *efx, struct efx_self_tests *tests, - struct ethtool_string *strings, - u64 *data) + u8 *strings, u64 *data) { struct efx_channel *channel; unsigned int n = 0, i; @@ -446,20 +439,16 @@ static void efx_ethtool_get_strings(struct net_device *net_dev, u32 string_set, u8 *strings) { struct efx_nic *efx = netdev_priv(net_dev); - struct ethtool_string *ethtool_strings = - (struct ethtool_string *)strings; int i; switch (string_set) { case ETH_SS_STATS: for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) - strlcpy(ethtool_strings[i].name, - efx_ethtool_stats[i].name, - sizeof(ethtool_strings[i].name)); + strlcpy(strings + i * ETH_GSTRING_LEN, + efx_ethtool_stats[i].name, ETH_GSTRING_LEN); 
break; case ETH_SS_TEST: - efx_ethtool_fill_self_tests(efx, NULL, - ethtool_strings, NULL); + efx_ethtool_fill_self_tests(efx, NULL, strings, NULL); break; default: /* No other string sets */ -- cgit v0.10.2 From cd0ecc9a6d279c8c5c5336f576330c45f5c80939 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Fri, 14 Dec 2012 21:52:56 +0000 Subject: sfc: Delegate MAC/NIC statistic description to efx_nic_type Various hardware statistics that are available for Siena are unavailable or meaningless for Falcon. Huntington adds further to the NIC-type-specific statistics, as it has different MAC blocks from Falcon/Siena. All NIC types still provide most statistics by DMA, and use little-endian byte order. Therefore: 1. Add some general utility functions for reporting hardware statistics, efx_nic_describe_stats() and efx_nic_update_stats(). 2. Add an efx_nic_type::describe_stats operation to get the number and names of statistics, implemented using efx_nic_describe_stats() 3. Change efx_nic_type::update_stats to store the core statistics (struct rtnl_link_stats64) or full statistics (array of u64) in a caller-provided buffer. Use efx_nic_update_stats() to aid in the implementation. 4. Rename struct efx_ethtool_stat to struct efx_sw_stat_desc and EFX_ETHTOOL_NUM_STATS to EFX_ETHTOOL_SW_STAT_COUNT. 5. Remove efx_nic::mac_stats and struct efx_mac_stats. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 1d4f388..9fe375f 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1920,34 +1920,9 @@ static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats) { struct efx_nic *efx = netdev_priv(net_dev); - struct efx_mac_stats *mac_stats = &efx->mac_stats; spin_lock_bh(&efx->stats_lock); - - efx->type->update_stats(efx); - - stats->rx_packets = mac_stats->rx_packets; - stats->tx_packets = mac_stats->tx_packets; - stats->rx_bytes = mac_stats->rx_bytes; - stats->tx_bytes = mac_stats->tx_bytes; - stats->rx_dropped = efx->n_rx_nodesc_drop_cnt; - stats->multicast = mac_stats->rx_multicast; - stats->collisions = mac_stats->tx_collision; - stats->rx_length_errors = (mac_stats->rx_gtjumbo + - mac_stats->rx_length_error); - stats->rx_crc_errors = mac_stats->rx_bad; - stats->rx_frame_errors = mac_stats->rx_align_error; - stats->rx_fifo_errors = mac_stats->rx_overflow; - stats->rx_missed_errors = mac_stats->rx_missed; - stats->tx_window_errors = mac_stats->tx_late_collision; - - stats->rx_errors = (stats->rx_length_errors + - stats->rx_crc_errors + - stats->rx_frame_errors + - mac_stats->rx_symbol_error); - stats->tx_errors = (stats->tx_window_errors + - mac_stats->tx_bad); - + efx->type->update_stats(efx, NULL, stats); spin_unlock_bh(&efx->stats_lock); return stats; diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 86b4391..0b8ffdf 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -19,10 +19,9 @@ #include "filter.h" #include "nic.h" -struct efx_ethtool_stat { +struct efx_sw_stat_desc { const char *name; enum { - EFX_ETHTOOL_STAT_SOURCE_mac_stats, EFX_ETHTOOL_STAT_SOURCE_nic, EFX_ETHTOOL_STAT_SOURCE_channel, EFX_ETHTOOL_STAT_SOURCE_tx_queue @@ -31,7 +30,7 @@ struct efx_ethtool_stat { u64(*get_stat) (void *field); /* Reader function */ }; -/* Initialiser for a struct #efx_ethtool_stat with type-checking */ +/* Initialiser for a struct efx_sw_stat_desc with type-checking */ #define 
EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \ get_stat_function) { \ .name = #stat_name, \ @@ -48,24 +47,11 @@ static u64 efx_get_uint_stat(void *field) return *(unsigned int *)field; } -static u64 efx_get_u64_stat(void *field) -{ - return *(u64 *) field; -} - static u64 efx_get_atomic_stat(void *field) { return atomic_read((atomic_t *) field); } -#define EFX_ETHTOOL_U64_MAC_STAT(field) \ - EFX_ETHTOOL_STAT(field, mac_stats, field, \ - u64, efx_get_u64_stat) - -#define EFX_ETHTOOL_UINT_NIC_STAT(name) \ - EFX_ETHTOOL_STAT(name, nic, n_##name, \ - unsigned int, efx_get_uint_stat) - #define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \ EFX_ETHTOOL_STAT(field, nic, field, \ atomic_t, efx_get_atomic_stat) @@ -78,72 +64,11 @@ static u64 efx_get_atomic_stat(void *field) EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \ unsigned int, efx_get_uint_stat) -static const struct efx_ethtool_stat efx_ethtool_stats[] = { - EFX_ETHTOOL_U64_MAC_STAT(tx_bytes), - EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes), - EFX_ETHTOOL_U64_MAC_STAT(tx_bad_bytes), - EFX_ETHTOOL_U64_MAC_STAT(tx_packets), - EFX_ETHTOOL_U64_MAC_STAT(tx_bad), - EFX_ETHTOOL_U64_MAC_STAT(tx_pause), - EFX_ETHTOOL_U64_MAC_STAT(tx_control), - EFX_ETHTOOL_U64_MAC_STAT(tx_unicast), - EFX_ETHTOOL_U64_MAC_STAT(tx_multicast), - EFX_ETHTOOL_U64_MAC_STAT(tx_broadcast), - EFX_ETHTOOL_U64_MAC_STAT(tx_lt64), - EFX_ETHTOOL_U64_MAC_STAT(tx_64), - EFX_ETHTOOL_U64_MAC_STAT(tx_65_to_127), - EFX_ETHTOOL_U64_MAC_STAT(tx_128_to_255), - EFX_ETHTOOL_U64_MAC_STAT(tx_256_to_511), - EFX_ETHTOOL_U64_MAC_STAT(tx_512_to_1023), - EFX_ETHTOOL_U64_MAC_STAT(tx_1024_to_15xx), - EFX_ETHTOOL_U64_MAC_STAT(tx_15xx_to_jumbo), - EFX_ETHTOOL_U64_MAC_STAT(tx_gtjumbo), - EFX_ETHTOOL_U64_MAC_STAT(tx_collision), - EFX_ETHTOOL_U64_MAC_STAT(tx_single_collision), - EFX_ETHTOOL_U64_MAC_STAT(tx_multiple_collision), - EFX_ETHTOOL_U64_MAC_STAT(tx_excessive_collision), - EFX_ETHTOOL_U64_MAC_STAT(tx_deferred), - EFX_ETHTOOL_U64_MAC_STAT(tx_late_collision), - EFX_ETHTOOL_U64_MAC_STAT(tx_excessive_deferred), - EFX_ETHTOOL_U64_MAC_STAT(tx_non_tcpudp), - EFX_ETHTOOL_U64_MAC_STAT(tx_mac_src_error), - EFX_ETHTOOL_U64_MAC_STAT(tx_ip_src_error), +static const struct efx_sw_stat_desc efx_sw_stat_desc[] = { EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts), EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers), EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets), EFX_ETHTOOL_UINT_TXQ_STAT(pushes), - EFX_ETHTOOL_U64_MAC_STAT(rx_bytes), - EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes), - EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes), - EFX_ETHTOOL_U64_MAC_STAT(rx_packets), - EFX_ETHTOOL_U64_MAC_STAT(rx_good), - EFX_ETHTOOL_U64_MAC_STAT(rx_bad), - EFX_ETHTOOL_U64_MAC_STAT(rx_pause), - EFX_ETHTOOL_U64_MAC_STAT(rx_control), - EFX_ETHTOOL_U64_MAC_STAT(rx_unicast), - EFX_ETHTOOL_U64_MAC_STAT(rx_multicast), - EFX_ETHTOOL_U64_MAC_STAT(rx_broadcast), - EFX_ETHTOOL_U64_MAC_STAT(rx_lt64), - EFX_ETHTOOL_U64_MAC_STAT(rx_64), - EFX_ETHTOOL_U64_MAC_STAT(rx_65_to_127), - EFX_ETHTOOL_U64_MAC_STAT(rx_128_to_255), - EFX_ETHTOOL_U64_MAC_STAT(rx_256_to_511), - EFX_ETHTOOL_U64_MAC_STAT(rx_512_to_1023), - EFX_ETHTOOL_U64_MAC_STAT(rx_1024_to_15xx), - EFX_ETHTOOL_U64_MAC_STAT(rx_15xx_to_jumbo), - EFX_ETHTOOL_U64_MAC_STAT(rx_gtjumbo), - EFX_ETHTOOL_U64_MAC_STAT(rx_bad_lt64), - EFX_ETHTOOL_U64_MAC_STAT(rx_bad_64_to_15xx), - EFX_ETHTOOL_U64_MAC_STAT(rx_bad_15xx_to_jumbo), - EFX_ETHTOOL_U64_MAC_STAT(rx_bad_gtjumbo), - EFX_ETHTOOL_U64_MAC_STAT(rx_overflow), - EFX_ETHTOOL_U64_MAC_STAT(rx_missed), - EFX_ETHTOOL_U64_MAC_STAT(rx_false_carrier), - 
EFX_ETHTOOL_U64_MAC_STAT(rx_symbol_error), - EFX_ETHTOOL_U64_MAC_STAT(rx_align_error), - EFX_ETHTOOL_U64_MAC_STAT(rx_length_error), - EFX_ETHTOOL_U64_MAC_STAT(rx_internal_error), - EFX_ETHTOOL_UINT_NIC_STAT(rx_nodesc_drop_cnt), EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err), @@ -153,8 +78,7 @@ static const struct efx_ethtool_stat efx_ethtool_stats[] = { EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_nodesc_trunc), }; -/* Number of ethtool statistics */ -#define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats) +#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc) #define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB @@ -424,12 +348,14 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx, static int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set) { + struct efx_nic *efx = netdev_priv(net_dev); + switch (string_set) { case ETH_SS_STATS: - return EFX_ETHTOOL_NUM_STATS; + return efx->type->describe_stats(efx, NULL) + + EFX_ETHTOOL_SW_STAT_COUNT; case ETH_SS_TEST: - return efx_ethtool_fill_self_tests(netdev_priv(net_dev), - NULL, NULL, NULL); + return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL); default: return -EINVAL; } @@ -443,9 +369,11 @@ static void efx_ethtool_get_strings(struct net_device *net_dev, switch (string_set) { case ETH_SS_STATS: - for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) + strings += (efx->type->describe_stats(efx, strings) * + ETH_GSTRING_LEN); + for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) strlcpy(strings + i * ETH_GSTRING_LEN, - efx_ethtool_stats[i].name, ETH_GSTRING_LEN); + efx_sw_stat_desc[i].name, ETH_GSTRING_LEN); break; case ETH_SS_TEST: efx_ethtool_fill_self_tests(efx, NULL, strings, NULL); @@ -461,27 +389,20 @@ static void efx_ethtool_get_stats(struct net_device *net_dev, u64 *data) { struct efx_nic *efx = netdev_priv(net_dev); - struct efx_mac_stats *mac_stats = &efx->mac_stats; - const struct efx_ethtool_stat *stat; + const struct efx_sw_stat_desc *stat; struct efx_channel *channel; struct efx_tx_queue *tx_queue; int i; - EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS); - spin_lock_bh(&efx->stats_lock); - /* Update MAC and NIC statistics */ - efx->type->update_stats(efx); + /* Get NIC statistics */ + data += efx->type->update_stats(efx, data, NULL); - /* Fill detailed statistics buffer */ - for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) { - stat = &efx_ethtool_stats[i]; + /* Get software statistics */ + for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) { + stat = &efx_sw_stat_desc[i]; switch (stat->source) { - case EFX_ETHTOOL_STAT_SOURCE_mac_stats: - data[i] = stat->get_stat((void *)mac_stats + - stat->offset); - break; case EFX_ETHTOOL_STAT_SOURCE_nic: data[i] = stat->get_stat((void *)efx + stat->offset); break; diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index 76a9e0d..0be1f37 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -31,7 +31,7 @@ /************************************************************************** * - * MAC stats DMA format + * NIC stats * ************************************************************************** */ @@ -134,27 +134,67 @@ #define FALCON_XMAC_STATS_DMA_FLAG(efx) \ (*(u32 *)((efx)->stats_buffer.addr + XgDmaDone_offset)) -#define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset) -#define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH) - -/* Retrieve statistic from statistics block */ -#define 
FALCON_STAT(efx, falcon_stat, efx_stat) do { \ - if (FALCON_STAT_WIDTH(falcon_stat) == 16) \ - (efx)->mac_stats.efx_stat += le16_to_cpu( \ - *((__force __le16 *) \ - (efx->stats_buffer.addr + \ - FALCON_STAT_OFFSET(falcon_stat)))); \ - else if (FALCON_STAT_WIDTH(falcon_stat) == 32) \ - (efx)->mac_stats.efx_stat += le32_to_cpu( \ - *((__force __le32 *) \ - (efx->stats_buffer.addr + \ - FALCON_STAT_OFFSET(falcon_stat)))); \ - else \ - (efx)->mac_stats.efx_stat += le64_to_cpu( \ - *((__force __le64 *) \ - (efx->stats_buffer.addr + \ - FALCON_STAT_OFFSET(falcon_stat)))); \ - } while (0) +#define FALCON_DMA_STAT(ext_name, hw_name) \ + [FALCON_STAT_ ## ext_name] = \ + { #ext_name, \ + /* 48-bit stats are zero-padded to 64 on DMA */ \ + hw_name ## _ ## WIDTH == 48 ? 64 : hw_name ## _ ## WIDTH, \ + hw_name ## _ ## offset } +#define FALCON_OTHER_STAT(ext_name) \ + [FALCON_STAT_ ## ext_name] = { #ext_name, 0, 0 } + +static const struct efx_hw_stat_desc falcon_stat_desc[FALCON_STAT_COUNT] = { + FALCON_DMA_STAT(tx_bytes, XgTxOctets), + FALCON_DMA_STAT(tx_packets, XgTxPkts), + FALCON_DMA_STAT(tx_pause, XgTxPausePkts), + FALCON_DMA_STAT(tx_control, XgTxControlPkts), + FALCON_DMA_STAT(tx_unicast, XgTxUnicastPkts), + FALCON_DMA_STAT(tx_multicast, XgTxMulticastPkts), + FALCON_DMA_STAT(tx_broadcast, XgTxBroadcastPkts), + FALCON_DMA_STAT(tx_lt64, XgTxUndersizePkts), + FALCON_DMA_STAT(tx_64, XgTxPkts64Octets), + FALCON_DMA_STAT(tx_65_to_127, XgTxPkts65to127Octets), + FALCON_DMA_STAT(tx_128_to_255, XgTxPkts128to255Octets), + FALCON_DMA_STAT(tx_256_to_511, XgTxPkts256to511Octets), + FALCON_DMA_STAT(tx_512_to_1023, XgTxPkts512to1023Octets), + FALCON_DMA_STAT(tx_1024_to_15xx, XgTxPkts1024to15xxOctets), + FALCON_DMA_STAT(tx_15xx_to_jumbo, XgTxPkts1519toMaxOctets), + FALCON_DMA_STAT(tx_gtjumbo, XgTxOversizePkts), + FALCON_DMA_STAT(tx_non_tcpudp, XgTxNonTcpUdpPkt), + FALCON_DMA_STAT(tx_mac_src_error, XgTxMacSrcErrPkt), + FALCON_DMA_STAT(tx_ip_src_error, XgTxIpSrcErrPkt), + FALCON_DMA_STAT(rx_bytes, XgRxOctets), + FALCON_DMA_STAT(rx_good_bytes, XgRxOctetsOK), + FALCON_OTHER_STAT(rx_bad_bytes), + FALCON_DMA_STAT(rx_packets, XgRxPkts), + FALCON_DMA_STAT(rx_good, XgRxPktsOK), + FALCON_DMA_STAT(rx_bad, XgRxFCSerrorPkts), + FALCON_DMA_STAT(rx_pause, XgRxPausePkts), + FALCON_DMA_STAT(rx_control, XgRxControlPkts), + FALCON_DMA_STAT(rx_unicast, XgRxUnicastPkts), + FALCON_DMA_STAT(rx_multicast, XgRxMulticastPkts), + FALCON_DMA_STAT(rx_broadcast, XgRxBroadcastPkts), + FALCON_DMA_STAT(rx_lt64, XgRxUndersizePkts), + FALCON_DMA_STAT(rx_64, XgRxPkts64Octets), + FALCON_DMA_STAT(rx_65_to_127, XgRxPkts65to127Octets), + FALCON_DMA_STAT(rx_128_to_255, XgRxPkts128to255Octets), + FALCON_DMA_STAT(rx_256_to_511, XgRxPkts256to511Octets), + FALCON_DMA_STAT(rx_512_to_1023, XgRxPkts512to1023Octets), + FALCON_DMA_STAT(rx_1024_to_15xx, XgRxPkts1024to15xxOctets), + FALCON_DMA_STAT(rx_15xx_to_jumbo, XgRxPkts15xxtoMaxOctets), + FALCON_DMA_STAT(rx_gtjumbo, XgRxOversizePkts), + FALCON_DMA_STAT(rx_bad_lt64, XgRxUndersizeFCSerrorPkts), + FALCON_DMA_STAT(rx_bad_gtjumbo, XgRxJabberPkts), + FALCON_DMA_STAT(rx_overflow, XgRxDropEvents), + FALCON_DMA_STAT(rx_symbol_error, XgRxSymbolError), + FALCON_DMA_STAT(rx_align_error, XgRxAlignError), + FALCON_DMA_STAT(rx_length_error, XgRxLengthError), + FALCON_DMA_STAT(rx_internal_error, XgRxInternalMACError), + FALCON_OTHER_STAT(rx_nodesc_drop_cnt), +}; +static const unsigned long falcon_stat_mask[] = { + [0 ... 
BITS_TO_LONGS(FALCON_STAT_COUNT) - 1] = ~0UL, +}; /************************************************************************** * @@ -1159,66 +1199,6 @@ static int falcon_reconfigure_xmac(struct efx_nic *efx) return 0; } -static void falcon_update_stats_xmac(struct efx_nic *efx) -{ - struct efx_mac_stats *mac_stats = &efx->mac_stats; - - /* Update MAC stats from DMAed values */ - FALCON_STAT(efx, XgRxOctets, rx_bytes); - FALCON_STAT(efx, XgRxOctetsOK, rx_good_bytes); - FALCON_STAT(efx, XgRxPkts, rx_packets); - FALCON_STAT(efx, XgRxPktsOK, rx_good); - FALCON_STAT(efx, XgRxBroadcastPkts, rx_broadcast); - FALCON_STAT(efx, XgRxMulticastPkts, rx_multicast); - FALCON_STAT(efx, XgRxUnicastPkts, rx_unicast); - FALCON_STAT(efx, XgRxUndersizePkts, rx_lt64); - FALCON_STAT(efx, XgRxOversizePkts, rx_gtjumbo); - FALCON_STAT(efx, XgRxJabberPkts, rx_bad_gtjumbo); - FALCON_STAT(efx, XgRxUndersizeFCSerrorPkts, rx_bad_lt64); - FALCON_STAT(efx, XgRxDropEvents, rx_overflow); - FALCON_STAT(efx, XgRxFCSerrorPkts, rx_bad); - FALCON_STAT(efx, XgRxAlignError, rx_align_error); - FALCON_STAT(efx, XgRxSymbolError, rx_symbol_error); - FALCON_STAT(efx, XgRxInternalMACError, rx_internal_error); - FALCON_STAT(efx, XgRxControlPkts, rx_control); - FALCON_STAT(efx, XgRxPausePkts, rx_pause); - FALCON_STAT(efx, XgRxPkts64Octets, rx_64); - FALCON_STAT(efx, XgRxPkts65to127Octets, rx_65_to_127); - FALCON_STAT(efx, XgRxPkts128to255Octets, rx_128_to_255); - FALCON_STAT(efx, XgRxPkts256to511Octets, rx_256_to_511); - FALCON_STAT(efx, XgRxPkts512to1023Octets, rx_512_to_1023); - FALCON_STAT(efx, XgRxPkts1024to15xxOctets, rx_1024_to_15xx); - FALCON_STAT(efx, XgRxPkts15xxtoMaxOctets, rx_15xx_to_jumbo); - FALCON_STAT(efx, XgRxLengthError, rx_length_error); - FALCON_STAT(efx, XgTxPkts, tx_packets); - FALCON_STAT(efx, XgTxOctets, tx_bytes); - FALCON_STAT(efx, XgTxMulticastPkts, tx_multicast); - FALCON_STAT(efx, XgTxBroadcastPkts, tx_broadcast); - FALCON_STAT(efx, XgTxUnicastPkts, tx_unicast); - FALCON_STAT(efx, XgTxControlPkts, tx_control); - FALCON_STAT(efx, XgTxPausePkts, tx_pause); - FALCON_STAT(efx, XgTxPkts64Octets, tx_64); - FALCON_STAT(efx, XgTxPkts65to127Octets, tx_65_to_127); - FALCON_STAT(efx, XgTxPkts128to255Octets, tx_128_to_255); - FALCON_STAT(efx, XgTxPkts256to511Octets, tx_256_to_511); - FALCON_STAT(efx, XgTxPkts512to1023Octets, tx_512_to_1023); - FALCON_STAT(efx, XgTxPkts1024to15xxOctets, tx_1024_to_15xx); - FALCON_STAT(efx, XgTxPkts1519toMaxOctets, tx_15xx_to_jumbo); - FALCON_STAT(efx, XgTxUndersizePkts, tx_lt64); - FALCON_STAT(efx, XgTxOversizePkts, tx_gtjumbo); - FALCON_STAT(efx, XgTxNonTcpUdpPkt, tx_non_tcpudp); - FALCON_STAT(efx, XgTxMacSrcErrPkt, tx_mac_src_error); - FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error); - - /* Update derived statistics */ - efx_update_diff_stat(&mac_stats->tx_good_bytes, - mac_stats->tx_bytes - mac_stats->tx_bad_bytes - - mac_stats->tx_control * 64); - efx_update_diff_stat(&mac_stats->rx_bad_bytes, - mac_stats->rx_bytes - mac_stats->rx_good_bytes - - mac_stats->rx_control * 64); -} - static void falcon_poll_xmac(struct efx_nic *efx) { struct falcon_nic_data *nic_data = efx->nic_data; @@ -1422,7 +1402,9 @@ static void falcon_stats_complete(struct efx_nic *efx) nic_data->stats_pending = false; if (FALCON_XMAC_STATS_DMA_FLAG(efx)) { rmb(); /* read the done flag before the stats */ - falcon_update_stats_xmac(efx); + efx_nic_update_stats(falcon_stat_desc, FALCON_STAT_COUNT, + falcon_stat_mask, nic_data->stats, + efx->stats_buffer.addr, true); } else { netif_err(efx, hw, efx->net_dev, "timed out 
waiting for statistics\n"); @@ -2538,23 +2520,65 @@ static void falcon_remove_nic(struct efx_nic *efx) efx->nic_data = NULL; } -static void falcon_update_nic_stats(struct efx_nic *efx) +static size_t falcon_describe_nic_stats(struct efx_nic *efx, u8 *names) +{ + return efx_nic_describe_stats(falcon_stat_desc, FALCON_STAT_COUNT, + falcon_stat_mask, names); +} + +static size_t falcon_update_nic_stats(struct efx_nic *efx, u64 *full_stats, + struct rtnl_link_stats64 *core_stats) { struct falcon_nic_data *nic_data = efx->nic_data; + u64 *stats = nic_data->stats; efx_oword_t cnt; - if (nic_data->stats_disable_count) - return; + if (!nic_data->stats_disable_count) { + efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP); + stats[FALCON_STAT_rx_nodesc_drop_cnt] += + EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT); + + if (nic_data->stats_pending && + FALCON_XMAC_STATS_DMA_FLAG(efx)) { + nic_data->stats_pending = false; + rmb(); /* read the done flag before the stats */ + efx_nic_update_stats( + falcon_stat_desc, FALCON_STAT_COUNT, + falcon_stat_mask, + stats, efx->stats_buffer.addr, true); + } - efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP); - efx->n_rx_nodesc_drop_cnt += - EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT); + /* Update derived statistic */ + efx_update_diff_stat(&stats[FALCON_STAT_rx_bad_bytes], + stats[FALCON_STAT_rx_bytes] - + stats[FALCON_STAT_rx_good_bytes] - + stats[FALCON_STAT_rx_control] * 64); + } - if (nic_data->stats_pending && FALCON_XMAC_STATS_DMA_FLAG(efx)) { - nic_data->stats_pending = false; - rmb(); /* read the done flag before the stats */ - falcon_update_stats_xmac(efx); + if (full_stats) + memcpy(full_stats, stats, sizeof(u64) * FALCON_STAT_COUNT); + + if (core_stats) { + core_stats->rx_packets = stats[FALCON_STAT_rx_packets]; + core_stats->tx_packets = stats[FALCON_STAT_tx_packets]; + core_stats->rx_bytes = stats[FALCON_STAT_rx_bytes]; + core_stats->tx_bytes = stats[FALCON_STAT_tx_bytes]; + core_stats->rx_dropped = stats[FALCON_STAT_rx_nodesc_drop_cnt]; + core_stats->multicast = stats[FALCON_STAT_rx_multicast]; + core_stats->rx_length_errors = + stats[FALCON_STAT_rx_gtjumbo] + + stats[FALCON_STAT_rx_length_error]; + core_stats->rx_crc_errors = stats[FALCON_STAT_rx_bad]; + core_stats->rx_frame_errors = stats[FALCON_STAT_rx_align_error]; + core_stats->rx_fifo_errors = stats[FALCON_STAT_rx_overflow]; + + core_stats->rx_errors = (core_stats->rx_length_errors + + core_stats->rx_crc_errors + + core_stats->rx_frame_errors + + stats[FALCON_STAT_rx_symbol_error]); } + + return FALCON_STAT_COUNT; } void falcon_start_nic_stats(struct efx_nic *efx) @@ -2643,6 +2667,7 @@ const struct efx_nic_type falcon_a1_nic_type = { .fini_dmaq = efx_farch_fini_dmaq, .prepare_flush = falcon_prepare_flush, .finish_flush = efx_port_dummy_op_void, + .describe_stats = falcon_describe_nic_stats, .update_stats = falcon_update_nic_stats, .start_stats = falcon_start_nic_stats, .stop_stats = falcon_stop_nic_stats, @@ -2735,6 +2760,7 @@ const struct efx_nic_type falcon_b0_nic_type = { .fini_dmaq = efx_farch_fini_dmaq, .prepare_flush = falcon_prepare_flush, .finish_flush = efx_port_dummy_op_void, + .describe_stats = falcon_describe_nic_stats, .update_stats = falcon_update_nic_stats, .start_stats = falcon_start_nic_stats, .stop_stats = falcon_stop_nic_stats, diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 67b07de..ff90ebf 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -236,21 +236,10 @@ static int efx_mcdi_poll(struct efx_nic *efx) */ int 
efx_mcdi_poll_reboot(struct efx_nic *efx) { - int rc; - if (!efx->mcdi) return 0; - rc = efx->type->mcdi_poll_reboot(efx); - if (!rc) - return 0; - - /* MAC statistics have been cleared on the NIC; clear our copy - * so that efx_update_diff_stat() can continue to work. - */ - memset(&efx->mac_stats, 0, sizeof(efx->mac_stats)); - - return rc; + return efx->type->mcdi_poll_reboot(efx); } static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi) diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index fa881cd..d33656b 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -595,75 +595,17 @@ static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode) return !!(mode & ~PHY_MODE_TX_DISABLED); } -/* - * Efx extended statistics - * - * Not all statistics are provided by all supported MACs. The purpose - * is this structure is to contain the raw statistics provided by each - * MAC. +/** + * struct efx_hw_stat_desc - Description of a hardware statistic + * @name: Name of the statistic as visible through ethtool, or %NULL if + * it should not be exposed + * @dma_width: Width in bits (0 for non-DMA statistics) + * @offset: Offset within stats (ignored for non-DMA statistics) */ -struct efx_mac_stats { - u64 tx_bytes; - u64 tx_good_bytes; - u64 tx_bad_bytes; - u64 tx_packets; - u64 tx_bad; - u64 tx_pause; - u64 tx_control; - u64 tx_unicast; - u64 tx_multicast; - u64 tx_broadcast; - u64 tx_lt64; - u64 tx_64; - u64 tx_65_to_127; - u64 tx_128_to_255; - u64 tx_256_to_511; - u64 tx_512_to_1023; - u64 tx_1024_to_15xx; - u64 tx_15xx_to_jumbo; - u64 tx_gtjumbo; - u64 tx_collision; - u64 tx_single_collision; - u64 tx_multiple_collision; - u64 tx_excessive_collision; - u64 tx_deferred; - u64 tx_late_collision; - u64 tx_excessive_deferred; - u64 tx_non_tcpudp; - u64 tx_mac_src_error; - u64 tx_ip_src_error; - u64 rx_bytes; - u64 rx_good_bytes; - u64 rx_bad_bytes; - u64 rx_packets; - u64 rx_good; - u64 rx_bad; - u64 rx_pause; - u64 rx_control; - u64 rx_unicast; - u64 rx_multicast; - u64 rx_broadcast; - u64 rx_lt64; - u64 rx_64; - u64 rx_65_to_127; - u64 rx_128_to_255; - u64 rx_256_to_511; - u64 rx_512_to_1023; - u64 rx_1024_to_15xx; - u64 rx_15xx_to_jumbo; - u64 rx_gtjumbo; - u64 rx_bad_lt64; - u64 rx_bad_64_to_15xx; - u64 rx_bad_15xx_to_jumbo; - u64 rx_bad_gtjumbo; - u64 rx_overflow; - u64 rx_missed; - u64 rx_false_carrier; - u64 rx_symbol_error; - u64 rx_align_error; - u64 rx_length_error; - u64 rx_internal_error; - u64 rx_good_lt64; +struct efx_hw_stat_desc { + const char *name; + u16 dma_width; + u16 offset; }; /* Number of bits used in a multicast filter hash address */ @@ -795,12 +737,8 @@ struct vfdi_status; * @last_irq_cpu: Last CPU to handle a possible test interrupt. This * field is used by efx_test_interrupts() to verify that an * interrupt has occurred. - * @n_rx_nodesc_drop_cnt: RX no descriptor drop count - * @mac_stats: MAC statistics. These include all statistics the MACs - * can provide. Generic code converts these into a standard - * &struct net_device_stats. - * @stats_lock: Statistics update lock. Serialises statistics fetches - * and access to @mac_stats. + * @stats_lock: Statistics update lock. Must be held when calling + * efx_nic_type::{update,start,stop}_stats. * * This is stored in the private area of the &struct net_device. 
*/ @@ -939,8 +877,6 @@ struct efx_nic { struct delayed_work monitor_work ____cacheline_aligned_in_smp; spinlock_t biu_lock; int last_irq_cpu; - unsigned n_rx_nodesc_drop_cnt; - struct efx_mac_stats mac_stats; spinlock_t stats_lock; }; @@ -984,7 +920,9 @@ struct efx_mtd_partition { * (for Falcon architecture) * @finish_flush: Clean up after flushing the DMA queues (for Falcon * architecture) - * @update_stats: Update statistics not provided by event handling + * @describe_stats: Describe statistics for ethtool + * @update_stats: Update statistics not provided by event handling. + * Either argument may be %NULL. * @start_stats: Start the regular fetching of statistics * @stop_stats: Stop the regular fetching of statistics * @set_id_led: Set state of identifying LED or revert to automatic function @@ -1098,7 +1036,9 @@ struct efx_nic_type { int (*fini_dmaq)(struct efx_nic *efx); void (*prepare_flush)(struct efx_nic *efx); void (*finish_flush)(struct efx_nic *efx); - void (*update_stats)(struct efx_nic *efx); + size_t (*describe_stats)(struct efx_nic *efx, u8 *names); + size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats, + struct rtnl_link_stats64 *core_stats); void (*start_stats)(struct efx_nic *efx); void (*stop_stats)(struct efx_nic *efx); void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode); diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index 66c71ed..b9b1277 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -429,3 +429,86 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf) } } } + +/** + * efx_nic_describe_stats - Describe supported statistics for ethtool + * @desc: Array of &struct efx_hw_stat_desc describing the statistics + * @count: Length of the @desc array + * @mask: Bitmask of which elements of @desc are enabled + * @names: Buffer to copy names to, or %NULL. The names are copied + * starting at intervals of %ETH_GSTRING_LEN bytes. + * + * Returns the number of visible statistics, i.e. the number of set + * bits in the first @count bits of @mask for which a name is defined. + */ +size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count, + const unsigned long *mask, u8 *names) +{ + size_t visible = 0; + size_t index; + + for_each_set_bit(index, mask, count) { + if (desc[index].name) { + if (names) { + strlcpy(names, desc[index].name, + ETH_GSTRING_LEN); + names += ETH_GSTRING_LEN; + } + ++visible; + } + } + + return visible; +} + +/** + * efx_nic_update_stats - Convert statistics DMA buffer to array of u64 + * @desc: Array of &struct efx_hw_stat_desc describing the DMA buffer + * layout. DMA widths of 0, 16, 32 and 64 are supported; where + * the width is specified as 0 the corresponding element of + * @stats is not updated. + * @count: Length of the @desc array + * @mask: Bitmask of which elements of @desc are enabled + * @stats: Buffer to update with the converted statistics. The length + * of this array must be at least the number of set bits in the + * first @count bits of @mask. 
+ * @dma_buf: DMA buffer containing hardware statistics + * @accumulate: If set, the converted values will be added rather than + * directly stored to the corresponding elements of @stats + */ +void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count, + const unsigned long *mask, + u64 *stats, const void *dma_buf, bool accumulate) +{ + size_t index; + + for_each_set_bit(index, mask, count) { + if (desc[index].dma_width) { + const void *addr = dma_buf + desc[index].offset; + u64 val; + + switch (desc[index].dma_width) { + case 16: + val = le16_to_cpup((__le16 *)addr); + break; + case 32: + val = le32_to_cpup((__le32 *)addr); + break; + case 64: + val = le64_to_cpup((__le64 *)addr); + break; + default: + WARN_ON(1); + val = 0; + break; + } + + if (accumulate) + *stats += val; + else + *stats = val; + } + + ++stats; + } +} diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index c3e0f1f..9afbf36 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -191,10 +191,62 @@ static inline bool falcon_spi_present(const struct falcon_spi_device *spi) return spi->size != 0; } +enum { + FALCON_STAT_tx_bytes, + FALCON_STAT_tx_packets, + FALCON_STAT_tx_pause, + FALCON_STAT_tx_control, + FALCON_STAT_tx_unicast, + FALCON_STAT_tx_multicast, + FALCON_STAT_tx_broadcast, + FALCON_STAT_tx_lt64, + FALCON_STAT_tx_64, + FALCON_STAT_tx_65_to_127, + FALCON_STAT_tx_128_to_255, + FALCON_STAT_tx_256_to_511, + FALCON_STAT_tx_512_to_1023, + FALCON_STAT_tx_1024_to_15xx, + FALCON_STAT_tx_15xx_to_jumbo, + FALCON_STAT_tx_gtjumbo, + FALCON_STAT_tx_non_tcpudp, + FALCON_STAT_tx_mac_src_error, + FALCON_STAT_tx_ip_src_error, + FALCON_STAT_rx_bytes, + FALCON_STAT_rx_good_bytes, + FALCON_STAT_rx_bad_bytes, + FALCON_STAT_rx_packets, + FALCON_STAT_rx_good, + FALCON_STAT_rx_bad, + FALCON_STAT_rx_pause, + FALCON_STAT_rx_control, + FALCON_STAT_rx_unicast, + FALCON_STAT_rx_multicast, + FALCON_STAT_rx_broadcast, + FALCON_STAT_rx_lt64, + FALCON_STAT_rx_64, + FALCON_STAT_rx_65_to_127, + FALCON_STAT_rx_128_to_255, + FALCON_STAT_rx_256_to_511, + FALCON_STAT_rx_512_to_1023, + FALCON_STAT_rx_1024_to_15xx, + FALCON_STAT_rx_15xx_to_jumbo, + FALCON_STAT_rx_gtjumbo, + FALCON_STAT_rx_bad_lt64, + FALCON_STAT_rx_bad_gtjumbo, + FALCON_STAT_rx_overflow, + FALCON_STAT_rx_symbol_error, + FALCON_STAT_rx_align_error, + FALCON_STAT_rx_length_error, + FALCON_STAT_rx_internal_error, + FALCON_STAT_rx_nodesc_drop_cnt, + FALCON_STAT_COUNT +}; + /** * struct falcon_nic_data - Falcon NIC state * @pci_dev2: Secondary function of Falcon A * @board: Board state and functions + * @stats: Hardware statistics * @stats_disable_count: Nest count for disabling statistics fetches * @stats_pending: Is there a pending DMA of MAC statistics. * @stats_timer: A timer for regularly fetching MAC statistics. 
@@ -207,6 +259,7 @@ static inline bool falcon_spi_present(const struct falcon_spi_device *spi) struct falcon_nic_data { struct pci_dev *pci_dev2; struct falcon_board board; + u64 stats[FALCON_STAT_COUNT]; unsigned int stats_disable_count; bool stats_pending; struct timer_list stats_timer; @@ -223,12 +276,75 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx) return &data->board; } +enum { + SIENA_STAT_tx_bytes, + SIENA_STAT_tx_good_bytes, + SIENA_STAT_tx_bad_bytes, + SIENA_STAT_tx_packets, + SIENA_STAT_tx_bad, + SIENA_STAT_tx_pause, + SIENA_STAT_tx_control, + SIENA_STAT_tx_unicast, + SIENA_STAT_tx_multicast, + SIENA_STAT_tx_broadcast, + SIENA_STAT_tx_lt64, + SIENA_STAT_tx_64, + SIENA_STAT_tx_65_to_127, + SIENA_STAT_tx_128_to_255, + SIENA_STAT_tx_256_to_511, + SIENA_STAT_tx_512_to_1023, + SIENA_STAT_tx_1024_to_15xx, + SIENA_STAT_tx_15xx_to_jumbo, + SIENA_STAT_tx_gtjumbo, + SIENA_STAT_tx_collision, + SIENA_STAT_tx_single_collision, + SIENA_STAT_tx_multiple_collision, + SIENA_STAT_tx_excessive_collision, + SIENA_STAT_tx_deferred, + SIENA_STAT_tx_late_collision, + SIENA_STAT_tx_excessive_deferred, + SIENA_STAT_tx_non_tcpudp, + SIENA_STAT_tx_mac_src_error, + SIENA_STAT_tx_ip_src_error, + SIENA_STAT_rx_bytes, + SIENA_STAT_rx_good_bytes, + SIENA_STAT_rx_bad_bytes, + SIENA_STAT_rx_packets, + SIENA_STAT_rx_good, + SIENA_STAT_rx_bad, + SIENA_STAT_rx_pause, + SIENA_STAT_rx_control, + SIENA_STAT_rx_unicast, + SIENA_STAT_rx_multicast, + SIENA_STAT_rx_broadcast, + SIENA_STAT_rx_lt64, + SIENA_STAT_rx_64, + SIENA_STAT_rx_65_to_127, + SIENA_STAT_rx_128_to_255, + SIENA_STAT_rx_256_to_511, + SIENA_STAT_rx_512_to_1023, + SIENA_STAT_rx_1024_to_15xx, + SIENA_STAT_rx_15xx_to_jumbo, + SIENA_STAT_rx_gtjumbo, + SIENA_STAT_rx_bad_gtjumbo, + SIENA_STAT_rx_overflow, + SIENA_STAT_rx_false_carrier, + SIENA_STAT_rx_symbol_error, + SIENA_STAT_rx_align_error, + SIENA_STAT_rx_length_error, + SIENA_STAT_rx_internal_error, + SIENA_STAT_rx_nodesc_drop_cnt, + SIENA_STAT_COUNT +}; + /** * struct siena_nic_data - Siena NIC state * @wol_filter_id: Wake-on-LAN packet filter id + * @stats: Hardware statistics */ struct siena_nic_data { int wol_filter_id; + u64 stats[SIENA_STAT_COUNT]; }; /* @@ -533,6 +649,14 @@ extern int efx_farch_test_registers(struct efx_nic *efx, extern size_t efx_nic_get_regs_len(struct efx_nic *efx); extern void efx_nic_get_regs(struct efx_nic *efx, void *buf); +extern size_t +efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count, + const unsigned long *mask, u8 *names); +extern void +efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count, + const unsigned long *mask, + u64 *stats, const void *dma_buf, bool accumulate); + #define EFX_MAX_FLUSH_TIME 5000 extern void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq, diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 11b4739..a51d90c 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -380,117 +380,160 @@ static void siena_remove_nic(struct efx_nic *efx) efx_mcdi_fini(efx); } +#define SIENA_DMA_STAT(ext_name, mcdi_name) \ + [SIENA_STAT_ ## ext_name] = \ + { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name } +#define SIENA_OTHER_STAT(ext_name) \ + [SIENA_STAT_ ## ext_name] = { #ext_name, 0, 0 } + +static const struct efx_hw_stat_desc siena_stat_desc[SIENA_STAT_COUNT] = { + SIENA_DMA_STAT(tx_bytes, TX_BYTES), + SIENA_OTHER_STAT(tx_good_bytes), + SIENA_DMA_STAT(tx_bad_bytes, TX_BAD_BYTES), + SIENA_DMA_STAT(tx_packets, TX_PKTS), + 
SIENA_DMA_STAT(tx_bad, TX_BAD_FCS_PKTS), + SIENA_DMA_STAT(tx_pause, TX_PAUSE_PKTS), + SIENA_DMA_STAT(tx_control, TX_CONTROL_PKTS), + SIENA_DMA_STAT(tx_unicast, TX_UNICAST_PKTS), + SIENA_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS), + SIENA_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS), + SIENA_DMA_STAT(tx_lt64, TX_LT64_PKTS), + SIENA_DMA_STAT(tx_64, TX_64_PKTS), + SIENA_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS), + SIENA_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS), + SIENA_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS), + SIENA_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS), + SIENA_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS), + SIENA_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS), + SIENA_DMA_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS), + SIENA_OTHER_STAT(tx_collision), + SIENA_DMA_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS), + SIENA_DMA_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS), + SIENA_DMA_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS), + SIENA_DMA_STAT(tx_deferred, TX_DEFERRED_PKTS), + SIENA_DMA_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS), + SIENA_DMA_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS), + SIENA_DMA_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS), + SIENA_DMA_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS), + SIENA_DMA_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS), + SIENA_DMA_STAT(rx_bytes, RX_BYTES), + SIENA_OTHER_STAT(rx_good_bytes), + SIENA_DMA_STAT(rx_bad_bytes, RX_BAD_BYTES), + SIENA_DMA_STAT(rx_packets, RX_PKTS), + SIENA_DMA_STAT(rx_good, RX_GOOD_PKTS), + SIENA_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS), + SIENA_DMA_STAT(rx_pause, RX_PAUSE_PKTS), + SIENA_DMA_STAT(rx_control, RX_CONTROL_PKTS), + SIENA_DMA_STAT(rx_unicast, RX_UNICAST_PKTS), + SIENA_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS), + SIENA_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS), + SIENA_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS), + SIENA_DMA_STAT(rx_64, RX_64_PKTS), + SIENA_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS), + SIENA_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS), + SIENA_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS), + SIENA_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS), + SIENA_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS), + SIENA_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS), + SIENA_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS), + SIENA_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS), + SIENA_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS), + SIENA_DMA_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS), + SIENA_DMA_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS), + SIENA_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS), + SIENA_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS), + SIENA_DMA_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS), + SIENA_DMA_STAT(rx_nodesc_drop_cnt, RX_NODESC_DROPS), +}; +static const unsigned long siena_stat_mask[] = { + [0 ... 
BITS_TO_LONGS(SIENA_STAT_COUNT) - 1] = ~0UL, +}; + +static size_t siena_describe_nic_stats(struct efx_nic *efx, u8 *names) +{ + return efx_nic_describe_stats(siena_stat_desc, SIENA_STAT_COUNT, + siena_stat_mask, names); +} + static int siena_try_update_nic_stats(struct efx_nic *efx) { + struct siena_nic_data *nic_data = efx->nic_data; + u64 *stats = nic_data->stats; __le64 *dma_stats; - struct efx_mac_stats *mac_stats; __le64 generation_start, generation_end; - mac_stats = &efx->mac_stats; dma_stats = efx->stats_buffer.addr; generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; if (generation_end == EFX_MC_STATS_GENERATION_INVALID) return 0; rmb(); - -#define MAC_STAT(M, D) \ - mac_stats->M = le64_to_cpu(dma_stats[MC_CMD_MAC_ ## D]) - - MAC_STAT(tx_bytes, TX_BYTES); - MAC_STAT(tx_bad_bytes, TX_BAD_BYTES); - efx_update_diff_stat(&mac_stats->tx_good_bytes, - mac_stats->tx_bytes - mac_stats->tx_bad_bytes); - MAC_STAT(tx_packets, TX_PKTS); - MAC_STAT(tx_bad, TX_BAD_FCS_PKTS); - MAC_STAT(tx_pause, TX_PAUSE_PKTS); - MAC_STAT(tx_control, TX_CONTROL_PKTS); - MAC_STAT(tx_unicast, TX_UNICAST_PKTS); - MAC_STAT(tx_multicast, TX_MULTICAST_PKTS); - MAC_STAT(tx_broadcast, TX_BROADCAST_PKTS); - MAC_STAT(tx_lt64, TX_LT64_PKTS); - MAC_STAT(tx_64, TX_64_PKTS); - MAC_STAT(tx_65_to_127, TX_65_TO_127_PKTS); - MAC_STAT(tx_128_to_255, TX_128_TO_255_PKTS); - MAC_STAT(tx_256_to_511, TX_256_TO_511_PKTS); - MAC_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS); - MAC_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS); - MAC_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS); - MAC_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS); - mac_stats->tx_collision = 0; - MAC_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS); - MAC_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS); - MAC_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS); - MAC_STAT(tx_deferred, TX_DEFERRED_PKTS); - MAC_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS); - mac_stats->tx_collision = (mac_stats->tx_single_collision + - mac_stats->tx_multiple_collision + - mac_stats->tx_excessive_collision + - mac_stats->tx_late_collision); - MAC_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS); - MAC_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS); - MAC_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS); - MAC_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS); - MAC_STAT(rx_bytes, RX_BYTES); - MAC_STAT(rx_bad_bytes, RX_BAD_BYTES); - efx_update_diff_stat(&mac_stats->rx_good_bytes, - mac_stats->rx_bytes - mac_stats->rx_bad_bytes); - MAC_STAT(rx_packets, RX_PKTS); - MAC_STAT(rx_good, RX_GOOD_PKTS); - MAC_STAT(rx_bad, RX_BAD_FCS_PKTS); - MAC_STAT(rx_pause, RX_PAUSE_PKTS); - MAC_STAT(rx_control, RX_CONTROL_PKTS); - MAC_STAT(rx_unicast, RX_UNICAST_PKTS); - MAC_STAT(rx_multicast, RX_MULTICAST_PKTS); - MAC_STAT(rx_broadcast, RX_BROADCAST_PKTS); - MAC_STAT(rx_lt64, RX_UNDERSIZE_PKTS); - MAC_STAT(rx_64, RX_64_PKTS); - MAC_STAT(rx_65_to_127, RX_65_TO_127_PKTS); - MAC_STAT(rx_128_to_255, RX_128_TO_255_PKTS); - MAC_STAT(rx_256_to_511, RX_256_TO_511_PKTS); - MAC_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS); - MAC_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS); - MAC_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS); - MAC_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS); - mac_stats->rx_bad_lt64 = 0; - mac_stats->rx_bad_64_to_15xx = 0; - mac_stats->rx_bad_15xx_to_jumbo = 0; - MAC_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS); - MAC_STAT(rx_overflow, RX_OVERFLOW_PKTS); - mac_stats->rx_missed = 0; - MAC_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS); - MAC_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS); - MAC_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS); 
- MAC_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS); - MAC_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS); - mac_stats->rx_good_lt64 = 0; - - efx->n_rx_nodesc_drop_cnt = - le64_to_cpu(dma_stats[MC_CMD_MAC_RX_NODESC_DROPS]); - -#undef MAC_STAT - + efx_nic_update_stats(siena_stat_desc, SIENA_STAT_COUNT, siena_stat_mask, + stats, efx->stats_buffer.addr, false); rmb(); generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; if (generation_end != generation_start) return -EAGAIN; + /* Update derived statistics */ + efx_update_diff_stat(&stats[SIENA_STAT_tx_good_bytes], + stats[SIENA_STAT_tx_bytes] - + stats[SIENA_STAT_tx_bad_bytes]); + stats[SIENA_STAT_tx_collision] = + stats[SIENA_STAT_tx_single_collision] + + stats[SIENA_STAT_tx_multiple_collision] + + stats[SIENA_STAT_tx_excessive_collision] + + stats[SIENA_STAT_tx_late_collision]; + efx_update_diff_stat(&stats[SIENA_STAT_rx_good_bytes], + stats[SIENA_STAT_rx_bytes] - + stats[SIENA_STAT_rx_bad_bytes]); return 0; } -static void siena_update_nic_stats(struct efx_nic *efx) +static size_t siena_update_nic_stats(struct efx_nic *efx, u64 *full_stats, + struct rtnl_link_stats64 *core_stats) { + struct siena_nic_data *nic_data = efx->nic_data; + u64 *stats = nic_data->stats; int retry; /* If we're unlucky enough to read statistics wduring the DMA, wait * up to 10ms for it to finish (typically takes <500us) */ for (retry = 0; retry < 100; ++retry) { if (siena_try_update_nic_stats(efx) == 0) - return; + break; udelay(100); } - /* Use the old values instead */ + if (full_stats) + memcpy(full_stats, stats, sizeof(u64) * SIENA_STAT_COUNT); + + if (core_stats) { + core_stats->rx_packets = stats[SIENA_STAT_rx_packets]; + core_stats->tx_packets = stats[SIENA_STAT_tx_packets]; + core_stats->rx_bytes = stats[SIENA_STAT_rx_bytes]; + core_stats->tx_bytes = stats[SIENA_STAT_tx_bytes]; + core_stats->rx_dropped = stats[SIENA_STAT_rx_nodesc_drop_cnt]; + core_stats->multicast = stats[SIENA_STAT_rx_multicast]; + core_stats->collisions = stats[SIENA_STAT_tx_collision]; + core_stats->rx_length_errors = + stats[SIENA_STAT_rx_gtjumbo] + + stats[SIENA_STAT_rx_length_error]; + core_stats->rx_crc_errors = stats[SIENA_STAT_rx_bad]; + core_stats->rx_frame_errors = stats[SIENA_STAT_rx_align_error]; + core_stats->rx_fifo_errors = stats[SIENA_STAT_rx_overflow]; + core_stats->tx_window_errors = + stats[SIENA_STAT_tx_late_collision]; + + core_stats->rx_errors = (core_stats->rx_length_errors + + core_stats->rx_crc_errors + + core_stats->rx_frame_errors + + stats[SIENA_STAT_rx_symbol_error]); + core_stats->tx_errors = (core_stats->tx_window_errors + + stats[SIENA_STAT_tx_bad]); + } + + return SIENA_STAT_COUNT; } static int siena_mac_reconfigure(struct efx_nic *efx) @@ -652,6 +695,7 @@ static void siena_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf, static int siena_mcdi_poll_reboot(struct efx_nic *efx) { + struct siena_nic_data *nic_data = efx->nic_data; unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx); efx_dword_t reg; u32 value; @@ -665,6 +709,12 @@ static int siena_mcdi_poll_reboot(struct efx_nic *efx) EFX_ZERO_DWORD(reg); efx_writed(efx, ®, addr); + /* MAC statistics have been cleared on the NIC; clear the local + * copies that we update with efx_update_diff_stat(). 
+ */ + nic_data->stats[SIENA_STAT_tx_good_bytes] = 0; + nic_data->stats[SIENA_STAT_rx_good_bytes] = 0; + if (value == MC_STATUS_DWORD_ASSERT) return -EINTR; else @@ -830,6 +880,7 @@ const struct efx_nic_type siena_a0_nic_type = { .fini_dmaq = efx_farch_fini_dmaq, .prepare_flush = siena_prepare_flush, .finish_flush = siena_finish_flush, + .describe_stats = siena_describe_nic_stats, .update_stats = siena_update_nic_stats, .start_stats = efx_mcdi_mac_start_stats, .stop_stats = efx_mcdi_mac_stop_stats, -- cgit v0.10.2 From c1d828bdca00f29e00c1650750ce0445d8b4458d Mon Sep 17 00:00:00 2001 From: Laurence Evans Date: Wed, 6 Mar 2013 15:33:17 +0000 Subject: sfc: PTP MCDI requests need to initialise periph ID field This field is ignored by Siena firmware but is significant to EF10 firmware. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index b386901..4de2f8f 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -313,6 +313,7 @@ static int efx_ptp_enable(struct efx_nic *efx) MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN); MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE, efx->ptp_data->channel->channel); MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode); @@ -331,6 +332,7 @@ static int efx_ptp_disable(struct efx_nic *efx) MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_DISABLE_LEN); MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), NULL, 0, NULL); } @@ -531,6 +533,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings) int *start = ptp->start.addr; MCDI_SET_DWORD(synch_buf, PTP_IN_OP, MC_CMD_PTP_OP_SYNCHRONIZE); + MCDI_SET_DWORD(synch_buf, PTP_IN_PERIPH_ID, 0); MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_NUMTIMESETS, num_readings); MCDI_SET_QWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR, @@ -574,6 +577,7 @@ static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb) size_t len; MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT); + MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_PERIPH_ID, 0); MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len); if (skb_shinfo(skb)->nr_frags != 0) { rc = skb_linearize(skb); @@ -1377,6 +1381,7 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) (PPB_EXTRA_BITS + MAX_PPB_BITS)); MCDI_SET_DWORD(inadj, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); + MCDI_SET_DWORD(inadj, PTP_IN_PERIPH_ID, 0); MCDI_SET_QWORD(inadj, PTP_IN_ADJUST_FREQ, adjustment_ns); MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_SECONDS, 0); MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_NANOSECONDS, 0); @@ -1399,6 +1404,7 @@ static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ADJUST_LEN); MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, 0); MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec); MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec); @@ -1417,6 +1423,7 @@ static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts) int rc; MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), NULL); -- cgit v0.10.2 From 
977a5d5d32f4797ace5ef65ee3f2232a1c88a274 Mon Sep 17 00:00:00 2001 From: Laurence Evans Date: Thu, 7 Mar 2013 11:46:58 +0000 Subject: sfc: Add a function pointer to abstract write of host time into NIC shared memory Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index d33656b..21b1c2a 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -1117,6 +1117,7 @@ struct efx_nic_type { size_t *retlen, const u8 *buffer); int (*mtd_sync)(struct mtd_info *mtd); #endif + void (*ptp_write_host_time)(struct efx_nic *efx, u32 host_time); int revision; unsigned int txd_ptr_tbl_base; diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 4de2f8f..c60cabb 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -390,8 +390,7 @@ static void efx_ptp_send_times(struct efx_nic *efx, host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS | now.ts_real.tv_nsec); /* Update host time in NIC memory */ - _efx_writed(efx, cpu_to_le32(host_time), - FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST); + efx->type->ptp_write_host_time(efx, host_time); } *last_time = now; } diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index a51d90c..3c0a544 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -855,6 +855,19 @@ fail: /************************************************************************** * + * PTP + * + ************************************************************************** + */ + +static void siena_ptp_write_host_time(struct efx_nic *efx, u32 host_time) +{ + _efx_writed(efx, cpu_to_le32(host_time), + FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST); +} + +/************************************************************************** + * * Revision-dependent attributes used by efx.c and nic.c * ************************************************************************** @@ -943,6 +956,7 @@ const struct efx_nic_type siena_a0_nic_type = { .mtd_write = efx_mcdi_mtd_write, .mtd_sync = efx_mcdi_mtd_sync, #endif + .ptp_write_host_time = siena_ptp_write_host_time, .revision = EFX_REV_SIENA_A0, .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, -- cgit v0.10.2 From 3de82b91ea604d7178925ce80ab821c968009c21 Mon Sep 17 00:00:00 2001 From: Alexandre Rames Date: Thu, 13 Jun 2013 11:36:15 +0100 Subject: sfc: Add EF10 support for TX/RX DMA error events handling. Also, since we handle all DMA errors in the same way, merge RESET_TYPE_(RX|TX)_DESC_FETCH into RESET_TYPE_DMA_ERROR. 
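A minimal standalone sketch, not part of the patch above, of the idea behind the merge: both descriptor-fetch error events trigger exactly the same recovery, so carrying two distinct reset reasons adds nothing. All names below are simplified stand-ins for the driver's real enums and reset-scheduling function.

#include <stdio.h>

/* Simplified stand-ins for the driver's reset reasons and DMA error events */
enum reset_type { RESET_TYPE_NONE, RESET_TYPE_DMA_ERROR };
enum dma_err_event { DMA_ERR_TX_DESC_FETCH, DMA_ERR_RX_DESC_FETCH };

/* Both directions are handled identically, so one merged reason is enough */
static enum reset_type map_dma_error(enum dma_err_event ev)
{
	switch (ev) {
	case DMA_ERR_TX_DESC_FETCH:
	case DMA_ERR_RX_DESC_FETCH:
		return RESET_TYPE_DMA_ERROR;
	}
	return RESET_TYPE_NONE;
}

int main(void)
{
	printf("tx -> %d, rx -> %d\n",
	       map_dma_error(DMA_ERR_TX_DESC_FETCH),
	       map_dma_error(DMA_ERR_RX_DESC_FETCH));
	return 0;
}

The patch below applies the same collapse to the driver's real enum and to each site that previously chose between the two reasons.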
Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 9fe375f..efad5f7 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -80,8 +80,7 @@ const char *const efx_reset_type_names[] = { [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG", [RESET_TYPE_INT_ERROR] = "INT_ERROR", [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY", - [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH", - [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH", + [RESET_TYPE_DMA_ERROR] = "DMA_ERROR", [RESET_TYPE_TX_SKIP] = "TX_SKIP", [RESET_TYPE_MC_FAILURE] = "MC_FAILURE", }; diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h index ab8fb58..8665921 100644 --- a/drivers/net/ethernet/sfc/enum.h +++ b/drivers/net/ethernet/sfc/enum.h @@ -147,8 +147,7 @@ enum efx_loopback_mode { * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog * @RESET_TYPE_INT_ERROR: reset due to internal error * @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors - * @RESET_TYPE_RX_DESC_FETCH: pcie error during rx descriptor fetch - * @RESET_TYPE_TX_DESC_FETCH: pcie error during tx descriptor fetch + * @RESET_TYPE_DMA_ERROR: DMA error * @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors * @RESET_TYPE_MC_FAILURE: MC reboot/assertion */ @@ -163,8 +162,7 @@ enum reset_type { RESET_TYPE_TX_WATCHDOG, RESET_TYPE_INT_ERROR, RESET_TYPE_RX_RECOVERY, - RESET_TYPE_RX_DESC_FETCH, - RESET_TYPE_TX_DESC_FETCH, + RESET_TYPE_DMA_ERROR, RESET_TYPE_TX_SKIP, RESET_TYPE_MC_FAILURE, RESET_TYPE_MAX, diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index 0be1f37..1140b83 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -1893,8 +1893,7 @@ static enum reset_type falcon_map_reset_reason(enum reset_type reason) { switch (reason) { case RESET_TYPE_RX_RECOVERY: - case RESET_TYPE_RX_DESC_FETCH: - case RESET_TYPE_TX_DESC_FETCH: + case RESET_TYPE_DMA_ERROR: case RESET_TYPE_TX_SKIP: /* These can occasionally occur due to hardware bugs. * We try to reset without disrupting the link. diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index b6af8f4..d21483d 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -832,7 +832,7 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) efx_farch_notify_tx_desc(tx_queue); netif_tx_unlock(efx->net_dev); } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) { - efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); + efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); } else { netif_err(efx, tx_err, efx->net_dev, "channel %d unexpected TX event " @@ -1217,7 +1217,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) "RX DMA Q %d reports descriptor fetch error." " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data); - efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH); + efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); } else efx_sriov_desc_fetch_err(efx, ev_sub_data); break; @@ -1227,7 +1227,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) "TX DMA Q %d reports descriptor fetch error." 
" TX Q %d is disabled.\n", ev_sub_data, ev_sub_data); - efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); + efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); } else efx_sriov_desc_fetch_err(efx, ev_sub_data); break; diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index ff90ebf..3810634 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -587,6 +587,14 @@ void efx_mcdi_process_event(struct efx_channel *channel, efx_ptp_event(efx, event); break; + case MCDI_EVENT_CODE_TX_ERR: + case MCDI_EVENT_CODE_RX_ERR: + netif_err(efx, hw, efx->net_dev, + "%s DMA error (event: "EFX_QWORD_FMT")\n", + code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX", + EFX_QWORD_VAL(*event)); + efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); + break; default: netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n", code); -- cgit v0.10.2 From d36a08b4ae08566426ddb7519b869ec0cd040532 Mon Sep 17 00:00:00 2001 From: Daniel Pieczko Date: Thu, 20 Jun 2013 11:40:07 +0100 Subject: sfc: use MCDI epoch flag to improve MC reboot detection in the driver The Huntington MC will reject all MCDI requests after an MC reboot until it sees one with the NOT_EPOCH flag clear. This flag is set by default for all requests, and then cleared on the first request after we detect that an MC reboot has occurred. The old MCDI_STATUS_DELAY_COUNT gave a timeout of 10ms, which was not long enough for the driver to detect that a reboot had occurred based on the warm boot count while calling efx_mcdi_poll_reboot() from the loop in efx_mcdi_ev_death(). Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 3810634..63121ad 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -26,9 +26,10 @@ /* A reboot/assertion causes the MCDI status word to be set after the * command word is set or a REBOOT event is sent. If we notice a reboot - * via these mechanisms then wait 10ms for the status word to be set. */ + * via these mechanisms then wait 20ms for the status word to be set. 
+ */ #define MCDI_STATUS_DELAY_US 100 -#define MCDI_STATUS_DELAY_COUNT 100 +#define MCDI_STATUS_DELAY_COUNT 200 #define MCDI_STATUS_SLEEP_MS \ (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000) @@ -56,6 +57,7 @@ int efx_mcdi_init(struct efx_nic *efx) mcdi->mode = MCDI_MODE_POLL; (void) efx_mcdi_poll_reboot(efx); + mcdi->new_epoch = true; /* Recover from a failed assertion before probing */ return efx_mcdi_handle_assertion(efx); @@ -85,24 +87,26 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, if (efx->type->mcdi_max_ver == 1) { /* MCDI v1 */ - EFX_POPULATE_DWORD_6(hdr[0], + EFX_POPULATE_DWORD_7(hdr[0], MCDI_HEADER_RESPONSE, 0, MCDI_HEADER_RESYNC, 1, MCDI_HEADER_CODE, cmd, MCDI_HEADER_DATALEN, inlen, MCDI_HEADER_SEQ, seqno, - MCDI_HEADER_XFLAGS, xflags); + MCDI_HEADER_XFLAGS, xflags, + MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch); hdr_len = 4; } else { /* MCDI v2 */ BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2); - EFX_POPULATE_DWORD_6(hdr[0], + EFX_POPULATE_DWORD_7(hdr[0], MCDI_HEADER_RESPONSE, 0, MCDI_HEADER_RESYNC, 1, MCDI_HEADER_CODE, MC_CMD_V2_EXTN, MCDI_HEADER_DATALEN, 0, MCDI_HEADER_SEQ, seqno, - MCDI_HEADER_XFLAGS, xflags); + MCDI_HEADER_XFLAGS, xflags, + MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch); EFX_POPULATE_DWORD_2(hdr[1], MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd, MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen); @@ -373,6 +377,7 @@ int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, spin_unlock_bh(&mcdi->iface_lock); efx_mcdi_copyin(efx, cmd, inbuf, inlen); + mcdi->new_epoch = false; return 0; } @@ -435,6 +440,7 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, if (rc == -EIO || rc == -EINTR) { msleep(MCDI_STATUS_SLEEP_MS); efx_mcdi_poll_reboot(efx); + mcdi->new_epoch = true; } } @@ -530,6 +536,7 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc) break; udelay(MCDI_STATUS_DELAY_US); } + mcdi->new_epoch = true; } spin_unlock(&mcdi->iface_lock); diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 5f67ac3..303d9e8 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -36,6 +36,7 @@ enum efx_mcdi_mode { * @state: Request handling state. Waited for by @wq. * @mode: Poll for mcdi completion, or wait for an mcdi_event. * @wq: Wait queue for threads waiting for @state != %MCDI_STATE_RUNNING + * @new_epoch: Indicates start of day or start of MC reboot recovery * @iface_lock: Serialises access to all the following fields * @seqno: The next sequence number to use for mcdi requests. * @credits: Number of spurious MCDI completion events allowed before we @@ -49,6 +50,7 @@ struct efx_mcdi_iface { enum efx_mcdi_mode mode; wait_queue_head_t wq; spinlock_t iface_lock; + bool new_epoch; unsigned int credits; unsigned int seqno; int resprc; -- cgit v0.10.2 From ba388fdd08ff458fabc8588e565b5d031191c50a Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Thu, 27 Jun 2013 00:13:07 +0100 Subject: sfc: Remove early call to efx_nic_type::reconfigure_mac in efx_reset_up() efx_reset_up() calls efx_nic_type::reconfigure_mac once directly, then again through efx_start_all() -> efx_start_port() -> efx->type->reconfigure_mac(). This first call is also made too early to work properly on EF10. 
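A toy sketch, not part of the patch, of the call-path duplication being removed; every name here is an invented stand-in. Dropping the direct call leaves a single invocation of the hook, made via the start-port path once the rest of the datapath is ready.

#include <stdio.h>

/* Invented stand-ins for the driver's NIC structure and method table */
struct toy_nic;
struct toy_nic_type {
	void (*reconfigure_mac)(struct toy_nic *nic);
};
struct toy_nic {
	const struct toy_nic_type *type;
	int mac_reconfigures;
};

static void toy_reconfigure_mac(struct toy_nic *nic)
{
	nic->mac_reconfigures++;
}

static void toy_start_port(struct toy_nic *nic)
{
	nic->type->reconfigure_mac(nic);	/* the call that remains */
}

static void toy_reset_up(struct toy_nic *nic)
{
	/* Before the change there was an extra, too-early call here:
	 *	nic->type->reconfigure_mac(nic);
	 */
	toy_start_port(nic);
}

int main(void)
{
	static const struct toy_nic_type type = {
		.reconfigure_mac = toy_reconfigure_mac,
	};
	struct toy_nic nic = { .type = &type, .mac_reconfigures = 0 };

	toy_reset_up(&nic);
	printf("reconfigure_mac called %d time(s)\n", nic.mac_reconfigures);
	return 0;
}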
Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index efad5f7..3af28a8 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -2202,8 +2202,6 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) "could not restore PHY settings\n"); } - efx->type->reconfigure_mac(efx); - efx_enable_interrupts(efx); efx_restore_filters(efx); efx_sriov_reset(efx); -- cgit v0.10.2 From 64a27752dc249bf2ccae53457e6d72e1281d758d Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Fri, 28 Jun 2013 20:14:46 +0100 Subject: sfc: Rename EFX_PAGE_BLOCK_SIZE to EFX_VI_PAGE_SIZE and adjust comments Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h index 39f6098..19e8b95 100644 --- a/drivers/net/ethernet/sfc/io.h +++ b/drivers/net/ethernet/sfc/io.h @@ -204,12 +204,12 @@ static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value, efx_reado(efx, value, reg + index * sizeof(efx_oword_t)); } -/* Page-mapped register block size */ -#define EFX_PAGE_BLOCK_SIZE 0x2000 +/* Page size used as step between per-VI registers */ +#define EFX_VI_PAGE_SIZE 0x2000 -/* Calculate offset to page-mapped register block */ +/* Calculate offset to page-mapped register */ #define EFX_PAGED_REG(page, reg) \ - ((page) * EFX_PAGE_BLOCK_SIZE + (reg)) + ((page) * EFX_VI_PAGE_SIZE + (reg)) /* Write the whole of RX_DESC_UPD or TX_DESC_UPD */ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value, -- cgit v0.10.2 From 43a3739d559f02cb00d92a51d8f2a7d294a1b5e5 Mon Sep 17 00:00:00 2001 From: Jon Cooper Date: Thu, 18 Oct 2012 15:49:54 +0100 Subject: sfc: Generalise packet hash lookup to support EF10 RX prefix EF10 uses an entirely different RX prefix format from Falcon-arch. Extend struct efx_nic_type to describe this. [bwh: Also replace the magic numbers used for the Falcon-arch RX prefix] Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 3af28a8..db2f119 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -573,7 +573,7 @@ static void efx_start_datapath(struct efx_nic *efx) * support the current MTU, including padding for header * alignment and overruns. 
*/ - efx->rx_dma_len = (efx->type->rx_buffer_hash_size + + efx->rx_dma_len = (efx->rx_prefix_size + EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + efx->type->rx_buffer_padding); rx_buf_len = (sizeof(struct efx_rx_page_state) + @@ -2456,6 +2456,9 @@ static int efx_init_struct(struct efx_nic *efx, strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); efx->net_dev = net_dev; + efx->rx_prefix_size = efx->type->rx_prefix_size; + efx->rx_packet_hash_offset = + efx->type->rx_hash_offset - efx->type->rx_prefix_size; spin_lock_init(&efx->stats_lock); mutex_init(&efx->mac_lock); efx->phy_op = &efx_dummy_phy_operations; diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index 1140b83..ec77611 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -2827,7 +2827,8 @@ const struct efx_nic_type falcon_b0_nic_type = { .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL, .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR, .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), - .rx_buffer_hash_size = 0x10, + .rx_prefix_size = FS_BZ_RX_PREFIX_SIZE, + .rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST, .rx_buffer_padding = 0, .can_rx_scatter = true, .max_interrupt_mode = EFX_INT_MODE_MSIX, diff --git a/drivers/net/ethernet/sfc/farch_regs.h b/drivers/net/ethernet/sfc/farch_regs.h index 00ef17a..491b103 100644 --- a/drivers/net/ethernet/sfc/farch_regs.h +++ b/drivers/net/ethernet/sfc/farch_regs.h @@ -2925,4 +2925,8 @@ #define FSF_AZ_DRV_GEN_EV_MAGIC_LBN 0 #define FSF_AZ_DRV_GEN_EV_MAGIC_WIDTH 32 +/* RX packet prefix */ +#define FS_BZ_RX_PREFIX_HASH_OFST 12 +#define FS_BZ_RX_PREFIX_SIZE 16 + #endif /* EFX_FARCH_REGS_H */ diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 21b1c2a..481e412 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -663,6 +663,9 @@ struct vfdi_status; * @rx_buffer_order: Order (log2) of number of pages for each RX buffer * @rx_buffer_truesize: Amortised allocation size of an RX buffer, * for use in sk_buff::truesize + * @rx_prefix_size: Size of RX prefix before packet data + * @rx_packet_hash_offset: Offset of RX flow hash from start of packet data + * (valid only if @rx_prefix_size != 0; always negative) * @rx_hash_key: Toeplitz hash key for RSS * @rx_indir_table: Indirection table for RSS * @rx_scatter: Scatter mode enabled for receives @@ -793,6 +796,8 @@ struct efx_nic { unsigned int rx_page_buf_step; unsigned int rx_bufs_per_page; unsigned int rx_pages_per_batch; + unsigned int rx_prefix_size; + int rx_packet_hash_offset; u8 rx_hash_key[40]; u32 rx_indir_table[128]; bool rx_scatter; @@ -1009,7 +1014,8 @@ struct efx_mtd_partition { * @evq_ptr_tbl_base: Event queue pointer table base address * @evq_rptr_tbl_base: Event queue read-pointer table base address * @max_dma_mask: Maximum possible DMA mask - * @rx_buffer_hash_size: Size of hash at start of RX packet + * @rx_prefix_size: Size of RX prefix before packet data + * @rx_hash_offset: Offset of RX flow hash within prefix * @rx_buffer_padding: Size of padding at end of RX packet * @can_rx_scatter: NIC is able to scatter packet to multiple buffers * @max_interrupt_mode: Highest capability interrupt mode supported @@ -1126,7 +1132,8 @@ struct efx_nic_type { unsigned int evq_ptr_tbl_base; unsigned int evq_rptr_tbl_base; u64 max_dma_mask; - unsigned int rx_buffer_hash_size; + unsigned int rx_prefix_size; + unsigned int rx_hash_offset; unsigned int rx_buffer_padding; bool can_rx_scatter; unsigned int 
max_interrupt_mode; diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 1299092..7646bb3 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -61,13 +61,12 @@ static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf) return page_address(buf->page) + buf->page_offset; } -static inline u32 efx_rx_buf_hash(const u8 *eh) +static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh) { - /* The ethernet header is always directly after any hash. */ -#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0 - return __le32_to_cpup((const __le32 *)(eh - 4)); +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset)); #else - const u8 *data = eh - 4; + const u8 *data = eh + efx->rx_packet_hash_offset; return (u32)data[0] | (u32)data[1] << 8 | (u32)data[2] << 16 | @@ -439,7 +438,7 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, } if (efx->net_dev->features & NETIF_F_RXHASH) - skb->rxhash = efx_rx_buf_hash(eh); + skb->rxhash = efx_rx_buf_hash(efx, eh); skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE); @@ -568,8 +567,8 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, */ prefetch(efx_rx_buf_va(rx_buf)); - rx_buf->page_offset += efx->type->rx_buffer_hash_size; - rx_buf->len -= efx->type->rx_buffer_hash_size; + rx_buf->page_offset += efx->rx_prefix_size; + rx_buf->len -= efx->rx_prefix_size; if (n_frags > 1) { /* Release/sync DMA mapping for additional fragments. diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 3c0a544..89180d4 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -965,7 +965,8 @@ const struct efx_nic_type siena_a0_nic_type = { .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL, .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR, .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), - .rx_buffer_hash_size = 0x10, + .rx_prefix_size = FS_BZ_RX_PREFIX_SIZE, + .rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST, .rx_buffer_padding = 0, .can_rx_scatter = true, .max_interrupt_mode = EFX_INT_MODE_MSIX, -- cgit v0.10.2 From 02e121650b5e46ef3f2b3ed1ebc4b70e47799056 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Sat, 27 Apr 2013 01:55:21 +0100 Subject: sfc: Add TX merged completion counter Add a counter for TX merged completion events. This is implemented in the common TX path, because the NIC event handlers only know how many descriptors were completed, not how many packets. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 0b8ffdf..6354693 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -65,6 +65,7 @@ static u64 efx_get_atomic_stat(void *field) unsigned int, efx_get_uint_stat) static const struct efx_sw_stat_desc efx_sw_stat_desc[] = { + EFX_ETHTOOL_UINT_TXQ_STAT(merge_events), EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts), EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers), EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets), diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 481e412..f52b38c 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -186,6 +186,7 @@ struct efx_tx_buffer { * variable indicates that the queue is empty. This is to * avoid cache-line ping-pong between the xmit path and the * completion path. 
+ * @merge_events: Number of TX merged completion events * @insert_count: Current insert pointer * This is the number of buffers that have been added to the * software ring. @@ -222,6 +223,7 @@ struct efx_tx_queue { /* Members used mainly on the completion path */ unsigned int read_count ____cacheline_aligned_in_smp; unsigned int old_write_count; + unsigned int merge_events; /* Members used only on the xmit path */ unsigned int insert_count ____cacheline_aligned_in_smp; diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 4903c4f..85ee647 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -437,6 +437,9 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl); netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl); + if (pkts_compl > 1) + ++tx_queue->merge_events; + /* See if we need to restart the netif queue. This memory * barrier ensures that we write read_count (inside * efx_dequeue_buffers()) before reading the queue status. -- cgit v0.10.2 From 3dced740c243f454a9b83fbe73133911d5c6ce8d Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Sat, 27 Apr 2013 01:55:18 +0100 Subject: sfc: Add support for reading packet length from prefix Define a flag for struct efx_rx_buffer and efx_rx_packet() that indicates packet length must be read from the prefix. If this is set, read the length in __efx_rx_packet() (when the prefix should have arrived in cache). Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index f52b38c..e85bed0 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -263,6 +263,7 @@ struct efx_rx_buffer { #define EFX_RX_PKT_CSUMMED 0x0002 #define EFX_RX_PKT_DISCARD 0x0004 #define EFX_RX_PKT_TCP 0x0040 +#define EFX_RX_PKT_PREFIX_LEN 0x0080 /* length is in prefix only */ /** * struct efx_rx_page_state - Page-based rx buffer state @@ -668,6 +669,8 @@ struct vfdi_status; * @rx_prefix_size: Size of RX prefix before packet data * @rx_packet_hash_offset: Offset of RX flow hash from start of packet data * (valid only if @rx_prefix_size != 0; always negative) + * @rx_packet_len_offset: Offset of RX packet length from start of packet data + * (valid only for NICs that set %EFX_RX_PKT_PREFIX_LEN; always negative) * @rx_hash_key: Toeplitz hash key for RSS * @rx_indir_table: Indirection table for RSS * @rx_scatter: Scatter mode enabled for receives @@ -800,6 +803,7 @@ struct efx_nic { unsigned int rx_pages_per_batch; unsigned int rx_prefix_size; int rx_packet_hash_offset; + int rx_packet_len_offset; u8 rx_hash_key[40]; u32 rx_indir_table[128]; bool rx_scatter; diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 7646bb3..864b6ff 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -526,7 +526,8 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, /* Validate the number of fragments and completed length */ if (n_frags == 1) { - efx_rx_packet__check_len(rx_queue, rx_buf, len); + if (!(flags & EFX_RX_PKT_PREFIX_LEN)) + efx_rx_packet__check_len(rx_queue, rx_buf, len); } else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) || unlikely(len <= (n_frags - 1) * EFX_RX_USR_BUF_SIZE) || unlikely(len > n_frags * EFX_RX_USR_BUF_SIZE) || @@ -554,7 +555,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, return; } - if (n_frags == 1) + if (n_frags == 1 && 
!(flags & EFX_RX_PKT_PREFIX_LEN)) rx_buf->len = len; /* Release and/or sync the DMA mapping - assumes all RX buffers @@ -633,6 +634,13 @@ void __efx_rx_packet(struct efx_channel *channel) efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index); u8 *eh = efx_rx_buf_va(rx_buf); + /* Read length from the prefix if necessary. This already + * excludes the length of the prefix itself. + */ + if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN) + rx_buf->len = le16_to_cpup((__le16 *) + (eh + efx->rx_packet_len_offset)); + /* If we're in loopback test, then pass the packet directly to the * loopback layer, and free the rx_buf here */ -- cgit v0.10.2 From 8c4e720f181f251948c52913c74a86f68aed9c50 Mon Sep 17 00:00:00 2001 From: Alexandre Rames Date: Wed, 3 Jul 2013 09:47:34 +0100 Subject: sfc: Return an error code when a sensor is busy. [bwh: Also name this new state, though we don't expect to see it in an event] Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c index 958c73f..544889b 100644 --- a/drivers/net/ethernet/sfc/mcdi_mon.c +++ b/drivers/net/ethernet/sfc/mcdi_mon.c @@ -54,6 +54,7 @@ static const char *const sensor_status_names[] = { [MC_CMD_SENSOR_STATE_WARNING] = "Warning", [MC_CMD_SENSOR_STATE_FATAL] = "Fatal", [MC_CMD_SENSOR_STATE_BROKEN] = "Device failure", + [MC_CMD_SENSOR_STATE_NO_READING] = "No reading", }; void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev) @@ -144,13 +145,17 @@ static ssize_t efx_mcdi_mon_show_value(struct device *dev, struct efx_mcdi_mon_attribute *mon_attr = container_of(attr, struct efx_mcdi_mon_attribute, dev_attr); efx_dword_t entry; - unsigned int value; + unsigned int value, state; int rc; rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry); if (rc) return rc; + state = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE); + if (state == MC_CMD_SENSOR_STATE_NO_READING) + return -EBUSY; + value = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE); /* Convert temperature from degrees to milli-degrees Celsius */ -- cgit v0.10.2 From d4fbdcfe93928fbcb7374ea490e41f7b69d95380 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Thu, 8 Aug 2013 11:14:20 +0100 Subject: sfc: Use extended MC_CMD_SENSOR_INFO and MC_CMD_READ_SENSORS We need to use extended requests to read and get metadata for sensors numbered > 31. 
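[ed: The driver changes that follow discover sensors one 32-bit mask page at a time, with a reserved bit in each page indicating that a further page exists, and then address sensor N as page N / 32, bit N % 32. A minimal, self-contained userspace sketch of that paging arithmetic is added here for reference; the constant and helper names are illustrative stand-ins, not the MCDI interface itself.]

#include <stdint.h>
#include <stdio.h>

#define SENSOR_PAGE_NEXT_BIT 31	/* illustrative: "another page follows" flag */

/* Stand-in for the firmware query that returns one 32-bit sensor mask page. */
static uint32_t read_sensor_mask(unsigned int page)
{
	static const uint32_t pages[] = { 0x8000000fu, 0x00000007u };
	return page < 2 ? pages[page] : 0;
}

int main(void)
{
	unsigned int page = 0, n_sensors = 0;
	uint32_t mask;

	do {
		mask = read_sensor_mask(page);
		/* Count set bits, excluding the "next page" flag itself. */
		n_sensors += __builtin_popcount(mask & ~(1u << SENSOR_PAGE_NEXT_BIT));
		page++;
	} while (mask & (1u << SENSOR_PAGE_NEXT_BIT));

	/* Sensor number N is found in page N / 32 at bit N % 32. */
	printf("%u sensors across %u page(s)\n", n_sensors, page);
	return 0;
}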
Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c index 544889b..d7d4566 100644 --- a/drivers/net/ethernet/sfc/mcdi_mon.c +++ b/drivers/net/ethernet/sfc/mcdi_mon.c @@ -28,7 +28,7 @@ static const struct { const char *label; enum efx_hwmon_type hwmon_type; int port; -} efx_mcdi_sensor_type[MC_CMD_SENSOR_ENTRY_MAXNUM] = { +} efx_mcdi_sensor_type[] = { #define SENSOR(name, label, hwmon_type, port) \ [MC_CMD_SENSOR_##name] = { label, hwmon_type, port } SENSOR(CONTROLLER_TEMP, "Controller temp.", EFX_HWMON_TEMP, -1), @@ -86,6 +86,7 @@ struct efx_mcdi_mon_attribute { struct device_attribute dev_attr; unsigned int index; unsigned int type; + enum efx_hwmon_type hwmon_type; unsigned int limit_value; char name[12]; }; @@ -93,11 +94,12 @@ struct efx_mcdi_mon_attribute { static int efx_mcdi_mon_update(struct efx_nic *efx) { struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); - MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_SENSORS_IN_LEN); + MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_SENSORS_EXT_IN_LEN); int rc; - MCDI_SET_QWORD(inbuf, READ_SENSORS_IN_DMA_ADDR, + MCDI_SET_QWORD(inbuf, READ_SENSORS_EXT_IN_DMA_ADDR, hwmon->dma_buf.dma_addr); + MCDI_SET_DWORD(inbuf, READ_SENSORS_EXT_IN_LENGTH, hwmon->dma_buf.len); rc = efx_mcdi_rpc(efx, MC_CMD_READ_SENSORS, inbuf, sizeof(inbuf), NULL, 0, NULL); @@ -159,7 +161,7 @@ static ssize_t efx_mcdi_mon_show_value(struct device *dev, value = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE); /* Convert temperature from degrees to milli-degrees Celsius */ - if (efx_mcdi_sensor_type[mon_attr->type].hwmon_type == EFX_HWMON_TEMP) + if (mon_attr->hwmon_type == EFX_HWMON_TEMP) value *= 1000; return sprintf(buf, "%u\n", value); @@ -176,7 +178,7 @@ static ssize_t efx_mcdi_mon_show_limit(struct device *dev, value = mon_attr->limit_value; /* Convert temperature from degrees to milli-degrees Celsius */ - if (efx_mcdi_sensor_type[mon_attr->type].hwmon_type == EFX_HWMON_TEMP) + if (mon_attr->hwmon_type == EFX_HWMON_TEMP) value *= 1000; return sprintf(buf, "%u\n", value); @@ -224,6 +226,10 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name, strlcpy(attr->name, name, sizeof(attr->name)); attr->index = index; attr->type = type; + if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) + attr->hwmon_type = efx_mcdi_sensor_type[type].hwmon_type; + else + attr->hwmon_type = EFX_HWMON_UNKNOWN; attr->limit_value = limit_value; sysfs_attr_init(&attr->dev_attr.attr); attr->dev_attr.attr.name = attr->name; @@ -238,35 +244,42 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name, int efx_mcdi_mon_probe(struct efx_nic *efx) { struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); - unsigned int n_attrs, n_temp = 0, n_cool = 0, n_in = 0; + unsigned int n_temp = 0, n_cool = 0, n_in = 0; + MCDI_DECLARE_BUF(inbuf, MC_CMD_SENSOR_INFO_EXT_IN_LEN); MCDI_DECLARE_BUF(outbuf, MC_CMD_SENSOR_INFO_OUT_LENMAX); + unsigned int n_pages, n_sensors, n_attrs, page; size_t outlen; char name[12]; u32 mask; - int rc, i, type; + int rc, i, j, type; - BUILD_BUG_ON(MC_CMD_SENSOR_INFO_IN_LEN != 0); + /* Find out how many sensors are present */ + n_sensors = 0; + page = 0; + do { + MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, page); - rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, NULL, 0, - outbuf, sizeof(outbuf), &outlen); - if (rc) - return rc; - if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN) - return -EIO; - - /* Find out which sensors are present. Don't create a device - * if there are none. 
- */ - mask = MCDI_DWORD(outbuf, SENSOR_INFO_OUT_MASK); - if (mask == 0) + rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN) + return -EIO; + + mask = MCDI_DWORD(outbuf, SENSOR_INFO_OUT_MASK); + n_sensors += hweight32(mask & ~(1 << MC_CMD_SENSOR_PAGE0_NEXT)); + ++page; + } while (mask & (1 << MC_CMD_SENSOR_PAGE0_NEXT)); + n_pages = page; + + /* Don't create a device if there are none */ + if (n_sensors == 0) return 0; - /* Check again for short response */ - if (outlen < MC_CMD_SENSOR_INFO_OUT_LEN(hweight32(mask))) - return -EIO; - - rc = efx_nic_alloc_buffer(efx, &hwmon->dma_buf, - 4 * MC_CMD_SENSOR_ENTRY_MAXNUM, GFP_KERNEL); + rc = efx_nic_alloc_buffer( + efx, &hwmon->dma_buf, + n_sensors * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN, + GFP_KERNEL); if (rc) return rc; @@ -277,7 +290,7 @@ int efx_mcdi_mon_probe(struct efx_nic *efx) * attributes for this set of sensors: name of the driver plus * value, min, max, crit, alarm and label for each sensor. */ - n_attrs = 1 + 6 * hweight32(mask); + n_attrs = 1 + 6 * n_sensors; hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL); if (!hwmon->attrs) { rc = -ENOMEM; @@ -294,26 +307,63 @@ int efx_mcdi_mon_probe(struct efx_nic *efx) if (rc) goto fail; - for (i = 0, type = -1; ; i++) { + for (i = 0, j = -1, type = -1; ; i++) { + enum efx_hwmon_type hwmon_type; const char *hwmon_prefix; unsigned hwmon_index; u16 min1, max1, min2, max2; /* Find next sensor type or exit if there is none */ - type++; - while (!(mask & (1 << type))) { + do { type++; - if (type == 32) - return 0; - } - /* Skip sensors specific to a different port */ - if (efx_mcdi_sensor_type[type].hwmon_type != EFX_HWMON_UNKNOWN && - efx_mcdi_sensor_type[type].port >= 0 && - efx_mcdi_sensor_type[type].port != efx_port_num(efx)) - continue; + if ((type % 32) == 0) { + page = type / 32; + j = -1; + if (page == n_pages) + return 0; + + MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, + page); + rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, + inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), + &outlen); + if (rc) + goto fail; + if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN) { + rc = -EIO; + goto fail; + } + + mask = (MCDI_DWORD(outbuf, + SENSOR_INFO_OUT_MASK) & + ~(1 << MC_CMD_SENSOR_PAGE0_NEXT)); + + /* Check again for short response */ + if (outlen < + MC_CMD_SENSOR_INFO_OUT_LEN(hweight32(mask))) { + rc = -EIO; + goto fail; + } + } + } while (!(mask & (1 << type % 32))); + j++; + + if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) { + hwmon_type = efx_mcdi_sensor_type[type].hwmon_type; + + /* Skip sensors specific to a different port */ + if (hwmon_type != EFX_HWMON_UNKNOWN && + efx_mcdi_sensor_type[type].port >= 0 && + efx_mcdi_sensor_type[type].port != + efx_port_num(efx)) + continue; + } else { + hwmon_type = EFX_HWMON_UNKNOWN; + } - switch (efx_mcdi_sensor_type[type].hwmon_type) { + switch (hwmon_type) { case EFX_HWMON_TEMP: hwmon_prefix = "temp"; hwmon_index = ++n_temp; /* 1-based */ @@ -333,13 +383,13 @@ int efx_mcdi_mon_probe(struct efx_nic *efx) } min1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY, - SENSOR_INFO_ENTRY, i, MIN1); + SENSOR_INFO_ENTRY, j, MIN1); max1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY, - SENSOR_INFO_ENTRY, i, MAX1); + SENSOR_INFO_ENTRY, j, MAX1); min2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY, - SENSOR_INFO_ENTRY, i, MIN2); + SENSOR_INFO_ENTRY, j, MIN2); max2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY, - SENSOR_INFO_ENTRY, i, MAX2); + SENSOR_INFO_ENTRY, j, MAX2); 
if (min1 != max1) { snprintf(name, sizeof(name), "%s%u_input", @@ -386,7 +436,8 @@ int efx_mcdi_mon_probe(struct efx_nic *efx) if (rc) goto fail; - if (efx_mcdi_sensor_type[type].label) { + if (type < ARRAY_SIZE(efx_mcdi_sensor_type) && + efx_mcdi_sensor_type[type].label) { snprintf(name, sizeof(name), "%s%u_label", hwmon_prefix, hwmon_index); rc = efx_mcdi_mon_add_attr( -- cgit v0.10.2 From affe759dbaa9e6c08b0da0a11d1933b61f199f51 Mon Sep 17 00:00:00 2001 From: Phil Oester Date: Wed, 26 Jun 2013 17:16:28 -0400 Subject: netfilter: ip[6]t_REJECT: tcp-reset using wrong MAC source if bridged As reported by Casper Gripenberg, in a bridged setup, using ip[6]t_REJECT with the tcp-reset option sends out reset packets with the src MAC address of the local bridge interface, instead of the MAC address of the intended destination. This causes some routers/firewalls to drop the reset packet as it appears to be spoofed. Fix this by bypassing ip[6]_local_out and setting the MAC of the sender in the tcp reset packet. This closes netfilter bugzilla #531. Signed-off-by: Phil Oester Signed-off-by: Pablo Neira Ayuso diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c index 04b18c1..b969131 100644 --- a/net/ipv4/netfilter/ipt_REJECT.c +++ b/net/ipv4/netfilter/ipt_REJECT.c @@ -119,7 +119,26 @@ static void send_reset(struct sk_buff *oldskb, int hook) nf_ct_attach(nskb, oldskb); - ip_local_out(nskb); +#ifdef CONFIG_BRIDGE_NETFILTER + /* If we use ip_local_out for bridged traffic, the MAC source on + * the RST will be ours, instead of the destination's. This confuses + * some routers/firewalls, and they drop the packet. So we need to + * build the eth header using the original destination's MAC as the + * source, and send the RST packet directly. + */ + if (oldskb->nf_bridge) { + struct ethhdr *oeth = eth_hdr(oldskb); + nskb->dev = oldskb->nf_bridge->physindev; + niph->tot_len = htons(nskb->len); + ip_send_check(niph); + if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol), + oeth->h_source, oeth->h_dest, nskb->len) < 0) + goto free_nskb; + dev_queue_xmit(nskb); + } else +#endif + ip_local_out(nskb); + return; free_nskb: diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c index 70f9abc..56eef30 100644 --- a/net/ipv6/netfilter/ip6t_REJECT.c +++ b/net/ipv6/netfilter/ip6t_REJECT.c @@ -169,7 +169,25 @@ static void send_reset(struct net *net, struct sk_buff *oldskb) nf_ct_attach(nskb, oldskb); - ip6_local_out(nskb); +#ifdef CONFIG_BRIDGE_NETFILTER + /* If we use ip6_local_out for bridged traffic, the MAC source on + * the RST will be ours, instead of the destination's. This confuses + * some routers/firewalls, and they drop the packet. So we need to + * build the eth header using the original destination's MAC as the + * source, and send the RST packet directly. 
+ */ + if (oldskb->nf_bridge) { + struct ethhdr *oeth = eth_hdr(oldskb); + nskb->dev = oldskb->nf_bridge->physindev; + nskb->protocol = htons(ETH_P_IPV6); + ip6h->payload_len = htons(sizeof(struct tcphdr)); + if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol), + oeth->h_source, oeth->h_dest, nskb->len) < 0) + return; + dev_queue_xmit(nskb); + } else +#endif + ip6_local_out(nskb); } static inline void -- cgit v0.10.2 From 706f5151e349a3d8ab85237d0d6c553930376e9f Mon Sep 17 00:00:00 2001 From: Nathan Hintz Date: Thu, 22 Aug 2013 22:09:12 -0700 Subject: netfilter: nf_defrag_ipv6.o included twice 'nf_defrag_ipv6' is built as a separate module; it shouldn't be included in the 'nf_conntrack_ipv6' module as well. Signed-off-by: Nathan Hintz Signed-off-by: Pablo Neira Ayuso diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile index 2d11fcc..ad13bf0 100644 --- a/net/ipv6/netfilter/Makefile +++ b/net/ipv6/netfilter/Makefile @@ -14,7 +14,7 @@ obj-$(CONFIG_NF_NAT_IPV6) += ip6table_nat.o nf_conntrack_ipv6-y := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o # l3 independent conntrack -obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o nf_defrag_ipv6.o +obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o nf_nat_ipv6-y := nf_nat_l3proto_ipv6.o nf_nat_proto_icmpv6.o obj-$(CONFIG_NF_NAT_IPV6) += nf_nat_ipv6.o -- cgit v0.10.2 From 41d73ec053d2424599c4ed8452b889374d523ade Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Tue, 27 Aug 2013 08:50:12 +0200 Subject: netfilter: nf_conntrack: make sequence number adjustments usable without NAT Split out sequence number adjustments from NAT and move them to the conntrack core to make them usable for SYN proxying. The sequence number adjustment information is moved to a separate extension. The extension is added to new conntracks when a NAT mapping is set up for a connection using a helper. As a side effect, this saves 24 bytes per connection with NAT in the common case that a connection does not have a helper assigned.
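[ed: For reference, the adjustment state introduced below keeps, per direction, the sequence position of the last length-changing mangling together with the offsets valid before and after it; each later packet then gets the matching offset applied to its sequence number. A minimal, self-contained userspace sketch of that rule follows, assuming 32-bit wrap-safe sequence comparison; the struct and function names are illustrative, not the kernel's.]

#include <stdint.h>
#include <stdio.h>

/* Per-direction record of the last length-changing mangling. */
struct seq_adj {
	uint32_t correction_pos;	/* seq where the last change was made */
	int32_t offset_before;		/* offset up to and including that seq */
	int32_t offset_after;		/* offset for later segments */
};

/* Wrap-safe "a is after b" for 32-bit TCP sequence numbers. */
static int seq_after(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

/* Offset to add to a sequence number sent in this direction. */
static int32_t seq_offset(const struct seq_adj *adj, uint32_t seq)
{
	return seq_after(seq, adj->correction_pos) ? adj->offset_after
						   : adj->offset_before;
}

int main(void)
{
	/* Example: a helper grew a payload by 3 bytes at sequence 1000. */
	struct seq_adj adj = {
		.correction_pos = 1000, .offset_before = 0, .offset_after = 3,
	};
	uint32_t seq = 1500;

	printf("seq %u is rewritten to %u\n",
	       (unsigned)seq, (unsigned)(seq + seq_offset(&adj, seq)));
	return 0;
}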
Signed-off-by: Patrick McHardy Tested-by: Martin Topholm Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Pablo Neira Ayuso diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index e2cf786be..708fe72ab9 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -319,6 +319,7 @@ extern void nf_ct_attach(struct sk_buff *, const struct sk_buff *); extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu; struct nf_conn; +enum ip_conntrack_info; struct nlattr; struct nfq_ct_hook { @@ -327,14 +328,10 @@ struct nfq_ct_hook { int (*parse)(const struct nlattr *attr, struct nf_conn *ct); int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct, u32 portid, u32 report); -}; -extern struct nfq_ct_hook __rcu *nfq_ct_hook; - -struct nfq_ct_nat_hook { void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct, - u32 ctinfo, s32 off); + enum ip_conntrack_info ctinfo, s32 off); }; -extern struct nfq_ct_nat_hook __rcu *nfq_ct_nat_hook; +extern struct nfq_ct_hook __rcu *nfq_ct_hook; #else static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {} #endif diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h index 977bc8a..2a22bcb 100644 --- a/include/net/netfilter/nf_conntrack_extend.h +++ b/include/net/netfilter/nf_conntrack_extend.h @@ -9,6 +9,7 @@ enum nf_ct_ext_id { NF_CT_EXT_HELPER, #if defined(CONFIG_NF_NAT) || defined(CONFIG_NF_NAT_MODULE) NF_CT_EXT_NAT, + NF_CT_EXT_SEQADJ, #endif NF_CT_EXT_ACCT, #ifdef CONFIG_NF_CONNTRACK_EVENTS @@ -31,6 +32,7 @@ enum nf_ct_ext_id { #define NF_CT_EXT_HELPER_TYPE struct nf_conn_help #define NF_CT_EXT_NAT_TYPE struct nf_conn_nat +#define NF_CT_EXT_SEQADJ_TYPE struct nf_conn_seqadj #define NF_CT_EXT_ACCT_TYPE struct nf_conn_counter #define NF_CT_EXT_ECACHE_TYPE struct nf_conntrack_ecache #define NF_CT_EXT_ZONE_TYPE struct nf_conntrack_zone diff --git a/include/net/netfilter/nf_conntrack_seqadj.h b/include/net/netfilter/nf_conntrack_seqadj.h new file mode 100644 index 0000000..30bfbbe --- /dev/null +++ b/include/net/netfilter/nf_conntrack_seqadj.h @@ -0,0 +1,49 @@ +#ifndef _NF_CONNTRACK_SEQADJ_H +#define _NF_CONNTRACK_SEQADJ_H + +#include + +/** + * struct nf_ct_seqadj - sequence number adjustment information + * + * @correction_pos: position of the last TCP sequence number modification + * @offset_before: sequence number offset before last modification + * @offset_after: sequence number offset after last modification + */ +struct nf_ct_seqadj { + u32 correction_pos; + s32 offset_before; + s32 offset_after; +}; + +struct nf_conn_seqadj { + struct nf_ct_seqadj seq[IP_CT_DIR_MAX]; +}; + +static inline struct nf_conn_seqadj *nfct_seqadj(const struct nf_conn *ct) +{ + return nf_ct_ext_find(ct, NF_CT_EXT_SEQADJ); +} + +static inline struct nf_conn_seqadj *nfct_seqadj_ext_add(struct nf_conn *ct) +{ + return nf_ct_ext_add(ct, NF_CT_EXT_SEQADJ, GFP_ATOMIC); +} + +extern int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo, + __be32 seq, s32 off); +extern void nf_ct_tcp_seqadj_set(struct sk_buff *skb, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + s32 off); + +extern int nf_ct_seq_adjust(struct sk_buff *skb, + struct nf_conn *ct, enum ip_conntrack_info ctinfo, + unsigned int protoff); +extern s32 nf_ct_seq_offset(const struct nf_conn *ct, enum ip_conntrack_dir, + u32 seq); + +extern int nf_conntrack_seqadj_init(void); +extern void nf_conntrack_seqadj_fini(void); + +#endif /* _NF_CONNTRACK_SEQADJ_H */ diff --git 
a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h index e244141..59a1924 100644 --- a/include/net/netfilter/nf_nat.h +++ b/include/net/netfilter/nf_nat.h @@ -13,15 +13,6 @@ enum nf_nat_manip_type { #define HOOK2MANIP(hooknum) ((hooknum) != NF_INET_POST_ROUTING && \ (hooknum) != NF_INET_LOCAL_IN) -/* NAT sequence number modifications */ -struct nf_nat_seq { - /* position of the last TCP sequence number modification (if any) */ - u_int32_t correction_pos; - - /* sequence number offset before and after last modification */ - int32_t offset_before, offset_after; -}; - #include #include #include @@ -39,7 +30,6 @@ struct nf_conn; /* The structure embedded in the conntrack structure. */ struct nf_conn_nat { struct hlist_node bysource; - struct nf_nat_seq seq[IP_CT_DIR_MAX]; struct nf_conn *ct; union nf_conntrack_nat_help help; #if defined(CONFIG_IP_NF_TARGET_MASQUERADE) || \ diff --git a/include/net/netfilter/nf_nat_helper.h b/include/net/netfilter/nf_nat_helper.h index 194c347..404324d 100644 --- a/include/net/netfilter/nf_nat_helper.h +++ b/include/net/netfilter/nf_nat_helper.h @@ -39,28 +39,9 @@ extern int nf_nat_mangle_udp_packet(struct sk_buff *skb, const char *rep_buffer, unsigned int rep_len); -extern void nf_nat_set_seq_adjust(struct nf_conn *ct, - enum ip_conntrack_info ctinfo, - __be32 seq, s32 off); -extern int nf_nat_seq_adjust(struct sk_buff *skb, - struct nf_conn *ct, - enum ip_conntrack_info ctinfo, - unsigned int protoff); -extern int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb, - struct nf_conn *ct, - enum ip_conntrack_info ctinfo, - unsigned int protoff); - /* Setup NAT on this expected conntrack so it follows master, but goes * to port ct->master->saved_proto. */ extern void nf_nat_follow_master(struct nf_conn *ct, struct nf_conntrack_expect *this); -extern s32 nf_nat_get_offset(const struct nf_conn *ct, - enum ip_conntrack_dir dir, - u32 seq); - -extern void nf_nat_tcp_seq_adjust(struct sk_buff *skb, struct nf_conn *ct, - u32 dir, s32 off); - #endif diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h index d69483f..8dd8038 100644 --- a/include/uapi/linux/netfilter/nf_conntrack_common.h +++ b/include/uapi/linux/netfilter/nf_conntrack_common.h @@ -99,7 +99,8 @@ enum ip_conntrack_events { IPCT_PROTOINFO, /* protocol information has changed */ IPCT_HELPER, /* new helper has been set */ IPCT_MARK, /* new mark has been set */ - IPCT_NATSEQADJ, /* NAT is doing sequence adjustment */ + IPCT_SEQADJ, /* sequence adjustment has changed */ + IPCT_NATSEQADJ = IPCT_SEQADJ, IPCT_SECMARK, /* new security mark has been set */ IPCT_LABEL, /* new connlabel has been set */ }; diff --git a/include/uapi/linux/netfilter/nfnetlink_conntrack.h b/include/uapi/linux/netfilter/nfnetlink_conntrack.h index 08fabc6..acad6c5 100644 --- a/include/uapi/linux/netfilter/nfnetlink_conntrack.h +++ b/include/uapi/linux/netfilter/nfnetlink_conntrack.h @@ -42,8 +42,10 @@ enum ctattr_type { CTA_ID, CTA_NAT_DST, CTA_TUPLE_MASTER, - CTA_NAT_SEQ_ADJ_ORIG, - CTA_NAT_SEQ_ADJ_REPLY, + CTA_SEQ_ADJ_ORIG, + CTA_NAT_SEQ_ADJ_ORIG = CTA_SEQ_ADJ_ORIG, + CTA_SEQ_ADJ_REPLY, + CTA_NAT_SEQ_ADJ_REPLY = CTA_SEQ_ADJ_REPLY, CTA_SECMARK, /* obsolete */ CTA_ZONE, CTA_SECCTX, @@ -165,6 +167,15 @@ enum ctattr_protonat { }; #define CTA_PROTONAT_MAX (__CTA_PROTONAT_MAX - 1) +enum ctattr_seqadj { + CTA_SEQADJ_UNSPEC, + CTA_SEQADJ_CORRECTION_POS, + CTA_SEQADJ_OFFSET_BEFORE, + CTA_SEQADJ_OFFSET_AFTER, + __CTA_SEQADJ_MAX +}; +#define CTA_SEQADJ_MAX (__CTA_SEQADJ_MAX 
- 1) + enum ctattr_natseq { CTA_NAT_SEQ_UNSPEC, CTA_NAT_SEQ_CORRECTION_POS, diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index 0a2e0e3..86f5b34 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -136,11 +137,7 @@ static unsigned int ipv4_confirm(unsigned int hooknum, /* adjust seqs for loopback traffic only in outgoing direction */ if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) && !nf_is_loopback_packet(skb)) { - typeof(nf_nat_seq_adjust_hook) seq_adjust; - - seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook); - if (!seq_adjust || - !seq_adjust(skb, ct, ctinfo, ip_hdrlen(skb))) { + if (!nf_ct_seq_adjust(skb, ct, ctinfo, ip_hdrlen(skb))) { NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop); return NF_DROP; } diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index c9b6a6e..d6e4dd8 100644 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -158,11 +159,7 @@ static unsigned int ipv6_confirm(unsigned int hooknum, /* adjust seqs for loopback traffic only in outgoing direction */ if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) && !nf_is_loopback_packet(skb)) { - typeof(nf_nat_seq_adjust_hook) seq_adjust; - - seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook); - if (!seq_adjust || - !seq_adjust(skb, ct, ctinfo, protoff)) { + if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) { NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop); return NF_DROP; } diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index ebfa7dc..89a9c16 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -1,6 +1,6 @@ netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o -nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o nf_conntrack_extend.o nf_conntrack_acct.o +nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o nf_conntrack_extend.o nf_conntrack_acct.o nf_conntrack_seqadj.o nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMEOUT) += nf_conntrack_timeout.o nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMESTAMP) += nf_conntrack_timestamp.o nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index da6f178..00a7a94 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include #include @@ -1326,6 +1327,7 @@ void nf_conntrack_cleanup_end(void) nf_ct_extend_unregister(&nf_ct_zone_extend); #endif nf_conntrack_proto_fini(); + nf_conntrack_seqadj_fini(); nf_conntrack_labels_fini(); nf_conntrack_helper_fini(); nf_conntrack_timeout_fini(); @@ -1531,6 +1533,10 @@ int nf_conntrack_init_start(void) if (ret < 0) goto err_labels; + ret = nf_conntrack_seqadj_init(); + if (ret < 0) + goto err_seqadj; + #ifdef CONFIG_NF_CONNTRACK_ZONES ret = nf_ct_extend_register(&nf_ct_zone_extend); if (ret < 0) 
@@ -1555,6 +1561,8 @@ err_proto: nf_ct_extend_unregister(&nf_ct_zone_extend); err_extend: #endif + nf_conntrack_seqadj_fini(); +err_seqadj: nf_conntrack_labels_fini(); err_labels: nf_conntrack_helper_fini(); @@ -1577,9 +1585,6 @@ void nf_conntrack_init_end(void) /* For use by REJECT target */ RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach); RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack); - - /* Howto get NAT offsets */ - RCU_INIT_POINTER(nf_ct_nat_offset, NULL); } /* @@ -1666,8 +1671,3 @@ err_slabname: err_stat: return ret; } - -s32 (*nf_ct_nat_offset)(const struct nf_conn *ct, - enum ip_conntrack_dir dir, - u32 seq); -EXPORT_SYMBOL_GPL(nf_ct_nat_offset); diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index fa61fea..7c55745 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -381,9 +382,8 @@ nla_put_failure: return -1; } -#ifdef CONFIG_NF_NAT_NEEDED static int -dump_nat_seq_adj(struct sk_buff *skb, const struct nf_nat_seq *natseq, int type) +dump_ct_seq_adj(struct sk_buff *skb, const struct nf_ct_seqadj *seq, int type) { struct nlattr *nest_parms; @@ -391,12 +391,12 @@ dump_nat_seq_adj(struct sk_buff *skb, const struct nf_nat_seq *natseq, int type) if (!nest_parms) goto nla_put_failure; - if (nla_put_be32(skb, CTA_NAT_SEQ_CORRECTION_POS, - htonl(natseq->correction_pos)) || - nla_put_be32(skb, CTA_NAT_SEQ_OFFSET_BEFORE, - htonl(natseq->offset_before)) || - nla_put_be32(skb, CTA_NAT_SEQ_OFFSET_AFTER, - htonl(natseq->offset_after))) + if (nla_put_be32(skb, CTA_SEQADJ_CORRECTION_POS, + htonl(seq->correction_pos)) || + nla_put_be32(skb, CTA_SEQADJ_OFFSET_BEFORE, + htonl(seq->offset_before)) || + nla_put_be32(skb, CTA_SEQADJ_OFFSET_AFTER, + htonl(seq->offset_after))) goto nla_put_failure; nla_nest_end(skb, nest_parms); @@ -408,27 +408,24 @@ nla_put_failure: } static inline int -ctnetlink_dump_nat_seq_adj(struct sk_buff *skb, const struct nf_conn *ct) +ctnetlink_dump_ct_seq_adj(struct sk_buff *skb, const struct nf_conn *ct) { - struct nf_nat_seq *natseq; - struct nf_conn_nat *nat = nfct_nat(ct); + struct nf_conn_seqadj *seqadj = nfct_seqadj(ct); + struct nf_ct_seqadj *seq; - if (!(ct->status & IPS_SEQ_ADJUST) || !nat) + if (!(ct->status & IPS_SEQ_ADJUST) || !seqadj) return 0; - natseq = &nat->seq[IP_CT_DIR_ORIGINAL]; - if (dump_nat_seq_adj(skb, natseq, CTA_NAT_SEQ_ADJ_ORIG) == -1) + seq = &seqadj->seq[IP_CT_DIR_ORIGINAL]; + if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_ORIG) == -1) return -1; - natseq = &nat->seq[IP_CT_DIR_REPLY]; - if (dump_nat_seq_adj(skb, natseq, CTA_NAT_SEQ_ADJ_REPLY) == -1) + seq = &seqadj->seq[IP_CT_DIR_REPLY]; + if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_REPLY) == -1) return -1; return 0; } -#else -#define ctnetlink_dump_nat_seq_adj(a, b) (0) -#endif static inline int ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct) @@ -502,7 +499,7 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, ctnetlink_dump_id(skb, ct) < 0 || ctnetlink_dump_use(skb, ct) < 0 || ctnetlink_dump_master(skb, ct) < 0 || - ctnetlink_dump_nat_seq_adj(skb, ct) < 0) + ctnetlink_dump_ct_seq_adj(skb, ct) < 0) goto nla_put_failure; nlmsg_end(skb, nlh); @@ -707,8 +704,8 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item) ctnetlink_dump_master(skb, ct) < 0) goto nla_put_failure; - if (events & (1 << IPCT_NATSEQADJ) && - ctnetlink_dump_nat_seq_adj(skb, ct) < 0) + if (events & (1 << 
IPCT_SEQADJ) && + ctnetlink_dump_ct_seq_adj(skb, ct) < 0) goto nla_put_failure; } @@ -1439,66 +1436,65 @@ ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[] return err; } -#ifdef CONFIG_NF_NAT_NEEDED -static const struct nla_policy nat_seq_policy[CTA_NAT_SEQ_MAX+1] = { - [CTA_NAT_SEQ_CORRECTION_POS] = { .type = NLA_U32 }, - [CTA_NAT_SEQ_OFFSET_BEFORE] = { .type = NLA_U32 }, - [CTA_NAT_SEQ_OFFSET_AFTER] = { .type = NLA_U32 }, +static const struct nla_policy seqadj_policy[CTA_SEQADJ_MAX+1] = { + [CTA_SEQADJ_CORRECTION_POS] = { .type = NLA_U32 }, + [CTA_SEQADJ_OFFSET_BEFORE] = { .type = NLA_U32 }, + [CTA_SEQADJ_OFFSET_AFTER] = { .type = NLA_U32 }, }; static inline int -change_nat_seq_adj(struct nf_nat_seq *natseq, const struct nlattr * const attr) +change_seq_adj(struct nf_ct_seqadj *seq, const struct nlattr * const attr) { int err; - struct nlattr *cda[CTA_NAT_SEQ_MAX+1]; + struct nlattr *cda[CTA_SEQADJ_MAX+1]; - err = nla_parse_nested(cda, CTA_NAT_SEQ_MAX, attr, nat_seq_policy); + err = nla_parse_nested(cda, CTA_SEQADJ_MAX, attr, seqadj_policy); if (err < 0) return err; - if (!cda[CTA_NAT_SEQ_CORRECTION_POS]) + if (!cda[CTA_SEQADJ_CORRECTION_POS]) return -EINVAL; - natseq->correction_pos = - ntohl(nla_get_be32(cda[CTA_NAT_SEQ_CORRECTION_POS])); + seq->correction_pos = + ntohl(nla_get_be32(cda[CTA_SEQADJ_CORRECTION_POS])); - if (!cda[CTA_NAT_SEQ_OFFSET_BEFORE]) + if (!cda[CTA_SEQADJ_OFFSET_BEFORE]) return -EINVAL; - natseq->offset_before = - ntohl(nla_get_be32(cda[CTA_NAT_SEQ_OFFSET_BEFORE])); + seq->offset_before = + ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_BEFORE])); - if (!cda[CTA_NAT_SEQ_OFFSET_AFTER]) + if (!cda[CTA_SEQADJ_OFFSET_AFTER]) return -EINVAL; - natseq->offset_after = - ntohl(nla_get_be32(cda[CTA_NAT_SEQ_OFFSET_AFTER])); + seq->offset_after = + ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_AFTER])); return 0; } static int -ctnetlink_change_nat_seq_adj(struct nf_conn *ct, - const struct nlattr * const cda[]) +ctnetlink_change_seq_adj(struct nf_conn *ct, + const struct nlattr * const cda[]) { + struct nf_conn_seqadj *seqadj = nfct_seqadj(ct); int ret = 0; - struct nf_conn_nat *nat = nfct_nat(ct); - if (!nat) + if (!seqadj) return 0; - if (cda[CTA_NAT_SEQ_ADJ_ORIG]) { - ret = change_nat_seq_adj(&nat->seq[IP_CT_DIR_ORIGINAL], - cda[CTA_NAT_SEQ_ADJ_ORIG]); + if (cda[CTA_SEQ_ADJ_ORIG]) { + ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_ORIGINAL], + cda[CTA_SEQ_ADJ_ORIG]); if (ret < 0) return ret; ct->status |= IPS_SEQ_ADJUST; } - if (cda[CTA_NAT_SEQ_ADJ_REPLY]) { - ret = change_nat_seq_adj(&nat->seq[IP_CT_DIR_REPLY], - cda[CTA_NAT_SEQ_ADJ_REPLY]); + if (cda[CTA_SEQ_ADJ_REPLY]) { + ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_REPLY], + cda[CTA_SEQ_ADJ_REPLY]); if (ret < 0) return ret; @@ -1507,7 +1503,6 @@ ctnetlink_change_nat_seq_adj(struct nf_conn *ct, return 0; } -#endif static int ctnetlink_attach_labels(struct nf_conn *ct, const struct nlattr * const cda[]) @@ -1573,13 +1568,12 @@ ctnetlink_change_conntrack(struct nf_conn *ct, ct->mark = ntohl(nla_get_be32(cda[CTA_MARK])); #endif -#ifdef CONFIG_NF_NAT_NEEDED - if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) { - err = ctnetlink_change_nat_seq_adj(ct, cda); + if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) { + err = ctnetlink_change_seq_adj(ct, cda); if (err < 0) return err; } -#endif + if (cda[CTA_LABELS]) { err = ctnetlink_attach_labels(ct, cda); if (err < 0) @@ -1684,13 +1678,11 @@ ctnetlink_create_conntrack(struct net *net, u16 zone, goto err2; } -#ifdef CONFIG_NF_NAT_NEEDED - if 
(cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) { - err = ctnetlink_change_nat_seq_adj(ct, cda); + if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) { + err = ctnetlink_change_seq_adj(ct, cda); if (err < 0) goto err2; } -#endif memset(&ct->proto, 0, sizeof(ct->proto)); if (cda[CTA_PROTOINFO]) { @@ -1804,7 +1796,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb, (1 << IPCT_ASSURED) | (1 << IPCT_HELPER) | (1 << IPCT_PROTOINFO) | - (1 << IPCT_NATSEQADJ) | + (1 << IPCT_SEQADJ) | (1 << IPCT_MARK) | events, ct, NETLINK_CB(skb).portid, nlmsg_report(nlh)); @@ -1827,7 +1819,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb, (1 << IPCT_HELPER) | (1 << IPCT_LABEL) | (1 << IPCT_PROTOINFO) | - (1 << IPCT_NATSEQADJ) | + (1 << IPCT_SEQADJ) | (1 << IPCT_MARK), ct, NETLINK_CB(skb).portid, nlmsg_report(nlh)); @@ -2082,7 +2074,7 @@ ctnetlink_nfqueue_build(struct sk_buff *skb, struct nf_conn *ct) goto nla_put_failure; if ((ct->status & IPS_SEQ_ADJUST) && - ctnetlink_dump_nat_seq_adj(skb, ct) < 0) + ctnetlink_dump_ct_seq_adj(skb, ct) < 0) goto nla_put_failure; #ifdef CONFIG_NF_CONNTRACK_MARK @@ -2211,6 +2203,7 @@ static struct nfq_ct_hook ctnetlink_nfqueue_hook = { .build = ctnetlink_nfqueue_build, .parse = ctnetlink_nfqueue_parse, .attach_expect = ctnetlink_nfqueue_attach_expect, + .seq_adjust = nf_ct_tcp_seqadj_set, }; #endif /* CONFIG_NETFILTER_NETLINK_QUEUE_CT */ diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index d224e00..984a8d1 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -495,21 +496,6 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff, } } -#ifdef CONFIG_NF_NAT_NEEDED -static inline s32 nat_offset(const struct nf_conn *ct, - enum ip_conntrack_dir dir, - u32 seq) -{ - typeof(nf_ct_nat_offset) get_offset = rcu_dereference(nf_ct_nat_offset); - - return get_offset != NULL ? 
get_offset(ct, dir, seq) : 0; -} -#define NAT_OFFSET(ct, dir, seq) \ - (nat_offset(ct, dir, seq)) -#else -#define NAT_OFFSET(ct, dir, seq) 0 -#endif - static bool tcp_in_window(const struct nf_conn *ct, struct ip_ct_tcp *state, enum ip_conntrack_dir dir, @@ -540,7 +526,7 @@ static bool tcp_in_window(const struct nf_conn *ct, tcp_sack(skb, dataoff, tcph, &sack); /* Take into account NAT sequence number mangling */ - receiver_offset = NAT_OFFSET(ct, !dir, ack - 1); + receiver_offset = nf_ct_seq_offset(ct, !dir, ack - 1); ack -= receiver_offset; sack -= receiver_offset; diff --git a/net/netfilter/nf_conntrack_seqadj.c b/net/netfilter/nf_conntrack_seqadj.c new file mode 100644 index 0000000..483eb9c --- /dev/null +++ b/net/netfilter/nf_conntrack_seqadj.c @@ -0,0 +1,218 @@ +#include +#include +#include + +#include +#include +#include + +int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo, + __be32 seq, s32 off) +{ + struct nf_conn_seqadj *seqadj = nfct_seqadj(ct); + enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); + struct nf_ct_seqadj *this_way; + + if (off == 0) + return 0; + + set_bit(IPS_SEQ_ADJUST_BIT, &ct->status); + + spin_lock_bh(&ct->lock); + this_way = &seqadj->seq[dir]; + if (this_way->offset_before == this_way->offset_after || + before(this_way->correction_pos, seq)) { + this_way->correction_pos = seq; + this_way->offset_before = this_way->offset_after; + this_way->offset_after += off; + } + spin_unlock_bh(&ct->lock); + return 0; +} +EXPORT_SYMBOL_GPL(nf_ct_seqadj_set); + +void nf_ct_tcp_seqadj_set(struct sk_buff *skb, + struct nf_conn *ct, enum ip_conntrack_info ctinfo, + s32 off) +{ + const struct tcphdr *th; + + if (nf_ct_protonum(ct) != IPPROTO_TCP) + return; + + th = (struct tcphdr *)(skb_network_header(skb) + ip_hdrlen(skb)); + nf_ct_seqadj_set(ct, ctinfo, th->seq, off); +} +EXPORT_SYMBOL_GPL(nf_ct_tcp_seqadj_set); + +/* Adjust one found SACK option including checksum correction */ +static void nf_ct_sack_block_adjust(struct sk_buff *skb, + struct tcphdr *tcph, + unsigned int sackoff, + unsigned int sackend, + struct nf_ct_seqadj *seq) +{ + while (sackoff < sackend) { + struct tcp_sack_block_wire *sack; + __be32 new_start_seq, new_end_seq; + + sack = (void *)skb->data + sackoff; + if (after(ntohl(sack->start_seq) - seq->offset_before, + seq->correction_pos)) + new_start_seq = htonl(ntohl(sack->start_seq) - + seq->offset_after); + else + new_start_seq = htonl(ntohl(sack->start_seq) - + seq->offset_before); + + if (after(ntohl(sack->end_seq) - seq->offset_before, + seq->correction_pos)) + new_end_seq = htonl(ntohl(sack->end_seq) - + seq->offset_after); + else + new_end_seq = htonl(ntohl(sack->end_seq) - + seq->offset_before); + + pr_debug("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n", + ntohl(sack->start_seq), new_start_seq, + ntohl(sack->end_seq), new_end_seq); + + inet_proto_csum_replace4(&tcph->check, skb, + sack->start_seq, new_start_seq, 0); + inet_proto_csum_replace4(&tcph->check, skb, + sack->end_seq, new_end_seq, 0); + sack->start_seq = new_start_seq; + sack->end_seq = new_end_seq; + sackoff += sizeof(*sack); + } +} + +/* TCP SACK sequence number adjustment */ +static unsigned int nf_ct_sack_adjust(struct sk_buff *skb, + unsigned int protoff, + struct tcphdr *tcph, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo) +{ + unsigned int dir, optoff, optend; + struct nf_conn_seqadj *seqadj = nfct_seqadj(ct); + + optoff = protoff + sizeof(struct tcphdr); + optend = protoff + tcph->doff * 4; + + if (!skb_make_writable(skb, optend)) + return 0; + + 
dir = CTINFO2DIR(ctinfo); + + while (optoff < optend) { + /* Usually: option, length. */ + unsigned char *op = skb->data + optoff; + + switch (op[0]) { + case TCPOPT_EOL: + return 1; + case TCPOPT_NOP: + optoff++; + continue; + default: + /* no partial options */ + if (optoff + 1 == optend || + optoff + op[1] > optend || + op[1] < 2) + return 0; + if (op[0] == TCPOPT_SACK && + op[1] >= 2+TCPOLEN_SACK_PERBLOCK && + ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0) + nf_ct_sack_block_adjust(skb, tcph, optoff + 2, + optoff+op[1], + &seqadj->seq[!dir]); + optoff += op[1]; + } + } + return 1; +} + +/* TCP sequence number adjustment. Returns 1 on success, 0 on failure */ +int nf_ct_seq_adjust(struct sk_buff *skb, + struct nf_conn *ct, enum ip_conntrack_info ctinfo, + unsigned int protoff) +{ + enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); + struct tcphdr *tcph; + __be32 newseq, newack; + s32 seqoff, ackoff; + struct nf_conn_seqadj *seqadj = nfct_seqadj(ct); + struct nf_ct_seqadj *this_way, *other_way; + int res; + + this_way = &seqadj->seq[dir]; + other_way = &seqadj->seq[!dir]; + + if (!skb_make_writable(skb, protoff + sizeof(*tcph))) + return 0; + + tcph = (void *)skb->data + protoff; + spin_lock_bh(&ct->lock); + if (after(ntohl(tcph->seq), this_way->correction_pos)) + seqoff = this_way->offset_after; + else + seqoff = this_way->offset_before; + + if (after(ntohl(tcph->ack_seq) - other_way->offset_before, + other_way->correction_pos)) + ackoff = other_way->offset_after; + else + ackoff = other_way->offset_before; + + newseq = htonl(ntohl(tcph->seq) + seqoff); + newack = htonl(ntohl(tcph->ack_seq) - ackoff); + + inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0); + inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0); + + pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n", + ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq), + ntohl(newack)); + + tcph->seq = newseq; + tcph->ack_seq = newack; + + res = nf_ct_sack_adjust(skb, protoff, tcph, ct, ctinfo); + spin_unlock_bh(&ct->lock); + + return res; +} +EXPORT_SYMBOL_GPL(nf_ct_seq_adjust); + +s32 nf_ct_seq_offset(const struct nf_conn *ct, + enum ip_conntrack_dir dir, + u32 seq) +{ + struct nf_conn_seqadj *seqadj = nfct_seqadj(ct); + struct nf_ct_seqadj *this_way; + + if (!seqadj) + return 0; + + this_way = &seqadj->seq[dir]; + return after(seq, this_way->correction_pos) ? 
+ this_way->offset_after : this_way->offset_before; +} +EXPORT_SYMBOL_GPL(nf_ct_seq_offset); + +static struct nf_ct_ext_type nf_ct_seqadj_extend __read_mostly = { + .len = sizeof(struct nf_conn_seqadj), + .align = __alignof__(struct nf_conn_seqadj), + .id = NF_CT_EXT_SEQADJ, +}; + +int nf_conntrack_seqadj_init(void) +{ + return nf_ct_extend_register(&nf_ct_seqadj_extend); +} + +void nf_conntrack_seqadj_fini(void) +{ + nf_ct_extend_unregister(&nf_ct_seqadj_extend); +} diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index 6ff8083..6f0f4f7 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -402,6 +403,9 @@ nf_nat_setup_info(struct nf_conn *ct, ct->status |= IPS_SRC_NAT; else ct->status |= IPS_DST_NAT; + + if (nfct_help(ct)) + nfct_seqadj_ext_add(ct); } if (maniptype == NF_NAT_MANIP_SRC) { @@ -764,10 +768,6 @@ static struct nf_ct_helper_expectfn follow_master_nat = { .expectfn = nf_nat_follow_master, }; -static struct nfq_ct_nat_hook nfq_ct_nat = { - .seq_adjust = nf_nat_tcp_seq_adjust, -}; - static int __init nf_nat_init(void) { int ret; @@ -787,14 +787,9 @@ static int __init nf_nat_init(void) /* Initialize fake conntrack so that NAT will skip it */ nf_ct_untracked_status_or(IPS_NAT_DONE_MASK); - BUG_ON(nf_nat_seq_adjust_hook != NULL); - RCU_INIT_POINTER(nf_nat_seq_adjust_hook, nf_nat_seq_adjust); BUG_ON(nfnetlink_parse_nat_setup_hook != NULL); RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, nfnetlink_parse_nat_setup); - BUG_ON(nf_ct_nat_offset != NULL); - RCU_INIT_POINTER(nf_ct_nat_offset, nf_nat_get_offset); - RCU_INIT_POINTER(nfq_ct_nat_hook, &nfq_ct_nat); #ifdef CONFIG_XFRM BUG_ON(nf_nat_decode_session_hook != NULL); RCU_INIT_POINTER(nf_nat_decode_session_hook, __nf_nat_decode_session); @@ -813,10 +808,7 @@ static void __exit nf_nat_cleanup(void) unregister_pernet_subsys(&nf_nat_net_ops); nf_ct_extend_unregister(&nat_extend); nf_ct_helper_expectfn_unregister(&follow_master_nat); - RCU_INIT_POINTER(nf_nat_seq_adjust_hook, NULL); RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL); - RCU_INIT_POINTER(nf_ct_nat_offset, NULL); - RCU_INIT_POINTER(nfq_ct_nat_hook, NULL); #ifdef CONFIG_XFRM RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL); #endif diff --git a/net/netfilter/nf_nat_helper.c b/net/netfilter/nf_nat_helper.c index 46b9baa..2840abb 100644 --- a/net/netfilter/nf_nat_helper.c +++ b/net/netfilter/nf_nat_helper.c @@ -20,67 +20,13 @@ #include #include #include +#include #include #include #include #include #include -#define DUMP_OFFSET(x) \ - pr_debug("offset_before=%d, offset_after=%d, correction_pos=%u\n", \ - x->offset_before, x->offset_after, x->correction_pos); - -/* Setup TCP sequence correction given this change at this sequence */ -static inline void -adjust_tcp_sequence(u32 seq, - int sizediff, - struct nf_conn *ct, - enum ip_conntrack_info ctinfo) -{ - enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); - struct nf_conn_nat *nat = nfct_nat(ct); - struct nf_nat_seq *this_way = &nat->seq[dir]; - - pr_debug("adjust_tcp_sequence: seq = %u, sizediff = %d\n", - seq, sizediff); - - pr_debug("adjust_tcp_sequence: Seq_offset before: "); - DUMP_OFFSET(this_way); - - spin_lock_bh(&ct->lock); - - /* SYN adjust. 
If it's uninitialized, or this is after last - * correction, record it: we don't handle more than one - * adjustment in the window, but do deal with common case of a - * retransmit */ - if (this_way->offset_before == this_way->offset_after || - before(this_way->correction_pos, seq)) { - this_way->correction_pos = seq; - this_way->offset_before = this_way->offset_after; - this_way->offset_after += sizediff; - } - spin_unlock_bh(&ct->lock); - - pr_debug("adjust_tcp_sequence: Seq_offset after: "); - DUMP_OFFSET(this_way); -} - -/* Get the offset value, for conntrack. Caller must have the conntrack locked */ -s32 nf_nat_get_offset(const struct nf_conn *ct, - enum ip_conntrack_dir dir, - u32 seq) -{ - struct nf_conn_nat *nat = nfct_nat(ct); - struct nf_nat_seq *this_way; - - if (!nat) - return 0; - - this_way = &nat->seq[dir]; - return after(seq, this_way->correction_pos) - ? this_way->offset_after : this_way->offset_before; -} - /* Frobs data inside this packet, which is linear. */ static void mangle_contents(struct sk_buff *skb, unsigned int dataoff, @@ -135,30 +81,6 @@ static int enlarge_skb(struct sk_buff *skb, unsigned int extra) return 1; } -void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo, - __be32 seq, s32 off) -{ - if (!off) - return; - set_bit(IPS_SEQ_ADJUST_BIT, &ct->status); - adjust_tcp_sequence(ntohl(seq), off, ct, ctinfo); - nf_conntrack_event_cache(IPCT_NATSEQADJ, ct); -} -EXPORT_SYMBOL_GPL(nf_nat_set_seq_adjust); - -void nf_nat_tcp_seq_adjust(struct sk_buff *skb, struct nf_conn *ct, - u32 ctinfo, int off) -{ - const struct tcphdr *th; - - if (nf_ct_protonum(ct) != IPPROTO_TCP) - return; - - th = (struct tcphdr *)(skb_network_header(skb)+ ip_hdrlen(skb)); - nf_nat_set_seq_adjust(ct, ctinfo, th->seq, off); -} -EXPORT_SYMBOL_GPL(nf_nat_tcp_seq_adjust); - /* Generic function for mangling variable-length address changes inside * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX * command in FTP). 
@@ -203,8 +125,8 @@ int __nf_nat_mangle_tcp_packet(struct sk_buff *skb, datalen, oldlen); if (adjust && rep_len != match_len) - nf_nat_set_seq_adjust(ct, ctinfo, tcph->seq, - (int)rep_len - (int)match_len); + nf_ct_seqadj_set(ct, ctinfo, tcph->seq, + (int)rep_len - (int)match_len); return 1; } @@ -264,150 +186,6 @@ nf_nat_mangle_udp_packet(struct sk_buff *skb, } EXPORT_SYMBOL(nf_nat_mangle_udp_packet); -/* Adjust one found SACK option including checksum correction */ -static void -sack_adjust(struct sk_buff *skb, - struct tcphdr *tcph, - unsigned int sackoff, - unsigned int sackend, - struct nf_nat_seq *natseq) -{ - while (sackoff < sackend) { - struct tcp_sack_block_wire *sack; - __be32 new_start_seq, new_end_seq; - - sack = (void *)skb->data + sackoff; - if (after(ntohl(sack->start_seq) - natseq->offset_before, - natseq->correction_pos)) - new_start_seq = htonl(ntohl(sack->start_seq) - - natseq->offset_after); - else - new_start_seq = htonl(ntohl(sack->start_seq) - - natseq->offset_before); - - if (after(ntohl(sack->end_seq) - natseq->offset_before, - natseq->correction_pos)) - new_end_seq = htonl(ntohl(sack->end_seq) - - natseq->offset_after); - else - new_end_seq = htonl(ntohl(sack->end_seq) - - natseq->offset_before); - - pr_debug("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n", - ntohl(sack->start_seq), new_start_seq, - ntohl(sack->end_seq), new_end_seq); - - inet_proto_csum_replace4(&tcph->check, skb, - sack->start_seq, new_start_seq, 0); - inet_proto_csum_replace4(&tcph->check, skb, - sack->end_seq, new_end_seq, 0); - sack->start_seq = new_start_seq; - sack->end_seq = new_end_seq; - sackoff += sizeof(*sack); - } -} - -/* TCP SACK sequence number adjustment */ -static inline unsigned int -nf_nat_sack_adjust(struct sk_buff *skb, - unsigned int protoff, - struct tcphdr *tcph, - struct nf_conn *ct, - enum ip_conntrack_info ctinfo) -{ - unsigned int dir, optoff, optend; - struct nf_conn_nat *nat = nfct_nat(ct); - - optoff = protoff + sizeof(struct tcphdr); - optend = protoff + tcph->doff * 4; - - if (!skb_make_writable(skb, optend)) - return 0; - - dir = CTINFO2DIR(ctinfo); - - while (optoff < optend) { - /* Usually: option, length. */ - unsigned char *op = skb->data + optoff; - - switch (op[0]) { - case TCPOPT_EOL: - return 1; - case TCPOPT_NOP: - optoff++; - continue; - default: - /* no partial options */ - if (optoff + 1 == optend || - optoff + op[1] > optend || - op[1] < 2) - return 0; - if (op[0] == TCPOPT_SACK && - op[1] >= 2+TCPOLEN_SACK_PERBLOCK && - ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0) - sack_adjust(skb, tcph, optoff+2, - optoff+op[1], &nat->seq[!dir]); - optoff += op[1]; - } - } - return 1; -} - -/* TCP sequence number adjustment. 
Returns 1 on success, 0 on failure */ -int -nf_nat_seq_adjust(struct sk_buff *skb, - struct nf_conn *ct, - enum ip_conntrack_info ctinfo, - unsigned int protoff) -{ - struct tcphdr *tcph; - int dir; - __be32 newseq, newack; - s32 seqoff, ackoff; - struct nf_conn_nat *nat = nfct_nat(ct); - struct nf_nat_seq *this_way, *other_way; - int res; - - dir = CTINFO2DIR(ctinfo); - - this_way = &nat->seq[dir]; - other_way = &nat->seq[!dir]; - - if (!skb_make_writable(skb, protoff + sizeof(*tcph))) - return 0; - - tcph = (void *)skb->data + protoff; - spin_lock_bh(&ct->lock); - if (after(ntohl(tcph->seq), this_way->correction_pos)) - seqoff = this_way->offset_after; - else - seqoff = this_way->offset_before; - - if (after(ntohl(tcph->ack_seq) - other_way->offset_before, - other_way->correction_pos)) - ackoff = other_way->offset_after; - else - ackoff = other_way->offset_before; - - newseq = htonl(ntohl(tcph->seq) + seqoff); - newack = htonl(ntohl(tcph->ack_seq) - ackoff); - - inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0); - inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0); - - pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n", - ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq), - ntohl(newack)); - - tcph->seq = newseq; - tcph->ack_seq = newack; - - res = nf_nat_sack_adjust(skb, protoff, tcph, ct, ctinfo); - spin_unlock_bh(&ct->lock); - - return res; -} - /* Setup NAT on this expected conntrack so it follows master. */ /* If we fail to get a free NAT slot, we'll get dropped on confirm */ void nf_nat_follow_master(struct nf_conn *ct, diff --git a/net/netfilter/nf_nat_sip.c b/net/netfilter/nf_nat_sip.c index dac11f7..f979040 100644 --- a/net/netfilter/nf_nat_sip.c +++ b/net/netfilter/nf_nat_sip.c @@ -20,6 +20,7 @@ #include #include #include +#include #include MODULE_LICENSE("GPL"); @@ -308,7 +309,7 @@ static void nf_nat_sip_seq_adjust(struct sk_buff *skb, unsigned int protoff, return; th = (struct tcphdr *)(skb->data + protoff); - nf_nat_set_seq_adjust(ct, ctinfo, th->seq, off); + nf_ct_seqadj_set(ct, ctinfo, th->seq, off); } /* Handles expected signalling connections and media streams */ diff --git a/net/netfilter/nfnetlink_queue_ct.c b/net/netfilter/nfnetlink_queue_ct.c index be89303..96cac50 100644 --- a/net/netfilter/nfnetlink_queue_ct.c +++ b/net/netfilter/nfnetlink_queue_ct.c @@ -87,14 +87,14 @@ nla_put_failure: void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, int diff) { - struct nfq_ct_nat_hook *nfq_nat_ct; + struct nfq_ct_hook *nfq_ct; - nfq_nat_ct = rcu_dereference(nfq_ct_nat_hook); - if (nfq_nat_ct == NULL) + nfq_ct = rcu_dereference(nfq_ct_hook); + if (nfq_ct == NULL) return; if ((ct->status & IPS_NAT_MASK) && diff) - nfq_nat_ct->seq_adjust(skb, ct, ctinfo, diff); + nfq_ct->seq_adjust(skb, ct, ctinfo, diff); } int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr, -- cgit v0.10.2 From 0198230b7705eb2386e53778d944e307eef0cc71 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Tue, 27 Aug 2013 08:50:13 +0200 Subject: net: syncookies: export cookie_v4_init_sequence/cookie_v4_check Extract the local TCP stack independant parts of tcp_v4_init_sequence() and cookie_v4_check() and export them for use by the upcoming SYNPROXY target. Signed-off-by: Patrick McHardy Acked-by: David S. 
Miller Tested-by: Martin Topholm Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Pablo Neira Ayuso diff --git a/include/net/tcp.h b/include/net/tcp.h index 09cb5c1..8e2b636 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -476,9 +476,13 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb); /* From syncookies.c */ extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS]; +extern int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th, + u32 cookie); extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, struct ip_options *opt); #ifdef CONFIG_SYN_COOKIES +extern u32 __cookie_v4_init_sequence(const struct iphdr *iph, + const struct tcphdr *th, u16 *mssp); extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mss); #else diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index b05c96e..14a15c4 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -160,26 +160,33 @@ static __u16 const msstab[] = { * Generate a syncookie. mssp points to the mss, which is returned * rounded down to the value encoded in the cookie. */ -__u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp) +u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th, + u16 *mssp) { - const struct iphdr *iph = ip_hdr(skb); - const struct tcphdr *th = tcp_hdr(skb); int mssind; const __u16 mss = *mssp; - tcp_synq_overflow(sk); - for (mssind = ARRAY_SIZE(msstab) - 1; mssind ; mssind--) if (mss >= msstab[mssind]) break; *mssp = msstab[mssind]; - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT); - return secure_tcp_syn_cookie(iph->saddr, iph->daddr, th->source, th->dest, ntohl(th->seq), jiffies / (HZ * 60), mssind); } +EXPORT_SYMBOL_GPL(__cookie_v4_init_sequence); + +__u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp) +{ + const struct iphdr *iph = ip_hdr(skb); + const struct tcphdr *th = tcp_hdr(skb); + + tcp_synq_overflow(sk); + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT); + + return __cookie_v4_init_sequence(iph, th, mssp); +} /* * This (misnamed) value is the age of syncookie which is permitted. @@ -192,10 +199,9 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp) * Check if a ack sequence number is a valid syncookie. * Return the decoded mss if it is, or 0 if not. */ -static inline int cookie_check(struct sk_buff *skb, __u32 cookie) +int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th, + u32 cookie) { - const struct iphdr *iph = ip_hdr(skb); - const struct tcphdr *th = tcp_hdr(skb); __u32 seq = ntohl(th->seq) - 1; __u32 mssind = check_tcp_syn_cookie(cookie, iph->saddr, iph->daddr, th->source, th->dest, seq, @@ -204,6 +210,7 @@ static inline int cookie_check(struct sk_buff *skb, __u32 cookie) return mssind < ARRAY_SIZE(msstab) ? 
msstab[mssind] : 0; } +EXPORT_SYMBOL_GPL(__cookie_v4_check); static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb, struct request_sock *req, @@ -284,7 +291,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, goto out; if (tcp_synq_no_recent_overflow(sk) || - (mss = cookie_check(skb, cookie)) == 0) { + (mss = __cookie_v4_check(ip_hdr(skb), th, cookie)) == 0) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED); goto out; } -- cgit v0.10.2 From 48b1de4c110a7afa4b85862f6c75af817db26fad Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Tue, 27 Aug 2013 08:50:14 +0200 Subject: netfilter: add SYNPROXY core/target Add a SYNPROXY for netfilter. The code is split into two parts, the synproxy core with common functions and an address family specific target. The SYNPROXY receives the connection request from the client, responds with a SYN/ACK containing a SYN cookie and announcing a zero window, and checks whether the final ACK from the client contains a valid cookie. It then establishes a connection to the original destination and, if successful, sends a window update to the client with the window size announced by the server. Support for timestamps, SACK, window scaling and MSS options can be statically configured as target parameters if the features of the server are known. If timestamps are used, the timestamp value sent back to the client in the SYN/ACK will be different from the real timestamp of the server. In order not to break PAWS, the timestamps are translated in the direction server->client. Signed-off-by: Patrick McHardy Tested-by: Martin Topholm Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Pablo Neira Ayuso diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h index 2a22bcb..ff95434 100644 --- a/include/net/netfilter/nf_conntrack_extend.h +++ b/include/net/netfilter/nf_conntrack_extend.h @@ -9,8 +9,8 @@ enum nf_ct_ext_id { NF_CT_EXT_HELPER, #if defined(CONFIG_NF_NAT) || defined(CONFIG_NF_NAT_MODULE) NF_CT_EXT_NAT, - NF_CT_EXT_SEQADJ, #endif + NF_CT_EXT_SEQADJ, NF_CT_EXT_ACCT, #ifdef CONFIG_NF_CONNTRACK_EVENTS NF_CT_EXT_ECACHE, @@ -27,6 +27,9 @@ enum nf_ct_ext_id { #ifdef CONFIG_NF_CONNTRACK_LABELS NF_CT_EXT_LABELS, #endif +#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY) + NF_CT_EXT_SYNPROXY, +#endif NF_CT_EXT_NUM, }; @@ -39,6 +42,7 @@ enum nf_ct_ext_id { #define NF_CT_EXT_TSTAMP_TYPE struct nf_conn_tstamp #define NF_CT_EXT_TIMEOUT_TYPE struct nf_conn_timeout #define NF_CT_EXT_LABELS_TYPE struct nf_conn_labels +#define NF_CT_EXT_SYNPROXY_TYPE struct nf_conn_synproxy /* Extensions: optional stuff which isn't permanently in struct. 
*/ struct nf_ct_ext { diff --git a/include/net/netfilter/nf_conntrack_seqadj.h b/include/net/netfilter/nf_conntrack_seqadj.h index 30bfbbe..f6177a5 100644 --- a/include/net/netfilter/nf_conntrack_seqadj.h +++ b/include/net/netfilter/nf_conntrack_seqadj.h @@ -30,6 +30,8 @@ static inline struct nf_conn_seqadj *nfct_seqadj_ext_add(struct nf_conn *ct) return nf_ct_ext_add(ct, NF_CT_EXT_SEQADJ, GFP_ATOMIC); } +extern int nf_ct_seqadj_init(struct nf_conn *ct, enum ip_conntrack_info ctinfo, + s32 off); extern int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo, __be32 seq, s32 off); extern void nf_ct_tcp_seqadj_set(struct sk_buff *skb, diff --git a/include/net/netfilter/nf_conntrack_synproxy.h b/include/net/netfilter/nf_conntrack_synproxy.h new file mode 100644 index 0000000..806f54a --- /dev/null +++ b/include/net/netfilter/nf_conntrack_synproxy.h @@ -0,0 +1,77 @@ +#ifndef _NF_CONNTRACK_SYNPROXY_H +#define _NF_CONNTRACK_SYNPROXY_H + +#include + +struct nf_conn_synproxy { + u32 isn; + u32 its; + u32 tsoff; +}; + +static inline struct nf_conn_synproxy *nfct_synproxy(const struct nf_conn *ct) +{ +#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY) + return nf_ct_ext_find(ct, NF_CT_EXT_SYNPROXY); +#else + return NULL; +#endif +} + +static inline struct nf_conn_synproxy *nfct_synproxy_ext_add(struct nf_conn *ct) +{ +#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY) + return nf_ct_ext_add(ct, NF_CT_EXT_SYNPROXY, GFP_ATOMIC); +#else + return NULL; +#endif +} + +struct synproxy_stats { + unsigned int syn_received; + unsigned int cookie_invalid; + unsigned int cookie_valid; + unsigned int cookie_retrans; + unsigned int conn_reopened; +}; + +struct synproxy_net { + struct nf_conn *tmpl; + struct synproxy_stats __percpu *stats; +}; + +extern int synproxy_net_id; +static inline struct synproxy_net *synproxy_pernet(struct net *net) +{ + return net_generic(net, synproxy_net_id); +} + +struct synproxy_options { + u8 options; + u8 wscale; + u16 mss; + u32 tsval; + u32 tsecr; +}; + +struct tcphdr; +struct xt_synproxy_info; +extern void synproxy_parse_options(const struct sk_buff *skb, unsigned int doff, + const struct tcphdr *th, + struct synproxy_options *opts); +extern unsigned int synproxy_options_size(const struct synproxy_options *opts); +extern void synproxy_build_options(struct tcphdr *th, + const struct synproxy_options *opts); + +extern void synproxy_init_timestamp_cookie(const struct xt_synproxy_info *info, + struct synproxy_options *opts); +extern void synproxy_check_timestamp_cookie(struct synproxy_options *opts); + +extern unsigned int synproxy_tstamp_adjust(struct sk_buff *skb, + unsigned int protoff, + struct tcphdr *th, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + const struct nf_conn_synproxy *synproxy); + +#endif /* _NF_CONNTRACK_SYNPROXY_H */ diff --git a/include/uapi/linux/netfilter/xt_SYNPROXY.h b/include/uapi/linux/netfilter/xt_SYNPROXY.h new file mode 100644 index 0000000..2d59fba --- /dev/null +++ b/include/uapi/linux/netfilter/xt_SYNPROXY.h @@ -0,0 +1,16 @@ +#ifndef _XT_SYNPROXY_H +#define _XT_SYNPROXY_H + +#define XT_SYNPROXY_OPT_MSS 0x01 +#define XT_SYNPROXY_OPT_WSCALE 0x02 +#define XT_SYNPROXY_OPT_SACK_PERM 0x04 +#define XT_SYNPROXY_OPT_TIMESTAMP 0x08 +#define XT_SYNPROXY_OPT_ECN 0x10 + +struct xt_synproxy_info { + __u8 options; + __u8 wscale; + __u16 mss; +}; + +#endif /* _XT_SYNPROXY_H */ diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index 4e90280..1657e39b 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig @@ -110,6 +110,19 
@@ config IP_NF_TARGET_REJECT To compile it as a module, choose M here. If unsure, say N. +config IP_NF_TARGET_SYNPROXY + tristate "SYNPROXY target support" + depends on NF_CONNTRACK && NETFILTER_ADVANCED + select NETFILTER_SYNPROXY + select SYN_COOKIES + help + The SYNPROXY target allows you to intercept TCP connections and + establish them using syncookies before they are passed on to the + server. This allows to avoid conntrack and server resource usage + during SYN-flood attacks. + + To compile it as a module, choose M here. If unsure, say N. + config IP_NF_TARGET_ULOG tristate "ULOG target support (obsolete)" default m if NETFILTER_ADVANCED=n diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile index 007b128..3622b24 100644 --- a/net/ipv4/netfilter/Makefile +++ b/net/ipv4/netfilter/Makefile @@ -46,6 +46,7 @@ obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o +obj-$(CONFIG_IP_NF_TARGET_SYNPROXY) += ipt_SYNPROXY.o obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o # generic ARP tables diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c new file mode 100644 index 0000000..94371db --- /dev/null +++ b/net/ipv4/netfilter/ipt_SYNPROXY.c @@ -0,0 +1,472 @@ +/* + * Copyright (c) 2013 Patrick McHardy + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +static struct iphdr * +synproxy_build_ip(struct sk_buff *skb, u32 saddr, u32 daddr) +{ + struct iphdr *iph; + + skb_reset_network_header(skb); + iph = (struct iphdr *)skb_put(skb, sizeof(*iph)); + iph->version = 4; + iph->ihl = sizeof(*iph) / 4; + iph->tos = 0; + iph->id = 0; + iph->frag_off = htons(IP_DF); + iph->ttl = sysctl_ip_default_ttl; + iph->protocol = IPPROTO_TCP; + iph->check = 0; + iph->saddr = saddr; + iph->daddr = daddr; + + return iph; +} + +static void +synproxy_send_tcp(const struct sk_buff *skb, struct sk_buff *nskb, + struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo, + struct iphdr *niph, struct tcphdr *nth, + unsigned int tcp_hdr_size) +{ + nth->check = ~tcp_v4_check(tcp_hdr_size, niph->saddr, niph->daddr, 0); + nskb->ip_summed = CHECKSUM_PARTIAL; + nskb->csum_start = (unsigned char *)nth - nskb->head; + nskb->csum_offset = offsetof(struct tcphdr, check); + + skb_dst_set_noref(nskb, skb_dst(skb)); + nskb->protocol = htons(ETH_P_IP); + if (ip_route_me_harder(nskb, RTN_UNSPEC)) + goto free_nskb; + + if (nfct) { + nskb->nfct = nfct; + nskb->nfctinfo = ctinfo; + nf_conntrack_get(nfct); + } + + ip_local_out(nskb); + return; + +free_nskb: + kfree_skb(nskb); +} + +static void +synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th, + const struct synproxy_options *opts) +{ + struct sk_buff *nskb; + struct iphdr *iph, *niph; + struct tcphdr *nth; + unsigned int tcp_hdr_size; + u16 mss = opts->mss; + + iph = ip_hdr(skb); + + tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts); + nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER, + GFP_ATOMIC); + if (nskb == NULL) + return; + skb_reserve(nskb, MAX_TCP_HEADER); + + niph = synproxy_build_ip(nskb, iph->daddr, iph->saddr); + + skb_reset_transport_header(nskb); + nth = (struct tcphdr *)skb_put(nskb, 
tcp_hdr_size); + nth->source = th->dest; + nth->dest = th->source; + nth->seq = htonl(__cookie_v4_init_sequence(iph, th, &mss)); + nth->ack_seq = htonl(ntohl(th->seq) + 1); + tcp_flag_word(nth) = TCP_FLAG_SYN | TCP_FLAG_ACK; + if (opts->options & XT_SYNPROXY_OPT_ECN) + tcp_flag_word(nth) |= TCP_FLAG_ECE; + nth->doff = tcp_hdr_size / 4; + nth->window = 0; + nth->check = 0; + nth->urg_ptr = 0; + + synproxy_build_options(nth, opts); + + synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, + niph, nth, tcp_hdr_size); +} + +static void +synproxy_send_server_syn(const struct synproxy_net *snet, + const struct sk_buff *skb, const struct tcphdr *th, + const struct synproxy_options *opts, u32 recv_seq) +{ + struct sk_buff *nskb; + struct iphdr *iph, *niph; + struct tcphdr *nth; + unsigned int tcp_hdr_size; + + iph = ip_hdr(skb); + + tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts); + nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER, + GFP_ATOMIC); + if (nskb == NULL) + return; + skb_reserve(nskb, MAX_TCP_HEADER); + + niph = synproxy_build_ip(nskb, iph->saddr, iph->daddr); + + skb_reset_transport_header(nskb); + nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); + nth->source = th->source; + nth->dest = th->dest; + nth->seq = htonl(recv_seq - 1); + /* ack_seq is used to relay our ISN to the synproxy hook to initialize + * sequence number translation once a connection tracking entry exists. + */ + nth->ack_seq = htonl(ntohl(th->ack_seq) - 1); + tcp_flag_word(nth) = TCP_FLAG_SYN; + if (opts->options & XT_SYNPROXY_OPT_ECN) + tcp_flag_word(nth) |= TCP_FLAG_ECE | TCP_FLAG_CWR; + nth->doff = tcp_hdr_size / 4; + nth->window = th->window; + nth->check = 0; + nth->urg_ptr = 0; + + synproxy_build_options(nth, opts); + + synproxy_send_tcp(skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW, + niph, nth, tcp_hdr_size); +} + +static void +synproxy_send_server_ack(const struct synproxy_net *snet, + const struct ip_ct_tcp *state, + const struct sk_buff *skb, const struct tcphdr *th, + const struct synproxy_options *opts) +{ + struct sk_buff *nskb; + struct iphdr *iph, *niph; + struct tcphdr *nth; + unsigned int tcp_hdr_size; + + iph = ip_hdr(skb); + + tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts); + nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER, + GFP_ATOMIC); + if (nskb == NULL) + return; + skb_reserve(nskb, MAX_TCP_HEADER); + + niph = synproxy_build_ip(nskb, iph->daddr, iph->saddr); + + skb_reset_transport_header(nskb); + nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); + nth->source = th->dest; + nth->dest = th->source; + nth->seq = htonl(ntohl(th->ack_seq)); + nth->ack_seq = htonl(ntohl(th->seq) + 1); + tcp_flag_word(nth) = TCP_FLAG_ACK; + nth->doff = tcp_hdr_size / 4; + nth->window = htons(state->seen[IP_CT_DIR_ORIGINAL].td_maxwin); + nth->check = 0; + nth->urg_ptr = 0; + + synproxy_build_options(nth, opts); + + synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); +} + +static void +synproxy_send_client_ack(const struct synproxy_net *snet, + const struct sk_buff *skb, const struct tcphdr *th, + const struct synproxy_options *opts) +{ + struct sk_buff *nskb; + struct iphdr *iph, *niph; + struct tcphdr *nth; + unsigned int tcp_hdr_size; + + iph = ip_hdr(skb); + + tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts); + nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER, + GFP_ATOMIC); + if (nskb == NULL) + return; + skb_reserve(nskb, MAX_TCP_HEADER); + + niph = synproxy_build_ip(nskb, iph->saddr, iph->daddr); + + 
skb_reset_transport_header(nskb); + nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); + nth->source = th->source; + nth->dest = th->dest; + nth->seq = htonl(ntohl(th->seq) + 1); + nth->ack_seq = th->ack_seq; + tcp_flag_word(nth) = TCP_FLAG_ACK; + nth->doff = tcp_hdr_size / 4; + nth->window = ntohs(htons(th->window) >> opts->wscale); + nth->check = 0; + nth->urg_ptr = 0; + + synproxy_build_options(nth, opts); + + synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); +} + +static bool +synproxy_recv_client_ack(const struct synproxy_net *snet, + const struct sk_buff *skb, const struct tcphdr *th, + struct synproxy_options *opts, u32 recv_seq) +{ + int mss; + + mss = __cookie_v4_check(ip_hdr(skb), th, ntohl(th->ack_seq) - 1); + if (mss == 0) { + this_cpu_inc(snet->stats->cookie_invalid); + return false; + } + + this_cpu_inc(snet->stats->cookie_valid); + opts->mss = mss; + + if (opts->options & XT_SYNPROXY_OPT_TIMESTAMP) + synproxy_check_timestamp_cookie(opts); + + synproxy_send_server_syn(snet, skb, th, opts, recv_seq); + return true; +} + +static unsigned int +synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par) +{ + const struct xt_synproxy_info *info = par->targinfo; + struct synproxy_net *snet = synproxy_pernet(dev_net(par->in)); + struct synproxy_options opts = {}; + struct tcphdr *th, _th; + + if (nf_ip_checksum(skb, par->hooknum, par->thoff, IPPROTO_TCP)) + return NF_DROP; + + th = skb_header_pointer(skb, par->thoff, sizeof(_th), &_th); + if (th == NULL) + return NF_DROP; + + synproxy_parse_options(skb, par->thoff, th, &opts); + + if (th->syn && !th->ack) { + /* Initial SYN from client */ + this_cpu_inc(snet->stats->syn_received); + + if (th->ece && th->cwr) + opts.options |= XT_SYNPROXY_OPT_ECN; + + opts.options &= info->options; + if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP) + synproxy_init_timestamp_cookie(info, &opts); + else + opts.options &= ~(XT_SYNPROXY_OPT_WSCALE | + XT_SYNPROXY_OPT_SACK_PERM | + XT_SYNPROXY_OPT_ECN); + + synproxy_send_client_synack(skb, th, &opts); + } else if (th->ack && !(th->fin || th->rst)) + /* ACK from client */ + synproxy_recv_client_ack(snet, skb, th, &opts, ntohl(th->seq)); + + return NF_DROP; +} + +static unsigned int ipv4_synproxy_hook(unsigned int hooknum, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) +{ + struct synproxy_net *snet = synproxy_pernet(dev_net(in ? : out)); + enum ip_conntrack_info ctinfo; + struct nf_conn *ct; + struct nf_conn_synproxy *synproxy; + struct synproxy_options opts = {}; + const struct ip_ct_tcp *state; + struct tcphdr *th, _th; + unsigned int thoff; + + ct = nf_ct_get(skb, &ctinfo); + if (ct == NULL) + return NF_ACCEPT; + + synproxy = nfct_synproxy(ct); + if (synproxy == NULL) + return NF_ACCEPT; + + if (nf_is_loopback_packet(skb)) + return NF_ACCEPT; + + thoff = ip_hdrlen(skb); + th = skb_header_pointer(skb, thoff, sizeof(_th), &_th); + if (th == NULL) + return NF_DROP; + + state = &ct->proto.tcp; + switch (state->state) { + case TCP_CONNTRACK_CLOSE: + if (th->rst && !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { + nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - + ntohl(th->seq) + 1); + break; + } + + if (!th->syn || th->ack || + CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) + break; + + /* Reopened connection - reset the sequence number and timestamp + * adjustments, they will get initialized once the connection is + * reestablished. 
+ */ + nf_ct_seqadj_init(ct, ctinfo, 0); + synproxy->tsoff = 0; + this_cpu_inc(snet->stats->conn_reopened); + + /* fall through */ + case TCP_CONNTRACK_SYN_SENT: + synproxy_parse_options(skb, thoff, th, &opts); + + if (!th->syn && th->ack && + CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) { + /* Keep-Alives are sent with SEG.SEQ = SND.NXT-1, + * therefore we need to add 1 to make the SYN sequence + * number match the one of first SYN. + */ + if (synproxy_recv_client_ack(snet, skb, th, &opts, + ntohl(th->seq) + 1)) + this_cpu_inc(snet->stats->cookie_retrans); + + return NF_DROP; + } + + synproxy->isn = ntohl(th->ack_seq); + if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP) + synproxy->its = opts.tsecr; + break; + case TCP_CONNTRACK_SYN_RECV: + if (!th->syn || !th->ack) + break; + + synproxy_parse_options(skb, thoff, th, &opts); + if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP) + synproxy->tsoff = opts.tsval - synproxy->its; + + opts.options &= ~(XT_SYNPROXY_OPT_MSS | + XT_SYNPROXY_OPT_WSCALE | + XT_SYNPROXY_OPT_SACK_PERM); + + swap(opts.tsval, opts.tsecr); + synproxy_send_server_ack(snet, state, skb, th, &opts); + + nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ntohl(th->seq)); + + swap(opts.tsval, opts.tsecr); + synproxy_send_client_ack(snet, skb, th, &opts); + + consume_skb(skb); + return NF_STOLEN; + default: + break; + } + + synproxy_tstamp_adjust(skb, thoff, th, ct, ctinfo, synproxy); + return NF_ACCEPT; +} + +static int synproxy_tg4_check(const struct xt_tgchk_param *par) +{ + const struct ipt_entry *e = par->entryinfo; + + if (e->ip.proto != IPPROTO_TCP || + e->ip.invflags & XT_INV_PROTO) + return -EINVAL; + + return nf_ct_l3proto_try_module_get(par->family); +} + +static void synproxy_tg4_destroy(const struct xt_tgdtor_param *par) +{ + nf_ct_l3proto_module_put(par->family); +} + +static struct xt_target synproxy_tg4_reg __read_mostly = { + .name = "SYNPROXY", + .family = NFPROTO_IPV4, + .target = synproxy_tg4, + .targetsize = sizeof(struct xt_synproxy_info), + .checkentry = synproxy_tg4_check, + .destroy = synproxy_tg4_destroy, + .me = THIS_MODULE, +}; + +static struct nf_hook_ops ipv4_synproxy_ops[] __read_mostly = { + { + .hook = ipv4_synproxy_hook, + .owner = THIS_MODULE, + .pf = NFPROTO_IPV4, + .hooknum = NF_INET_LOCAL_IN, + .priority = NF_IP_PRI_CONNTRACK_CONFIRM - 1, + }, + { + .hook = ipv4_synproxy_hook, + .owner = THIS_MODULE, + .pf = NFPROTO_IPV4, + .hooknum = NF_INET_POST_ROUTING, + .priority = NF_IP_PRI_CONNTRACK_CONFIRM - 1, + }, +}; + +static int __init synproxy_tg4_init(void) +{ + int err; + + err = nf_register_hooks(ipv4_synproxy_ops, + ARRAY_SIZE(ipv4_synproxy_ops)); + if (err < 0) + goto err1; + + err = xt_register_target(&synproxy_tg4_reg); + if (err < 0) + goto err2; + + return 0; + +err2: + nf_unregister_hooks(ipv4_synproxy_ops, ARRAY_SIZE(ipv4_synproxy_ops)); +err1: + return err; +} + +static void __exit synproxy_tg4_exit(void) +{ + xt_unregister_target(&synproxy_tg4_reg); + nf_unregister_hooks(ipv4_synproxy_ops, ARRAY_SIZE(ipv4_synproxy_ops)); +} + +module_init(synproxy_tg4_init); +module_exit(synproxy_tg4_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Patrick McHardy "); diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index c45fc1a..62a171a 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -408,6 +408,9 @@ config NF_NAT_TFTP depends on NF_CONNTRACK && NF_NAT default NF_NAT && NF_CONNTRACK_TFTP +config NETFILTER_SYNPROXY + tristate + endif # NF_CONNTRACK config NETFILTER_XTABLES diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile 
index 89a9c16..c3a0a12 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -61,6 +61,9 @@ obj-$(CONFIG_NF_NAT_IRC) += nf_nat_irc.o obj-$(CONFIG_NF_NAT_SIP) += nf_nat_sip.o obj-$(CONFIG_NF_NAT_TFTP) += nf_nat_tftp.o +# SYNPROXY +obj-$(CONFIG_NETFILTER_SYNPROXY) += nf_synproxy_core.o + # generic X tables obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 00a7a94..5d892fe 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -48,6 +48,7 @@ #include #include #include +#include #include #include #include @@ -799,6 +800,11 @@ init_conntrack(struct net *net, struct nf_conn *tmpl, if (IS_ERR(ct)) return (struct nf_conntrack_tuple_hash *)ct; + if (tmpl && nfct_synproxy(tmpl)) { + nfct_seqadj_ext_add(ct); + nfct_synproxy_ext_add(ct); + } + timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL; if (timeout_ext) timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext); diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 984a8d1..44d1ea3 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -946,6 +947,21 @@ static int tcp_packet(struct nf_conn *ct, "state %s ", tcp_conntrack_names[old_state]); return NF_ACCEPT; case TCP_CONNTRACK_MAX: + /* Special case for SYN proxy: when the SYN to the server or + * the SYN/ACK from the server is lost, the client may transmit + * a keep-alive packet while in SYN_SENT state. This needs to + * be associated with the original conntrack entry in order to + * generate a new SYN with the correct sequence number. + */ + if (nfct_synproxy(ct) && old_state == TCP_CONNTRACK_SYN_SENT && + index == TCP_ACK_SET && dir == IP_CT_DIR_ORIGINAL && + ct->proto.tcp.last_dir == IP_CT_DIR_ORIGINAL && + ct->proto.tcp.seen[dir].td_end - 1 == ntohl(th->seq)) { + pr_debug("nf_ct_tcp: SYN proxy client keep alive\n"); + spin_unlock_bh(&ct->lock); + return NF_ACCEPT; + } + /* Invalid packet */ pr_debug("nf_ct_tcp: Invalid dir=%i index=%u ostate=%u\n", dir, get_conntrack_index(th), old_state); diff --git a/net/netfilter/nf_conntrack_seqadj.c b/net/netfilter/nf_conntrack_seqadj.c index 483eb9c..5f9bfd0 100644 --- a/net/netfilter/nf_conntrack_seqadj.c +++ b/net/netfilter/nf_conntrack_seqadj.c @@ -6,6 +6,26 @@ #include #include +int nf_ct_seqadj_init(struct nf_conn *ct, enum ip_conntrack_info ctinfo, + s32 off) +{ + enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); + struct nf_conn_seqadj *seqadj; + struct nf_ct_seqadj *this_way; + + if (off == 0) + return 0; + + set_bit(IPS_SEQ_ADJUST_BIT, &ct->status); + + seqadj = nfct_seqadj(ct); + this_way = &seqadj->seq[dir]; + this_way->offset_before = off; + this_way->offset_after = off; + return 0; +} +EXPORT_SYMBOL_GPL(nf_ct_seqadj_init); + int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo, __be32 seq, s32 off) { diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c new file mode 100644 index 0000000..d23dc79 --- /dev/null +++ b/net/netfilter/nf_synproxy_core.c @@ -0,0 +1,432 @@ +/* + * Copyright (c) 2013 Patrick McHardy + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +int synproxy_net_id; +EXPORT_SYMBOL_GPL(synproxy_net_id); + +void +synproxy_parse_options(const struct sk_buff *skb, unsigned int doff, + const struct tcphdr *th, struct synproxy_options *opts) +{ + int length = (th->doff * 4) - sizeof(*th); + u8 buf[40], *ptr; + + ptr = skb_header_pointer(skb, doff + sizeof(*th), length, buf); + BUG_ON(ptr == NULL); + + opts->options = 0; + while (length > 0) { + int opcode = *ptr++; + int opsize; + + switch (opcode) { + case TCPOPT_EOL: + return; + case TCPOPT_NOP: + length--; + continue; + default: + opsize = *ptr++; + if (opsize < 2) + return; + if (opsize > length) + return; + + switch (opcode) { + case TCPOPT_MSS: + if (opsize == TCPOLEN_MSS) { + opts->mss = get_unaligned_be16(ptr); + opts->options |= XT_SYNPROXY_OPT_MSS; + } + break; + case TCPOPT_WINDOW: + if (opsize == TCPOLEN_WINDOW) { + opts->wscale = *ptr; + if (opts->wscale > 14) + opts->wscale = 14; + opts->options |= XT_SYNPROXY_OPT_WSCALE; + } + break; + case TCPOPT_TIMESTAMP: + if (opsize == TCPOLEN_TIMESTAMP) { + opts->tsval = get_unaligned_be32(ptr); + opts->tsecr = get_unaligned_be32(ptr + 4); + opts->options |= XT_SYNPROXY_OPT_TIMESTAMP; + } + break; + case TCPOPT_SACK_PERM: + if (opsize == TCPOLEN_SACK_PERM) + opts->options |= XT_SYNPROXY_OPT_SACK_PERM; + break; + } + + ptr += opsize - 2; + length -= opsize; + } + } +} +EXPORT_SYMBOL_GPL(synproxy_parse_options); + +unsigned int synproxy_options_size(const struct synproxy_options *opts) +{ + unsigned int size = 0; + + if (opts->options & XT_SYNPROXY_OPT_MSS) + size += TCPOLEN_MSS_ALIGNED; + if (opts->options & XT_SYNPROXY_OPT_TIMESTAMP) + size += TCPOLEN_TSTAMP_ALIGNED; + else if (opts->options & XT_SYNPROXY_OPT_SACK_PERM) + size += TCPOLEN_SACKPERM_ALIGNED; + if (opts->options & XT_SYNPROXY_OPT_WSCALE) + size += TCPOLEN_WSCALE_ALIGNED; + + return size; +} +EXPORT_SYMBOL_GPL(synproxy_options_size); + +void +synproxy_build_options(struct tcphdr *th, const struct synproxy_options *opts) +{ + __be32 *ptr = (__be32 *)(th + 1); + u8 options = opts->options; + + if (options & XT_SYNPROXY_OPT_MSS) + *ptr++ = htonl((TCPOPT_MSS << 24) | + (TCPOLEN_MSS << 16) | + opts->mss); + + if (options & XT_SYNPROXY_OPT_TIMESTAMP) { + if (options & XT_SYNPROXY_OPT_SACK_PERM) + *ptr++ = htonl((TCPOPT_SACK_PERM << 24) | + (TCPOLEN_SACK_PERM << 16) | + (TCPOPT_TIMESTAMP << 8) | + TCPOLEN_TIMESTAMP); + else + *ptr++ = htonl((TCPOPT_NOP << 24) | + (TCPOPT_NOP << 16) | + (TCPOPT_TIMESTAMP << 8) | + TCPOLEN_TIMESTAMP); + + *ptr++ = htonl(opts->tsval); + *ptr++ = htonl(opts->tsecr); + } else if (options & XT_SYNPROXY_OPT_SACK_PERM) + *ptr++ = htonl((TCPOPT_NOP << 24) | + (TCPOPT_NOP << 16) | + (TCPOPT_SACK_PERM << 8) | + TCPOLEN_SACK_PERM); + + if (options & XT_SYNPROXY_OPT_WSCALE) + *ptr++ = htonl((TCPOPT_NOP << 24) | + (TCPOPT_WINDOW << 16) | + (TCPOLEN_WINDOW << 8) | + opts->wscale); +} +EXPORT_SYMBOL_GPL(synproxy_build_options); + +void synproxy_init_timestamp_cookie(const struct xt_synproxy_info *info, + struct synproxy_options *opts) +{ + opts->tsecr = opts->tsval; + opts->tsval = tcp_time_stamp & ~0x3f; + + if (opts->options & XT_SYNPROXY_OPT_WSCALE) + opts->tsval |= info->wscale; + else + opts->tsval |= 0xf; + + if (opts->options & XT_SYNPROXY_OPT_SACK_PERM) + opts->tsval |= 1 << 4; + + if (opts->options & XT_SYNPROXY_OPT_ECN) + opts->tsval |= 1 << 5; +} +EXPORT_SYMBOL_GPL(synproxy_init_timestamp_cookie); + +void 
synproxy_check_timestamp_cookie(struct synproxy_options *opts) +{ + opts->wscale = opts->tsecr & 0xf; + if (opts->wscale != 0xf) + opts->options |= XT_SYNPROXY_OPT_WSCALE; + + opts->options |= opts->tsecr & (1 << 4) ? XT_SYNPROXY_OPT_SACK_PERM : 0; + + opts->options |= opts->tsecr & (1 << 5) ? XT_SYNPROXY_OPT_ECN : 0; +} +EXPORT_SYMBOL_GPL(synproxy_check_timestamp_cookie); + +unsigned int synproxy_tstamp_adjust(struct sk_buff *skb, + unsigned int protoff, + struct tcphdr *th, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + const struct nf_conn_synproxy *synproxy) +{ + unsigned int optoff, optend; + u32 *ptr, old; + + if (synproxy->tsoff == 0) + return 1; + + optoff = protoff + sizeof(struct tcphdr); + optend = protoff + th->doff * 4; + + if (!skb_make_writable(skb, optend)) + return 0; + + while (optoff < optend) { + unsigned char *op = skb->data + optoff; + + switch (op[0]) { + case TCPOPT_EOL: + return 1; + case TCPOPT_NOP: + optoff++; + continue; + default: + if (optoff + 1 == optend || + optoff + op[1] > optend || + op[1] < 2) + return 0; + if (op[0] == TCPOPT_TIMESTAMP && + op[1] == TCPOLEN_TIMESTAMP) { + if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) { + ptr = (u32 *)&op[2]; + old = *ptr; + *ptr = htonl(ntohl(*ptr) - + synproxy->tsoff); + } else { + ptr = (u32 *)&op[6]; + old = *ptr; + *ptr = htonl(ntohl(*ptr) + + synproxy->tsoff); + } + inet_proto_csum_replace4(&th->check, skb, + old, *ptr, 0); + return 1; + } + optoff += op[1]; + } + } + return 1; +} +EXPORT_SYMBOL_GPL(synproxy_tstamp_adjust); + +static struct nf_ct_ext_type nf_ct_synproxy_extend __read_mostly = { + .len = sizeof(struct nf_conn_synproxy), + .align = __alignof__(struct nf_conn_synproxy), + .id = NF_CT_EXT_SYNPROXY, +}; + +#ifdef CONFIG_PROC_FS +static void *synproxy_cpu_seq_start(struct seq_file *seq, loff_t *pos) +{ + struct synproxy_net *snet = synproxy_pernet(seq_file_net(seq)); + int cpu; + + if (*pos == 0) + return SEQ_START_TOKEN; + + for (cpu = *pos - 1; cpu < nr_cpu_ids; cpu++) { + if (!cpu_possible(cpu)) + continue; + *pos = cpu + 1; + return per_cpu_ptr(snet->stats, cpu); + } + + return NULL; +} + +static void *synproxy_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + struct synproxy_net *snet = synproxy_pernet(seq_file_net(seq)); + int cpu; + + for (cpu = *pos; cpu < nr_cpu_ids; cpu++) { + if (!cpu_possible(cpu)) + continue; + *pos = cpu + 1; + return per_cpu_ptr(snet->stats, cpu); + } + + return NULL; +} + +static void synproxy_cpu_seq_stop(struct seq_file *seq, void *v) +{ + return; +} + +static int synproxy_cpu_seq_show(struct seq_file *seq, void *v) +{ + struct synproxy_stats *stats = v; + + if (v == SEQ_START_TOKEN) { + seq_printf(seq, "entries\t\tsyn_received\t" + "cookie_invalid\tcookie_valid\t" + "cookie_retrans\tconn_reopened\n"); + return 0; + } + + seq_printf(seq, "%08x\t%08x\t%08x\t%08x\t%08x\t%08x\n", 0, + stats->syn_received, + stats->cookie_invalid, + stats->cookie_valid, + stats->cookie_retrans, + stats->conn_reopened); + + return 0; +} + +static const struct seq_operations synproxy_cpu_seq_ops = { + .start = synproxy_cpu_seq_start, + .next = synproxy_cpu_seq_next, + .stop = synproxy_cpu_seq_stop, + .show = synproxy_cpu_seq_show, +}; + +static int synproxy_cpu_seq_open(struct inode *inode, struct file *file) +{ + return seq_open_net(inode, file, &synproxy_cpu_seq_ops, + sizeof(struct seq_net_private)); +} + +static const struct file_operations synproxy_cpu_seq_fops = { + .owner = THIS_MODULE, + .open = synproxy_cpu_seq_open, + .read = seq_read, + .llseek = seq_lseek, + 
.release = seq_release_net, +}; + +static int __net_init synproxy_proc_init(struct net *net) +{ + if (!proc_create("synproxy", S_IRUGO, net->proc_net_stat, + &synproxy_cpu_seq_fops)) + return -ENOMEM; + return 0; +} + +static void __net_exit synproxy_proc_exit(struct net *net) +{ + remove_proc_entry("synproxy", net->proc_net_stat); +} +#else +static int __net_init synproxy_proc_init(struct net *net) +{ + return 0; +} + +static void __net_exit synproxy_proc_exit(struct net *net) +{ + return; +} +#endif /* CONFIG_PROC_FS */ + +static int __net_init synproxy_net_init(struct net *net) +{ + struct synproxy_net *snet = synproxy_pernet(net); + struct nf_conntrack_tuple t; + struct nf_conn *ct; + int err = -ENOMEM; + + memset(&t, 0, sizeof(t)); + ct = nf_conntrack_alloc(net, 0, &t, &t, GFP_KERNEL); + if (IS_ERR(ct)) { + err = PTR_ERR(ct); + goto err1; + } + + __set_bit(IPS_TEMPLATE_BIT, &ct->status); + __set_bit(IPS_CONFIRMED_BIT, &ct->status); + if (!nfct_seqadj_ext_add(ct)) + goto err2; + if (!nfct_synproxy_ext_add(ct)) + goto err2; + + snet->tmpl = ct; + + snet->stats = alloc_percpu(struct synproxy_stats); + if (snet->stats == NULL) + goto err2; + + err = synproxy_proc_init(net); + if (err < 0) + goto err3; + + return 0; + +err3: + free_percpu(snet->stats); +err2: + nf_conntrack_free(ct); +err1: + return err; +} + +static void __net_exit synproxy_net_exit(struct net *net) +{ + struct synproxy_net *snet = synproxy_pernet(net); + + nf_conntrack_free(snet->tmpl); + synproxy_proc_exit(net); + free_percpu(snet->stats); +} + +static struct pernet_operations synproxy_net_ops = { + .init = synproxy_net_init, + .exit = synproxy_net_exit, + .id = &synproxy_net_id, + .size = sizeof(struct synproxy_net), +}; + +static int __init synproxy_core_init(void) +{ + int err; + + err = nf_ct_extend_register(&nf_ct_synproxy_extend); + if (err < 0) + goto err1; + + err = register_pernet_subsys(&synproxy_net_ops); + if (err < 0) + goto err2; + + return 0; + +err2: + nf_ct_extend_unregister(&nf_ct_synproxy_extend); +err1: + return err; +} + +static void __exit synproxy_core_exit(void) +{ + unregister_pernet_subsys(&synproxy_net_ops); + nf_ct_extend_unregister(&nf_ct_synproxy_extend); +} + +module_init(synproxy_core_init); +module_exit(synproxy_core_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Patrick McHardy "); -- cgit v0.10.2 From 81eb6a1487718a89621a0e0be7fafd0cd7c429a4 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Tue, 27 Aug 2013 08:50:15 +0200 Subject: net: syncookies: export cookie_v6_init_sequence/cookie_v6_check Extract the local TCP stack independant parts of tcp_v6_init_sequence() and cookie_v6_check() and export them for use by the upcoming IPv6 SYNPROXY target. Signed-off-by: Patrick McHardy Acked-by: David S. 
Miller Tested-by: Martin Topholm Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Pablo Neira Ayuso diff --git a/include/net/tcp.h b/include/net/tcp.h index 8e2b636..dd5e16f 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -499,8 +499,12 @@ extern bool cookie_check_timestamp(struct tcp_options_received *opt, struct net *net, bool *ecn_ok); /* From net/ipv6/syncookies.c */ +extern int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th, + u32 cookie); extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb); #ifdef CONFIG_SYN_COOKIES +extern u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph, + const struct tcphdr *th, u16 *mssp); extern __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb, __u16 *mss); #else diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index d5dda20..bf63ac8 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -112,32 +112,38 @@ static __u32 check_tcp_syn_cookie(__u32 cookie, const struct in6_addr *saddr, & COOKIEMASK; } -__u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb, __u16 *mssp) +u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph, + const struct tcphdr *th, __u16 *mssp) { - const struct ipv6hdr *iph = ipv6_hdr(skb); - const struct tcphdr *th = tcp_hdr(skb); int mssind; const __u16 mss = *mssp; - tcp_synq_overflow(sk); - for (mssind = ARRAY_SIZE(msstab) - 1; mssind ; mssind--) if (mss >= msstab[mssind]) break; *mssp = msstab[mssind]; - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT); - return secure_tcp_syn_cookie(&iph->saddr, &iph->daddr, th->source, th->dest, ntohl(th->seq), jiffies / (HZ * 60), mssind); } +EXPORT_SYMBOL_GPL(__cookie_v6_init_sequence); -static inline int cookie_check(const struct sk_buff *skb, __u32 cookie) +__u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb, __u16 *mssp) { const struct ipv6hdr *iph = ipv6_hdr(skb); const struct tcphdr *th = tcp_hdr(skb); + + tcp_synq_overflow(sk); + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT); + + return __cookie_v6_init_sequence(iph, th, mssp); +} + +int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th, + __u32 cookie) +{ __u32 seq = ntohl(th->seq) - 1; __u32 mssind = check_tcp_syn_cookie(cookie, &iph->saddr, &iph->daddr, th->source, th->dest, seq, @@ -145,6 +151,7 @@ static inline int cookie_check(const struct sk_buff *skb, __u32 cookie) return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0; } +EXPORT_SYMBOL_GPL(__cookie_v6_check); struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) { @@ -167,7 +174,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) goto out; if (tcp_synq_no_recent_overflow(sk) || - (mss = cookie_check(skb, cookie)) == 0) { + (mss = __cookie_v6_check(ipv6_hdr(skb), th, cookie)) == 0) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED); goto out; } -- cgit v0.10.2 From 4ad362282cb45bbc831a182e45637da8c5bd7aa1 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Tue, 27 Aug 2013 08:50:16 +0200 Subject: netfilter: add IPv6 SYNPROXY target Add an IPv6 version of the SYNPROXY target. The main differences from the IPv4 version are routing and IP header construction.
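An illustrative ruleset (not part of this patch: the interface, port and option values are placeholders, and the CT/SYNPROXY rule syntax assumes an ip6tables userspace release with matching SYNPROXY support):

    ip6tables -t raw -A PREROUTING -i eth0 -p tcp --dport 80 --syn -j CT --notrack
    ip6tables -A INPUT -i eth0 -p tcp --dport 80 -m conntrack --ctstate INVALID,UNTRACKED -j SYNPROXY --sack-perm --timestamp --wscale 7 --mss 1460
    ip6tables -A INPUT -i eth0 -p tcp --dport 80 -m conntrack --ctstate INVALID -j DROP

Disabling loose conntrack pickup (sysctl net.netfilter.nf_conntrack_tcp_loose=0) is usually recommended alongside such a setup so that only the proxied handshake can create TCP conntrack entries.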
Signed-off-by: Patrick McHardy Tested-by: Martin Topholm Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Pablo Neira Ayuso diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig index 4433ab40..a7f842b 100644 --- a/net/ipv6/netfilter/Kconfig +++ b/net/ipv6/netfilter/Kconfig @@ -153,6 +153,19 @@ config IP6_NF_TARGET_REJECT To compile it as a module, choose M here. If unsure, say N. +config IP6_NF_TARGET_SYNPROXY + tristate "SYNPROXY target support" + depends on NF_CONNTRACK && NETFILTER_ADVANCED + select NETFILTER_SYNPROXY + select SYN_COOKIES + help + The SYNPROXY target allows you to intercept TCP connections and + establish them using syncookies before they are passed on to the + server. This allows to avoid conntrack and server resource usage + during SYN-flood attacks. + + To compile it as a module, choose M here. If unsure, say N. + config IP6_NF_MANGLE tristate "Packet mangling" default m if NETFILTER_ADVANCED=n diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile index ad13bf0..2b53738 100644 --- a/net/ipv6/netfilter/Makefile +++ b/net/ipv6/netfilter/Makefile @@ -37,3 +37,4 @@ obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o obj-$(CONFIG_IP6_NF_TARGET_MASQUERADE) += ip6t_MASQUERADE.o obj-$(CONFIG_IP6_NF_TARGET_NPT) += ip6t_NPT.o obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o +obj-$(CONFIG_IP6_NF_TARGET_SYNPROXY) += ip6t_SYNPROXY.o diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c new file mode 100644 index 0000000..4270a9b --- /dev/null +++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c @@ -0,0 +1,495 @@ +/* + * Copyright (c) 2013 Patrick McHardy + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +static struct ipv6hdr * +synproxy_build_ip(struct sk_buff *skb, const struct in6_addr *saddr, + const struct in6_addr *daddr) +{ + struct ipv6hdr *iph; + + skb_reset_network_header(skb); + iph = (struct ipv6hdr *)skb_put(skb, sizeof(*iph)); + ip6_flow_hdr(iph, 0, 0); + iph->hop_limit = 64; //XXX + iph->nexthdr = IPPROTO_TCP; + iph->saddr = *saddr; + iph->daddr = *daddr; + + return iph; +} + +static void +synproxy_send_tcp(const struct sk_buff *skb, struct sk_buff *nskb, + struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo, + struct ipv6hdr *niph, struct tcphdr *nth, + unsigned int tcp_hdr_size) +{ + struct net *net = nf_ct_net((struct nf_conn *)nfct); + struct dst_entry *dst; + struct flowi6 fl6; + + nth->check = ~tcp_v6_check(tcp_hdr_size, &niph->saddr, &niph->daddr, 0); + nskb->ip_summed = CHECKSUM_PARTIAL; + nskb->csum_start = (unsigned char *)nth - nskb->head; + nskb->csum_offset = offsetof(struct tcphdr, check); + + memset(&fl6, 0, sizeof(fl6)); + fl6.flowi6_proto = IPPROTO_TCP; + fl6.saddr = niph->saddr; + fl6.daddr = niph->daddr; + fl6.fl6_sport = nth->source; + fl6.fl6_dport = nth->dest; + security_skb_classify_flow((struct sk_buff *)skb, flowi6_to_flowi(&fl6)); + dst = ip6_route_output(net, NULL, &fl6); + if (dst == NULL || dst->error) { + dst_release(dst); + goto free_nskb; + } + dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0); + if (IS_ERR(dst)) + goto free_nskb; + + skb_dst_set(nskb, dst); + + if (nfct) { + nskb->nfct = nfct; + nskb->nfctinfo = ctinfo; + nf_conntrack_get(nfct); + } + + ip6_local_out(nskb); + return; + +free_nskb: + kfree_skb(nskb); +} + +static void +synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th, + const struct synproxy_options *opts) +{ + struct sk_buff *nskb; + struct ipv6hdr *iph, *niph; + struct tcphdr *nth; + unsigned int tcp_hdr_size; + u16 mss = opts->mss; + + iph = ipv6_hdr(skb); + + tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts); + nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER, + GFP_ATOMIC); + if (nskb == NULL) + return; + skb_reserve(nskb, MAX_TCP_HEADER); + + niph = synproxy_build_ip(nskb, &iph->daddr, &iph->saddr); + + skb_reset_transport_header(nskb); + nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); + nth->source = th->dest; + nth->dest = th->source; + nth->seq = htonl(__cookie_v6_init_sequence(iph, th, &mss)); + nth->ack_seq = htonl(ntohl(th->seq) + 1); + tcp_flag_word(nth) = TCP_FLAG_SYN | TCP_FLAG_ACK; + if (opts->options & XT_SYNPROXY_OPT_ECN) + tcp_flag_word(nth) |= TCP_FLAG_ECE; + nth->doff = tcp_hdr_size / 4; + nth->window = 0; + nth->check = 0; + nth->urg_ptr = 0; + + synproxy_build_options(nth, opts); + + synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, + niph, nth, tcp_hdr_size); +} + +static void +synproxy_send_server_syn(const struct synproxy_net *snet, + const struct sk_buff *skb, const struct tcphdr *th, + const struct synproxy_options *opts, u32 recv_seq) +{ + struct sk_buff *nskb; + struct ipv6hdr *iph, *niph; + struct tcphdr *nth; + unsigned int tcp_hdr_size; + + iph = ipv6_hdr(skb); + + tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts); + nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER, + GFP_ATOMIC); + if (nskb == NULL) + return; + skb_reserve(nskb, MAX_TCP_HEADER); + + niph = synproxy_build_ip(nskb, &iph->saddr, &iph->daddr); + + skb_reset_transport_header(nskb); + nth = (struct tcphdr 
*)skb_put(nskb, tcp_hdr_size); + nth->source = th->source; + nth->dest = th->dest; + nth->seq = htonl(recv_seq - 1); + /* ack_seq is used to relay our ISN to the synproxy hook to initialize + * sequence number translation once a connection tracking entry exists. + */ + nth->ack_seq = htonl(ntohl(th->ack_seq) - 1); + tcp_flag_word(nth) = TCP_FLAG_SYN; + if (opts->options & XT_SYNPROXY_OPT_ECN) + tcp_flag_word(nth) |= TCP_FLAG_ECE | TCP_FLAG_CWR; + nth->doff = tcp_hdr_size / 4; + nth->window = th->window; + nth->check = 0; + nth->urg_ptr = 0; + + synproxy_build_options(nth, opts); + + synproxy_send_tcp(skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW, + niph, nth, tcp_hdr_size); +} + +static void +synproxy_send_server_ack(const struct synproxy_net *snet, + const struct ip_ct_tcp *state, + const struct sk_buff *skb, const struct tcphdr *th, + const struct synproxy_options *opts) +{ + struct sk_buff *nskb; + struct ipv6hdr *iph, *niph; + struct tcphdr *nth; + unsigned int tcp_hdr_size; + + iph = ipv6_hdr(skb); + + tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts); + nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER, + GFP_ATOMIC); + if (nskb == NULL) + return; + skb_reserve(nskb, MAX_TCP_HEADER); + + niph = synproxy_build_ip(nskb, &iph->daddr, &iph->saddr); + + skb_reset_transport_header(nskb); + nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); + nth->source = th->dest; + nth->dest = th->source; + nth->seq = htonl(ntohl(th->ack_seq)); + nth->ack_seq = htonl(ntohl(th->seq) + 1); + tcp_flag_word(nth) = TCP_FLAG_ACK; + nth->doff = tcp_hdr_size / 4; + nth->window = htons(state->seen[IP_CT_DIR_ORIGINAL].td_maxwin); + nth->check = 0; + nth->urg_ptr = 0; + + synproxy_build_options(nth, opts); + + synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); +} + +static void +synproxy_send_client_ack(const struct synproxy_net *snet, + const struct sk_buff *skb, const struct tcphdr *th, + const struct synproxy_options *opts) +{ + struct sk_buff *nskb; + struct ipv6hdr *iph, *niph; + struct tcphdr *nth; + unsigned int tcp_hdr_size; + + iph = ipv6_hdr(skb); + + tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts); + nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER, + GFP_ATOMIC); + if (nskb == NULL) + return; + skb_reserve(nskb, MAX_TCP_HEADER); + + niph = synproxy_build_ip(nskb, &iph->saddr, &iph->daddr); + + skb_reset_transport_header(nskb); + nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); + nth->source = th->source; + nth->dest = th->dest; + nth->seq = htonl(ntohl(th->seq) + 1); + nth->ack_seq = th->ack_seq; + tcp_flag_word(nth) = TCP_FLAG_ACK; + nth->doff = tcp_hdr_size / 4; + nth->window = ntohs(htons(th->window) >> opts->wscale); + nth->check = 0; + nth->urg_ptr = 0; + + synproxy_build_options(nth, opts); + + synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); +} + +static bool +synproxy_recv_client_ack(const struct synproxy_net *snet, + const struct sk_buff *skb, const struct tcphdr *th, + struct synproxy_options *opts, u32 recv_seq) +{ + int mss; + + mss = __cookie_v6_check(ipv6_hdr(skb), th, ntohl(th->ack_seq) - 1); + if (mss == 0) { + this_cpu_inc(snet->stats->cookie_invalid); + return false; + } + + this_cpu_inc(snet->stats->cookie_valid); + opts->mss = mss; + + if (opts->options & XT_SYNPROXY_OPT_TIMESTAMP) + synproxy_check_timestamp_cookie(opts); + + synproxy_send_server_syn(snet, skb, th, opts, recv_seq); + return true; +} + +static unsigned int +synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par) +{ + const struct 
xt_synproxy_info *info = par->targinfo; + struct synproxy_net *snet = synproxy_pernet(dev_net(par->in)); + struct synproxy_options opts = {}; + struct tcphdr *th, _th; + + if (nf_ip6_checksum(skb, par->hooknum, par->thoff, IPPROTO_TCP)) + return NF_DROP; + + th = skb_header_pointer(skb, par->thoff, sizeof(_th), &_th); + if (th == NULL) + return NF_DROP; + + synproxy_parse_options(skb, par->thoff, th, &opts); + + if (th->syn) { + /* Initial SYN from client */ + this_cpu_inc(snet->stats->syn_received); + + if (th->ece && th->cwr) + opts.options |= XT_SYNPROXY_OPT_ECN; + + opts.options &= info->options; + if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP) + synproxy_init_timestamp_cookie(info, &opts); + else + opts.options &= ~(XT_SYNPROXY_OPT_WSCALE | + XT_SYNPROXY_OPT_SACK_PERM | + XT_SYNPROXY_OPT_ECN); + + synproxy_send_client_synack(skb, th, &opts); + } else if (th->ack && !(th->fin || th->rst)) + /* ACK from client */ + synproxy_recv_client_ack(snet, skb, th, &opts, ntohl(th->seq)); + + return NF_DROP; +} + +static unsigned int ipv6_synproxy_hook(unsigned int hooknum, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) +{ + struct synproxy_net *snet = synproxy_pernet(dev_net(in ? : out)); + enum ip_conntrack_info ctinfo; + struct nf_conn *ct; + struct nf_conn_synproxy *synproxy; + struct synproxy_options opts = {}; + const struct ip_ct_tcp *state; + struct tcphdr *th, _th; + __be16 frag_off; + u8 nexthdr; + int thoff; + + ct = nf_ct_get(skb, &ctinfo); + if (ct == NULL) + return NF_ACCEPT; + + synproxy = nfct_synproxy(ct); + if (synproxy == NULL) + return NF_ACCEPT; + + if (nf_is_loopback_packet(skb)) + return NF_ACCEPT; + + nexthdr = ipv6_hdr(skb)->nexthdr; + thoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, + &frag_off); + if (thoff < 0) + return NF_ACCEPT; + + th = skb_header_pointer(skb, thoff, sizeof(_th), &_th); + if (th == NULL) + return NF_DROP; + + state = &ct->proto.tcp; + switch (state->state) { + case TCP_CONNTRACK_CLOSE: + if (th->rst && !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { + nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - + ntohl(th->seq) + 1); + break; + } + + if (!th->syn || th->ack || + CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) + break; + + /* Reopened connection - reset the sequence number and timestamp + * adjustments, they will get initialized once the connection is + * reestablished. + */ + nf_ct_seqadj_init(ct, ctinfo, 0); + synproxy->tsoff = 0; + this_cpu_inc(snet->stats->conn_reopened); + + /* fall through */ + case TCP_CONNTRACK_SYN_SENT: + synproxy_parse_options(skb, thoff, th, &opts); + + if (!th->syn && th->ack && + CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) { + /* Keep-Alives are sent with SEG.SEQ = SND.NXT-1, + * therefore we need to add 1 to make the SYN sequence + * number match the one of first SYN. 
+ */ + if (synproxy_recv_client_ack(snet, skb, th, &opts, + ntohl(th->seq) + 1)) + this_cpu_inc(snet->stats->cookie_retrans); + + return NF_DROP; + } + + synproxy->isn = ntohl(th->ack_seq); + if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP) + synproxy->its = opts.tsecr; + break; + case TCP_CONNTRACK_SYN_RECV: + if (!th->syn || !th->ack) + break; + + synproxy_parse_options(skb, thoff, th, &opts); + if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP) + synproxy->tsoff = opts.tsval - synproxy->its; + + opts.options &= ~(XT_SYNPROXY_OPT_MSS | + XT_SYNPROXY_OPT_WSCALE | + XT_SYNPROXY_OPT_SACK_PERM); + + swap(opts.tsval, opts.tsecr); + synproxy_send_server_ack(snet, state, skb, th, &opts); + + nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ntohl(th->seq)); + + swap(opts.tsval, opts.tsecr); + synproxy_send_client_ack(snet, skb, th, &opts); + + consume_skb(skb); + return NF_STOLEN; + default: + break; + } + + synproxy_tstamp_adjust(skb, thoff, th, ct, ctinfo, synproxy); + return NF_ACCEPT; +} + +static int synproxy_tg6_check(const struct xt_tgchk_param *par) +{ + const struct ip6t_entry *e = par->entryinfo; + + if (!(e->ipv6.flags & IP6T_F_PROTO) || + e->ipv6.proto != IPPROTO_TCP || + e->ipv6.invflags & XT_INV_PROTO) + return -EINVAL; + + return nf_ct_l3proto_try_module_get(par->family); +} + +static void synproxy_tg6_destroy(const struct xt_tgdtor_param *par) +{ + nf_ct_l3proto_module_put(par->family); +} + +static struct xt_target synproxy_tg6_reg __read_mostly = { + .name = "SYNPROXY", + .family = NFPROTO_IPV6, + .target = synproxy_tg6, + .targetsize = sizeof(struct xt_synproxy_info), + .checkentry = synproxy_tg6_check, + .destroy = synproxy_tg6_destroy, + .me = THIS_MODULE, +}; + +static struct nf_hook_ops ipv6_synproxy_ops[] __read_mostly = { + { + .hook = ipv6_synproxy_hook, + .owner = THIS_MODULE, + .pf = NFPROTO_IPV6, + .hooknum = NF_INET_LOCAL_IN, + .priority = NF_IP_PRI_CONNTRACK_CONFIRM - 1, + }, + { + .hook = ipv6_synproxy_hook, + .owner = THIS_MODULE, + .pf = NFPROTO_IPV6, + .hooknum = NF_INET_POST_ROUTING, + .priority = NF_IP_PRI_CONNTRACK_CONFIRM - 1, + }, +}; + +static int __init synproxy_tg6_init(void) +{ + int err; + + err = nf_register_hooks(ipv6_synproxy_ops, + ARRAY_SIZE(ipv6_synproxy_ops)); + if (err < 0) + goto err1; + + err = xt_register_target(&synproxy_tg6_reg); + if (err < 0) + goto err2; + + return 0; + +err2: + nf_unregister_hooks(ipv6_synproxy_ops, ARRAY_SIZE(ipv6_synproxy_ops)); +err1: + return err; +} + +static void __exit synproxy_tg6_exit(void) +{ + xt_unregister_target(&synproxy_tg6_reg); + nf_unregister_hooks(ipv6_synproxy_ops, ARRAY_SIZE(ipv6_synproxy_ops)); +} + +module_init(synproxy_tg6_init); +module_exit(synproxy_tg6_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Patrick McHardy "); -- cgit v0.10.2 From b7e092c05b308674c642ed7fb754d555f0ebba81 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 27 Aug 2013 11:47:26 +0200 Subject: netfilter: ctnetlink: fix uninitialized variable net/netfilter/nf_conntrack_netlink.c: In function 'ctnetlink_nfqueue_attach_expect': 'helper' may be used uninitialized in this function It was only initialized in if CTA_EXPECT_HELP_NAME attribute was present, it must be NULL otherwise. Problem added recently in bd077937 (netfilter: nfnetlink_queue: allow to attach expectations to conntracks). 
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 7c55745..eea936b 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -2162,7 +2162,7 @@ ctnetlink_nfqueue_attach_expect(const struct nlattr *attr, struct nf_conn *ct, { struct nlattr *cda[CTA_EXPECT_MAX+1]; struct nf_conntrack_tuple tuple, mask; - struct nf_conntrack_helper *helper; + struct nf_conntrack_helper *helper = NULL; struct nf_conntrack_expect *exp; int err; -- cgit v0.10.2 From c54f38c9aa22ff65ca9f4c1bdbf2a11d017205f3 Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Mon, 29 Jul 2013 17:56:44 +0200 Subject: batman-adv: set skb priority according to content The skb priority field may help the wireless driver to choose the right queue (e.g. WMM queues). This should be set in batman-adv, as this information is only available here. This patch adds support for IPv4/IPv6 DS fields and VLAN PCP. Note that only VLAN PCP is used if a VLAN header is present. Also initially set TC_PRIO_CONTROL only for self-generated packets, and keep the priority set by higher layers. Signed-off-by: Simon Wunderlich Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 62da527..9886a2f 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -478,6 +478,7 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff, kfree(forw_packet_aggr); goto out; } + forw_packet_aggr->skb->priority = TC_PRIO_CONTROL; skb_reserve(forw_packet_aggr->skb, ETH_HLEN); skb_buff = skb_put(forw_packet_aggr->skb, packet_len); diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c index b27508b..5a99bb4 100644 --- a/net/batman-adv/icmp_socket.c +++ b/net/batman-adv/icmp_socket.c @@ -183,6 +183,7 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff, goto out; } + skb->priority = TC_PRIO_CONTROL; skb_reserve(skb, ETH_HLEN); icmp_packet = (struct batadv_icmp_packet_rr *)skb_put(skb, packet_len); diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index 08125f3..c72d1bc 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -19,6 +19,10 @@ #include #include +#include +#include +#include +#include #include "main.h" #include "sysfs.h" #include "debugfs.h" @@ -249,6 +253,60 @@ out: return primary_if; } +/** + * batadv_skb_set_priority - sets skb priority according to packet content + * @skb: the packet to be sent + * @offset: offset to the packet content + * + * This function sets a value between 256 and 263 (802.1d priority), which + * can be interpreted by the cfg80211 or other drivers. 
+ */ +void batadv_skb_set_priority(struct sk_buff *skb, int offset) +{ + struct iphdr ip_hdr_tmp, *ip_hdr; + struct ipv6hdr ip6_hdr_tmp, *ip6_hdr; + struct ethhdr ethhdr_tmp, *ethhdr; + struct vlan_ethhdr *vhdr, vhdr_tmp; + u32 prio; + + /* already set, do nothing */ + if (skb->priority >= 256 && skb->priority <= 263) + return; + + ethhdr = skb_header_pointer(skb, offset, sizeof(*ethhdr), ðhdr_tmp); + if (!ethhdr) + return; + + switch (ethhdr->h_proto) { + case htons(ETH_P_8021Q): + vhdr = skb_header_pointer(skb, offset + sizeof(*vhdr), + sizeof(*vhdr), &vhdr_tmp); + if (!vhdr) + return; + prio = ntohs(vhdr->h_vlan_TCI) & VLAN_PRIO_MASK; + prio = prio >> VLAN_PRIO_SHIFT; + break; + case htons(ETH_P_IP): + ip_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr), + sizeof(*ip_hdr), &ip_hdr_tmp); + if (!ip_hdr) + return; + prio = (ipv4_get_dsfield(ip_hdr) & 0xfc) >> 5; + break; + case htons(ETH_P_IPV6): + ip6_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr), + sizeof(*ip6_hdr), &ip6_hdr_tmp); + if (!ip6_hdr) + return; + prio = (ipv6_get_dsfield(ip6_hdr) & 0xfc) >> 5; + break; + default: + return; + } + + skb->priority = prio + 256; +} + static int batadv_recv_unhandled_packet(struct sk_buff *skb, struct batadv_hard_iface *recv_if) { diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index 5e9aebb..a7bca78 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -184,6 +184,7 @@ void batadv_mesh_free(struct net_device *soft_iface); int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr); struct batadv_hard_iface * batadv_seq_print_text_primary_if_get(struct seq_file *seq); +void batadv_skb_set_priority(struct sk_buff *skb, int offset); int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev); diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 2f0bd3f..0439395 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -775,7 +775,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb, struct batadv_neigh_node *neigh_node = NULL; struct batadv_unicast_packet *unicast_packet; struct ethhdr *ethhdr = eth_hdr(skb); - int res, ret = NET_RX_DROP; + int res, hdr_len, ret = NET_RX_DROP; struct sk_buff *new_skb; unicast_packet = (struct batadv_unicast_packet *)skb->data; @@ -835,6 +835,22 @@ static int batadv_route_unicast_packet(struct sk_buff *skb, /* decrement ttl */ unicast_packet->header.ttl--; + switch (unicast_packet->header.packet_type) { + case BATADV_UNICAST_4ADDR: + hdr_len = sizeof(struct batadv_unicast_4addr_packet); + break; + case BATADV_UNICAST: + hdr_len = sizeof(struct batadv_unicast_packet); + break; + default: + /* other packet types not supported - yet */ + hdr_len = -1; + break; + } + + if (hdr_len > 0) + batadv_skb_set_priority(skb, hdr_len); + res = batadv_send_skb_to_orig(skb, orig_node, recv_if); /* translate transmit result into receive result */ @@ -1193,6 +1209,8 @@ int batadv_recv_bcast_packet(struct sk_buff *skb, if (batadv_bla_check_bcast_duplist(bat_priv, skb)) goto out; + batadv_skb_set_priority(skb, sizeof(struct batadv_bcast_packet)); + /* rebroadcast packet */ batadv_add_bcast_packet_to_list(bat_priv, skb, 1); diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index e9ff8d8..0266edd 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c @@ -67,7 +67,6 @@ int batadv_send_skb_packet(struct sk_buff *skb, ethhdr->h_proto = __constant_htons(ETH_P_BATMAN); skb_set_network_header(skb, ETH_HLEN); - 
skb->priority = TC_PRIO_CONTROL; skb->protocol = __constant_htons(ETH_P_BATMAN); skb->dev = hard_iface->net_dev; diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 0f04e1c..4493913 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -229,6 +229,8 @@ static int batadv_interface_tx(struct sk_buff *skb, */ } + batadv_skb_set_priority(skb, 0); + /* ethernet packet should be broadcasted */ if (do_bcast) { primary_if = batadv_primary_if_get_selected(bat_priv); diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 429aeef..34510f3 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -1626,6 +1626,7 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn, if (!skb) goto out; + skb->priority = TC_PRIO_CONTROL; skb_reserve(skb, ETH_HLEN); tt_response = (struct batadv_tt_query_packet *)skb_put(skb, len); tt_response->ttvn = ttvn; @@ -1691,6 +1692,7 @@ static int batadv_send_tt_request(struct batadv_priv *bat_priv, if (!skb) goto out; + skb->priority = TC_PRIO_CONTROL; skb_reserve(skb, ETH_HLEN); tt_req_len = sizeof(*tt_request); @@ -1788,6 +1790,7 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv, if (!skb) goto unlock; + skb->priority = TC_PRIO_CONTROL; skb_reserve(skb, ETH_HLEN); packet_pos = skb_put(skb, len); tt_response = (struct batadv_tt_query_packet *)packet_pos; @@ -1906,6 +1909,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv, if (!skb) goto unlock; + skb->priority = TC_PRIO_CONTROL; skb_reserve(skb, ETH_HLEN); packet_pos = skb_put(skb, len); tt_response = (struct batadv_tt_query_packet *)packet_pos; @@ -2240,6 +2244,7 @@ static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client, if (!skb) goto out; + skb->priority = TC_PRIO_CONTROL; skb_reserve(skb, ETH_HLEN); roam_adv_packet = (struct batadv_roam_adv_packet *)skb_put(skb, len); diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c index 857e1b8..48b31d3 100644 --- a/net/batman-adv/unicast.c +++ b/net/batman-adv/unicast.c @@ -242,6 +242,8 @@ int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv, frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len); if (!frag_skb) goto dropped; + + skb->priority = TC_PRIO_CONTROL; skb_reserve(frag_skb, ucf_hdr_len); unicast_packet = (struct batadv_unicast_packet *)skb->data; diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c index 4983340..d8ea31a 100644 --- a/net/batman-adv/vis.c +++ b/net/batman-adv/vis.c @@ -397,6 +397,7 @@ batadv_add_packet(struct batadv_priv *bat_priv, kfree(info); return NULL; } + info->skb_packet->priority = TC_PRIO_CONTROL; skb_reserve(info->skb_packet, ETH_HLEN); packet = (struct batadv_vis_packet *)skb_put(info->skb_packet, len); @@ -861,6 +862,7 @@ int batadv_vis_init(struct batadv_priv *bat_priv) if (!bat_priv->vis.my_info->skb_packet) goto free_info; + bat_priv->vis.my_info->skb_packet->priority = TC_PRIO_CONTROL; skb_reserve(bat_priv->vis.my_info->skb_packet, ETH_HLEN); tmp_skb = bat_priv->vis.my_info->skb_packet; packet = (struct batadv_vis_packet *)skb_put(tmp_skb, sizeof(*packet)); -- cgit v0.10.2 From 791c2a2d3f1848b56b14706059d9972dade5ec1e Mon Sep 17 00:00:00 2001 From: Antonio Quartulli Date: Sat, 17 Aug 2013 12:44:44 +0200 Subject: batman-adv: move enum definition at the top of the file Signed-off-by: Antonio Quartulli diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 9886a2f..0a8a80c 100644 --- 
a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -28,6 +28,22 @@ #include "bat_algo.h" #include "network-coding.h" + +/** + * batadv_dup_status - duplicate status + * @BATADV_NO_DUP: the packet is no duplicate + * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for the + * neighbor) + * @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor + * @BATADV_PROTECTED: originator is currently protected (after reboot) + */ +enum batadv_dup_status { + BATADV_NO_DUP = 0, + BATADV_ORIG_DUP, + BATADV_NEIGH_DUP, + BATADV_PROTECTED, +}; + /** * batadv_ring_buffer_set - update the ring buffer with the given value * @lq_recv: pointer to the ring buffer @@ -71,21 +87,6 @@ static uint8_t batadv_ring_buffer_avg(const uint8_t lq_recv[]) return (uint8_t)(sum / count); } -/* - * batadv_dup_status - duplicate status - * @BATADV_NO_DUP: the packet is no duplicate - * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for the - * neighbor) - * @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor - * @BATADV_PROTECTED: originator is currently protected (after reboot) - */ -enum batadv_dup_status { - BATADV_NO_DUP = 0, - BATADV_ORIG_DUP, - BATADV_NEIGH_DUP, - BATADV_PROTECTED, -}; - static struct batadv_neigh_node * batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface, const uint8_t *neigh_addr, -- cgit v0.10.2 From c00a072d3fec446d763e68b50f6f6cfd9e2957e2 Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Sun, 21 Jul 2013 23:03:15 +0200 Subject: batman-adv: Start new development cycle Signed-off-by: Simon Wunderlich Signed-off-by: Antonio Quartulli diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index a7bca78..5d00f23 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -26,7 +26,7 @@ #define BATADV_DRIVER_DEVICE "batman-adv" #ifndef BATADV_SOURCE_VERSION -#define BATADV_SOURCE_VERSION "2013.3.0" +#define BATADV_SOURCE_VERSION "2013.4.0" #endif /* B.A.T.M.A.N. parameters */ -- cgit v0.10.2 From c6eaa3f067d6f9fa55fd9645e93ed79411bebdaf Mon Sep 17 00:00:00 2001 From: Antonio Quartulli Date: Sat, 13 Jul 2013 00:06:00 +0200 Subject: batman-adv: send GW_DEL event when the gw client mode is deselected Whenever the GW client mode is deselected, a DEL event has to be sent in order to tell userspace that the current gateway has been lost. Send the uevent on state change only if a gateway was currently selected. Reported-by: Marek Lindner Signed-off-by: Antonio Quartulli Signed-off-by: Marek Lindner diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index 7614af3..1ce4b87 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@ -190,6 +190,33 @@ next: return curr_gw; } +/** + * batadv_gw_check_client_stop - check if client mode has been switched off + * @bat_priv: the bat priv with all the soft interface information + * + * This function assumes the caller has checked that the gw state *is actually + * changing*. This function is not supposed to be called when there is no state + * change.
+ */ +void batadv_gw_check_client_stop(struct batadv_priv *bat_priv) +{ + struct batadv_gw_node *curr_gw; + + if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT) + return; + + curr_gw = batadv_gw_get_selected_gw_node(bat_priv); + if (!curr_gw) + return; + + /* if batman-adv is switching the gw client mode off and a gateway was + * already selected, send a DEL uevent + */ + batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_DEL, NULL); + + batadv_gw_node_free_ref(curr_gw); +} + void batadv_gw_election(struct batadv_priv *bat_priv) { struct batadv_gw_node *curr_gw = NULL, *next_gw = NULL; diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h index 1037d75..ceef4eb 100644 --- a/net/batman-adv/gateway_client.h +++ b/net/batman-adv/gateway_client.h @@ -20,6 +20,7 @@ #ifndef _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ #define _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ +void batadv_gw_check_client_stop(struct batadv_priv *bat_priv); void batadv_gw_deselect(struct batadv_priv *bat_priv); void batadv_gw_election(struct batadv_priv *bat_priv); struct batadv_orig_node * diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c index 929e304..4114b96 100644 --- a/net/batman-adv/sysfs.c +++ b/net/batman-adv/sysfs.c @@ -385,6 +385,10 @@ static ssize_t batadv_store_gw_mode(struct kobject *kobj, curr_gw_mode_str, buff); batadv_gw_deselect(bat_priv); + /* always call batadv_gw_check_client_stop() before changing the gateway + * state + */ + batadv_gw_check_client_stop(bat_priv); atomic_set(&bat_priv->gw_mode, (unsigned int)gw_mode_tmp); return count; } -- cgit v0.10.2 From 53b2f828487ea7a3f6fa4007b629466fb0d14339 Mon Sep 17 00:00:00 2001 From: "John W. Linville" Date: Wed, 28 Aug 2013 11:05:36 -0400 Subject: ath9k: ar9003_eeprom.c:3618 fix variable name typo drivers/net/wireless/ath/ath9k/ar9003_eeprom.c: In function 'ar9003_hw_ant_ctrl_apply': >> drivers/net/wireless/ath/ath9k/ar9003_eeprom.c:3618: warning: 'regval' is used uninitialized in this function It seems obvious that 'regval' should have been 'value'... Reported-by: Fengguang Wu Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index a6846ab..f486480 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -3615,8 +3615,8 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz) value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz); if (AR_SREV_9485(ah) && common->bt_ant_diversity) { - regval &= ~AR_SWITCH_TABLE_COM2_ALL; - regval |= ah->config.ant_ctrl_comm2g_switch_enable; + value &= ~AR_SWITCH_TABLE_COM2_ALL; + value |= ah->config.ant_ctrl_comm2g_switch_enable; } REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2, AR_SWITCH_TABLE_COM2_ALL, value); -- cgit v0.10.2 From a16b635f24a72b798da41fa0814d59018a89c860 Mon Sep 17 00:00:00 2001 From: Bing Zhao Date: Mon, 26 Aug 2013 14:57:34 -0700 Subject: mwifiex: break a long line into two lines It exceeded 80 characters. Split it into two lines. Signed-off-by: Bing Zhao Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c index f69d7e0..21c6882 100644 --- a/drivers/net/wireless/mwifiex/11n_aggr.c +++ b/drivers/net/wireless/mwifiex/11n_aggr.c @@ -69,7 +69,8 @@ mwifiex_11n_form_amsdu_pkt(struct sk_buff *skb_aggr, memcpy(&tx_header->eth803_hdr, skb_src->data, dt_offset); /* Copy SNAP header */ - snap.snap_type = le16_to_cpu(*(__le16 *) ((u8 *)skb_src->data + dt_offset)); + snap.snap_type = + le16_to_cpu(*(__le16 *) ((u8 *)skb_src->data + dt_offset)); dt_offset += sizeof(u16); memcpy(&tx_header->rfc1042_hdr, &snap, sizeof(struct rfc_1042_hdr)); -- cgit v0.10.2 From 65c1a4de59b0d417d68c04d5ee033058a9e7a83a Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Tue, 27 Aug 2013 11:34:26 +0530 Subject: ath9k: Remove unused ANI commands Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c index 4994bea..be466b0 100644 --- a/drivers/net/wireless/ath/ath9k/ani.c +++ b/drivers/net/wireless/ath/ath9k/ani.c @@ -319,9 +319,6 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning) ah->ani_function = 0; } - /* always allow mode (on/off) to be controlled */ - ah->ani_function |= ATH9K_ANI_MODE; - ofdm_nil = max_t(int, ATH9K_ANI_OFDM_DEF_LEVEL, aniState->ofdmNoiseImmunityLevel); cck_nil = max_t(int, ATH9K_ANI_CCK_DEF_LEVEL, diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h index b54a3fb..21e7b83 100644 --- a/drivers/net/wireless/ath/ath9k/ani.h +++ b/drivers/net/wireless/ath/ath9k/ani.h @@ -48,15 +48,10 @@ /* values here are relative to the INI */ enum ath9k_ani_cmd { - ATH9K_ANI_PRESENT = 0x1, - ATH9K_ANI_NOISE_IMMUNITY_LEVEL = 0x2, - ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION = 0x4, - ATH9K_ANI_CCK_WEAK_SIGNAL_THR = 0x8, - ATH9K_ANI_FIRSTEP_LEVEL = 0x10, - ATH9K_ANI_SPUR_IMMUNITY_LEVEL = 0x20, - ATH9K_ANI_MODE = 0x40, - ATH9K_ANI_PHYERR_RESET = 0x80, - ATH9K_ANI_MRC_CCK = 0x100, + ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION = 0x1, + ATH9K_ANI_FIRSTEP_LEVEL = 0x2, + ATH9K_ANI_SPUR_IMMUNITY_LEVEL = 0x4, + ATH9K_ANI_MRC_CCK = 0x8, ATH9K_ANI_ALL = 0xfff }; diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c index 1576d58..0865647 100644 --- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c @@ -1160,8 +1160,6 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah, */ WARN_ON(1); break; - case ATH9K_ANI_PRESENT: - break; default: ath_dbg(common, ANI, "invalid cmd %u\n", cmd); return false; diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 46b910a..e897648 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -1193,8 +1193,6 @@ skip_ws_det: } break; } - case ATH9K_ANI_PRESENT: - break; default: ath_dbg(common, ANI, "invalid cmd %u\n", cmd); return false; diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 2670bf6..ec47c50 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -655,8 +655,6 @@ static int __ath9k_hw_init(struct ath_hw *ah) ath9k_hw_init_cal_settings(ah); ah->ani_function = ATH9K_ANI_ALL; - if (AR_SREV_9280_20_OR_LATER(ah) && !AR_SREV_9300_20_OR_LATER(ah)) - ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL; if (!AR_SREV_9300_20_OR_LATER(ah)) ah->ani_function &= ~ATH9K_ANI_MRC_CCK; -- cgit 
v0.10.2 From 27251e0087598befb39599eb3dd2a3c59bce2fb9 Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Tue, 27 Aug 2013 11:34:39 +0530 Subject: ath9k: Enable D3/L1 ASPM fix for AR9462 AR9462 requires this HW fix for ASPM to work properly. Also, since WARegVal is used only for the AR8003 family, use AR_SREV_9300_20_OR_LATER. Signed-off-by: Sujith Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index ec47c50..ecc6ec4 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -574,18 +574,17 @@ static int __ath9k_hw_init(struct ath_hw *ah) * We need to do this to avoid RMW of this register. We cannot * read the reg when chip is asleep. */ - ah->WARegVal = REG_READ(ah, AR_WA); - ah->WARegVal |= (AR_WA_D3_L1_DISABLE | - AR_WA_ASPM_TIMER_BASED_DISABLE); + if (AR_SREV_9300_20_OR_LATER(ah)) { + ah->WARegVal = REG_READ(ah, AR_WA); + ah->WARegVal |= (AR_WA_D3_L1_DISABLE | + AR_WA_ASPM_TIMER_BASED_DISABLE); + } if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) { ath_err(common, "Couldn't reset chip\n"); return -EIO; } - if (AR_SREV_9462(ah)) - ah->WARegVal &= ~AR_WA_D3_L1_DISABLE; - if (AR_SREV_9565(ah)) { ah->WARegVal |= AR_WA_BIT22; REG_WRITE(ah, AR_WA, ah->WARegVal); -- cgit v0.10.2 From 708ff0835343c0696e69888943be05efd12fe7c6 Mon Sep 17 00:00:00 2001 From: Masami Ichikawa Date: Wed, 28 Aug 2013 00:37:23 +0900 Subject: rt2800usb: Add WLI-UC-G300HP's Product ID. Support Bufallo WLI-UC-G300HP. Signed-off-by: Masami Ichikawa Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index 338034e..96961b9 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -971,6 +971,7 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x0411, 0x016f) }, { USB_DEVICE(0x0411, 0x01a2) }, { USB_DEVICE(0x0411, 0x01ee) }, + { USB_DEVICE(0x0411, 0x01a8) }, /* Corega */ { USB_DEVICE(0x07aa, 0x002f) }, { USB_DEVICE(0x07aa, 0x003c) }, -- cgit v0.10.2 From 7f190230baac602f3f2be8e143c1c7c5aad15b1e Mon Sep 17 00:00:00 2001 From: Solomon Peachy Date: Tue, 27 Aug 2013 20:17:12 -0400 Subject: cw1200: Display the correct default reference clock. This is purely a cosmetic bug. Signed-off-by: Solomon Peachy Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/cw1200/main.c b/drivers/net/wireless/cw1200/main.c index 3724e73..090f015 100644 --- a/drivers/net/wireless/cw1200/main.c +++ b/drivers/net/wireless/cw1200/main.c @@ -507,7 +507,7 @@ u32 cw1200_dpll_from_clk(u16 clk_khz) case 0xCB20: /* 52000 KHz */ return 0x07627091; default: - pr_err("Unknown Refclk freq (0x%04x), using 2600KHz\n", + pr_err("Unknown Refclk freq (0x%04x), using 26000KHz\n", clk_khz); return 0x0EC4F121; } -- cgit v0.10.2 From 076f0d20b636ef0e701e21e701c0631b5757b732 Mon Sep 17 00:00:00 2001 From: Solomon Peachy Date: Tue, 27 Aug 2013 20:17:13 -0400 Subject: cw1200: When debug is enabled, display all wakeup conditions for the wait_event_interruptible_timeout() call. When trying to debug an interrupt delivery problem I noticed that not all of the wakeup conditions on the worker thread were included in the debug message. This patch rectifies that. Signed-off-by: Solomon Peachy Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/cw1200/bh.c b/drivers/net/wireless/cw1200/bh.c index c1ec2a4..92d299a 100644 --- a/drivers/net/wireless/cw1200/bh.c +++ b/drivers/net/wireless/cw1200/bh.c @@ -465,8 +465,8 @@ static int cw1200_bh(void *arg) (rx || tx || term || suspend || priv->bh_error); }), status); - pr_debug("[BH] - rx: %d, tx: %d, term: %d, suspend: %d, status: %ld\n", - rx, tx, term, suspend, status); + pr_debug("[BH] - rx: %d, tx: %d, term: %d, bh_err: %d, suspend: %d, status: %ld\n", + rx, tx, term, suspend, priv->bh_error, status); /* Did an error occur? */ if ((status < 0 && status != -ERESTARTSYS) || -- cgit v0.10.2 From 43e9d1943278e96150b449ea1fa81f4ae27729d5 Mon Sep 17 00:00:00 2001 From: Wei Liu Date: Mon, 26 Aug 2013 12:59:37 +0100 Subject: xen-netback: remove page tracking facility The data flow from DomU to DomU on the same host in current copying scheme with tracking facility: copy DomU --------> Dom0 DomU | ^ |____________________________| copy The page in Dom0 is a page with valid MFN. So we can always copy from page Dom0, thus removing the need for a tracking facility. copy copy DomU --------> Dom0 -------> DomU Simple iperf test shows no performance regression (obviously we copy twice either way): W/ tracking: ~5.3Gb/s W/o tracking: ~5.4Gb/s Signed-off-by: Wei Liu Acked-by: Ian Campbell Acked-by: Matt Wilson Signed-off-by: David S. Miller diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 64828de..91f163d 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -95,21 +95,6 @@ struct netbk_rx_meta { #define MAX_BUFFER_OFFSET PAGE_SIZE -/* extra field used in struct page */ -union page_ext { - struct { -#if BITS_PER_LONG < 64 -#define IDX_WIDTH 8 -#define GROUP_WIDTH (BITS_PER_LONG - IDX_WIDTH) - unsigned int group:GROUP_WIDTH; - unsigned int idx:IDX_WIDTH; -#else - unsigned int group, idx; -#endif - } e; - void *mapping; -}; - struct xen_netbk { wait_queue_head_t wq; struct task_struct *task; @@ -214,45 +199,6 @@ static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk, return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx)); } -/* extra field used in struct page */ -static inline void set_page_ext(struct page *pg, struct xen_netbk *netbk, - unsigned int idx) -{ - unsigned int group = netbk - xen_netbk; - union page_ext ext = { .e = { .group = group + 1, .idx = idx } }; - - BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping)); - pg->mapping = ext.mapping; -} - -static int get_page_ext(struct page *pg, - unsigned int *pgroup, unsigned int *pidx) -{ - union page_ext ext = { .mapping = pg->mapping }; - struct xen_netbk *netbk; - unsigned int group, idx; - - group = ext.e.group - 1; - - if (group < 0 || group >= xen_netbk_group_nr) - return 0; - - netbk = &xen_netbk[group]; - - idx = ext.e.idx; - - if ((idx < 0) || (idx >= MAX_PENDING_REQS)) - return 0; - - if (netbk->mmap_pages[idx] != pg) - return 0; - - *pgroup = group; - *pidx = idx; - - return 1; -} - /* * This is the amount of packet we copy rather than map, so that the * guest can't fiddle with the contents of the headers while we do @@ -453,12 +399,6 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, { struct gnttab_copy *copy_gop; struct netbk_rx_meta *meta; - /* - * These variables are used iff get_page_ext returns true, - * in which case they are guaranteed to be initialized. 
- */ - unsigned int uninitialized_var(group), uninitialized_var(idx); - int foreign = get_page_ext(page, &group, &idx); unsigned long bytes; /* Data must not cross a page boundary. */ @@ -494,20 +434,9 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, copy_gop = npo->copy + npo->copy_prod++; copy_gop->flags = GNTCOPY_dest_gref; - if (foreign) { - struct xen_netbk *netbk = &xen_netbk[group]; - struct pending_tx_info *src_pend; + copy_gop->source.domid = DOMID_SELF; + copy_gop->source.u.gmfn = virt_to_mfn(page_address(page)); - src_pend = &netbk->pending_tx_info[idx]; - - copy_gop->source.domid = src_pend->vif->domid; - copy_gop->source.u.ref = src_pend->req.gref; - copy_gop->flags |= GNTCOPY_source_gref; - } else { - void *vaddr = page_address(page); - copy_gop->source.domid = DOMID_SELF; - copy_gop->source.u.gmfn = virt_to_mfn(vaddr); - } copy_gop->source.offset = offset; copy_gop->dest.domid = vif->domid; @@ -1047,7 +976,6 @@ static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk, page = alloc_page(GFP_KERNEL|__GFP_COLD); if (!page) return NULL; - set_page_ext(page, netbk, pending_idx); netbk->mmap_pages[pending_idx] = page; return page; } @@ -1155,7 +1083,6 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk, first->req.offset = 0; first->req.size = dst_offset; first->head = start_idx; - set_page_ext(page, netbk, head_idx); netbk->mmap_pages[head_idx] = page; frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx); } -- cgit v0.10.2 From b3f980bd827e6e81a050c518d60ed7811a83061d Mon Sep 17 00:00:00 2001 From: Wei Liu Date: Mon, 26 Aug 2013 12:59:38 +0100 Subject: xen-netback: switch to NAPI + kthread 1:1 model This patch implements 1:1 model netback. NAPI and kthread are utilized to do the weight-lifting job: - NAPI is used for guest side TX (host side RX) - kthread is used for guest side RX (host side TX) Xenvif and xen_netbk are made into one structure to reduce code size. This model provides better scheduling fairness among vifs. It is also prerequisite for implementing multiqueue for Xen netback. Signed-off-by: Wei Liu Acked-by: Ian Campbell Signed-off-by: David S. Miller diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 8a4d77e..9c1f158 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -45,31 +45,109 @@ #include #include -struct xen_netbk; +typedef unsigned int pending_ring_idx_t; +#define INVALID_PENDING_RING_IDX (~0U) + +/* For the head field in pending_tx_info: it is used to indicate + * whether this tx info is the head of one or more coalesced requests. + * + * When head != INVALID_PENDING_RING_IDX, it means the start of a new + * tx requests queue and the end of previous queue. + * + * An example sequence of head fields (I = INVALID_PENDING_RING_IDX): + * + * ...|0 I I I|5 I|9 I I I|... + * -->|<-INUSE---------------- + * + * After consuming the first slot(s) we have: + * + * ...|V V V V|5 I|9 I I I|... + * -----FREE->|<-INUSE-------- + * + * where V stands for "valid pending ring index". Any number other + * than INVALID_PENDING_RING_IDX is OK. These entries are considered + * free and can contain any number other than + * INVALID_PENDING_RING_IDX. In practice we use 0. + * + * The in use non-INVALID_PENDING_RING_IDX (say 0, 5 and 9 in the + * above example) number is the index into pending_tx_info and + * mmap_pages arrays. 
+ */ +struct pending_tx_info { + struct xen_netif_tx_request req; /* coalesced tx request */ + pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX + * if it is head of one or more tx + * reqs + */ +}; + +#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE) +#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE) + +struct xenvif_rx_meta { + int id; + int size; + int gso_size; +}; + +/* Discriminate from any valid pending_idx value. */ +#define INVALID_PENDING_IDX 0xFFFF + +#define MAX_BUFFER_OFFSET PAGE_SIZE + +#define MAX_PENDING_REQS 256 struct xenvif { /* Unique identifier for this interface. */ domid_t domid; unsigned int handle; - /* Reference to netback processing backend. */ - struct xen_netbk *netbk; + /* Use NAPI for guest TX */ + struct napi_struct napi; + /* When feature-split-event-channels = 0, tx_irq = rx_irq. */ + unsigned int tx_irq; + /* Only used when feature-split-event-channels = 1 */ + char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */ + struct xen_netif_tx_back_ring tx; + struct sk_buff_head tx_queue; + struct page *mmap_pages[MAX_PENDING_REQS]; + pending_ring_idx_t pending_prod; + pending_ring_idx_t pending_cons; + u16 pending_ring[MAX_PENDING_REQS]; + struct pending_tx_info pending_tx_info[MAX_PENDING_REQS]; + + /* Coalescing tx requests before copying makes number of grant + * copy ops greater or equal to number of slots required. In + * worst case a tx request consumes 2 gnttab_copy. + */ + struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS]; - u8 fe_dev_addr[6]; + /* Use kthread for guest RX */ + struct task_struct *task; + wait_queue_head_t wq; /* When feature-split-event-channels = 0, tx_irq = rx_irq. */ - unsigned int tx_irq; unsigned int rx_irq; /* Only used when feature-split-event-channels = 1 */ - char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */ char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */ + struct xen_netif_rx_back_ring rx; + struct sk_buff_head rx_queue; - /* List of frontends to notify after a batch of frames sent. */ - struct list_head notify_list; + /* Allow xenvif_start_xmit() to peek ahead in the rx request + * ring. This is a prediction of what rx_req_cons will be + * once all queued skbs are put on the ring. + */ + RING_IDX rx_req_cons_peek; + + /* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each + * head/fragment page uses 2 copy operations because it + * straddles two buffers in the frontend. + */ + struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE]; + struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE]; - /* The shared rings and indexes. */ - struct xen_netif_tx_back_ring tx; - struct xen_netif_rx_back_ring rx; + + u8 fe_dev_addr[6]; /* Frontend feature information. */ u8 can_sg:1; @@ -80,13 +158,6 @@ struct xenvif { /* Internal feature information. */ u8 can_queue:1; /* can queue packets for receiver? */ - /* - * Allow xenvif_start_xmit() to peek ahead in the rx request - * ring. This is a prediction of what rx_req_cons will be - * once all queued skbs are put on the ring. - */ - RING_IDX rx_req_cons_peek; - /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */ unsigned long credit_bytes; unsigned long credit_usec; @@ -97,11 +168,7 @@ struct xenvif { unsigned long rx_gso_checksum_fixup; /* Miscellaneous private stuff. 
*/ - struct list_head schedule_list; - atomic_t refcnt; struct net_device *dev; - - wait_queue_head_t waiting_to_free; }; static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif) @@ -109,9 +176,6 @@ static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif) return to_xenbus_device(vif->dev->dev.parent); } -#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE) -#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE) - struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, unsigned int handle); @@ -121,9 +185,6 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, unsigned int rx_evtchn); void xenvif_disconnect(struct xenvif *vif); -void xenvif_get(struct xenvif *vif); -void xenvif_put(struct xenvif *vif); - int xenvif_xenbus_init(void); void xenvif_xenbus_fini(void); @@ -139,18 +200,8 @@ int xen_netbk_map_frontend_rings(struct xenvif *vif, grant_ref_t tx_ring_ref, grant_ref_t rx_ring_ref); -/* (De)Register a xenvif with the netback backend. */ -void xen_netbk_add_xenvif(struct xenvif *vif); -void xen_netbk_remove_xenvif(struct xenvif *vif); - -/* (De)Schedule backend processing for a xenvif */ -void xen_netbk_schedule_xenvif(struct xenvif *vif); -void xen_netbk_deschedule_xenvif(struct xenvif *vif); - /* Check for SKBs from frontend and schedule backend processing */ void xen_netbk_check_rx_xenvif(struct xenvif *vif); -/* Receive an SKB from the frontend */ -void xenvif_receive_skb(struct xenvif *vif, struct sk_buff *skb); /* Queue an SKB for transmission to the frontend */ void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb); @@ -163,6 +214,11 @@ void xenvif_carrier_off(struct xenvif *vif); /* Returns number of ring slots required to send an skb to the frontend */ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb); +int xen_netbk_tx_action(struct xenvif *vif, int budget); +void xen_netbk_rx_action(struct xenvif *vif); + +int xen_netbk_kthread(void *data); + extern bool separate_tx_rx_irq; #endif /* __XEN_NETBACK__COMMON_H__ */ diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 087d2db..44d6b70 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -30,6 +30,7 @@ #include "common.h" +#include #include #include #include @@ -38,17 +39,7 @@ #include #define XENVIF_QUEUE_LENGTH 32 - -void xenvif_get(struct xenvif *vif) -{ - atomic_inc(&vif->refcnt); -} - -void xenvif_put(struct xenvif *vif) -{ - if (atomic_dec_and_test(&vif->refcnt)) - wake_up(&vif->waiting_to_free); -} +#define XENVIF_NAPI_WEIGHT 64 int xenvif_schedulable(struct xenvif *vif) { @@ -64,21 +55,55 @@ static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id) { struct xenvif *vif = dev_id; - if (vif->netbk == NULL) - return IRQ_HANDLED; - - xen_netbk_schedule_xenvif(vif); + if (RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) + napi_schedule(&vif->napi); return IRQ_HANDLED; } +static int xenvif_poll(struct napi_struct *napi, int budget) +{ + struct xenvif *vif = container_of(napi, struct xenvif, napi); + int work_done; + + work_done = xen_netbk_tx_action(vif, budget); + + if (work_done < budget) { + int more_to_do = 0; + unsigned long flags; + + /* It is necessary to disable IRQ before calling + * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might + * lose event from the frontend. 
+ * + * Consider: + * RING_HAS_UNCONSUMED_REQUESTS + * + * __napi_complete + * + * This handler is still in scheduled state so the + * event has no effect at all. After __napi_complete + * this handler is descheduled and cannot get + * scheduled again. We lose event in this case and the ring + * will be completely stalled. + */ + + local_irq_save(flags); + + RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do); + if (!more_to_do) + __napi_complete(napi); + + local_irq_restore(flags); + } + + return work_done; +} + static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id) { struct xenvif *vif = dev_id; - if (vif->netbk == NULL) - return IRQ_HANDLED; - if (xenvif_rx_schedulable(vif)) netif_wake_queue(vif->dev); @@ -99,7 +124,8 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) BUG_ON(skb->dev != dev); - if (vif->netbk == NULL) + /* Drop the packet if vif is not ready */ + if (vif->task == NULL) goto drop; /* Drop the packet if the target domain has no receive buffers. */ @@ -108,7 +134,6 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Reserve ring slots for the worst-case number of fragments. */ vif->rx_req_cons_peek += xen_netbk_count_skb_slots(vif, skb); - xenvif_get(vif); if (vif->can_queue && xen_netbk_must_stop_queue(vif)) netif_stop_queue(dev); @@ -123,11 +148,6 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } -void xenvif_receive_skb(struct xenvif *vif, struct sk_buff *skb) -{ - netif_rx_ni(skb); -} - void xenvif_notify_tx_completion(struct xenvif *vif) { if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif)) @@ -142,7 +162,7 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev) static void xenvif_up(struct xenvif *vif) { - xen_netbk_add_xenvif(vif); + napi_enable(&vif->napi); enable_irq(vif->tx_irq); if (vif->tx_irq != vif->rx_irq) enable_irq(vif->rx_irq); @@ -151,12 +171,11 @@ static void xenvif_up(struct xenvif *vif) static void xenvif_down(struct xenvif *vif) { + napi_disable(&vif->napi); disable_irq(vif->tx_irq); if (vif->tx_irq != vif->rx_irq) disable_irq(vif->rx_irq); del_timer_sync(&vif->credit_timeout); - xen_netbk_deschedule_xenvif(vif); - xen_netbk_remove_xenvif(vif); } static int xenvif_open(struct net_device *dev) @@ -272,11 +291,12 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, struct net_device *dev; struct xenvif *vif; char name[IFNAMSIZ] = {}; + int i; snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle); dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup); if (dev == NULL) { - pr_warn("Could not allocate netdev\n"); + pr_warn("Could not allocate netdev for %s\n", name); return ERR_PTR(-ENOMEM); } @@ -285,14 +305,9 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, vif = netdev_priv(dev); vif->domid = domid; vif->handle = handle; - vif->netbk = NULL; vif->can_sg = 1; vif->csum = 1; - atomic_set(&vif->refcnt, 1); - init_waitqueue_head(&vif->waiting_to_free); vif->dev = dev; - INIT_LIST_HEAD(&vif->schedule_list); - INIT_LIST_HEAD(&vif->notify_list); vif->credit_bytes = vif->remaining_credit = ~0UL; vif->credit_usec = 0UL; @@ -307,6 +322,16 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, dev->tx_queue_len = XENVIF_QUEUE_LENGTH; + skb_queue_head_init(&vif->rx_queue); + skb_queue_head_init(&vif->tx_queue); + + vif->pending_cons = 0; + vif->pending_prod = MAX_PENDING_REQS; + for (i = 0; i < MAX_PENDING_REQS; i++) + vif->pending_ring[i] = i; + for (i = 0; i < 
MAX_PENDING_REQS; i++) + vif->mmap_pages[i] = NULL; + /* * Initialise a dummy MAC address. We choose the numerically * largest non-broadcast address to prevent the address getting @@ -316,6 +341,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, memset(dev->dev_addr, 0xFF, ETH_ALEN); dev->dev_addr[0] &= ~0x01; + netif_napi_add(dev, &vif->napi, xenvif_poll, XENVIF_NAPI_WEIGHT); + netif_carrier_off(dev); err = register_netdev(dev); @@ -377,7 +404,14 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, disable_irq(vif->rx_irq); } - xenvif_get(vif); + init_waitqueue_head(&vif->wq); + vif->task = kthread_create(xen_netbk_kthread, + (void *)vif, vif->dev->name); + if (IS_ERR(vif->task)) { + pr_warn("Could not allocate kthread for %s\n", vif->dev->name); + err = PTR_ERR(vif->task); + goto err_rx_unbind; + } rtnl_lock(); if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN) @@ -388,7 +422,13 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, xenvif_up(vif); rtnl_unlock(); + wake_up_process(vif->task); + return 0; + +err_rx_unbind: + unbind_from_irqhandler(vif->rx_irq, vif); + vif->rx_irq = 0; err_tx_unbind: unbind_from_irqhandler(vif->tx_irq, vif); vif->tx_irq = 0; @@ -408,7 +448,6 @@ void xenvif_carrier_off(struct xenvif *vif) if (netif_running(dev)) xenvif_down(vif); rtnl_unlock(); - xenvif_put(vif); } void xenvif_disconnect(struct xenvif *vif) @@ -422,9 +461,6 @@ void xenvif_disconnect(struct xenvif *vif) if (netif_carrier_ok(vif->dev)) xenvif_carrier_off(vif); - atomic_dec(&vif->refcnt); - wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0); - if (vif->tx_irq) { if (vif->tx_irq == vif->rx_irq) unbind_from_irqhandler(vif->tx_irq, vif); @@ -438,6 +474,11 @@ void xenvif_disconnect(struct xenvif *vif) need_module_put = 1; } + if (vif->task) + kthread_stop(vif->task); + + netif_napi_del(&vif->napi); + unregister_netdev(vif->dev); xen_netbk_unmap_frontend_rings(vif); diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 91f163d..44ccc67 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -70,116 +70,25 @@ module_param(fatal_skb_slots, uint, 0444); */ #define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN -typedef unsigned int pending_ring_idx_t; -#define INVALID_PENDING_RING_IDX (~0U) - -struct pending_tx_info { - struct xen_netif_tx_request req; /* coalesced tx request */ - struct xenvif *vif; - pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX - * if it is head of one or more tx - * reqs - */ -}; - -struct netbk_rx_meta { - int id; - int size; - int gso_size; -}; - -#define MAX_PENDING_REQS 256 - -/* Discriminate from any valid pending_idx value. */ -#define INVALID_PENDING_IDX 0xFFFF - -#define MAX_BUFFER_OFFSET PAGE_SIZE - -struct xen_netbk { - wait_queue_head_t wq; - struct task_struct *task; - - struct sk_buff_head rx_queue; - struct sk_buff_head tx_queue; - - struct timer_list net_timer; - - struct page *mmap_pages[MAX_PENDING_REQS]; - - pending_ring_idx_t pending_prod; - pending_ring_idx_t pending_cons; - struct list_head net_schedule_list; - - /* Protect the net_schedule_list in netif. */ - spinlock_t net_schedule_list_lock; - - atomic_t netfront_count; - - struct pending_tx_info pending_tx_info[MAX_PENDING_REQS]; - /* Coalescing tx requests before copying makes number of grant - * copy ops greater or equal to number of slots required. In - * worst case a tx request consumes 2 gnttab_copy. 
- */ - struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS]; - - u16 pending_ring[MAX_PENDING_REQS]; - - /* - * Given MAX_BUFFER_OFFSET of 4096 the worst case is that each - * head/fragment page uses 2 copy operations because it - * straddles two buffers in the frontend. - */ - struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE]; - struct netbk_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE]; -}; - -static struct xen_netbk *xen_netbk; -static int xen_netbk_group_nr; - /* * If head != INVALID_PENDING_RING_IDX, it means this tx request is head of * one or more merged tx requests, otherwise it is the continuation of * previous tx request. */ -static inline int pending_tx_is_head(struct xen_netbk *netbk, RING_IDX idx) -{ - return netbk->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX; -} - -void xen_netbk_add_xenvif(struct xenvif *vif) -{ - int i; - int min_netfront_count; - int min_group = 0; - struct xen_netbk *netbk; - - min_netfront_count = atomic_read(&xen_netbk[0].netfront_count); - for (i = 0; i < xen_netbk_group_nr; i++) { - int netfront_count = atomic_read(&xen_netbk[i].netfront_count); - if (netfront_count < min_netfront_count) { - min_group = i; - min_netfront_count = netfront_count; - } - } - - netbk = &xen_netbk[min_group]; - - vif->netbk = netbk; - atomic_inc(&netbk->netfront_count); -} - -void xen_netbk_remove_xenvif(struct xenvif *vif) +static inline int pending_tx_is_head(struct xenvif *vif, RING_IDX idx) { - struct xen_netbk *netbk = vif->netbk; - vif->netbk = NULL; - atomic_dec(&netbk->netfront_count); + return vif->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX; } -static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx, +static void xen_netbk_idx_release(struct xenvif *vif, u16 pending_idx, u8 status); static void make_tx_response(struct xenvif *vif, struct xen_netif_tx_request *txp, s8 st); + +static inline int tx_work_todo(struct xenvif *vif); +static inline int rx_work_todo(struct xenvif *vif); + static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, u16 id, s8 st, @@ -187,16 +96,16 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, u16 size, u16 flags); -static inline unsigned long idx_to_pfn(struct xen_netbk *netbk, +static inline unsigned long idx_to_pfn(struct xenvif *vif, u16 idx) { - return page_to_pfn(netbk->mmap_pages[idx]); + return page_to_pfn(vif->mmap_pages[idx]); } -static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk, +static inline unsigned long idx_to_kaddr(struct xenvif *vif, u16 idx) { - return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx)); + return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx)); } /* @@ -224,15 +133,10 @@ static inline pending_ring_idx_t pending_index(unsigned i) return i & (MAX_PENDING_REQS-1); } -static inline pending_ring_idx_t nr_pending_reqs(struct xen_netbk *netbk) +static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif) { return MAX_PENDING_REQS - - netbk->pending_prod + netbk->pending_cons; -} - -static void xen_netbk_kick_thread(struct xen_netbk *netbk) -{ - wake_up(&netbk->wq); + vif->pending_prod + vif->pending_cons; } static int max_required_rx_slots(struct xenvif *vif) @@ -364,15 +268,15 @@ struct netrx_pending_operations { unsigned copy_prod, copy_cons; unsigned meta_prod, meta_cons; struct gnttab_copy *copy; - struct netbk_rx_meta *meta; + struct xenvif_rx_meta *meta; int copy_off; grant_ref_t copy_gref; }; -static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif, - struct netrx_pending_operations *npo) 
+static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif, + struct netrx_pending_operations *npo) { - struct netbk_rx_meta *meta; + struct xenvif_rx_meta *meta; struct xen_netif_rx_request *req; req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); @@ -398,7 +302,7 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, unsigned long offset, int *head) { struct gnttab_copy *copy_gop; - struct netbk_rx_meta *meta; + struct xenvif_rx_meta *meta; unsigned long bytes; /* Data must not cross a page boundary. */ @@ -434,15 +338,15 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, copy_gop = npo->copy + npo->copy_prod++; copy_gop->flags = GNTCOPY_dest_gref; + copy_gop->len = bytes; + copy_gop->source.domid = DOMID_SELF; copy_gop->source.u.gmfn = virt_to_mfn(page_address(page)); - copy_gop->source.offset = offset; - copy_gop->dest.domid = vif->domid; + copy_gop->dest.domid = vif->domid; copy_gop->dest.offset = npo->copy_off; copy_gop->dest.u.ref = npo->copy_gref; - copy_gop->len = bytes; npo->copy_off += bytes; meta->size += bytes; @@ -485,7 +389,7 @@ static int netbk_gop_skb(struct sk_buff *skb, int nr_frags = skb_shinfo(skb)->nr_frags; int i; struct xen_netif_rx_request *req; - struct netbk_rx_meta *meta; + struct xenvif_rx_meta *meta; unsigned char *data; int head = 1; int old_meta_prod; @@ -565,7 +469,7 @@ static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots, } static void netbk_add_frag_responses(struct xenvif *vif, int status, - struct netbk_rx_meta *meta, + struct xenvif_rx_meta *meta, int nr_meta_slots) { int i; @@ -594,9 +498,13 @@ struct skb_cb_overlay { int meta_slots_used; }; -static void xen_netbk_rx_action(struct xen_netbk *netbk) +static void xen_netbk_kick_thread(struct xenvif *vif) +{ + wake_up(&vif->wq); +} + +void xen_netbk_rx_action(struct xenvif *vif) { - struct xenvif *vif = NULL, *tmp; s8 status; u16 flags; struct xen_netif_rx_response *resp; @@ -608,17 +516,18 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk) int count; unsigned long offset; struct skb_cb_overlay *sco; + int need_to_notify = 0; struct netrx_pending_operations npo = { - .copy = netbk->grant_copy_op, - .meta = netbk->meta, + .copy = vif->grant_copy_op, + .meta = vif->meta, }; skb_queue_head_init(&rxq); count = 0; - while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) { + while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) { vif = netdev_priv(skb->dev); nr_frags = skb_shinfo(skb)->nr_frags; @@ -635,27 +544,27 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk) break; } - BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta)); + BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta)); if (!npo.copy_prod) return; - BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op)); - gnttab_batch_copy(netbk->grant_copy_op, npo.copy_prod); + BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op)); + gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod); while ((skb = __skb_dequeue(&rxq)) != NULL) { sco = (struct skb_cb_overlay *)skb->cb; vif = netdev_priv(skb->dev); - if (netbk->meta[npo.meta_cons].gso_size && vif->gso_prefix) { + if (vif->meta[npo.meta_cons].gso_size && vif->gso_prefix) { resp = RING_GET_RESPONSE(&vif->rx, - vif->rx.rsp_prod_pvt++); + vif->rx.rsp_prod_pvt++); resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data; - resp->offset = netbk->meta[npo.meta_cons].gso_size; - resp->id = netbk->meta[npo.meta_cons].id; + resp->offset = vif->meta[npo.meta_cons].gso_size; + resp->id = vif->meta[npo.meta_cons].id; resp->status = 
sco->meta_slots_used; npo.meta_cons++; @@ -680,12 +589,12 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk) flags |= XEN_NETRXF_data_validated; offset = 0; - resp = make_rx_response(vif, netbk->meta[npo.meta_cons].id, + resp = make_rx_response(vif, vif->meta[npo.meta_cons].id, status, offset, - netbk->meta[npo.meta_cons].size, + vif->meta[npo.meta_cons].size, flags); - if (netbk->meta[npo.meta_cons].gso_size && !vif->gso_prefix) { + if (vif->meta[npo.meta_cons].gso_size && !vif->gso_prefix) { struct xen_netif_extra_info *gso = (struct xen_netif_extra_info *) RING_GET_RESPONSE(&vif->rx, @@ -693,7 +602,7 @@ resp->flags |= XEN_NETRXF_extra_info; - gso->u.gso.size = netbk->meta[npo.meta_cons].gso_size; + gso->u.gso.size = vif->meta[npo.meta_cons].gso_size; gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; gso->u.gso.pad = 0; gso->u.gso.features = 0; @@ -703,112 +612,33 @@ } netbk_add_frag_responses(vif, status, - netbk->meta + npo.meta_cons + 1, + vif->meta + npo.meta_cons + 1, sco->meta_slots_used); RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret); + if (ret) + need_to_notify = 1; + xenvif_notify_tx_completion(vif); - if (ret && list_empty(&vif->notify_list)) - list_add_tail(&vif->notify_list, &notify); - else - xenvif_put(vif); npo.meta_cons += sco->meta_slots_used; dev_kfree_skb(skb); } - list_for_each_entry_safe(vif, tmp, &notify, notify_list) { + if (need_to_notify) notify_remote_via_irq(vif->rx_irq); - list_del_init(&vif->notify_list); - xenvif_put(vif); - } /* More work to do? */ - if (!skb_queue_empty(&netbk->rx_queue) && - !timer_pending(&netbk->net_timer)) - xen_netbk_kick_thread(netbk); + if (!skb_queue_empty(&vif->rx_queue)) + xen_netbk_kick_thread(vif); } void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb) { - struct xen_netbk *netbk = vif->netbk; + skb_queue_tail(&vif->rx_queue, skb); - skb_queue_tail(&netbk->rx_queue, skb); - - xen_netbk_kick_thread(netbk); -} - -static void xen_netbk_alarm(unsigned long data) -{ - struct xen_netbk *netbk = (struct xen_netbk *)data; - xen_netbk_kick_thread(netbk); -} - -static int __on_net_schedule_list(struct xenvif *vif) -{ - return !list_empty(&vif->schedule_list); -} - -/* Must be called with net_schedule_list_lock held */ -static void remove_from_net_schedule_list(struct xenvif *vif) -{ - if (likely(__on_net_schedule_list(vif))) { - list_del_init(&vif->schedule_list); - xenvif_put(vif); - } -} - -static struct xenvif *poll_net_schedule_list(struct xen_netbk *netbk) -{ - struct xenvif *vif = NULL; - - spin_lock_irq(&netbk->net_schedule_list_lock); - if (list_empty(&netbk->net_schedule_list)) - goto out; - - vif = list_first_entry(&netbk->net_schedule_list, - struct xenvif, schedule_list); - if (!vif) - goto out; - - xenvif_get(vif); - - remove_from_net_schedule_list(vif); -out: - spin_unlock_irq(&netbk->net_schedule_list_lock); - return vif; -} - -void xen_netbk_schedule_xenvif(struct xenvif *vif) -{ - unsigned long flags; - struct xen_netbk *netbk = vif->netbk; - - if (__on_net_schedule_list(vif)) - goto kick; - - spin_lock_irqsave(&netbk->net_schedule_list_lock, flags); - if (!__on_net_schedule_list(vif) && - likely(xenvif_schedulable(vif))) { - list_add_tail(&vif->schedule_list, &netbk->net_schedule_list); - xenvif_get(vif); - } - spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags); - -kick: - smp_mb(); - if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) && - 
!list_empty(&netbk->net_schedule_list)) - xen_netbk_kick_thread(netbk); -} - -void xen_netbk_deschedule_xenvif(struct xenvif *vif) -{ - struct xen_netbk *netbk = vif->netbk; - spin_lock_irq(&netbk->net_schedule_list_lock); - remove_from_net_schedule_list(vif); - spin_unlock_irq(&netbk->net_schedule_list_lock); + xen_netbk_kick_thread(vif); } void xen_netbk_check_rx_xenvif(struct xenvif *vif) @@ -818,7 +648,7 @@ void xen_netbk_check_rx_xenvif(struct xenvif *vif) RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do); if (more_to_do) - xen_netbk_schedule_xenvif(vif); + napi_schedule(&vif->napi); } static void tx_add_credit(struct xenvif *vif) @@ -860,15 +690,12 @@ static void netbk_tx_err(struct xenvif *vif, txp = RING_GET_REQUEST(&vif->tx, cons++); } while (1); vif->tx.req_cons = cons; - xen_netbk_check_rx_xenvif(vif); - xenvif_put(vif); } static void netbk_fatal_tx_err(struct xenvif *vif) { netdev_err(vif->dev, "fatal error; disabling device\n"); xenvif_carrier_off(vif); - xenvif_put(vif); } static int netbk_count_requests(struct xenvif *vif, @@ -969,19 +796,20 @@ static int netbk_count_requests(struct xenvif *vif, return slots; } -static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk, +static struct page *xen_netbk_alloc_page(struct xenvif *vif, u16 pending_idx) { struct page *page; - page = alloc_page(GFP_KERNEL|__GFP_COLD); + + page = alloc_page(GFP_ATOMIC|__GFP_COLD); if (!page) return NULL; - netbk->mmap_pages[pending_idx] = page; + vif->mmap_pages[pending_idx] = page; + return page; } -static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk, - struct xenvif *vif, +static struct gnttab_copy *xen_netbk_get_requests(struct xenvif *vif, struct sk_buff *skb, struct xen_netif_tx_request *txp, struct gnttab_copy *gop) @@ -1012,9 +840,9 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk, for (shinfo->nr_frags = slot = start; slot < nr_slots; shinfo->nr_frags++) { struct pending_tx_info *pending_tx_info = - netbk->pending_tx_info; + vif->pending_tx_info; - page = alloc_page(GFP_KERNEL|__GFP_COLD); + page = alloc_page(GFP_ATOMIC|__GFP_COLD); if (!page) goto err; @@ -1049,21 +877,18 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk, gop->len = txp->size; dst_offset += gop->len; - index = pending_index(netbk->pending_cons++); + index = pending_index(vif->pending_cons++); - pending_idx = netbk->pending_ring[index]; + pending_idx = vif->pending_ring[index]; memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp)); - xenvif_get(vif); - - pending_tx_info[pending_idx].vif = vif; /* Poison these fields, corresponding * fields for head tx req will be set * to correct values after the loop. */ - netbk->mmap_pages[pending_idx] = (void *)(~0UL); + vif->mmap_pages[pending_idx] = (void *)(~0UL); pending_tx_info[pending_idx].head = INVALID_PENDING_RING_IDX; @@ -1083,7 +908,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk, first->req.offset = 0; first->req.size = dst_offset; first->head = start_idx; - netbk->mmap_pages[head_idx] = page; + vif->mmap_pages[head_idx] = page; frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx); } @@ -1093,18 +918,18 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk, err: /* Unwind, freeing all pages and sending error responses. */ while (shinfo->nr_frags-- > start) { - xen_netbk_idx_release(netbk, + xen_netbk_idx_release(vif, frag_get_pending_idx(&frags[shinfo->nr_frags]), XEN_NETIF_RSP_ERROR); } /* The head too, if necessary. 
*/ if (start) - xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); + xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR); return NULL; } -static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, +static int xen_netbk_tx_check_gop(struct xenvif *vif, struct sk_buff *skb, struct gnttab_copy **gopp) { @@ -1119,7 +944,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, /* Check status of header. */ err = gop->status; if (unlikely(err)) - xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); + xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR); /* Skip first skb fragment if it is on same page as header fragment. */ start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); @@ -1129,7 +954,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, pending_ring_idx_t head; pending_idx = frag_get_pending_idx(&shinfo->frags[i]); - tx_info = &netbk->pending_tx_info[pending_idx]; + tx_info = &vif->pending_tx_info[pending_idx]; head = tx_info->head; /* Check error status: if okay then remember grant handle. */ @@ -1137,18 +962,19 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, newerr = (++gop)->status; if (newerr) break; - peek = netbk->pending_ring[pending_index(++head)]; - } while (!pending_tx_is_head(netbk, peek)); + peek = vif->pending_ring[pending_index(++head)]; + } while (!pending_tx_is_head(vif, peek)); if (likely(!newerr)) { /* Had a previous error? Invalidate this fragment. */ if (unlikely(err)) - xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); + xen_netbk_idx_release(vif, pending_idx, + XEN_NETIF_RSP_OKAY); continue; } /* Error on this fragment: respond to client with an error. */ - xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); + xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR); /* Not the first error? Preceding frags already invalidated. */ if (err) @@ -1156,10 +982,11 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, /* First error: invalidate header and preceding fragments. */ pending_idx = *((u16 *)skb->data); - xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); + xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY); for (j = start; j < i; j++) { pending_idx = frag_get_pending_idx(&shinfo->frags[j]); - xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); + xen_netbk_idx_release(vif, pending_idx, + XEN_NETIF_RSP_OKAY); } /* Remember the error: invalidate all subsequent fragments. 
*/ @@ -1170,7 +997,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, return err; } -static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb) +static void xen_netbk_fill_frags(struct xenvif *vif, struct sk_buff *skb) { struct skb_shared_info *shinfo = skb_shinfo(skb); int nr_frags = shinfo->nr_frags; @@ -1184,16 +1011,16 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb) pending_idx = frag_get_pending_idx(frag); - txp = &netbk->pending_tx_info[pending_idx].req; - page = virt_to_page(idx_to_kaddr(netbk, pending_idx)); + txp = &vif->pending_tx_info[pending_idx].req; + page = virt_to_page(idx_to_kaddr(vif, pending_idx)); __skb_fill_page_desc(skb, i, page, txp->offset, txp->size); skb->len += txp->size; skb->data_len += txp->size; skb->truesize += txp->size; /* Take an extra reference to offset xen_netbk_idx_release */ - get_page(netbk->mmap_pages[pending_idx]); - xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); + get_page(vif->mmap_pages[pending_idx]); + xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY); } } @@ -1353,16 +1180,14 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size) return false; } -static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) +static unsigned xen_netbk_tx_build_gops(struct xenvif *vif) { - struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop; + struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop; struct sk_buff *skb; int ret; - while ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX - < MAX_PENDING_REQS) && - !list_empty(&netbk->net_schedule_list)) { - struct xenvif *vif; + while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX + < MAX_PENDING_REQS)) { struct xen_netif_tx_request txreq; struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX]; struct page *page; @@ -1373,16 +1198,6 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) unsigned int data_len; pending_ring_idx_t index; - /* Get a netif from the list with work to do. */ - vif = poll_net_schedule_list(netbk); - /* This can sometimes happen because the test of - * list_empty(net_schedule_list) at the top of the - * loop is unlocked. Just go back and have another - * look. - */ - if (!vif) - continue; - if (vif->tx.sring->req_prod - vif->tx.req_cons > XEN_NETIF_TX_RING_SIZE) { netdev_err(vif->dev, @@ -1395,10 +1210,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) } RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do); - if (!work_to_do) { - xenvif_put(vif); - continue; - } + if (!work_to_do) + break; idx = vif->tx.req_cons; rmb(); /* Ensure that we see the request before we copy it. */ @@ -1406,10 +1219,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) /* Credit-based scheduling. 
*/ if (txreq.size > vif->remaining_credit && - tx_credit_exceeded(vif, txreq.size)) { - xenvif_put(vif); - continue; - } + tx_credit_exceeded(vif, txreq.size)) + break; vif->remaining_credit -= txreq.size; @@ -1422,12 +1233,12 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) work_to_do); idx = vif->tx.req_cons; if (unlikely(work_to_do < 0)) - continue; + break; } ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do); if (unlikely(ret < 0)) - continue; + break; idx += ret; @@ -1435,7 +1246,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) netdev_dbg(vif->dev, "Bad packet size: %d\n", txreq.size); netbk_tx_err(vif, &txreq, idx); - continue; + break; } /* No crossing a page as the payload mustn't fragment. */ @@ -1445,11 +1256,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) txreq.offset, txreq.size, (txreq.offset&~PAGE_MASK) + txreq.size); netbk_fatal_tx_err(vif); - continue; + break; } - index = pending_index(netbk->pending_cons); - pending_idx = netbk->pending_ring[index]; + index = pending_index(vif->pending_cons); + pending_idx = vif->pending_ring[index]; data_len = (txreq.size > PKT_PROT_LEN && ret < XEN_NETBK_LEGACY_SLOTS_MAX) ? @@ -1474,16 +1285,16 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) if (netbk_set_skb_gso(vif, skb, gso)) { /* Failure in netbk_set_skb_gso is fatal. */ kfree_skb(skb); - continue; + break; } } /* XXX could copy straight to head */ - page = xen_netbk_alloc_page(netbk, pending_idx); + page = xen_netbk_alloc_page(vif, pending_idx); if (!page) { kfree_skb(skb); netbk_tx_err(vif, &txreq, idx); - continue; + break; } gop->source.u.ref = txreq.gref; @@ -1499,10 +1310,9 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) gop++; - memcpy(&netbk->pending_tx_info[pending_idx].req, + memcpy(&vif->pending_tx_info[pending_idx].req, &txreq, sizeof(txreq)); - netbk->pending_tx_info[pending_idx].vif = vif; - netbk->pending_tx_info[pending_idx].head = index; + vif->pending_tx_info[pending_idx].head = index; *((u16 *)skb->data) = pending_idx; __skb_put(skb, data_len); @@ -1517,46 +1327,45 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) INVALID_PENDING_IDX); } - netbk->pending_cons++; + vif->pending_cons++; - request_gop = xen_netbk_get_requests(netbk, vif, - skb, txfrags, gop); + request_gop = xen_netbk_get_requests(vif, skb, txfrags, gop); if (request_gop == NULL) { kfree_skb(skb); netbk_tx_err(vif, &txreq, idx); - continue; + break; } gop = request_gop; - __skb_queue_tail(&netbk->tx_queue, skb); + __skb_queue_tail(&vif->tx_queue, skb); vif->tx.req_cons = idx; - xen_netbk_check_rx_xenvif(vif); - if ((gop-netbk->tx_copy_ops) >= ARRAY_SIZE(netbk->tx_copy_ops)) + if ((gop-vif->tx_copy_ops) >= ARRAY_SIZE(vif->tx_copy_ops)) break; } - return gop - netbk->tx_copy_ops; + return gop - vif->tx_copy_ops; } -static void xen_netbk_tx_submit(struct xen_netbk *netbk) + +static int xen_netbk_tx_submit(struct xenvif *vif, int budget) { - struct gnttab_copy *gop = netbk->tx_copy_ops; + struct gnttab_copy *gop = vif->tx_copy_ops; struct sk_buff *skb; + int work_done = 0; - while ((skb = __skb_dequeue(&netbk->tx_queue)) != NULL) { + while (work_done < budget && + (skb = __skb_dequeue(&vif->tx_queue)) != NULL) { struct xen_netif_tx_request *txp; - struct xenvif *vif; u16 pending_idx; unsigned data_len; pending_idx = *((u16 *)skb->data); - vif = netbk->pending_tx_info[pending_idx].vif; - txp = &netbk->pending_tx_info[pending_idx].req; + txp = 
&vif->pending_tx_info[pending_idx].req; /* Check the remap error code. */ - if (unlikely(xen_netbk_tx_check_gop(netbk, skb, &gop))) { + if (unlikely(xen_netbk_tx_check_gop(vif, skb, &gop))) { netdev_dbg(vif->dev, "netback grant failed.\n"); skb_shinfo(skb)->nr_frags = 0; kfree_skb(skb); @@ -1565,7 +1374,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk) data_len = skb->len; memcpy(skb->data, - (void *)(idx_to_kaddr(netbk, pending_idx)|txp->offset), + (void *)(idx_to_kaddr(vif, pending_idx)|txp->offset), data_len); if (data_len < txp->size) { /* Append the packet payload as a fragment. */ @@ -1573,7 +1382,8 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk) txp->size -= data_len; } else { /* Schedule a response immediately. */ - xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); + xen_netbk_idx_release(vif, pending_idx, + XEN_NETIF_RSP_OKAY); } if (txp->flags & XEN_NETTXF_csum_blank) @@ -1581,7 +1391,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk) else if (txp->flags & XEN_NETTXF_data_validated) skb->ip_summed = CHECKSUM_UNNECESSARY; - xen_netbk_fill_frags(netbk, skb); + xen_netbk_fill_frags(vif, skb); /* * If the initial fragment was < PKT_PROT_LEN then @@ -1609,53 +1419,61 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk) vif->dev->stats.rx_bytes += skb->len; vif->dev->stats.rx_packets++; - xenvif_receive_skb(vif, skb); + work_done++; + + netif_receive_skb(skb); } + + return work_done; } /* Called after netfront has transmitted */ -static void xen_netbk_tx_action(struct xen_netbk *netbk) +int xen_netbk_tx_action(struct xenvif *vif, int budget) { unsigned nr_gops; + int work_done; - nr_gops = xen_netbk_tx_build_gops(netbk); + if (unlikely(!tx_work_todo(vif))) + return 0; + + nr_gops = xen_netbk_tx_build_gops(vif); if (nr_gops == 0) - return; + return 0; + + gnttab_batch_copy(vif->tx_copy_ops, nr_gops); - gnttab_batch_copy(netbk->tx_copy_ops, nr_gops); + work_done = xen_netbk_tx_submit(vif, nr_gops); - xen_netbk_tx_submit(netbk); + return work_done; } -static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx, +static void xen_netbk_idx_release(struct xenvif *vif, u16 pending_idx, u8 status) { - struct xenvif *vif; struct pending_tx_info *pending_tx_info; pending_ring_idx_t head; u16 peek; /* peek into next tx request */ - BUG_ON(netbk->mmap_pages[pending_idx] == (void *)(~0UL)); + BUG_ON(vif->mmap_pages[pending_idx] == (void *)(~0UL)); /* Already complete? 
*/ - if (netbk->mmap_pages[pending_idx] == NULL) + if (vif->mmap_pages[pending_idx] == NULL) return; - pending_tx_info = &netbk->pending_tx_info[pending_idx]; + pending_tx_info = &vif->pending_tx_info[pending_idx]; - vif = pending_tx_info->vif; head = pending_tx_info->head; - BUG_ON(!pending_tx_is_head(netbk, head)); - BUG_ON(netbk->pending_ring[pending_index(head)] != pending_idx); + BUG_ON(!pending_tx_is_head(vif, head)); + BUG_ON(vif->pending_ring[pending_index(head)] != pending_idx); do { pending_ring_idx_t index; pending_ring_idx_t idx = pending_index(head); - u16 info_idx = netbk->pending_ring[idx]; + u16 info_idx = vif->pending_ring[idx]; - pending_tx_info = &netbk->pending_tx_info[info_idx]; + pending_tx_info = &vif->pending_tx_info[info_idx]; make_tx_response(vif, &pending_tx_info->req, status); /* Setting any number other than @@ -1664,18 +1482,15 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx, */ pending_tx_info->head = 0; - index = pending_index(netbk->pending_prod++); - netbk->pending_ring[index] = netbk->pending_ring[info_idx]; + index = pending_index(vif->pending_prod++); + vif->pending_ring[index] = vif->pending_ring[info_idx]; - xenvif_put(vif); + peek = vif->pending_ring[pending_index(++head)]; - peek = netbk->pending_ring[pending_index(++head)]; + } while (!pending_tx_is_head(vif, peek)); - } while (!pending_tx_is_head(netbk, peek)); - - netbk->mmap_pages[pending_idx]->mapping = 0; - put_page(netbk->mmap_pages[pending_idx]); - netbk->mmap_pages[pending_idx] = NULL; + put_page(vif->mmap_pages[pending_idx]); + vif->mmap_pages[pending_idx] = NULL; } @@ -1723,45 +1538,22 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, return resp; } -static inline int rx_work_todo(struct xen_netbk *netbk) +static inline int rx_work_todo(struct xenvif *vif) { - return !skb_queue_empty(&netbk->rx_queue); + return !skb_queue_empty(&vif->rx_queue); } -static inline int tx_work_todo(struct xen_netbk *netbk) +static inline int tx_work_todo(struct xenvif *vif) { - if ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX - < MAX_PENDING_REQS) && - !list_empty(&netbk->net_schedule_list)) + if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) && + (nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX + < MAX_PENDING_REQS)) return 1; return 0; } -static int xen_netbk_kthread(void *data) -{ - struct xen_netbk *netbk = data; - while (!kthread_should_stop()) { - wait_event_interruptible(netbk->wq, - rx_work_todo(netbk) || - tx_work_todo(netbk) || - kthread_should_stop()); - cond_resched(); - - if (kthread_should_stop()) - break; - - if (rx_work_todo(netbk)) - xen_netbk_rx_action(netbk); - - if (tx_work_todo(netbk)) - xen_netbk_tx_action(netbk); - } - - return 0; -} - void xen_netbk_unmap_frontend_rings(struct xenvif *vif) { if (vif->tx.sring) @@ -1807,11 +1599,29 @@ err: return err; } +int xen_netbk_kthread(void *data) +{ + struct xenvif *vif = data; + + while (!kthread_should_stop()) { + wait_event_interruptible(vif->wq, + rx_work_todo(vif) || + kthread_should_stop()); + if (kthread_should_stop()) + break; + + if (rx_work_todo(vif)) + xen_netbk_rx_action(vif); + + cond_resched(); + } + + return 0; +} + static int __init netback_init(void) { - int i; int rc = 0; - int group; if (!xen_domain()) return -ENODEV; @@ -1822,48 +1632,6 @@ static int __init netback_init(void) fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX; } - xen_netbk_group_nr = num_online_cpus(); - xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr); - if (!xen_netbk) - return 
-ENOMEM; - - for (group = 0; group < xen_netbk_group_nr; group++) { - struct xen_netbk *netbk = &xen_netbk[group]; - skb_queue_head_init(&netbk->rx_queue); - skb_queue_head_init(&netbk->tx_queue); - - init_timer(&netbk->net_timer); - netbk->net_timer.data = (unsigned long)netbk; - netbk->net_timer.function = xen_netbk_alarm; - - netbk->pending_cons = 0; - netbk->pending_prod = MAX_PENDING_REQS; - for (i = 0; i < MAX_PENDING_REQS; i++) - netbk->pending_ring[i] = i; - - init_waitqueue_head(&netbk->wq); - netbk->task = kthread_create(xen_netbk_kthread, - (void *)netbk, - "netback/%u", group); - - if (IS_ERR(netbk->task)) { - pr_alert("kthread_create() fails at netback\n"); - del_timer(&netbk->net_timer); - rc = PTR_ERR(netbk->task); - goto failed_init; - } - - kthread_bind(netbk->task, group); - - INIT_LIST_HEAD(&netbk->net_schedule_list); - - spin_lock_init(&netbk->net_schedule_list_lock); - - atomic_set(&netbk->netfront_count, 0); - - wake_up_process(netbk->task); - } - rc = xenvif_xenbus_init(); if (rc) goto failed_init; @@ -1871,35 +1639,14 @@ static int __init netback_init(void) return 0; failed_init: - while (--group >= 0) { - struct xen_netbk *netbk = &xen_netbk[group]; - del_timer(&netbk->net_timer); - kthread_stop(netbk->task); - } - vfree(xen_netbk); return rc; - } module_init(netback_init); static void __exit netback_fini(void) { - int i, j; - xenvif_xenbus_fini(); - - for (i = 0; i < xen_netbk_group_nr; i++) { - struct xen_netbk *netbk = &xen_netbk[i]; - del_timer_sync(&netbk->net_timer); - kthread_stop(netbk->task); - for (j = 0; j < MAX_PENDING_REQS; j++) { - if (netbk->mmap_pages[j]) - __free_page(netbk->mmap_pages[j]); - } - } - - vfree(xen_netbk); } module_exit(netback_fini); -- cgit v0.10.2 From 7376419a4697657b2e0ab904a592aacc2e485bf1 Mon Sep 17 00:00:00 2001 From: Wei Liu Date: Mon, 26 Aug 2013 12:59:39 +0100 Subject: xen-netback: rename functions As we move to 1:1 model and melt xen_netbk and xenvif together, it would be better to use single prefix for all functions in xen-netback. Signed-off-by: Wei Liu Acked-by: Ian Campbell Signed-off-by: David S. Miller diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 9c1f158..a197743 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -190,21 +190,21 @@ void xenvif_xenbus_fini(void); int xenvif_schedulable(struct xenvif *vif); -int xen_netbk_rx_ring_full(struct xenvif *vif); +int xenvif_rx_ring_full(struct xenvif *vif); -int xen_netbk_must_stop_queue(struct xenvif *vif); +int xenvif_must_stop_queue(struct xenvif *vif); /* (Un)Map communication rings. 
*/ -void xen_netbk_unmap_frontend_rings(struct xenvif *vif); -int xen_netbk_map_frontend_rings(struct xenvif *vif, - grant_ref_t tx_ring_ref, - grant_ref_t rx_ring_ref); +void xenvif_unmap_frontend_rings(struct xenvif *vif); +int xenvif_map_frontend_rings(struct xenvif *vif, + grant_ref_t tx_ring_ref, + grant_ref_t rx_ring_ref); /* Check for SKBs from frontend and schedule backend processing */ -void xen_netbk_check_rx_xenvif(struct xenvif *vif); +void xenvif_check_rx_xenvif(struct xenvif *vif); /* Queue an SKB for transmission to the frontend */ -void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb); +void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb); /* Notify xenvif that ring now has space to send an skb to the frontend */ void xenvif_notify_tx_completion(struct xenvif *vif); @@ -212,12 +212,12 @@ void xenvif_notify_tx_completion(struct xenvif *vif); void xenvif_carrier_off(struct xenvif *vif); /* Returns number of ring slots required to send an skb to the frontend */ -unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb); +unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb); -int xen_netbk_tx_action(struct xenvif *vif, int budget); -void xen_netbk_rx_action(struct xenvif *vif); +int xenvif_tx_action(struct xenvif *vif, int budget); +void xenvif_rx_action(struct xenvif *vif); -int xen_netbk_kthread(void *data); +int xenvif_kthread(void *data); extern bool separate_tx_rx_irq; diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 44d6b70..625c6f4 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -48,7 +48,7 @@ int xenvif_schedulable(struct xenvif *vif) static int xenvif_rx_schedulable(struct xenvif *vif) { - return xenvif_schedulable(vif) && !xen_netbk_rx_ring_full(vif); + return xenvif_schedulable(vif) && !xenvif_rx_ring_full(vif); } static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id) @@ -66,7 +66,7 @@ static int xenvif_poll(struct napi_struct *napi, int budget) struct xenvif *vif = container_of(napi, struct xenvif, napi); int work_done; - work_done = xen_netbk_tx_action(vif, budget); + work_done = xenvif_tx_action(vif, budget); if (work_done < budget) { int more_to_do = 0; @@ -133,12 +133,12 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) goto drop; /* Reserve ring slots for the worst-case number of fragments. 
*/ - vif->rx_req_cons_peek += xen_netbk_count_skb_slots(vif, skb); + vif->rx_req_cons_peek += xenvif_count_skb_slots(vif, skb); - if (vif->can_queue && xen_netbk_must_stop_queue(vif)) + if (vif->can_queue && xenvif_must_stop_queue(vif)) netif_stop_queue(dev); - xen_netbk_queue_tx_skb(vif, skb); + xenvif_queue_tx_skb(vif, skb); return NETDEV_TX_OK; @@ -166,7 +166,7 @@ static void xenvif_up(struct xenvif *vif) enable_irq(vif->tx_irq); if (vif->tx_irq != vif->rx_irq) enable_irq(vif->rx_irq); - xen_netbk_check_rx_xenvif(vif); + xenvif_check_rx_xenvif(vif); } static void xenvif_down(struct xenvif *vif) @@ -368,7 +368,7 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, __module_get(THIS_MODULE); - err = xen_netbk_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref); + err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref); if (err < 0) goto err; @@ -405,7 +405,7 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, } init_waitqueue_head(&vif->wq); - vif->task = kthread_create(xen_netbk_kthread, + vif->task = kthread_create(xenvif_kthread, (void *)vif, vif->dev->name); if (IS_ERR(vif->task)) { pr_warn("Could not allocate kthread for %s\n", vif->dev->name); @@ -433,7 +433,7 @@ err_tx_unbind: unbind_from_irqhandler(vif->tx_irq, vif); vif->tx_irq = 0; err_unmap: - xen_netbk_unmap_frontend_rings(vif); + xenvif_unmap_frontend_rings(vif); err: module_put(THIS_MODULE); return err; @@ -481,7 +481,7 @@ void xenvif_disconnect(struct xenvif *vif) unregister_netdev(vif->dev); - xen_netbk_unmap_frontend_rings(vif); + xenvif_unmap_frontend_rings(vif); free_netdev(vif->dev); diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 44ccc67..956130c 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -80,8 +80,9 @@ static inline int pending_tx_is_head(struct xenvif *vif, RING_IDX idx) return vif->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX; } -static void xen_netbk_idx_release(struct xenvif *vif, u16 pending_idx, - u8 status); +static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx, + u8 status); + static void make_tx_response(struct xenvif *vif, struct xen_netif_tx_request *txp, s8 st); @@ -150,7 +151,7 @@ static int max_required_rx_slots(struct xenvif *vif) return max; } -int xen_netbk_rx_ring_full(struct xenvif *vif) +int xenvif_rx_ring_full(struct xenvif *vif) { RING_IDX peek = vif->rx_req_cons_peek; RING_IDX needed = max_required_rx_slots(vif); @@ -159,16 +160,16 @@ int xen_netbk_rx_ring_full(struct xenvif *vif) ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed); } -int xen_netbk_must_stop_queue(struct xenvif *vif) +int xenvif_must_stop_queue(struct xenvif *vif) { - if (!xen_netbk_rx_ring_full(vif)) + if (!xenvif_rx_ring_full(vif)) return 0; vif->rx.sring->req_event = vif->rx_req_cons_peek + max_required_rx_slots(vif); mb(); /* request notification /then/ check the queue */ - return xen_netbk_rx_ring_full(vif); + return xenvif_rx_ring_full(vif); } /* @@ -214,9 +215,9 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head) /* * Figure out how many ring slots we're going to need to send @skb to * the guest. This function is essentially a dry run of - * netbk_gop_frag_copy. + * xenvif_gop_frag_copy. 
*/ -unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb) +unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb) { unsigned int count; int i, copy_off; @@ -296,10 +297,10 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif, * Set up the grant operations for this fragment. If it's a flipping * interface, we also set up the unmap request from here. */ -static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, - struct netrx_pending_operations *npo, - struct page *page, unsigned long size, - unsigned long offset, int *head) +static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, + struct netrx_pending_operations *npo, + struct page *page, unsigned long size, + unsigned long offset, int *head) { struct gnttab_copy *copy_gop; struct xenvif_rx_meta *meta; @@ -382,8 +383,8 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, * zero GSO descriptors (for non-GSO packets) or one descriptor (for * frontend-side LRO). */ -static int netbk_gop_skb(struct sk_buff *skb, - struct netrx_pending_operations *npo) +static int xenvif_gop_skb(struct sk_buff *skb, + struct netrx_pending_operations *npo) { struct xenvif *vif = netdev_priv(skb->dev); int nr_frags = skb_shinfo(skb)->nr_frags; @@ -426,30 +427,30 @@ static int netbk_gop_skb(struct sk_buff *skb, if (data + len > skb_tail_pointer(skb)) len = skb_tail_pointer(skb) - data; - netbk_gop_frag_copy(vif, skb, npo, - virt_to_page(data), len, offset, &head); + xenvif_gop_frag_copy(vif, skb, npo, + virt_to_page(data), len, offset, &head); data += len; } for (i = 0; i < nr_frags; i++) { - netbk_gop_frag_copy(vif, skb, npo, - skb_frag_page(&skb_shinfo(skb)->frags[i]), - skb_frag_size(&skb_shinfo(skb)->frags[i]), - skb_shinfo(skb)->frags[i].page_offset, - &head); + xenvif_gop_frag_copy(vif, skb, npo, + skb_frag_page(&skb_shinfo(skb)->frags[i]), + skb_frag_size(&skb_shinfo(skb)->frags[i]), + skb_shinfo(skb)->frags[i].page_offset, + &head); } return npo->meta_prod - old_meta_prod; } /* - * This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was + * This is a twin to xenvif_gop_skb. Assume that xenvif_gop_skb was * used to set up the operations on the top of * netrx_pending_operations, which have since been done. Check that * they didn't give any errors and advance over them. 
*/ -static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots, - struct netrx_pending_operations *npo) +static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots, + struct netrx_pending_operations *npo) { struct gnttab_copy *copy_op; int status = XEN_NETIF_RSP_OKAY; @@ -468,9 +469,9 @@ static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots, return status; } -static void netbk_add_frag_responses(struct xenvif *vif, int status, - struct xenvif_rx_meta *meta, - int nr_meta_slots) +static void xenvif_add_frag_responses(struct xenvif *vif, int status, + struct xenvif_rx_meta *meta, + int nr_meta_slots) { int i; unsigned long offset; @@ -498,12 +499,12 @@ struct skb_cb_overlay { int meta_slots_used; }; -static void xen_netbk_kick_thread(struct xenvif *vif) +static void xenvif_kick_thread(struct xenvif *vif) { wake_up(&vif->wq); } -void xen_netbk_rx_action(struct xenvif *vif) +void xenvif_rx_action(struct xenvif *vif) { s8 status; u16 flags; @@ -532,7 +533,7 @@ void xen_netbk_rx_action(struct xenvif *vif) nr_frags = skb_shinfo(skb)->nr_frags; sco = (struct skb_cb_overlay *)skb->cb; - sco->meta_slots_used = netbk_gop_skb(skb, &npo); + sco->meta_slots_used = xenvif_gop_skb(skb, &npo); count += nr_frags + 1; @@ -575,7 +576,7 @@ void xen_netbk_rx_action(struct xenvif *vif) vif->dev->stats.tx_bytes += skb->len; vif->dev->stats.tx_packets++; - status = netbk_check_gop(vif, sco->meta_slots_used, &npo); + status = xenvif_check_gop(vif, sco->meta_slots_used, &npo); if (sco->meta_slots_used == 1) flags = 0; @@ -611,9 +612,9 @@ void xen_netbk_rx_action(struct xenvif *vif) gso->flags = 0; } - netbk_add_frag_responses(vif, status, - vif->meta + npo.meta_cons + 1, - sco->meta_slots_used); + xenvif_add_frag_responses(vif, status, + vif->meta + npo.meta_cons + 1, + sco->meta_slots_used); RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret); @@ -631,17 +632,17 @@ void xen_netbk_rx_action(struct xenvif *vif) /* More work to do? 
*/ if (!skb_queue_empty(&vif->rx_queue)) - xen_netbk_kick_thread(vif); + xenvif_kick_thread(vif); } -void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb) +void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb) { skb_queue_tail(&vif->rx_queue, skb); - xen_netbk_kick_thread(vif); + xenvif_kick_thread(vif); } -void xen_netbk_check_rx_xenvif(struct xenvif *vif) +void xenvif_check_rx_xenvif(struct xenvif *vif) { int more_to_do; @@ -675,11 +676,11 @@ static void tx_credit_callback(unsigned long data) { struct xenvif *vif = (struct xenvif *)data; tx_add_credit(vif); - xen_netbk_check_rx_xenvif(vif); + xenvif_check_rx_xenvif(vif); } -static void netbk_tx_err(struct xenvif *vif, - struct xen_netif_tx_request *txp, RING_IDX end) +static void xenvif_tx_err(struct xenvif *vif, + struct xen_netif_tx_request *txp, RING_IDX end) { RING_IDX cons = vif->tx.req_cons; @@ -692,16 +693,16 @@ static void netbk_tx_err(struct xenvif *vif, vif->tx.req_cons = cons; } -static void netbk_fatal_tx_err(struct xenvif *vif) +static void xenvif_fatal_tx_err(struct xenvif *vif) { netdev_err(vif->dev, "fatal error; disabling device\n"); xenvif_carrier_off(vif); } -static int netbk_count_requests(struct xenvif *vif, - struct xen_netif_tx_request *first, - struct xen_netif_tx_request *txp, - int work_to_do) +static int xenvif_count_requests(struct xenvif *vif, + struct xen_netif_tx_request *first, + struct xen_netif_tx_request *txp, + int work_to_do) { RING_IDX cons = vif->tx.req_cons; int slots = 0; @@ -718,7 +719,7 @@ static int netbk_count_requests(struct xenvif *vif, netdev_err(vif->dev, "Asked for %d slots but exceeds this limit\n", work_to_do); - netbk_fatal_tx_err(vif); + xenvif_fatal_tx_err(vif); return -ENODATA; } @@ -729,7 +730,7 @@ static int netbk_count_requests(struct xenvif *vif, netdev_err(vif->dev, "Malicious frontend using %d slots, threshold %u\n", slots, fatal_skb_slots); - netbk_fatal_tx_err(vif); + xenvif_fatal_tx_err(vif); return -E2BIG; } @@ -777,7 +778,7 @@ static int netbk_count_requests(struct xenvif *vif, if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) { netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n", txp->offset, txp->size); - netbk_fatal_tx_err(vif); + xenvif_fatal_tx_err(vif); return -EINVAL; } @@ -789,15 +790,15 @@ static int netbk_count_requests(struct xenvif *vif, } while (more_data); if (drop_err) { - netbk_tx_err(vif, first, cons + slots); + xenvif_tx_err(vif, first, cons + slots); return drop_err; } return slots; } -static struct page *xen_netbk_alloc_page(struct xenvif *vif, - u16 pending_idx) +static struct page *xenvif_alloc_page(struct xenvif *vif, + u16 pending_idx) { struct page *page; @@ -809,10 +810,10 @@ static struct page *xen_netbk_alloc_page(struct xenvif *vif, return page; } -static struct gnttab_copy *xen_netbk_get_requests(struct xenvif *vif, - struct sk_buff *skb, - struct xen_netif_tx_request *txp, - struct gnttab_copy *gop) +static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif, + struct sk_buff *skb, + struct xen_netif_tx_request *txp, + struct gnttab_copy *gop) { struct skb_shared_info *shinfo = skb_shinfo(skb); skb_frag_t *frags = shinfo->frags; @@ -835,7 +836,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xenvif *vif, /* Coalesce tx requests, at this point the packet passed in * should be <= 64K. Any packets larger than 64K have been - * handled in netbk_count_requests(). + * handled in xenvif_count_requests(). 
*/ for (shinfo->nr_frags = slot = start; slot < nr_slots; shinfo->nr_frags++) { @@ -918,20 +919,20 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xenvif *vif, err: /* Unwind, freeing all pages and sending error responses. */ while (shinfo->nr_frags-- > start) { - xen_netbk_idx_release(vif, + xenvif_idx_release(vif, frag_get_pending_idx(&frags[shinfo->nr_frags]), XEN_NETIF_RSP_ERROR); } /* The head too, if necessary. */ if (start) - xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR); + xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR); return NULL; } -static int xen_netbk_tx_check_gop(struct xenvif *vif, - struct sk_buff *skb, - struct gnttab_copy **gopp) +static int xenvif_tx_check_gop(struct xenvif *vif, + struct sk_buff *skb, + struct gnttab_copy **gopp) { struct gnttab_copy *gop = *gopp; u16 pending_idx = *((u16 *)skb->data); @@ -944,7 +945,7 @@ static int xen_netbk_tx_check_gop(struct xenvif *vif, /* Check status of header. */ err = gop->status; if (unlikely(err)) - xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR); + xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR); /* Skip first skb fragment if it is on same page as header fragment. */ start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); @@ -968,13 +969,13 @@ static int xen_netbk_tx_check_gop(struct xenvif *vif, if (likely(!newerr)) { /* Had a previous error? Invalidate this fragment. */ if (unlikely(err)) - xen_netbk_idx_release(vif, pending_idx, - XEN_NETIF_RSP_OKAY); + xenvif_idx_release(vif, pending_idx, + XEN_NETIF_RSP_OKAY); continue; } /* Error on this fragment: respond to client with an error. */ - xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR); + xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR); /* Not the first error? Preceding frags already invalidated. */ if (err) @@ -982,11 +983,11 @@ static int xen_netbk_tx_check_gop(struct xenvif *vif, /* First error: invalidate header and preceding fragments. */ pending_idx = *((u16 *)skb->data); - xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY); + xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY); for (j = start; j < i; j++) { pending_idx = frag_get_pending_idx(&shinfo->frags[j]); - xen_netbk_idx_release(vif, pending_idx, - XEN_NETIF_RSP_OKAY); + xenvif_idx_release(vif, pending_idx, + XEN_NETIF_RSP_OKAY); } /* Remember the error: invalidate all subsequent fragments. 
*/ @@ -997,7 +998,7 @@ static int xen_netbk_tx_check_gop(struct xenvif *vif, return err; } -static void xen_netbk_fill_frags(struct xenvif *vif, struct sk_buff *skb) +static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb) { struct skb_shared_info *shinfo = skb_shinfo(skb); int nr_frags = shinfo->nr_frags; @@ -1018,13 +1019,13 @@ static void xen_netbk_fill_frags(struct xenvif *vif, struct sk_buff *skb) skb->data_len += txp->size; skb->truesize += txp->size; - /* Take an extra reference to offset xen_netbk_idx_release */ + /* Take an extra reference to offset xenvif_idx_release */ get_page(vif->mmap_pages[pending_idx]); - xen_netbk_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY); + xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY); } } -static int xen_netbk_get_extras(struct xenvif *vif, +static int xenvif_get_extras(struct xenvif *vif, struct xen_netif_extra_info *extras, int work_to_do) { @@ -1034,7 +1035,7 @@ static int xen_netbk_get_extras(struct xenvif *vif, do { if (unlikely(work_to_do-- <= 0)) { netdev_err(vif->dev, "Missing extra info\n"); - netbk_fatal_tx_err(vif); + xenvif_fatal_tx_err(vif); return -EBADR; } @@ -1045,7 +1046,7 @@ static int xen_netbk_get_extras(struct xenvif *vif, vif->tx.req_cons = ++cons; netdev_err(vif->dev, "Invalid extra type: %d\n", extra.type); - netbk_fatal_tx_err(vif); + xenvif_fatal_tx_err(vif); return -EINVAL; } @@ -1056,20 +1057,20 @@ static int xen_netbk_get_extras(struct xenvif *vif, return work_to_do; } -static int netbk_set_skb_gso(struct xenvif *vif, - struct sk_buff *skb, - struct xen_netif_extra_info *gso) +static int xenvif_set_skb_gso(struct xenvif *vif, + struct sk_buff *skb, + struct xen_netif_extra_info *gso) { if (!gso->u.gso.size) { netdev_err(vif->dev, "GSO size must not be zero.\n"); - netbk_fatal_tx_err(vif); + xenvif_fatal_tx_err(vif); return -EINVAL; } /* Currently only TCPv4 S.O. is supported. 
*/ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type); - netbk_fatal_tx_err(vif); + xenvif_fatal_tx_err(vif); return -EINVAL; } @@ -1180,7 +1181,7 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size) return false; } -static unsigned xen_netbk_tx_build_gops(struct xenvif *vif) +static unsigned xenvif_tx_build_gops(struct xenvif *vif) { struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop; struct sk_buff *skb; @@ -1205,7 +1206,7 @@ static unsigned xen_netbk_tx_build_gops(struct xenvif *vif) "req_prod %d, req_cons %d, size %ld\n", vif->tx.sring->req_prod, vif->tx.req_cons, XEN_NETIF_TX_RING_SIZE); - netbk_fatal_tx_err(vif); + xenvif_fatal_tx_err(vif); continue; } @@ -1229,14 +1230,14 @@ static unsigned xen_netbk_tx_build_gops(struct xenvif *vif) memset(extras, 0, sizeof(extras)); if (txreq.flags & XEN_NETTXF_extra_info) { - work_to_do = xen_netbk_get_extras(vif, extras, - work_to_do); + work_to_do = xenvif_get_extras(vif, extras, + work_to_do); idx = vif->tx.req_cons; if (unlikely(work_to_do < 0)) break; } - ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do); + ret = xenvif_count_requests(vif, &txreq, txfrags, work_to_do); if (unlikely(ret < 0)) break; @@ -1245,7 +1246,7 @@ static unsigned xen_netbk_tx_build_gops(struct xenvif *vif) if (unlikely(txreq.size < ETH_HLEN)) { netdev_dbg(vif->dev, "Bad packet size: %d\n", txreq.size); - netbk_tx_err(vif, &txreq, idx); + xenvif_tx_err(vif, &txreq, idx); break; } @@ -1255,7 +1256,7 @@ static unsigned xen_netbk_tx_build_gops(struct xenvif *vif) "txreq.offset: %x, size: %u, end: %lu\n", txreq.offset, txreq.size, (txreq.offset&~PAGE_MASK) + txreq.size); - netbk_fatal_tx_err(vif); + xenvif_fatal_tx_err(vif); break; } @@ -1271,7 +1272,7 @@ static unsigned xen_netbk_tx_build_gops(struct xenvif *vif) if (unlikely(skb == NULL)) { netdev_dbg(vif->dev, "Can't allocate a skb in start_xmit.\n"); - netbk_tx_err(vif, &txreq, idx); + xenvif_tx_err(vif, &txreq, idx); break; } @@ -1282,18 +1283,18 @@ static unsigned xen_netbk_tx_build_gops(struct xenvif *vif) struct xen_netif_extra_info *gso; gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; - if (netbk_set_skb_gso(vif, skb, gso)) { - /* Failure in netbk_set_skb_gso is fatal. */ + if (xenvif_set_skb_gso(vif, skb, gso)) { + /* Failure in xenvif_set_skb_gso is fatal. */ kfree_skb(skb); break; } } /* XXX could copy straight to head */ - page = xen_netbk_alloc_page(vif, pending_idx); + page = xenvif_alloc_page(vif, pending_idx); if (!page) { kfree_skb(skb); - netbk_tx_err(vif, &txreq, idx); + xenvif_tx_err(vif, &txreq, idx); break; } @@ -1329,10 +1330,10 @@ static unsigned xen_netbk_tx_build_gops(struct xenvif *vif) vif->pending_cons++; - request_gop = xen_netbk_get_requests(vif, skb, txfrags, gop); + request_gop = xenvif_get_requests(vif, skb, txfrags, gop); if (request_gop == NULL) { kfree_skb(skb); - netbk_tx_err(vif, &txreq, idx); + xenvif_tx_err(vif, &txreq, idx); break; } gop = request_gop; @@ -1349,7 +1350,7 @@ static unsigned xen_netbk_tx_build_gops(struct xenvif *vif) } -static int xen_netbk_tx_submit(struct xenvif *vif, int budget) +static int xenvif_tx_submit(struct xenvif *vif, int budget) { struct gnttab_copy *gop = vif->tx_copy_ops; struct sk_buff *skb; @@ -1365,7 +1366,7 @@ static int xen_netbk_tx_submit(struct xenvif *vif, int budget) txp = &vif->pending_tx_info[pending_idx].req; /* Check the remap error code. 
*/ - if (unlikely(xen_netbk_tx_check_gop(vif, skb, &gop))) { + if (unlikely(xenvif_tx_check_gop(vif, skb, &gop))) { netdev_dbg(vif->dev, "netback grant failed.\n"); skb_shinfo(skb)->nr_frags = 0; kfree_skb(skb); @@ -1382,8 +1383,8 @@ static int xen_netbk_tx_submit(struct xenvif *vif, int budget) txp->size -= data_len; } else { /* Schedule a response immediately. */ - xen_netbk_idx_release(vif, pending_idx, - XEN_NETIF_RSP_OKAY); + xenvif_idx_release(vif, pending_idx, + XEN_NETIF_RSP_OKAY); } if (txp->flags & XEN_NETTXF_csum_blank) @@ -1391,7 +1392,7 @@ static int xen_netbk_tx_submit(struct xenvif *vif, int budget) else if (txp->flags & XEN_NETTXF_data_validated) skb->ip_summed = CHECKSUM_UNNECESSARY; - xen_netbk_fill_frags(vif, skb); + xenvif_fill_frags(vif, skb); /* * If the initial fragment was < PKT_PROT_LEN then @@ -1428,7 +1429,7 @@ static int xen_netbk_tx_submit(struct xenvif *vif, int budget) } /* Called after netfront has transmitted */ -int xen_netbk_tx_action(struct xenvif *vif, int budget) +int xenvif_tx_action(struct xenvif *vif, int budget) { unsigned nr_gops; int work_done; @@ -1436,20 +1437,20 @@ int xen_netbk_tx_action(struct xenvif *vif, int budget) if (unlikely(!tx_work_todo(vif))) return 0; - nr_gops = xen_netbk_tx_build_gops(vif); + nr_gops = xenvif_tx_build_gops(vif); if (nr_gops == 0) return 0; gnttab_batch_copy(vif->tx_copy_ops, nr_gops); - work_done = xen_netbk_tx_submit(vif, nr_gops); + work_done = xenvif_tx_submit(vif, nr_gops); return work_done; } -static void xen_netbk_idx_release(struct xenvif *vif, u16 pending_idx, - u8 status) +static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx, + u8 status) { struct pending_tx_info *pending_tx_info; pending_ring_idx_t head; @@ -1554,7 +1555,7 @@ static inline int tx_work_todo(struct xenvif *vif) return 0; } -void xen_netbk_unmap_frontend_rings(struct xenvif *vif) +void xenvif_unmap_frontend_rings(struct xenvif *vif) { if (vif->tx.sring) xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif), @@ -1564,9 +1565,9 @@ void xen_netbk_unmap_frontend_rings(struct xenvif *vif) vif->rx.sring); } -int xen_netbk_map_frontend_rings(struct xenvif *vif, - grant_ref_t tx_ring_ref, - grant_ref_t rx_ring_ref) +int xenvif_map_frontend_rings(struct xenvif *vif, + grant_ref_t tx_ring_ref, + grant_ref_t rx_ring_ref) { void *addr; struct xen_netif_tx_sring *txs; @@ -1595,11 +1596,11 @@ int xen_netbk_map_frontend_rings(struct xenvif *vif, return 0; err: - xen_netbk_unmap_frontend_rings(vif); + xenvif_unmap_frontend_rings(vif); return err; } -int xen_netbk_kthread(void *data) +int xenvif_kthread(void *data) { struct xenvif *vif = data; @@ -1611,7 +1612,7 @@ int xen_netbk_kthread(void *data) break; if (rx_work_todo(vif)) - xen_netbk_rx_action(vif); + xenvif_rx_action(vif); cond_resched(); } -- cgit v0.10.2 From 80b17be70b63646cb141bf1826afe32281e561ec Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 27 Aug 2013 04:16:22 +0300 Subject: qlcnic: underflow in qlcnic_validate_max_tx_rings() This function checks the upper bound but it doesn't check for negative numbers: if (txq > QLCNIC_MAX_TX_RINGS) { I've solved this by making "txq" a u32 type. I chose that because ->tx_count in the ethtool_channels struct is a __u32. This bug was added in aa4a1f7df7 ('qlcnic: Enable Tx queue changes using ethtool for 82xx Series adapter.'). Signed-off-by: Dan Carpenter Acked-by: Himanshu Madhani Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 156a78e..22bd425 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -1542,7 +1542,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test); netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev); int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, int); int qlcnic_validate_max_rss(struct qlcnic_adapter *, __u32); -int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *, int); +int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *, u32 txq); void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter); void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *); int qlcnic_enable_msix(struct qlcnic_adapter *, u32); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index df96f66..7dde3ba 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -3548,7 +3548,7 @@ qlcnicvf_start_firmware(struct qlcnic_adapter *adapter) return err; } -int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *adapter, int txq) +int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *adapter, u32 txq) { struct net_device *netdev = adapter->netdev; u8 max_hw = QLCNIC_MAX_TX_RINGS; -- cgit v0.10.2 From f4f1040ae63c7c40ef24c1892d32ceae46d9ea05 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Tue, 25 Jun 2013 07:59:23 +0000 Subject: ixgbe: disable link when adapter goes down This patch fixes an issue with the 82599 adapter where it can potentially keep link lights up when the adapter has gone down. The patch adds a function which ensures link is disabled, and calls this function when the adapter transitions to a down state. Signed-off-by: Jacob Keller Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 207f68f..51ee8ad 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -49,6 +49,7 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete); +static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw); static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, bool autoneg_wait_to_complete); static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, @@ -432,6 +433,24 @@ out: } /** + * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3 + * @hw: pointer to hardware structure + * + * Disables link, should be called during D3 power down sequence. 
+ * + */ +static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw) +{ + u32 autoc2_reg; + + if (!hw->mng_fw_enabled && !hw->wol_enabled) { + autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg); + } +} + +/** * ixgbe_start_mac_link_82599 - Setup MAC link settings * @hw: pointer to hardware structure * @autoneg_wait_to_complete: true when waiting for completion is needed @@ -2477,6 +2496,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = { .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, .read_analog_reg8 = &ixgbe_read_analog_reg8_82599, .write_analog_reg8 = &ixgbe_write_analog_reg8_82599, + .stop_link_on_d3 = &ixgbe_stop_mac_link_on_d3_82599, .setup_link = &ixgbe_setup_mac_link_82599, .set_rxpba = &ixgbe_set_rxpba_generic, .check_link = &ixgbe_check_mac_link_generic, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 128d6b8..cf1b41e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -5292,6 +5292,9 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) return retval; #endif + if (hw->mac.ops.stop_link_on_d3) + hw->mac.ops.stop_link_on_d3(hw); + if (wufc) { ixgbe_set_rx_mode(netdev); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 161ff18..6442cf8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -1596,6 +1596,7 @@ enum { #define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) #define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) #define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK 0x50000000 #define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000 #define IXGBE_MACC_FLU 0x00000001 @@ -2847,6 +2848,7 @@ struct ixgbe_mac_operations { void (*disable_tx_laser)(struct ixgbe_hw *); void (*enable_tx_laser)(struct ixgbe_hw *); void (*flap_tx_laser)(struct ixgbe_hw *); + void (*stop_link_on_d3)(struct ixgbe_hw *); s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, -- cgit v0.10.2 From be0c27b4edb9ee58c4e72ee7e460a0bed7ecef79 Mon Sep 17 00:00:00 2001 From: Mark Rustad Date: Fri, 24 May 2013 07:31:09 +0000 Subject: ixgbe: Check return value on eeprom reads This patch fixes the possible use of uninitialized memory by checking the return value on eeprom reads. These issues were identified by static analysis. In many cases error messages will be produced so that corrupted eeprom issues will be more visible. 
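Every call site in the diff below is converted with the same shape of fix: capture the return value of hw->eeprom.ops.read(), and on failure report the offending offset with hw_err() and bail out with an error code instead of consuming a possibly uninitialized buffer. A minimal sketch of that pattern, condensed from the hunks that follow (here "offset" and "data" stand in for whichever offset and word the caller reads; the exact error code and unwind path, such as releasing a held semaphore, differ per function):

	s32 ret_val;
	u16 data;

	ret_val = hw->eeprom.ops.read(hw, offset, &data);
	if (ret_val) {
		hw_err(hw, "eeprom read at offset %d failed\n", offset);
		return ret_val;	/* or goto a label that releases held resources */
	}
	/* only now is 'data' known to hold valid EEPROM contents */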
Signed-off-by: Mark Rustad Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 51ee8ad..e4f4f4b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -142,11 +142,13 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) goto setup_sfp_out; } - hw->eeprom.ops.read(hw, ++data_offset, &data_value); + if (hw->eeprom.ops.read(hw, ++data_offset, &data_value)) + goto setup_sfp_err; while (data_value != 0xffff) { IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value); IXGBE_WRITE_FLUSH(hw); - hw->eeprom.ops.read(hw, ++data_offset, &data_value); + if (hw->eeprom.ops.read(hw, ++data_offset, &data_value)) + goto setup_sfp_err; } /* Release the semaphore */ @@ -192,6 +194,17 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) setup_sfp_out: return ret_val; + +setup_sfp_err: + /* Release the semaphore */ + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); + /* Delay obtaining semaphore again to allow FW access, + * semaphore_delay is in ms usleep_range needs us. + */ + usleep_range(hw->eeprom.semaphore_delay * 1000, + hw->eeprom.semaphore_delay * 2000); + hw_err(hw, "eeprom read at offset %d failed\n", data_offset); + return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; } static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw) @@ -2180,6 +2193,7 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) { s32 status = IXGBE_ERR_EEPROM_VERSION; u16 fw_offset, fw_ptp_cfg_offset; + u16 offset; u16 fw_version = 0; /* firmware check is only necessary for SFI devices */ @@ -2189,29 +2203,35 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) } /* get the offset to the Firmware Module block */ - hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); + offset = IXGBE_FW_PTR; + if (hw->eeprom.ops.read(hw, offset, &fw_offset)) + goto fw_version_err; if ((fw_offset == 0) || (fw_offset == 0xFFFF)) goto fw_version_out; /* get the offset to the Pass Through Patch Configuration block */ - hw->eeprom.ops.read(hw, (fw_offset + - IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR), - &fw_ptp_cfg_offset); + offset = fw_offset + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR; + if (hw->eeprom.ops.read(hw, offset, &fw_ptp_cfg_offset)) + goto fw_version_err; if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF)) goto fw_version_out; /* get the firmware version */ - hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset + - IXGBE_FW_PATCH_VERSION_4), - &fw_version); + offset = fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4; + if (hw->eeprom.ops.read(hw, offset, &fw_version)) + goto fw_version_err; if (fw_version > 0x5) status = 0; fw_version_out: return status; + +fw_version_err: + hw_err(hw, "eeprom read at offset %d failed\n", offset); + return IXGBE_ERR_EEPROM_VERSION; } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 50e62a2..b5c434b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -2740,13 +2740,19 @@ out: static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, u16 *san_mac_offset) { + s32 ret_val; + /* * First read the EEPROM pointer to see if the MAC addresses are * available. 
*/ - hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset); + ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, + san_mac_offset); + if (ret_val) + hw_err(hw, "eeprom read at offset %d failed\n", + IXGBE_SAN_MAC_ADDR_PTR); - return 0; + return ret_val; } /** @@ -2763,23 +2769,16 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) { u16 san_mac_data, san_mac_offset; u8 i; + s32 ret_val; /* * First read the EEPROM pointer to see if the MAC addresses are * available. If they're not, no point in calling set_lan_id() here. */ - ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); + ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); + if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) - if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) { - /* - * No addresses available in this EEPROM. It's not an - * error though, so just wipe the local address and return. - */ - for (i = 0; i < 6; i++) - san_mac_addr[i] = 0xFF; - - goto san_mac_addr_out; - } + goto san_mac_addr_clr; /* make sure we know which port we need to program */ hw->mac.ops.set_lan_id(hw); @@ -2787,14 +2786,26 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); for (i = 0; i < 3; i++) { - hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data); + ret_val = hw->eeprom.ops.read(hw, san_mac_offset, + &san_mac_data); + if (ret_val) { + hw_err(hw, "eeprom read at offset %d failed\n", + san_mac_offset); + goto san_mac_addr_clr; + } san_mac_addr[i * 2] = (u8)(san_mac_data); san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); san_mac_offset++; } - -san_mac_addr_out: return 0; + +san_mac_addr_clr: + /* No addresses available in this EEPROM. It's not necessarily an + * error though, so just wipe the local address and return. 
+ */ + for (i = 0; i < 6; i++) + san_mac_addr[i] = 0xFF; + return ret_val; } /** @@ -3243,8 +3254,9 @@ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, *wwpn_prefix = 0xFFFF; /* check if alternative SAN MAC is supported */ - hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR, - &alt_san_mac_blk_offset); + offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR; + if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset)) + goto wwn_prefix_err; if ((alt_san_mac_blk_offset == 0) || (alt_san_mac_blk_offset == 0xFFFF)) @@ -3252,19 +3264,26 @@ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, /* check capability in alternative san mac address block */ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; - hw->eeprom.ops.read(hw, offset, &caps); + if (hw->eeprom.ops.read(hw, offset, &caps)) + goto wwn_prefix_err; if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) goto wwn_prefix_out; /* get the corresponding prefix for WWNN/WWPN */ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; - hw->eeprom.ops.read(hw, offset, wwnn_prefix); + if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) + hw_err(hw, "eeprom read at offset %d failed\n", offset); offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; - hw->eeprom.ops.read(hw, offset, wwpn_prefix); + if (hw->eeprom.ops.read(hw, offset, wwpn_prefix)) + goto wwn_prefix_err; wwn_prefix_out: return 0; + +wwn_prefix_err: + hw_err(hw, "eeprom read at offset %d failed\n", offset); + return 0; } /** @@ -3778,7 +3797,11 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw) u8 sensor_index; u8 sensor_location; - hw->eeprom.ops.read(hw, (ets_offset + 1 + i), &ets_sensor); + if (hw->eeprom.ops.read(hw, ets_offset + 1 + i, &ets_sensor)) { + hw_err(hw, "eeprom read at offset %d failed\n", + ets_offset + 1 + i); + continue; + } sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >> IXGBE_ETS_DATA_INDEX_SHIFT); sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >> diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index 1315b8a..d259dc7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -143,8 +143,12 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw); #define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS) +#define ixgbe_hw_to_netdev(hw) (((struct ixgbe_adapter *)(hw)->back)->netdev) + #define hw_dbg(hw, format, arg...) \ - netdev_dbg(((struct ixgbe_adapter *)(hw->back))->netdev, format, ##arg) + netdev_dbg(ixgbe_hw_to_netdev(hw), format, ## arg) +#define hw_err(hw, format, arg...) \ + netdev_err(ixgbe_hw_to_netdev(hw), format, ## arg) #define e_dev_info(format, arg...) \ dev_info(&adapter->pdev->dev, format, ## arg) #define e_dev_warn(format, arg...) 
\ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 369eef5..6a4a799 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -791,6 +791,8 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) * Read control word from PHY init contents offset */ ret_val = hw->eeprom.ops.read(hw, data_offset, &eword); + if (ret_val) + goto err_eeprom; control = (eword & IXGBE_CONTROL_MASK_NL) >> IXGBE_CONTROL_SHIFT_NL; edata = eword & IXGBE_DATA_MASK_NL; @@ -803,10 +805,15 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) case IXGBE_DATA_NL: hw_dbg(hw, "DATA:\n"); data_offset++; - hw->eeprom.ops.read(hw, data_offset++, - &phy_offset); + ret_val = hw->eeprom.ops.read(hw, data_offset++, + &phy_offset); + if (ret_val) + goto err_eeprom; for (i = 0; i < edata; i++) { - hw->eeprom.ops.read(hw, data_offset, &eword); + ret_val = hw->eeprom.ops.read(hw, data_offset, + &eword); + if (ret_val) + goto err_eeprom; hw->phy.ops.write_reg(hw, phy_offset, MDIO_MMD_PMAPMD, eword); hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword, @@ -838,6 +845,10 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) out: return ret_val; + +err_eeprom: + hw_err(hw, "eeprom read at offset %d failed\n", data_offset); + return IXGBE_ERR_PHY; } /** @@ -1339,7 +1350,11 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, sfp_type = ixgbe_sfp_type_srlr_core1; /* Read offset to PHY init contents */ - hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset); + if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) { + hw_err(hw, "eeprom read at %d failed\n", + IXGBE_PHY_INIT_OFFSET_NL); + return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT; + } if ((!*list_offset) || (*list_offset == 0xFFFF)) return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT; @@ -1351,12 +1366,14 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, * Find the matching SFP ID in the EEPROM * and program the init sequence */ - hw->eeprom.ops.read(hw, *list_offset, &sfp_id); + if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id)) + goto err_phy; while (sfp_id != IXGBE_PHY_INIT_END_NL) { if (sfp_id == sfp_type) { (*list_offset)++; - hw->eeprom.ops.read(hw, *list_offset, data_offset); + if (hw->eeprom.ops.read(hw, *list_offset, data_offset)) + goto err_phy; if ((!*data_offset) || (*data_offset == 0xFFFF)) { hw_dbg(hw, "SFP+ module not supported\n"); return IXGBE_ERR_SFP_NOT_SUPPORTED; @@ -1366,7 +1383,7 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, } else { (*list_offset) += 2; if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id)) - return IXGBE_ERR_PHY; + goto err_phy; } } @@ -1376,6 +1393,10 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, } return 0; + +err_phy: + hw_err(hw, "eeprom read at offset %d failed\n", *list_offset); + return IXGBE_ERR_PHY; } /** -- cgit v0.10.2 From bd8a1b12907e314c3630c18da2bc0a07f5ae72e7 Mon Sep 17 00:00:00 2001 From: Don Skidmore Date: Fri, 28 Jun 2013 05:35:50 +0000 Subject: ixgbe: fix incorrect limit value in ring traversal We were traversing the tx_ring with IXGBE_NUM_RX_QUEUES. Now this define happens to have the correct value but this is misleading and a change later could easily make this no longer true. I updated it to netdev->num_tx_queues like we use in ixgbe_get_strings(). 
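The functional change is a single loop bound in ixgbe_get_ethtool_stats(); in sketch form, condensed from the hunk below, the Tx ring walk switches from the coincidentally-correct define to the queue count the netdev actually exposes:

	/* before: only correct because the define happens to match */
	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++)
		ring = adapter->tx_ring[j];

	/* after: bounded by the real number of Tx queues */
	for (j = 0; j < netdev->num_tx_queues; j++)
		ring = adapter->tx_ring[j];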
Signed-off-by: Don Skidmore Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 50c1e9b..db0dbf6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1049,7 +1049,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } - for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) { + for (j = 0; j < netdev->num_tx_queues; j++) { ring = adapter->tx_ring[j]; if (!ring) { data[i] = 0; -- cgit v0.10.2 From 4ec375b1ec8ede2691a6511fff91a2b67fe73c3b Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Wed, 10 Jul 2013 02:47:24 +0000 Subject: ixgbe: fix link test when connected to 1Gbps link partner This patch is a partial reverse of: commit dfcc4615f09c33454bc553567f7c7506cae60cb9 Author: Jacob Keller Date: Thu Nov 8 07:07:08 2012 +0000 ixgbe: ethtool ixgbe_diag_test cleanup Specifically forcing the laser before the link check can lead to inconsistent results because it does not guarantee that the link will be negotiated correctly. Such is the case when dual speed SFP+ module is connected to a gigabit link partner. Signed-off-by: Emil Tantilov Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index db0dbf6..b6f1592 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1885,11 +1885,12 @@ static void ixgbe_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; bool if_running = netif_running(netdev); set_bit(__IXGBE_TESTING, &adapter->state); if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + struct ixgbe_hw *hw = &adapter->hw; + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { int i; for (i = 0; i < adapter->num_vfs; i++) { @@ -1913,21 +1914,18 @@ static void ixgbe_diag_test(struct net_device *netdev, /* Offline tests */ e_info(hw, "offline testing starting\n"); - if (if_running) - /* indicate we're in test mode */ - dev_close(netdev); - - /* bringing adapter down disables SFP+ optics */ - if (hw->mac.ops.enable_tx_laser) - hw->mac.ops.enable_tx_laser(hw); - /* Link test performed before hardware reset so autoneg doesn't * interfere with test result */ if (ixgbe_link_test(adapter, &data[4])) eth_test->flags |= ETH_TEST_FL_FAILED; - ixgbe_reset(adapter); + if (if_running) + /* indicate we're in test mode */ + dev_close(netdev); + else + ixgbe_reset(adapter); + e_info(hw, "register testing starting\n"); if (ixgbe_reg_test(adapter, &data[0])) eth_test->flags |= ETH_TEST_FL_FAILED; @@ -1964,13 +1962,11 @@ skip_loopback: clear_bit(__IXGBE_TESTING, &adapter->state); if (if_running) dev_open(netdev); + else if (hw->mac.ops.disable_tx_laser) + hw->mac.ops.disable_tx_laser(hw); } else { e_info(hw, "online testing starting\n"); - /* if adapter is down, SFP+ optics will be disabled */ - if (!if_running && hw->mac.ops.enable_tx_laser) - hw->mac.ops.enable_tx_laser(hw); - /* Online tests */ if (ixgbe_link_test(adapter, &data[4])) eth_test->flags |= ETH_TEST_FL_FAILED; @@ -1984,9 +1980,6 @@ skip_loopback: clear_bit(__IXGBE_TESTING, &adapter->state); } - /* if adapter was down, ensure SFP+ optics are disabled again */ - if (!if_running && 
hw->mac.ops.disable_tx_laser) - hw->mac.ops.disable_tx_laser(hw); skip_ol_tests: msleep_interruptible(4 * 1000); } -- cgit v0.10.2 From b08e1ed9cfcf789421b7eccdd941f3f681c0a4ab Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Fri, 26 Jul 2013 07:34:54 +0000 Subject: ixgbe: zero out mailbox buffer on init This patch initializes the msgbuf array to 0 in order to avoid using random numbers from the memory as MAC address for the VF. Signed-off-by: Emil Tantilov Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 73c8e73..276d7b1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -639,8 +639,8 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; - u32 reg, msgbuf[4]; - u32 reg_offset, vf_shift; + u32 reg, reg_offset, vf_shift; + u32 msgbuf[4] = {0, 0, 0, 0}; u8 *addr = (u8 *)(&msgbuf[1]); e_info(probe, "VF Reset msg received from vf %d\n", vf); -- cgit v0.10.2 From 1b1bf31a12f11f4e08239845e70aa10389312eba Mon Sep 17 00:00:00 2001 From: Don Skidmore Date: Wed, 31 Jul 2013 05:27:04 +0000 Subject: ixgbe: cleanup some log messages Some minor log messages cleanup, changing the level one message is logged, adding a bit of detail to another and put all the text on one line. Signed-off-by: Don Skidmore Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index cf1b41e..4688057 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7605,10 +7605,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->mac.type == ixgbe_mac_82598EB) { err = 0; } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { - e_dev_err("failed to load because an unsupported SFP+ " - "module type was detected.\n"); - e_dev_err("Reload the driver after installing a supported " - "module.\n"); + e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n"); + e_dev_err("Reload the driver after installing a supported module.\n"); goto err_sw_init; } else if (err) { e_dev_err("HW Init failed: %d\n", err); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 6a4a799..033997b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -1282,7 +1282,7 @@ s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) status = 0; } else { if (hw->allow_unsupported_sfp == true) { - e_warn(hw, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n"); + e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. 
Intel Corporation is not responsible for any harm caused by using untested modules.\n"); status = 0; } else { hw_dbg(hw, -- cgit v0.10.2 From 31c7d2b06bbfe496746ef47606230447456ecf9d Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Tue, 13 Aug 2013 04:59:29 +0000 Subject: ixgbe: fix SFF data dumps of SFP+ modules from an offset This patch fixes the read loop for the I2C data to account for the offset. Also includes a whitespace cleanup and removes ret_val as it is not needed. CC: Ben Hutchings Reported-by: Ben Hutchings Signed-off-by: Emil Tantilov Tested-by: Phil Schmitt Reviewed-by: Ben Hutchings Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index b6f1592..2d0308e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -2946,28 +2946,27 @@ static int ixgbe_get_module_eeprom(struct net_device *dev, u32 status = IXGBE_ERR_PHY_ADDR_INVALID; u8 databyte = 0xFF; int i = 0; - int ret_val = 0; if (ee->len == 0) return -EINVAL; - for (i = ee->offset; i < ee->len; i++) { + for (i = ee->offset; i < ee->offset + ee->len; i++) { /* I2C reads can take long time */ if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) return -EBUSY; if (i < ETH_MODULE_SFF_8079_LEN) - status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte); + status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte); else status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte); if (status != 0) - ret_val = -EIO; + return -EIO; data[i - ee->offset] = databyte; } - return ret_val; + return 0; } static const struct ethtool_ops ixgbe_ethtool_ops = { -- cgit v0.10.2 From 61aaf9e8072a692eb4cd74ec8d1d47576612437d Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Tue, 13 Aug 2013 07:22:16 +0000 Subject: ixgbe: add 1Gbps support for QSFP+ This patch adds GB speed support for QSFP+ modules. Autonegotiation is not supported with QSFP+. The user will have to set the desired speed on both link partners using ethtool advertise setting. 
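The behavioural core of the change is in ixgbe_get_link_capabilities_82599(): a multispeed fiber PHY still reports both 10G and 1G as supported, but when the media type is QSFP+ the driver now reports autonegotiation as unavailable, so the link speed is driven entirely by the advertised mask the administrator configures (for example via ethtool -s with only the 1G bit advertised, on both ends of the link). Condensed from the first hunk below:

	*speed |= IXGBE_LINK_SPEED_10GB_FULL |
		  IXGBE_LINK_SPEED_1GB_FULL;

	/* QSFP must not enable auto-negotiation */
	if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp)
		*autoneg = false;
	else
		*autoneg = true;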
Signed-off-by: Emil Tantilov Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index e4f4f4b..007a008 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -379,8 +379,13 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, if (hw->phy.multispeed_fiber) { *speed |= IXGBE_LINK_SPEED_10GB_FULL | - IXGBE_LINK_SPEED_1GB_FULL; - *autoneg = true; + IXGBE_LINK_SPEED_1GB_FULL; + + /* QSFP must not enable auto-negotiation */ + if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp) + *autoneg = false; + else + *autoneg = true; } out: @@ -700,13 +705,18 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, goto out; /* Set the module link speed */ - if (hw->phy.media_type == ixgbe_media_type_fiber_fixed) { - ixgbe_set_fiber_fixed_speed(hw, - IXGBE_LINK_SPEED_10GB_FULL); - } else { + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber: esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); IXGBE_WRITE_FLUSH(hw); + break; + case ixgbe_media_type_fiber_qsfp: + /* QSFP module automatically detects MAC link speed */ + break; + default: + hw_dbg(hw, "Unexpected media type.\n"); + break; } /* Allow module to change analog characteristics (1G->10G) */ @@ -757,14 +767,23 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, goto out; /* Set the module link speed */ - if (hw->phy.media_type == ixgbe_media_type_fiber_fixed) { + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber_fixed: ixgbe_set_fiber_fixed_speed(hw, IXGBE_LINK_SPEED_1GB_FULL); - } else { + break; + case ixgbe_media_type_fiber: esdp_reg &= ~IXGBE_ESDP_SDP5; esdp_reg |= IXGBE_ESDP_SDP5_DIR; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); IXGBE_WRITE_FLUSH(hw); + break; + case ixgbe_media_type_fiber_qsfp: + /* QSFP module automatically detects MAC link speed */ + break; + default: + hw_dbg(hw, "Unexpected media type.\n"); + break; } /* Allow module to change analog characteristics (10G->1G) */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 2d0308e..0e1b973 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -311,9 +311,6 @@ static int ixgbe_set_settings(struct net_device *netdev, * this function does not support duplex forcing, but can * limit the advertising of the adapter to the specified speed */ - if (ecmd->autoneg == AUTONEG_DISABLE) - return -EINVAL; - if (ecmd->advertising & ~ecmd->supported) return -EINVAL; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 033997b..e89b34e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -1205,6 +1205,12 @@ s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) if (status != 0) goto err_read_i2c_eeprom; + status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_1GBE_COMP, + &comp_codes_1g); + + if (status != 0) + goto err_read_i2c_eeprom; + if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) { hw->phy.type = ixgbe_phy_qsfp_passive_unknown; if (hw->bus.lan_id == 0) -- cgit v0.10.2 From 987e1d56b3157592d73f7f6170decada716fc415 Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Wed, 14 Aug 2013 07:12:27 +0000 Subject: ixgbe: include QSFP PHY types in ixgbe_is_sfp() This 
patch makes sure that QSFP+ modules use the SFP+ code path for setting up link. Signed-off-by: Emil Tantilov Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 4688057..7aba452 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -4175,6 +4175,10 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) case ixgbe_phy_sfp_passive_unknown: case ixgbe_phy_sfp_active_unknown: case ixgbe_phy_sfp_ftl_active: + case ixgbe_phy_qsfp_passive_unknown: + case ixgbe_phy_qsfp_active_unknown: + case ixgbe_phy_qsfp_intel: + case ixgbe_phy_qsfp_unknown: return true; case ixgbe_phy_nl: if (hw->mac.type == ixgbe_mac_82598EB) -- cgit v0.10.2 From 9a84fea2ecdcdcd61d6efce77cb1152cd500219e Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Fri, 16 Aug 2013 23:11:14 +0000 Subject: ixgbe: add support for older QSFP active DA cables This patch adds support for QSFP active direct attach (DA) cables which pre-date SFF-8436 v3.6. Signed-off-by: Emil Tantilov Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index e89b34e..e4c6760 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -1175,6 +1175,10 @@ s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) u8 comp_codes_10g = 0; u8 oui_bytes[3] = {0, 0, 0}; u16 enforce_sfp = 0; + u8 connector = 0; + u8 cable_length = 0; + u8 device_tech = 0; + bool active_cable = false; if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) { hw->phy.sfp_type = ixgbe_sfp_type_not_present; @@ -1217,12 +1221,6 @@ s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core0; else hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core1; - } else if (comp_codes_10g & IXGBE_SFF_QSFP_DA_ACTIVE_CABLE) { - hw->phy.type = ixgbe_phy_qsfp_active_unknown; - if (hw->bus.lan_id == 0) - hw->phy.sfp_type = ixgbe_sfp_type_da_act_lmt_core0; - else - hw->phy.sfp_type = ixgbe_sfp_type_da_act_lmt_core1; } else if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE | IXGBE_SFF_10GBASELR_CAPABLE)) { if (hw->bus.lan_id == 0) @@ -1230,10 +1228,47 @@ s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) else hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1; } else { - /* unsupported module type */ - hw->phy.type = ixgbe_phy_sfp_unsupported; - status = IXGBE_ERR_SFP_NOT_SUPPORTED; - goto out; + if (comp_codes_10g & IXGBE_SFF_QSFP_DA_ACTIVE_CABLE) + active_cable = true; + + if (!active_cable) { + /* check for active DA cables that pre-date + * SFF-8436 v3.6 + */ + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_CONNECTOR, + &connector); + + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_CABLE_LENGTH, + &cable_length); + + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_DEVICE_TECH, + &device_tech); + + if ((connector == + IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE) && + (cable_length > 0) && + ((device_tech >> 4) == + IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL)) + active_cable = true; + } + + if (active_cable) { + hw->phy.type = ixgbe_phy_qsfp_active_unknown; + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_da_act_lmt_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_da_act_lmt_core1; + } else { + /* unsupported module type */ + hw->phy.type = ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto 
out; + } } if (hw->phy.sfp_type != stored_sfp_type) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index 138dadd..24af12e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -50,8 +50,11 @@ #define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0 0xA5 #define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1 0xA6 #define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2 0xA7 +#define IXGBE_SFF_QSFP_CONNECTOR 0x82 #define IXGBE_SFF_QSFP_10GBE_COMP 0x83 #define IXGBE_SFF_QSFP_1GBE_COMP 0x86 +#define IXGBE_SFF_QSFP_CABLE_LENGTH 0x92 +#define IXGBE_SFF_QSFP_DEVICE_TECH 0x93 /* Bitmasks */ #define IXGBE_SFF_DA_PASSIVE_CABLE 0x4 @@ -68,6 +71,8 @@ #define IXGBE_SFF_ADDRESSING_MODE 0x4 #define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1 #define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8 +#define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23 +#define IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0 #define IXGBE_I2C_EEPROM_READ_MASK 0x100 #define IXGBE_I2C_EEPROM_STATUS_MASK 0x3 #define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0 -- cgit v0.10.2 From 38589cdcd0e77174ca9de9e11ff22e5c3ae95984 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Wed, 28 Nov 2012 23:11:18 +0000 Subject: sfc: Add support for new board sensors Add support for power and current sensors, which need to be named differently in sysfs. Power sensors also require values to be scaled between MCDI and sysfs, and have no minimum value. Add definitions of the power, current, fan, and additional temperature and voltage sensors found on SFA6902F, SFN7022F and SFN7122F. (Includes a bug fix from Andrew Jackson.) Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c index d7d4566..e22d938 100644 --- a/drivers/net/ethernet/sfc/mcdi_mon.c +++ b/drivers/net/ethernet/sfc/mcdi_mon.c @@ -21,7 +21,9 @@ enum efx_hwmon_type { EFX_HWMON_UNKNOWN, EFX_HWMON_TEMP, /* temperature */ EFX_HWMON_COOL, /* cooling device, probably a heatsink */ - EFX_HWMON_IN /* input voltage */ + EFX_HWMON_IN, /* voltage */ + EFX_HWMON_CURR, /* current */ + EFX_HWMON_POWER, /* power */ }; static const struct { @@ -29,23 +31,52 @@ static const struct { enum efx_hwmon_type hwmon_type; int port; } efx_mcdi_sensor_type[] = { -#define SENSOR(name, label, hwmon_type, port) \ - [MC_CMD_SENSOR_##name] = { label, hwmon_type, port } - SENSOR(CONTROLLER_TEMP, "Controller temp.", EFX_HWMON_TEMP, -1), - SENSOR(PHY_COMMON_TEMP, "PHY temp.", EFX_HWMON_TEMP, -1), - SENSOR(CONTROLLER_COOLING, "Controller cooling", EFX_HWMON_COOL, -1), - SENSOR(PHY0_TEMP, "PHY temp.", EFX_HWMON_TEMP, 0), - SENSOR(PHY0_COOLING, "PHY cooling", EFX_HWMON_COOL, 0), - SENSOR(PHY1_TEMP, "PHY temp.", EFX_HWMON_TEMP, 1), - SENSOR(PHY1_COOLING, "PHY cooling", EFX_HWMON_COOL, 1), - SENSOR(IN_1V0, "1.0V supply", EFX_HWMON_IN, -1), - SENSOR(IN_1V2, "1.2V supply", EFX_HWMON_IN, -1), - SENSOR(IN_1V8, "1.8V supply", EFX_HWMON_IN, -1), - SENSOR(IN_2V5, "2.5V supply", EFX_HWMON_IN, -1), - SENSOR(IN_3V3, "3.3V supply", EFX_HWMON_IN, -1), - SENSOR(IN_12V0, "12.0V supply", EFX_HWMON_IN, -1), - SENSOR(IN_1V2A, "1.2V analogue supply", EFX_HWMON_IN, -1), - SENSOR(IN_VREF, "ref. voltage", EFX_HWMON_IN, -1), +#define SENSOR(name, label, hwmon_type, port) \ + [MC_CMD_SENSOR_##name] = { label, EFX_HWMON_ ## hwmon_type, port } + SENSOR(CONTROLLER_TEMP, "Controller ext. 
temp.", TEMP, -1), + SENSOR(PHY_COMMON_TEMP, "PHY temp.", TEMP, -1), + SENSOR(CONTROLLER_COOLING, "Controller cooling", COOL, -1), + SENSOR(PHY0_TEMP, "PHY temp.", TEMP, 0), + SENSOR(PHY0_COOLING, "PHY cooling", COOL, 0), + SENSOR(PHY1_TEMP, "PHY temp.", TEMP, 1), + SENSOR(PHY1_COOLING, "PHY cooling", COOL, 1), + SENSOR(IN_1V0, "1.0V supply", IN, -1), + SENSOR(IN_1V2, "1.2V supply", IN, -1), + SENSOR(IN_1V8, "1.8V supply", IN, -1), + SENSOR(IN_2V5, "2.5V supply", IN, -1), + SENSOR(IN_3V3, "3.3V supply", IN, -1), + SENSOR(IN_12V0, "12.0V supply", IN, -1), + SENSOR(IN_1V2A, "1.2V analogue supply", IN, -1), + SENSOR(IN_VREF, "ref. voltage", IN, -1), + SENSOR(OUT_VAOE, "AOE power supply", IN, -1), + SENSOR(AOE_TEMP, "AOE temp.", TEMP, -1), + SENSOR(PSU_AOE_TEMP, "AOE PSU temp.", TEMP, -1), + SENSOR(PSU_TEMP, "Controller PSU temp.", TEMP, -1), + SENSOR(FAN_0, NULL, COOL, -1), + SENSOR(FAN_1, NULL, COOL, -1), + SENSOR(FAN_2, NULL, COOL, -1), + SENSOR(FAN_3, NULL, COOL, -1), + SENSOR(FAN_4, NULL, COOL, -1), + SENSOR(IN_VAOE, "AOE input supply", IN, -1), + SENSOR(OUT_IAOE, "AOE output current", CURR, -1), + SENSOR(IN_IAOE, "AOE input current", CURR, -1), + SENSOR(NIC_POWER, "Board power use", POWER, -1), + SENSOR(IN_0V9, "0.9V supply", IN, -1), + SENSOR(IN_I0V9, "0.9V input current", CURR, -1), + SENSOR(IN_I1V2, "1.2V input current", CURR, -1), + SENSOR(IN_0V9_ADC, "0.9V supply (at ADC)", IN, -1), + SENSOR(CONTROLLER_2_TEMP, "Controller ext. temp. 2", TEMP, -1), + SENSOR(VREG_INTERNAL_TEMP, "Voltage regulator temp.", TEMP, -1), + SENSOR(VREG_0V9_TEMP, "0.9V regulator temp.", TEMP, -1), + SENSOR(VREG_1V2_TEMP, "1.2V regulator temp.", TEMP, -1), + SENSOR(CONTROLLER_VPTAT, "Controller int. temp. raw", IN, -1), + SENSOR(CONTROLLER_INTERNAL_TEMP, "Controller int. temp.", TEMP, -1), + SENSOR(CONTROLLER_VPTAT_EXTADC, + "Controller int. temp. raw (at ADC)", IN, -1), + SENSOR(CONTROLLER_INTERNAL_TEMP_EXTADC, + "Controller int. temp. 
(via ADC)", TEMP, -1), + SENSOR(AMBIENT_TEMP, "Ambient temp.", TEMP, -1), + SENSOR(AIRFLOW, "Air flow raw", IN, -1), #undef SENSOR }; @@ -160,9 +191,19 @@ static ssize_t efx_mcdi_mon_show_value(struct device *dev, value = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE); - /* Convert temperature from degrees to milli-degrees Celsius */ - if (mon_attr->hwmon_type == EFX_HWMON_TEMP) + switch (mon_attr->hwmon_type) { + case EFX_HWMON_TEMP: + /* Convert temperature from degrees to milli-degrees Celsius */ value *= 1000; + break; + case EFX_HWMON_POWER: + /* Convert power from watts to microwatts */ + value *= 1000000; + break; + default: + /* No conversion needed */ + break; + } return sprintf(buf, "%u\n", value); } @@ -177,9 +218,19 @@ static ssize_t efx_mcdi_mon_show_limit(struct device *dev, value = mon_attr->limit_value; - /* Convert temperature from degrees to milli-degrees Celsius */ - if (mon_attr->hwmon_type == EFX_HWMON_TEMP) + switch (mon_attr->hwmon_type) { + case EFX_HWMON_TEMP: + /* Convert temperature from degrees to milli-degrees Celsius */ value *= 1000; + break; + case EFX_HWMON_POWER: + /* Convert power from watts to microwatts */ + value *= 1000000; + break; + default: + /* No conversion needed */ + break; + } return sprintf(buf, "%u\n", value); } @@ -243,8 +294,8 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name, int efx_mcdi_mon_probe(struct efx_nic *efx) { + unsigned int n_temp = 0, n_cool = 0, n_in = 0, n_curr = 0, n_power = 0; struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); - unsigned int n_temp = 0, n_cool = 0, n_in = 0; MCDI_DECLARE_BUF(inbuf, MC_CMD_SENSOR_INFO_EXT_IN_LEN); MCDI_DECLARE_BUF(outbuf, MC_CMD_SENSOR_INFO_OUT_LENMAX); unsigned int n_pages, n_sensors, n_attrs, page; @@ -380,6 +431,14 @@ int efx_mcdi_mon_probe(struct efx_nic *efx) hwmon_prefix = "in"; hwmon_index = n_in++; /* 0-based */ break; + case EFX_HWMON_CURR: + hwmon_prefix = "curr"; + hwmon_index = ++n_curr; /* 1-based */ + break; + case EFX_HWMON_POWER: + hwmon_prefix = "power"; + hwmon_index = ++n_power; /* 1-based */ + break; } min1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY, @@ -399,13 +458,15 @@ int efx_mcdi_mon_probe(struct efx_nic *efx) if (rc) goto fail; - snprintf(name, sizeof(name), "%s%u_min", - hwmon_prefix, hwmon_index); - rc = efx_mcdi_mon_add_attr( - efx, name, efx_mcdi_mon_show_limit, - i, type, min1); - if (rc) - goto fail; + if (hwmon_type != EFX_HWMON_POWER) { + snprintf(name, sizeof(name), "%s%u_min", + hwmon_prefix, hwmon_index); + rc = efx_mcdi_mon_add_attr( + efx, name, efx_mcdi_mon_show_limit, + i, type, min1); + if (rc) + goto fail; + } snprintf(name, sizeof(name), "%s%u_max", hwmon_prefix, hwmon_index); -- cgit v0.10.2 From 2f4bcdcca796ca7d385e3f870e552a2d85f0a7c9 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Thu, 22 Aug 2013 22:06:09 +0100 Subject: sfc: Refactor efx_mcdi_rpc_start() and efx_mcdi_copyin() Preparation for asynchronous MCDI requests. 
Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 63121ad..89bc194 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -70,8 +70,8 @@ void efx_mcdi_fini(struct efx_nic *efx) kfree(efx->mcdi); } -static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, - const efx_dword_t *inbuf, size_t inlen) +static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd, + const efx_dword_t *inbuf, size_t inlen) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); efx_dword_t hdr[2]; @@ -80,6 +80,11 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); + /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */ + spin_lock_bh(&mcdi->iface_lock); + ++mcdi->seqno; + spin_unlock_bh(&mcdi->iface_lock); + seqno = mcdi->seqno & SEQ_MASK; xflags = 0; if (mcdi->mode == MCDI_MODE_EVENTS) @@ -114,6 +119,8 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, } efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen); + + mcdi->new_epoch = false; } static int efx_mcdi_errno(unsigned int mcdi_err) @@ -340,6 +347,22 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno, efx_mcdi_complete(mcdi); } +static int +efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen) +{ + if (efx->type->mcdi_max_ver < 0 || + (efx->type->mcdi_max_ver < 2 && + cmd > MC_CMD_CMD_SPACE_ESCAPE_7)) + return -EINVAL; + + if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 || + (efx->type->mcdi_max_ver < 2 && + inlen > MCDI_CTL_SDU_LEN_MAX_V1)) + return -EMSGSIZE; + + return 0; +} + int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf, size_t inlen, efx_dword_t *outbuf, size_t outlen, @@ -358,26 +381,14 @@ int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf, size_t inlen) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + int rc; - if (efx->type->mcdi_max_ver < 0 || - (efx->type->mcdi_max_ver < 2 && - cmd > MC_CMD_CMD_SPACE_ESCAPE_7)) - return -EINVAL; - - if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 || - (efx->type->mcdi_max_ver < 2 && - inlen > MCDI_CTL_SDU_LEN_MAX_V1)) - return -EMSGSIZE; + rc = efx_mcdi_check_supported(efx, cmd, inlen); + if (rc) + return rc; efx_mcdi_acquire(mcdi); - - /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */ - spin_lock_bh(&mcdi->iface_lock); - ++mcdi->seqno; - spin_unlock_bh(&mcdi->iface_lock); - - efx_mcdi_copyin(efx, cmd, inbuf, inlen); - mcdi->new_epoch = false; + efx_mcdi_send_request(efx, cmd, inbuf, inlen); return 0; } -- cgit v0.10.2 From 251111d9a1bd9a26e25446d876156bf265858cb5 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Tue, 27 Aug 2013 23:04:29 +0100 Subject: sfc: Remove unnecessary use of atomic_t We can set, get and compare-and-exchange without using atomic_t. Change efx_mcdi_iface::state to the enum type we really wanted it to be. 
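For reference, the idiom this commit switches to reduces to the sketch below (kernel context assumed; the names are illustrative, not the driver's): cmpxchg() operates directly on a word-sized enum field, so no atomic_t wrapper or atomic_read()/atomic_set() accessors are needed.

#include <linux/types.h>
#include <asm/cmpxchg.h>

enum demo_state { DEMO_QUIESCENT, DEMO_RUNNING, DEMO_COMPLETED };

struct demo_iface {
	enum demo_state state;	/* plain word-sized field */
};

/* Claim the interface; true only if we won the QUIESCENT -> RUNNING race. */
static bool demo_try_acquire(struct demo_iface *iface)
{
	return cmpxchg(&iface->state, DEMO_QUIESCENT, DEMO_RUNNING) ==
	       DEMO_QUIESCENT;
}

/* Once we own the state, a plain store is enough to release it. */
static void demo_release(struct demo_iface *iface)
{
	iface->state = DEMO_QUIESCENT;
}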
Suggested-by: David Miller Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 89bc194..c616fb5 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -8,6 +8,7 @@ */ #include +#include #include "net_driver.h" #include "nic.h" #include "io.h" @@ -53,7 +54,7 @@ int efx_mcdi_init(struct efx_nic *efx) mcdi = efx_mcdi(efx); init_waitqueue_head(&mcdi->wq); spin_lock_init(&mcdi->iface_lock); - atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT); + mcdi->state = MCDI_STATE_QUIESCENT; mcdi->mode = MCDI_MODE_POLL; (void) efx_mcdi_poll_reboot(efx); @@ -65,8 +66,7 @@ int efx_mcdi_init(struct efx_nic *efx) void efx_mcdi_fini(struct efx_nic *efx) { - BUG_ON(efx->mcdi && - atomic_read(&efx->mcdi->iface.state) != MCDI_STATE_QUIESCENT); + BUG_ON(efx->mcdi && efx->mcdi->iface.state != MCDI_STATE_QUIESCENT); kfree(efx->mcdi); } @@ -78,7 +78,7 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd, size_t hdr_len; u32 xflags, seqno; - BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); + BUG_ON(mcdi->state == MCDI_STATE_QUIESCENT); /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */ spin_lock_bh(&mcdi->iface_lock); @@ -258,20 +258,17 @@ static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi) /* Wait until the interface becomes QUIESCENT and we win the race * to mark it RUNNING. */ wait_event(mcdi->wq, - atomic_cmpxchg(&mcdi->state, - MCDI_STATE_QUIESCENT, - MCDI_STATE_RUNNING) - == MCDI_STATE_QUIESCENT); + cmpxchg(&mcdi->state, + MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING) == + MCDI_STATE_QUIESCENT); } static int efx_mcdi_await_completion(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); - if (wait_event_timeout( - mcdi->wq, - atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED, - MCDI_RPC_TIMEOUT) == 0) + if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED, + MCDI_RPC_TIMEOUT) == 0) return -ETIMEDOUT; /* Check if efx_mcdi_set_mode() switched us back to polled completions. @@ -296,9 +293,8 @@ static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi) * QUIESCENT. [A subsequent invocation would increment seqno, so would * have failed the seqno check]. */ - if (atomic_cmpxchg(&mcdi->state, - MCDI_STATE_RUNNING, - MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) { + if (cmpxchg(&mcdi->state, MCDI_STATE_RUNNING, MCDI_STATE_COMPLETED) == + MCDI_STATE_RUNNING) { wake_up(&mcdi->wq); return true; } @@ -308,7 +304,7 @@ static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi) static void efx_mcdi_release(struct efx_mcdi_iface *mcdi) { - atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT); + mcdi->state = MCDI_STATE_QUIESCENT; wake_up(&mcdi->wq); } diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 303d9e8..6c0363a 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -46,7 +46,7 @@ enum efx_mcdi_mode { * @resp_data_len: Response data (SDU or error) length */ struct efx_mcdi_iface { - atomic_t state; + enum efx_mcdi_state state; enum efx_mcdi_mode mode; wait_queue_head_t wq; spinlock_t iface_lock; -- cgit v0.10.2 From cade715ff18440dda53e59c10c606586c92be33e Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Tue, 27 Aug 2013 23:12:31 +0100 Subject: sfc: Implement asynchronous MCDI requests This will allow use of MCDI from the data path, in particular for accelerated RFS. 
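Based only on the prototype this patch adds, a caller would look roughly like the sketch below. MC_CMD_EXAMPLE and its length macros are placeholders rather than real MCDI definitions; the point is that the request may be issued from atomic context, with the completer invoked later (also in atomic context) carrying either the response or an error such as -ETIMEDOUT or -ENETDOWN.

/* Placeholder MCDI command for illustration only */
#define MC_CMD_EXAMPLE		0x7f
#define MC_CMD_EXAMPLE_IN_LEN	8
#define MC_CMD_EXAMPLE_OUT_LEN	4

static void example_complete(struct efx_nic *efx, unsigned long cookie,
			     int rc, efx_dword_t *outbuf,
			     size_t outlen_actual)
{
	if (rc)
		netif_dbg(efx, drv, efx->net_dev,
			  "async MCDI request %lu failed: %d\n", cookie, rc);
	/* otherwise parse outbuf (up to outlen_actual bytes) */
}

static int example_issue(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_EXAMPLE_IN_LEN);

	/* ... populate inbuf ... */

	/* Safe from the data path; completion arrives via example_complete() */
	return efx_mcdi_rpc_async(efx, MC_CMD_EXAMPLE, inbuf, sizeof(inbuf),
				  MC_CMD_EXAMPLE_OUT_LEN, example_complete, 0);
}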
Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index db2f119..69150fa 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1368,6 +1368,9 @@ static void efx_soft_disable_interrupts(struct efx_nic *efx) if (!channel->type->keep_eventq) efx_fini_eventq(channel); } + + /* Flush the asynchronous MCDI request queue */ + efx_mcdi_flush_async(efx); } static void efx_enable_interrupts(struct efx_nic *efx) diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index c616fb5..8150781 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -37,6 +37,18 @@ #define SEQ_MASK \ EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ)) +struct efx_mcdi_async_param { + struct list_head list; + unsigned int cmd; + size_t inlen; + size_t outlen; + efx_mcdi_async_completer *complete; + unsigned long cookie; + /* followed by request/response buffer */ +}; + +static void efx_mcdi_timeout_async(unsigned long context); + static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) { EFX_BUG_ON_PARANOID(!efx->mcdi); @@ -52,10 +64,15 @@ int efx_mcdi_init(struct efx_nic *efx) return -ENOMEM; mcdi = efx_mcdi(efx); + mcdi->efx = efx; init_waitqueue_head(&mcdi->wq); spin_lock_init(&mcdi->iface_lock); mcdi->state = MCDI_STATE_QUIESCENT; mcdi->mode = MCDI_MODE_POLL; + spin_lock_init(&mcdi->async_lock); + INIT_LIST_HEAD(&mcdi->async_list); + setup_timer(&mcdi->async_timer, efx_mcdi_timeout_async, + (unsigned long)mcdi); (void) efx_mcdi_poll_reboot(efx); mcdi->new_epoch = true; @@ -253,13 +270,21 @@ int efx_mcdi_poll_reboot(struct efx_nic *efx) return efx->type->mcdi_poll_reboot(efx); } -static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi) +static bool efx_mcdi_acquire_async(struct efx_mcdi_iface *mcdi) +{ + return cmpxchg(&mcdi->state, + MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_ASYNC) == + MCDI_STATE_QUIESCENT; +} + +static void efx_mcdi_acquire_sync(struct efx_mcdi_iface *mcdi) { /* Wait until the interface becomes QUIESCENT and we win the race - * to mark it RUNNING. */ + * to mark it RUNNING_SYNC. + */ wait_event(mcdi->wq, cmpxchg(&mcdi->state, - MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING) == + MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC) == MCDI_STATE_QUIESCENT); } @@ -285,16 +310,14 @@ static int efx_mcdi_await_completion(struct efx_nic *efx) return 0; } -static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi) +/* If the interface is RUNNING_SYNC, switch to COMPLETED and wake the + * requester. Return whether this was done. Does not take any locks. + */ +static bool efx_mcdi_complete_sync(struct efx_mcdi_iface *mcdi) { - /* If the interface is RUNNING, then move to COMPLETED and wake any - * waiters. If the interface isn't in RUNNING then we've received a - * duplicate completion after we've already transitioned back to - * QUIESCENT. [A subsequent invocation would increment seqno, so would - * have failed the seqno check]. 
- */ - if (cmpxchg(&mcdi->state, MCDI_STATE_RUNNING, MCDI_STATE_COMPLETED) == - MCDI_STATE_RUNNING) { + if (cmpxchg(&mcdi->state, + MCDI_STATE_RUNNING_SYNC, MCDI_STATE_COMPLETED) == + MCDI_STATE_RUNNING_SYNC) { wake_up(&mcdi->wq); return true; } @@ -304,10 +327,91 @@ static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi) static void efx_mcdi_release(struct efx_mcdi_iface *mcdi) { + if (mcdi->mode == MCDI_MODE_EVENTS) { + struct efx_mcdi_async_param *async; + struct efx_nic *efx = mcdi->efx; + + /* Process the asynchronous request queue */ + spin_lock_bh(&mcdi->async_lock); + async = list_first_entry_or_null( + &mcdi->async_list, struct efx_mcdi_async_param, list); + if (async) { + mcdi->state = MCDI_STATE_RUNNING_ASYNC; + efx_mcdi_send_request(efx, async->cmd, + (const efx_dword_t *)(async + 1), + async->inlen); + mod_timer(&mcdi->async_timer, + jiffies + MCDI_RPC_TIMEOUT); + } + spin_unlock_bh(&mcdi->async_lock); + + if (async) + return; + } + mcdi->state = MCDI_STATE_QUIESCENT; wake_up(&mcdi->wq); } +/* If the interface is RUNNING_ASYNC, switch to COMPLETED, call the + * asynchronous completion function, and release the interface. + * Return whether this was done. Must be called in bh-disabled + * context. Will take iface_lock and async_lock. + */ +static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout) +{ + struct efx_nic *efx = mcdi->efx; + struct efx_mcdi_async_param *async; + size_t hdr_len, data_len; + efx_dword_t *outbuf; + int rc; + + if (cmpxchg(&mcdi->state, + MCDI_STATE_RUNNING_ASYNC, MCDI_STATE_COMPLETED) != + MCDI_STATE_RUNNING_ASYNC) + return false; + + spin_lock(&mcdi->iface_lock); + if (timeout) { + /* Ensure that if the completion event arrives later, + * the seqno check in efx_mcdi_ev_cpl() will fail + */ + ++mcdi->seqno; + ++mcdi->credits; + rc = -ETIMEDOUT; + hdr_len = 0; + data_len = 0; + } else { + rc = mcdi->resprc; + hdr_len = mcdi->resp_hdr_len; + data_len = mcdi->resp_data_len; + } + spin_unlock(&mcdi->iface_lock); + + /* Stop the timer. In case the timer function is running, we + * must wait for it to return so that there is no possibility + * of it aborting the next request. + */ + if (!timeout) + del_timer_sync(&mcdi->async_timer); + + spin_lock(&mcdi->async_lock); + async = list_first_entry(&mcdi->async_list, + struct efx_mcdi_async_param, list); + list_del(&async->list); + spin_unlock(&mcdi->async_lock); + + outbuf = (efx_dword_t *)(async + 1); + efx->type->mcdi_read_response(efx, outbuf, hdr_len, + min(async->outlen, data_len)); + async->complete(efx, async->cookie, rc, outbuf, data_len); + kfree(async); + + efx_mcdi_release(mcdi); + + return true; +} + static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno, unsigned int datalen, unsigned int mcdi_err) { @@ -339,8 +443,24 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno, spin_unlock(&mcdi->iface_lock); - if (wake) - efx_mcdi_complete(mcdi); + if (wake) { + if (!efx_mcdi_complete_async(mcdi, false)) + (void) efx_mcdi_complete_sync(mcdi); + + /* If the interface isn't RUNNING_ASYNC or + * RUNNING_SYNC then we've received a duplicate + * completion after we've already transitioned back to + * QUIESCENT. [A subsequent invocation would increment + * seqno, so would have failed the seqno check]. 
+ */ + } +} + +static void efx_mcdi_timeout_async(unsigned long context) +{ + struct efx_mcdi_iface *mcdi = (struct efx_mcdi_iface *)context; + + efx_mcdi_complete_async(mcdi, true); } static int @@ -383,11 +503,80 @@ int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, if (rc) return rc; - efx_mcdi_acquire(mcdi); + efx_mcdi_acquire_sync(mcdi); efx_mcdi_send_request(efx, cmd, inbuf, inlen); return 0; } +/** + * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously + * @efx: NIC through which to issue the command + * @cmd: Command type number + * @inbuf: Command parameters + * @inlen: Length of command parameters, in bytes + * @outlen: Length to allocate for response buffer, in bytes + * @complete: Function to be called on completion or cancellation. + * @cookie: Arbitrary value to be passed to @complete. + * + * This function does not sleep and therefore may be called in atomic + * context. It will fail if event queues are disabled or if MCDI + * event completions have been disabled due to an error. + * + * If it succeeds, the @complete function will be called exactly once + * in atomic context, when one of the following occurs: + * (a) the completion event is received (in NAPI context) + * (b) event queues are disabled (in the process that disables them) + * (c) the request times-out (in timer context) + */ +int +efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, size_t outlen, + efx_mcdi_async_completer *complete, unsigned long cookie) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + struct efx_mcdi_async_param *async; + int rc; + + rc = efx_mcdi_check_supported(efx, cmd, inlen); + if (rc) + return rc; + + async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4), + GFP_ATOMIC); + if (!async) + return -ENOMEM; + + async->cmd = cmd; + async->inlen = inlen; + async->outlen = outlen; + async->complete = complete; + async->cookie = cookie; + memcpy(async + 1, inbuf, inlen); + + spin_lock_bh(&mcdi->async_lock); + + if (mcdi->mode == MCDI_MODE_EVENTS) { + list_add_tail(&async->list, &mcdi->async_list); + + /* If this is at the front of the queue, try to start it + * immediately + */ + if (mcdi->async_list.next == &async->list && + efx_mcdi_acquire_async(mcdi)) { + efx_mcdi_send_request(efx, cmd, inbuf, inlen); + mod_timer(&mcdi->async_timer, + jiffies + MCDI_RPC_TIMEOUT); + } + } else { + kfree(async); + rc = -ENETDOWN; + } + + spin_unlock_bh(&mcdi->async_lock); + + return rc; +} + int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, efx_dword_t *outbuf, size_t outlen, size_t *outlen_actual) @@ -455,6 +644,10 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, return rc; } +/* Switch to polled MCDI completions. This can be called in various + * error conditions with various locks held, so it must be lockless. + * Caller is responsible for flushing asynchronous requests later. + */ void efx_mcdi_mode_poll(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi; @@ -472,11 +665,50 @@ void efx_mcdi_mode_poll(struct efx_nic *efx) * efx_mcdi_await_completion() will then call efx_mcdi_poll(). * * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(), - * which efx_mcdi_complete() provides for us. + * which efx_mcdi_complete_sync() provides for us. 
*/ mcdi->mode = MCDI_MODE_POLL; - efx_mcdi_complete(mcdi); + efx_mcdi_complete_sync(mcdi); +} + +/* Flush any running or queued asynchronous requests, after event processing + * is stopped + */ +void efx_mcdi_flush_async(struct efx_nic *efx) +{ + struct efx_mcdi_async_param *async, *next; + struct efx_mcdi_iface *mcdi; + + if (!efx->mcdi) + return; + + mcdi = efx_mcdi(efx); + + /* We must be in polling mode so no more requests can be queued */ + BUG_ON(mcdi->mode != MCDI_MODE_POLL); + + del_timer_sync(&mcdi->async_timer); + + /* If a request is still running, make sure we give the MC + * time to complete it so that the response won't overwrite our + * next request. + */ + if (mcdi->state == MCDI_STATE_RUNNING_ASYNC) { + efx_mcdi_poll(efx); + mcdi->state = MCDI_STATE_QUIESCENT; + } + + /* Nothing else will access the async list now, so it is safe + * to walk it without holding async_lock. If we hold it while + * calling a completer then lockdep may warn that we have + * acquired locks in the wrong order. + */ + list_for_each_entry_safe(async, next, &mcdi->async_list, list) { + async->complete(efx, async->cookie, -ENETDOWN, NULL, 0); + list_del(&async->list); + kfree(async); + } } void efx_mcdi_mode_event(struct efx_nic *efx) @@ -498,7 +730,7 @@ void efx_mcdi_mode_event(struct efx_nic *efx) * write memory barrier ensure that efx_mcdi_rpc() sees it, which * efx_mcdi_acquire() provides. */ - efx_mcdi_acquire(mcdi); + efx_mcdi_acquire_sync(mcdi); mcdi->mode = MCDI_MODE_EVENTS; efx_mcdi_release(mcdi); } @@ -515,16 +747,21 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc) * are sent to the same queue, we can't be racing with * efx_mcdi_ev_cpl()] * - * There's a race here with efx_mcdi_rpc(), because we might receive - * a REBOOT event *before* the request has been copied out. In polled - * mode (during startup) this is irrelevant, because efx_mcdi_complete() - * is ignored. In event mode, this condition is just an edge-case of - * receiving a REBOOT event after posting the MCDI request. Did the mc - * reboot before or after the copyout? The best we can do always is - * just return failure. + * If there is an outstanding asynchronous request, we can't + * complete it now (efx_mcdi_complete() would deadlock). The + * reset process will take care of this. + * + * There's a race here with efx_mcdi_send_request(), because + * we might receive a REBOOT event *before* the request has + * been copied out. In polled mode (during startup) this is + * irrelevant, because efx_mcdi_complete_sync() is ignored. In + * event mode, this condition is just an edge-case of + * receiving a REBOOT event after posting the MCDI + * request. Did the mc reboot before or after the copyout? The + * best we can do always is just return failure. */ spin_lock(&mcdi->iface_lock); - if (efx_mcdi_complete(mcdi)) { + if (efx_mcdi_complete_sync(mcdi)) { if (mcdi->mode == MCDI_MODE_EVENTS) { mcdi->resprc = rc; mcdi->resp_hdr_len = 0; diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 6c0363a..e37cf1d 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -14,15 +14,17 @@ * enum efx_mcdi_state - MCDI request handling state * @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the * mcdi @iface_lock then they are able to move to %MCDI_STATE_RUNNING - * @MCDI_STATE_RUNNING: There is an MCDI request pending. Only the thread that - * moved into this state is allowed to move out of it. 
+ * @MCDI_STATE_RUNNING_SYNC: There is a synchronous MCDI request pending. + * Only the thread that moved into this state is allowed to move out of it. + * @MCDI_STATE_RUNNING_ASYNC: There is an asynchronous MCDI request pending. * @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread * has not yet consumed the result. For all other threads, equivalent to * %MCDI_STATE_RUNNING. */ enum efx_mcdi_state { MCDI_STATE_QUIESCENT, - MCDI_STATE_RUNNING, + MCDI_STATE_RUNNING_SYNC, + MCDI_STATE_RUNNING_ASYNC, MCDI_STATE_COMPLETED, }; @@ -33,19 +35,25 @@ enum efx_mcdi_mode { /** * struct efx_mcdi_iface - MCDI protocol context + * @efx: The associated NIC. * @state: Request handling state. Waited for by @wq. * @mode: Poll for mcdi completion, or wait for an mcdi_event. * @wq: Wait queue for threads waiting for @state != %MCDI_STATE_RUNNING * @new_epoch: Indicates start of day or start of MC reboot recovery - * @iface_lock: Serialises access to all the following fields + * @iface_lock: Serialises access to @seqno, @credits and response metadata * @seqno: The next sequence number to use for mcdi requests. * @credits: Number of spurious MCDI completion events allowed before we * trigger a fatal error * @resprc: Response error/success code (Linux numbering) * @resp_hdr_len: Response header length * @resp_data_len: Response data (SDU or error) length + * @async_lock: Serialises access to @async_list while event processing is + * enabled + * @async_list: Queue of asynchronous requests + * @async_timer: Timer for asynchronous request timeout */ struct efx_mcdi_iface { + struct efx_nic *efx; enum efx_mcdi_state state; enum efx_mcdi_mode mode; wait_queue_head_t wq; @@ -56,6 +64,9 @@ struct efx_mcdi_iface { int resprc; size_t resp_hdr_len; size_t resp_data_len; + spinlock_t async_lock; + struct list_head async_list; + struct timer_list async_timer; }; struct efx_mcdi_mon { @@ -111,10 +122,20 @@ extern int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, efx_dword_t *outbuf, size_t outlen, size_t *outlen_actual); +typedef void efx_mcdi_async_completer(struct efx_nic *efx, + unsigned long cookie, int rc, + efx_dword_t *outbuf, + size_t outlen_actual); +extern int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + size_t outlen, + efx_mcdi_async_completer *complete, + unsigned long cookie); extern int efx_mcdi_poll_reboot(struct efx_nic *efx); extern void efx_mcdi_mode_poll(struct efx_nic *efx); extern void efx_mcdi_mode_event(struct efx_nic *efx); +extern void efx_mcdi_flush_async(struct efx_nic *efx); extern void efx_mcdi_process_event(struct efx_channel *channel, efx_qword_t *event); -- cgit v0.10.2 From b883d0bd4ae91059242fd2f8c2a70f308ef63dc1 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Tue, 15 Jan 2013 22:00:07 +0000 Subject: sfc: Document conditions for multicast replication vs filter replacement Add the efx_filter_is_mc_recip() function to decide whether a filter is for a multicast recipient and can coexist with other filters with the same match values. Update efx_filter_insert_filter() kernel-doc to explain the conditions for this. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 3bbc047..9de28f6 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -79,13 +79,20 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); * On success, return the filter ID. * On failure, return a negative error code. 
* - * If an existing filter has equal match values to the new filter - * spec, then the new filter might replace it, depending on the - * relative priorities. If the existing filter has lower priority, or - * if @replace_equal is set and it has equal priority, then it is - * replaced. Otherwise the function fails, returning -%EPERM if - * the existing filter has higher priority or -%EEXIST if it has - * equal priority. + * If existing filters have equal match values to the new filter spec, + * then the new filter might replace them or the function might fail, + * as follows. + * + * 1. If the existing filters have lower priority, or @replace_equal + * is set and they have equal priority, replace them. + * + * 2. If the existing filters have higher priority, return -%EPERM. + * + * 3. If !efx_filter_is_mc_recipient(@spec), or the NIC does not + * support delivery to multiple recipients, return -%EEXIST. + * + * This implies that filters for multiple multicast recipients must + * all be inserted with the same priority and @replace_equal = %false. */ static inline s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, @@ -169,6 +176,7 @@ static inline void efx_filter_rfs_expire(struct efx_channel *channel) static inline void efx_filter_rfs_expire(struct efx_channel *channel) {} #define efx_filter_rfs_enabled() 0 #endif +extern bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec); /* Channels */ extern int efx_channel_dummy_op_int(struct efx_channel *channel); diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 864b6ff..81eab21 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -903,3 +903,37 @@ bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota) } #endif /* CONFIG_RFS_ACCEL */ + +/** + * efx_filter_is_mc_recipient - test whether spec is a multicast recipient + * @spec: Specification to test + * + * Return: %true if the specification is a non-drop RX filter that + * matches a local MAC address I/G bit value of 1 or matches a local + * IPv4 or IPv6 address value in the respective multicast address + * range. Otherwise %false. + */ +bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec) +{ + if (!(spec->flags & EFX_FILTER_FLAG_RX) || + spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP) + return false; + + if (spec->match_flags & + (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) && + is_multicast_ether_addr(spec->loc_mac)) + return true; + + if ((spec->match_flags & + (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) == + (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) { + if (spec->ether_type == htons(ETH_P_IP) && + ipv4_is_multicast(spec->loc_host[0])) + return true; + if (spec->ether_type == htons(ETH_P_IPV6) && + ((const u8 *)spec->loc_host)[0] == 0xff) + return true; + } + + return false; +} -- cgit v0.10.2 From 261e4d96b45476fa7386130a309bc15af9eca2e0 Mon Sep 17 00:00:00 2001 From: Jon Cooper Date: Mon, 15 Apr 2013 18:51:54 +0100 Subject: sfc: Allow event queue initialisation to fail On EF10, event queue initialisation requires an MCDI request which may return failure. 
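To illustrate why the hook now needs to return an error code, an EF10-style ev_init might look roughly like the sketch below. The command number and buffer layout are made up for the example; the real EF10 implementation is not part of this patch.

#define DEMO_MC_CMD_INIT_EVQ	0x80	/* made-up command number */

static int demo_ef10_ev_init(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	MCDI_DECLARE_BUF(inbuf, 64);	/* placeholder size, not the real layout */

	/* ... fill in queue size, DMA addresses of the event queue, etc. ... */

	/* The MC may refuse the request, so the error must propagate up
	 * through efx_nic_init_eventq() and efx_init_eventq().
	 */
	return efx_mcdi_rpc(efx, DEMO_MC_CMD_INIT_EVQ, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}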
Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 69150fa..84c47d3 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -189,7 +189,7 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value"); * *************************************************************************/ -static void efx_soft_enable_interrupts(struct efx_nic *efx); +static int efx_soft_enable_interrupts(struct efx_nic *efx); static void efx_soft_disable_interrupts(struct efx_nic *efx); static void efx_remove_channel(struct efx_channel *channel); static void efx_remove_channels(struct efx_nic *efx); @@ -329,15 +329,21 @@ static int efx_probe_eventq(struct efx_channel *channel) } /* Prepare channel's event queue */ -static void efx_init_eventq(struct efx_channel *channel) +static int efx_init_eventq(struct efx_channel *channel) { + int rc; + + EFX_WARN_ON_PARANOID(channel->eventq_init); + netif_dbg(channel->efx, drv, channel->efx->net_dev, "chan %d init event queue\n", channel->channel); - channel->eventq_read_ptr = 0; - - efx_nic_init_eventq(channel); - channel->eventq_init = true; + rc = efx_nic_init_eventq(channel); + if (rc == 0) { + channel->eventq_read_ptr = 0; + channel->eventq_init = true; + } + return rc; } /* Enable event queue processing and NAPI */ @@ -722,7 +728,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel; u32 old_rxq_entries, old_txq_entries; unsigned i, next_buffer_table = 0; - int rc; + int rc, rc2; rc = efx_check_disabled(efx); if (rc) @@ -802,9 +808,16 @@ out: } } - efx_soft_enable_interrupts(efx); - efx_start_all(efx); - netif_device_attach(efx->net_dev); + rc2 = efx_soft_enable_interrupts(efx); + if (rc2) { + rc = rc ? 
rc : rc2; + netif_err(efx, drv, efx->net_dev, + "unable to restart interrupts on channel reallocation\n"); + efx_schedule_reset(efx, RESET_TYPE_DISABLE); + } else { + efx_start_all(efx); + netif_device_attach(efx->net_dev); + } return rc; rollback: @@ -1327,9 +1340,10 @@ static int efx_probe_interrupts(struct efx_nic *efx) return 0; } -static void efx_soft_enable_interrupts(struct efx_nic *efx) +static int efx_soft_enable_interrupts(struct efx_nic *efx) { - struct efx_channel *channel; + struct efx_channel *channel, *end_channel; + int rc; BUG_ON(efx->state == STATE_DISABLED); @@ -1337,12 +1351,28 @@ static void efx_soft_enable_interrupts(struct efx_nic *efx) smp_wmb(); efx_for_each_channel(channel, efx) { - if (!channel->type->keep_eventq) - efx_init_eventq(channel); + if (!channel->type->keep_eventq) { + rc = efx_init_eventq(channel); + if (rc) + goto fail; + } efx_start_eventq(channel); } efx_mcdi_mode_event(efx); + + return 0; +fail: + end_channel = channel; + efx_for_each_channel(channel, efx) { + if (channel == end_channel) + break; + efx_stop_eventq(channel); + if (!channel->type->keep_eventq) + efx_fini_eventq(channel); + } + + return rc; } static void efx_soft_disable_interrupts(struct efx_nic *efx) @@ -1373,9 +1403,10 @@ static void efx_soft_disable_interrupts(struct efx_nic *efx) efx_mcdi_flush_async(efx); } -static void efx_enable_interrupts(struct efx_nic *efx) +static int efx_enable_interrupts(struct efx_nic *efx) { - struct efx_channel *channel; + struct efx_channel *channel, *end_channel; + int rc; BUG_ON(efx->state == STATE_DISABLED); @@ -1387,11 +1418,31 @@ static void efx_enable_interrupts(struct efx_nic *efx) efx->type->irq_enable_master(efx); efx_for_each_channel(channel, efx) { + if (channel->type->keep_eventq) { + rc = efx_init_eventq(channel); + if (rc) + goto fail; + } + } + + rc = efx_soft_enable_interrupts(efx); + if (rc) + goto fail; + + return 0; + +fail: + end_channel = channel; + efx_for_each_channel(channel, efx) { + if (channel == end_channel) + break; if (channel->type->keep_eventq) - efx_init_eventq(channel); + efx_fini_eventq(channel); } - efx_soft_enable_interrupts(efx); + efx->type->irq_disable_non_ev(efx); + + return rc; } static void efx_disable_interrupts(struct efx_nic *efx) @@ -2205,7 +2256,9 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) "could not restore PHY settings\n"); } - efx_enable_interrupts(efx); + rc = efx_enable_interrupts(efx); + if (rc) + goto fail; efx_restore_filters(efx); efx_sriov_reset(efx); @@ -2649,10 +2702,14 @@ static int efx_pci_probe_main(struct efx_nic *efx) rc = efx_nic_init_interrupt(efx); if (rc) goto fail5; - efx_enable_interrupts(efx); + rc = efx_enable_interrupts(efx); + if (rc) + goto fail6; return 0; + fail6: + efx_nic_fini_interrupt(efx); fail5: efx_fini_port(efx); fail4: @@ -2780,12 +2837,15 @@ static int efx_pm_freeze(struct device *dev) static int efx_pm_thaw(struct device *dev) { + int rc; struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); rtnl_lock(); if (efx->state != STATE_DISABLED) { - efx_enable_interrupts(efx); + rc = efx_enable_interrupts(efx); + if (rc) + goto fail; mutex_lock(&efx->mac_lock); efx->phy_op->reconfigure(efx); @@ -2806,6 +2866,11 @@ static int efx_pm_thaw(struct device *dev) queue_work(reset_workqueue, &efx->reset_work); return 0; + +fail: + rtnl_unlock(); + + return rc; } static int efx_pm_poweroff(struct device *dev) @@ -2842,8 +2907,8 @@ static int efx_pm_resume(struct device *dev) rc = efx->type->init(efx); if (rc) return rc; - efx_pm_thaw(dev); - 
return 0; + rc = efx_pm_thaw(dev); + return rc; } static int efx_pm_suspend(struct device *dev) diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index d21483d..904af5c 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -1325,7 +1325,7 @@ int efx_farch_ev_probe(struct efx_channel *channel) entries * sizeof(efx_qword_t)); } -void efx_farch_ev_init(struct efx_channel *channel) +int efx_farch_ev_init(struct efx_channel *channel) { efx_oword_t reg; struct efx_nic *efx = channel->efx; @@ -1358,6 +1358,8 @@ void efx_farch_ev_init(struct efx_channel *channel) channel->channel); efx->type->push_irq_moderation(channel); + + return 0; } void efx_farch_ev_fini(struct efx_channel *channel) diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index e85bed0..54900f3 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -1087,7 +1087,7 @@ struct efx_nic_type { void (*rx_write)(struct efx_rx_queue *rx_queue); void (*rx_defer_refill)(struct efx_rx_queue *rx_queue); int (*ev_probe)(struct efx_channel *channel); - void (*ev_init)(struct efx_channel *channel); + int (*ev_init)(struct efx_channel *channel); void (*ev_fini)(struct efx_channel *channel); void (*ev_remove)(struct efx_channel *channel); int (*ev_process)(struct efx_channel *channel, int quota); diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 9afbf36..686ce7a 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -503,9 +503,9 @@ static inline int efx_nic_probe_eventq(struct efx_channel *channel) { return channel->efx->type->ev_probe(channel); } -static inline void efx_nic_init_eventq(struct efx_channel *channel) +static inline int efx_nic_init_eventq(struct efx_channel *channel) { - channel->efx->type->ev_init(channel); + return channel->efx->type->ev_init(channel); } static inline void efx_nic_fini_eventq(struct efx_channel *channel) { @@ -539,7 +539,7 @@ extern void efx_farch_rx_remove(struct efx_rx_queue *rx_queue); extern void efx_farch_rx_write(struct efx_rx_queue *rx_queue); extern void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue); extern int efx_farch_ev_probe(struct efx_channel *channel); -extern void efx_farch_ev_init(struct efx_channel *channel); +extern int efx_farch_ev_init(struct efx_channel *channel); extern void efx_farch_ev_fini(struct efx_channel *channel); extern void efx_farch_ev_remove(struct efx_channel *channel); extern int efx_farch_ev_process(struct efx_channel *channel, int quota); -- cgit v0.10.2 From c15eed220fd1dd31d7ad2e4893b679331e6f1e74 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Thu, 29 Aug 2013 00:45:48 +0100 Subject: sfc: Allow efx_nic_type::dimension_resources to fail Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 84c47d3..59aa73c 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1513,9 +1513,11 @@ static int efx_probe_nic(struct efx_nic *efx) * in MSI-X interrupts. 
*/ rc = efx_probe_interrupts(efx); if (rc) - goto fail; + goto fail1; - efx->type->dimension_resources(efx); + rc = efx->type->dimension_resources(efx); + if (rc) + goto fail2; if (efx->n_channels > 1) get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key)); @@ -1533,7 +1535,9 @@ static int efx_probe_nic(struct efx_nic *efx) return 0; -fail: +fail2: + efx_remove_interrupts(efx); +fail1: efx->type->remove(efx); return rc; } diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index ec77611..a7b30ddb 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -2174,10 +2174,11 @@ out: return rc; } -static void falcon_dimension_resources(struct efx_nic *efx) +static int falcon_dimension_resources(struct efx_nic *efx) { efx->rx_dc_base = 0x20000; efx->tx_dc_base = 0x26000; + return 0; } /* Probe all SPI devices on the NIC */ diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 54900f3..89974f7 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -1036,7 +1036,7 @@ struct efx_nic_type { int (*probe)(struct efx_nic *efx); void (*remove)(struct efx_nic *efx); int (*init)(struct efx_nic *efx); - void (*dimension_resources)(struct efx_nic *efx); + int (*dimension_resources)(struct efx_nic *efx); void (*fini)(struct efx_nic *efx); void (*monitor)(struct efx_nic *efx); enum reset_type (*map_reset_reason)(enum reset_type reason); diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 89180d4..1500405 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -177,13 +177,14 @@ static int siena_probe_nvconfig(struct efx_nic *efx) return rc; } -static void siena_dimension_resources(struct efx_nic *efx) +static int siena_dimension_resources(struct efx_nic *efx) { /* Each port has a small block of internal SRAM dedicated to * the buffer table and descriptor caches. In theory we can * map both blocks to one port, but we don't. 
*/ efx_farch_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2); + return 0; } static unsigned int siena_mem_map_size(struct efx_nic *efx) -- cgit v0.10.2 From 15acb1cea5d9298eac511b80380183be624fa31c Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Wed, 1 May 2013 16:30:17 +0100 Subject: sfc: Initialise IRQ moderation for all NIC types from efx_init_eventq() Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 59aa73c..aec6213 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -331,15 +331,17 @@ static int efx_probe_eventq(struct efx_channel *channel) /* Prepare channel's event queue */ static int efx_init_eventq(struct efx_channel *channel) { + struct efx_nic *efx = channel->efx; int rc; EFX_WARN_ON_PARANOID(channel->eventq_init); - netif_dbg(channel->efx, drv, channel->efx->net_dev, + netif_dbg(efx, drv, efx->net_dev, "chan %d init event queue\n", channel->channel); rc = efx_nic_init_eventq(channel); if (rc == 0) { + efx->type->push_irq_moderation(channel); channel->eventq_read_ptr = 0; channel->eventq_init = true; } diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 904af5c..eb754cf 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -1357,8 +1357,6 @@ int efx_farch_ev_init(struct efx_channel *channel) efx_writeo_table(efx, ®, efx->type->evq_ptr_tbl_base, channel->channel); - efx->type->push_irq_moderation(channel); - return 0; } -- cgit v0.10.2 From e8c68c0a09279107b5b239ca6fa7c5839717b7e2 Mon Sep 17 00:00:00 2001 From: Jon Cooper Date: Fri, 8 Mar 2013 10:18:28 +0000 Subject: sfc: Prepare for RX scatter on EF10 RX DMA scatter is always enabled on EF10. Adjust the common RX completion handling to allow for this. RX completion events on EF10 include the length used from a single descriptor, not the cumulative length used. Add a field to struct efx_rx_queue to hold the cumulative length. [bwh: Also fix a related comment] Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index aec6213..b483223 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -587,7 +587,7 @@ static void efx_start_datapath(struct efx_nic *efx) rx_buf_len = (sizeof(struct efx_rx_page_state) + NET_IP_ALIGN + efx->rx_dma_len); if (rx_buf_len <= PAGE_SIZE) { - efx->rx_scatter = false; + efx->rx_scatter = efx->type->always_rx_scatter; efx->rx_buffer_order = 0; } else if (efx->type->can_rx_scatter) { BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES); @@ -615,7 +615,7 @@ static void efx_start_datapath(struct efx_nic *efx) efx->rx_dma_len, efx->rx_page_buf_step, efx->rx_bufs_per_page, efx->rx_pages_per_batch); - /* RX filters also have scatter-enabled flags */ + /* RX filters may also have scatter-enabled flags */ if (efx->rx_scatter != old_rx_scatter) efx->type->filter_update_rx_scatter(efx); diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 89974f7..5341c76 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -297,7 +297,8 @@ struct efx_rx_page_state { * @added_count: Number of buffers added to the receive queue. * @notified_count: Number of buffers given to NIC (<= @added_count). * @removed_count: Number of buffers removed from the receive queue. - * @scatter_n: Number of buffers used by current packet + * @scatter_n: Used by NIC specific receive code. 
+ * @scatter_len: Used by NIC specific receive code. * @page_ring: The ring to store DMA mapped pages for reuse. * @page_add: Counter to calculate the write pointer for the recycle ring. * @page_remove: Counter to calculate the read pointer for the recycle ring. @@ -329,6 +330,7 @@ struct efx_rx_queue { unsigned int notified_count; unsigned int removed_count; unsigned int scatter_n; + unsigned int scatter_len; struct page **page_ring; unsigned int page_add; unsigned int page_remove; @@ -1023,7 +1025,8 @@ struct efx_mtd_partition { * @rx_prefix_size: Size of RX prefix before packet data * @rx_hash_offset: Offset of RX flow hash within prefix * @rx_buffer_padding: Size of padding at end of RX packet - * @can_rx_scatter: NIC is able to scatter packet to multiple buffers + * @can_rx_scatter: NIC is able to scatter packets to multiple buffers + * @always_rx_scatter: NIC will always scatter packets to multiple buffers * @max_interrupt_mode: Highest capability interrupt mode supported * from &enum efx_init_mode. * @timer_period_max: Maximum period of interrupt timer (in ticks) @@ -1142,6 +1145,7 @@ struct efx_nic_type { unsigned int rx_hash_offset; unsigned int rx_buffer_padding; bool can_rx_scatter; + bool always_rx_scatter; unsigned int max_interrupt_mode; unsigned int timer_period_max; netdev_features_t offload_features; diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 81eab21..8c13dd8 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -529,8 +529,8 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, if (!(flags & EFX_RX_PKT_PREFIX_LEN)) efx_rx_packet__check_len(rx_queue, rx_buf, len); } else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) || - unlikely(len <= (n_frags - 1) * EFX_RX_USR_BUF_SIZE) || - unlikely(len > n_frags * EFX_RX_USR_BUF_SIZE) || + unlikely(len <= (n_frags - 1) * efx->rx_dma_len) || + unlikely(len > n_frags * efx->rx_dma_len) || unlikely(!efx->rx_scatter)) { /* If this isn't an explicit discard request, either * the hardware or the driver is broken. @@ -581,9 +581,9 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, rx_buf = efx_rx_buf_next(rx_queue, rx_buf); if (--tail_frags == 0) break; - efx_sync_rx_buffer(efx, rx_buf, EFX_RX_USR_BUF_SIZE); + efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len); } - rx_buf->len = len - (n_frags - 1) * EFX_RX_USR_BUF_SIZE; + rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len; efx_sync_rx_buffer(efx, rx_buf, rx_buf->len); } -- cgit v0.10.2 From 3881d8ab065b23bb07400aa820e737d80fdaced3 Mon Sep 17 00:00:00 2001 From: Alexandre Rames Date: Mon, 10 Jun 2013 11:03:21 +0100 Subject: sfc: Use a global count of active queues instead of pending drains On EF10, the firmware will initiate a queue flush in certain error cases. We need to accept that flush events might appear at any time after a queue has been initialised, not just when we try to flush them. We can handle Falcon-architecture in just the same way. 
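Reduced to its essentials, the accounting this commit adopts is: every initialised TX or RX queue increments a single active_queues count, every drain/flush-complete event decrements it, and the flush path waits for the count to reach zero. A minimal sketch follows (the helper names are illustrative, and the real wake condition in efx_farch_flush_wake() is slightly more involved):

static void demo_queue_started(struct efx_nic *efx)
{
	atomic_inc(&efx->active_queues);
}

static void demo_queue_drained(struct efx_nic *efx)
{
	WARN_ON(atomic_read(&efx->active_queues) == 0);
	/* Last queue to drain wakes whoever is waiting in the flush path */
	if (atomic_dec_and_test(&efx->active_queues))
		wake_up(&efx->flush_wq);
}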
Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index b483223..34788fb 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -631,11 +631,14 @@ static void efx_start_datapath(struct efx_nic *efx) /* Initialise the channels */ efx_for_each_channel(channel, efx) { - efx_for_each_channel_tx_queue(tx_queue, channel) + efx_for_each_channel_tx_queue(tx_queue, channel) { efx_init_tx_queue(tx_queue); + atomic_inc(&efx->active_queues); + } efx_for_each_channel_rx_queue(rx_queue, channel) { efx_init_rx_queue(rx_queue); + atomic_inc(&efx->active_queues); efx_nic_generate_fill_event(rx_queue); } diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index eb754cf..842f92e 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -594,7 +594,7 @@ static bool efx_farch_flush_wake(struct efx_nic *efx) /* Ensure that all updates are visible to efx_farch_flush_queues() */ smp_mb(); - return (atomic_read(&efx->drain_pending) == 0 || + return (atomic_read(&efx->active_queues) == 0 || (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT && atomic_read(&efx->rxq_flush_pending) > 0)); } @@ -626,7 +626,7 @@ static bool efx_check_tx_flush_complete(struct efx_nic *efx) netif_dbg(efx, hw, efx->net_dev, "flush complete on TXQ %d, so drain " "the queue\n", tx_queue->queue); - /* Don't need to increment drain_pending as it + /* Don't need to increment active_queues as it * has already been incremented for the queues * which did not drain */ @@ -653,17 +653,15 @@ static int efx_farch_do_flush(struct efx_nic *efx) efx_for_each_channel(channel, efx) { efx_for_each_channel_tx_queue(tx_queue, channel) { - atomic_inc(&efx->drain_pending); efx_farch_flush_tx_queue(tx_queue); } efx_for_each_channel_rx_queue(rx_queue, channel) { - atomic_inc(&efx->drain_pending); rx_queue->flush_pending = true; atomic_inc(&efx->rxq_flush_pending); } } - while (timeout && atomic_read(&efx->drain_pending) > 0) { + while (timeout && atomic_read(&efx->active_queues) > 0) { /* If SRIOV is enabled, then offload receive queue flushing to * the firmware (though we will still have to poll for * completion). If that fails, fall back to the old scheme. 
@@ -699,15 +697,15 @@ static int efx_farch_do_flush(struct efx_nic *efx) timeout); } - if (atomic_read(&efx->drain_pending) && + if (atomic_read(&efx->active_queues) && !efx_check_tx_flush_complete(efx)) { netif_err(efx, hw, efx->net_dev, "failed to flush %d queues " - "(rx %d+%d)\n", atomic_read(&efx->drain_pending), + "(rx %d+%d)\n", atomic_read(&efx->active_queues), atomic_read(&efx->rxq_flush_outstanding), atomic_read(&efx->rxq_flush_pending)); rc = -ETIMEDOUT; - atomic_set(&efx->drain_pending, 0); + atomic_set(&efx->active_queues, 0); atomic_set(&efx->rxq_flush_pending, 0); atomic_set(&efx->rxq_flush_outstanding, 0); } @@ -1123,8 +1121,8 @@ efx_farch_handle_drain_event(struct efx_channel *channel) { struct efx_nic *efx = channel->efx; - WARN_ON(atomic_read(&efx->drain_pending) == 0); - atomic_dec(&efx->drain_pending); + WARN_ON(atomic_read(&efx->active_queues) == 0); + atomic_dec(&efx->active_queues); if (efx_farch_flush_wake(efx)) wake_up(&efx->flush_wq); } diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 5341c76..c9b6f2d 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -723,7 +723,7 @@ struct vfdi_status; * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS, * indexed by filter ID * @rps_expire_index: Next index to check for expiry in @rps_flow_id - * @drain_pending: Count of RX and TX queues that haven't been flushed and drained. + * @active_queues: Count of RX and TX queues that haven't been flushed and drained. * @rxq_flush_pending: Count of number of receive queues that need to be flushed. * Decremented when the efx_flush_rx_queue() is called. * @rxq_flush_outstanding: Count of number of RX flushes started but not yet @@ -864,7 +864,7 @@ struct efx_nic { unsigned int rps_expire_index; #endif - atomic_t drain_pending; + atomic_t active_queues; atomic_t rxq_flush_pending; atomic_t rxq_flush_outstanding; wait_queue_head_t flush_wq; -- cgit v0.10.2 From ba8977bdb20d7ae72ec6fddc1c081ca2d56852cb Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Tue, 8 Jan 2013 23:43:19 +0000 Subject: sfc: Extend struct efx_tx_buffer to allow pushing option descriptors The TX path firmware for EF10 supports 'option descriptors' to control offloads and various other features. Add a flag and field for these in struct efx_tx_buffer, and don't treat them as DMA descriptors on completion. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 842f92e..65073aa 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -325,6 +325,8 @@ void efx_farch_tx_write(struct efx_tx_queue *tx_queue) txd = efx_tx_desc(tx_queue, write_ptr); ++tx_queue->write_count; + EFX_BUG_ON_PARANOID(buffer->flags & EFX_TX_BUF_OPTION); + /* Create TX descriptor ring entry */ BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); EFX_POPULATE_QWORD_4(*txd, diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index c9b6f2d..d1aa5dc 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -135,6 +135,7 @@ struct efx_special_buffer { * freed when descriptor completes * @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be * freed when descriptor completes. + * @option: When @flags & %EFX_TX_BUF_OPTION, a NIC-specific option descriptor. * @dma_addr: DMA address of the fragment. * @flags: Flags for allocation and DMA mapping type * @len: Length of this fragment. 
@@ -146,7 +147,10 @@ struct efx_tx_buffer { const struct sk_buff *skb; void *heap_buf; }; - dma_addr_t dma_addr; + union { + efx_qword_t option; + dma_addr_t dma_addr; + }; unsigned short flags; unsigned short len; unsigned short unmap_len; @@ -155,6 +159,7 @@ struct efx_tx_buffer { #define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */ #define EFX_TX_BUF_HEAP 4 /* buffer was allocated with kmalloc() */ #define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */ +#define EFX_TX_BUF_OPTION 0x10 /* empty buffer for option descriptor */ /** * struct efx_tx_queue - An Efx TX queue diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 85ee647..82075f9 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -306,7 +306,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, while (read_ptr != stop_index) { struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; - if (unlikely(buffer->len == 0)) { + + if (!(buffer->flags & EFX_TX_BUF_OPTION) && + unlikely(buffer->len == 0)) { netif_err(efx, tx_err, efx->net_dev, "TX queue %d spurious TX completion id %x\n", tx_queue->queue, read_ptr); -- cgit v0.10.2 From 9c51716512c1e73523b685f389fbb748c15436e4 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Wed, 19 Sep 2012 17:47:08 +0100 Subject: sfc: Add EF10 register and structure definitions Also update comments and assertions in io.h: - EF10 does not have a general BIU collector and does not have the bug affecting TIMER_COMMAND_REG[0] on Falcon/Siena - The WPTR field moved within RX_DESC_UPD_REG and TX_DESC_UPD_REG. Adjust efx_writed_page() accordingly Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/ef10_regs.h b/drivers/net/ethernet/sfc/ef10_regs.h new file mode 100644 index 0000000..b3f4e37 --- /dev/null +++ b/drivers/net/ethernet/sfc/ef10_regs.h @@ -0,0 +1,415 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2012-2013 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_EF10_REGS_H +#define EFX_EF10_REGS_H + +/* EF10 hardware architecture definitions have a name prefix following + * the format: + * + * E__ + * + * The following strings are used: + * + * MMIO register Host memory structure + * ------------------------------------------------------------- + * Address R + * Bitfield RF SF + * Enumerator FE SE + * + * is the first revision to which the definition applies: + * + * D: Huntington A0 + * + * If the definition has been changed or removed in later revisions + * then is the last revision to which the definition applies; + * otherwise it is "Z". 
+ */ + +/************************************************************************** + * + * EF10 registers and descriptors + * + ************************************************************************** + */ + +/* BIU_HW_REV_ID_REG: */ +#define ER_DZ_BIU_HW_REV_ID 0x00000000 +#define ERF_DZ_HW_REV_ID_LBN 0 +#define ERF_DZ_HW_REV_ID_WIDTH 32 + +/* BIU_MC_SFT_STATUS_REG: */ +#define ER_DZ_BIU_MC_SFT_STATUS 0x00000010 +#define ER_DZ_BIU_MC_SFT_STATUS_STEP 4 +#define ER_DZ_BIU_MC_SFT_STATUS_ROWS 8 +#define ERF_DZ_MC_SFT_STATUS_LBN 0 +#define ERF_DZ_MC_SFT_STATUS_WIDTH 32 + +/* BIU_INT_ISR_REG: */ +#define ER_DZ_BIU_INT_ISR 0x00000090 +#define ERF_DZ_ISR_REG_LBN 0 +#define ERF_DZ_ISR_REG_WIDTH 32 + +/* MC_DB_LWRD_REG: */ +#define ER_DZ_MC_DB_LWRD 0x00000200 +#define ERF_DZ_MC_DOORBELL_L_LBN 0 +#define ERF_DZ_MC_DOORBELL_L_WIDTH 32 + +/* MC_DB_HWRD_REG: */ +#define ER_DZ_MC_DB_HWRD 0x00000204 +#define ERF_DZ_MC_DOORBELL_H_LBN 0 +#define ERF_DZ_MC_DOORBELL_H_WIDTH 32 + +/* EVQ_RPTR_REG: */ +#define ER_DZ_EVQ_RPTR 0x00000400 +#define ER_DZ_EVQ_RPTR_STEP 8192 +#define ER_DZ_EVQ_RPTR_ROWS 2048 +#define ERF_DZ_EVQ_RPTR_VLD_LBN 15 +#define ERF_DZ_EVQ_RPTR_VLD_WIDTH 1 +#define ERF_DZ_EVQ_RPTR_LBN 0 +#define ERF_DZ_EVQ_RPTR_WIDTH 15 + +/* EVQ_TMR_REG: */ +#define ER_DZ_EVQ_TMR 0x00000420 +#define ER_DZ_EVQ_TMR_STEP 8192 +#define ER_DZ_EVQ_TMR_ROWS 2048 +#define ERF_DZ_TC_TIMER_MODE_LBN 14 +#define ERF_DZ_TC_TIMER_MODE_WIDTH 2 +#define ERF_DZ_TC_TIMER_VAL_LBN 0 +#define ERF_DZ_TC_TIMER_VAL_WIDTH 14 + +/* RX_DESC_UPD_REG: */ +#define ER_DZ_RX_DESC_UPD 0x00000830 +#define ER_DZ_RX_DESC_UPD_STEP 8192 +#define ER_DZ_RX_DESC_UPD_ROWS 2048 +#define ERF_DZ_RX_DESC_WPTR_LBN 0 +#define ERF_DZ_RX_DESC_WPTR_WIDTH 12 + +/* TX_DESC_UPD_REG: */ +#define ER_DZ_TX_DESC_UPD 0x00000a10 +#define ER_DZ_TX_DESC_UPD_STEP 8192 +#define ER_DZ_TX_DESC_UPD_ROWS 2048 +#define ERF_DZ_RSVD_LBN 76 +#define ERF_DZ_RSVD_WIDTH 20 +#define ERF_DZ_TX_DESC_WPTR_LBN 64 +#define ERF_DZ_TX_DESC_WPTR_WIDTH 12 +#define ERF_DZ_TX_DESC_HWORD_LBN 32 +#define ERF_DZ_TX_DESC_HWORD_WIDTH 32 +#define ERF_DZ_TX_DESC_LWORD_LBN 0 +#define ERF_DZ_TX_DESC_LWORD_WIDTH 32 + +/* DRIVER_EV */ +#define ESF_DZ_DRV_CODE_LBN 60 +#define ESF_DZ_DRV_CODE_WIDTH 4 +#define ESF_DZ_DRV_SUB_CODE_LBN 56 +#define ESF_DZ_DRV_SUB_CODE_WIDTH 4 +#define ESE_DZ_DRV_TIMER_EV 3 +#define ESE_DZ_DRV_START_UP_EV 2 +#define ESE_DZ_DRV_WAKE_UP_EV 1 +#define ESF_DZ_DRV_SUB_DATA_LBN 0 +#define ESF_DZ_DRV_SUB_DATA_WIDTH 56 +#define ESF_DZ_DRV_EVQ_ID_LBN 0 +#define ESF_DZ_DRV_EVQ_ID_WIDTH 14 +#define ESF_DZ_DRV_TMR_ID_LBN 0 +#define ESF_DZ_DRV_TMR_ID_WIDTH 14 + +/* EVENT_ENTRY */ +#define ESF_DZ_EV_CODE_LBN 60 +#define ESF_DZ_EV_CODE_WIDTH 4 +#define ESE_DZ_EV_CODE_MCDI_EV 12 +#define ESE_DZ_EV_CODE_DRIVER_EV 5 +#define ESE_DZ_EV_CODE_TX_EV 2 +#define ESE_DZ_EV_CODE_RX_EV 0 +#define ESE_DZ_OTHER other +#define ESF_DZ_EV_DATA_LBN 0 +#define ESF_DZ_EV_DATA_WIDTH 60 + +/* MC_EVENT */ +#define ESF_DZ_MC_CODE_LBN 60 +#define ESF_DZ_MC_CODE_WIDTH 4 +#define ESF_DZ_MC_OVERRIDE_HOLDOFF_LBN 59 +#define ESF_DZ_MC_OVERRIDE_HOLDOFF_WIDTH 1 +#define ESF_DZ_MC_DROP_EVENT_LBN 58 +#define ESF_DZ_MC_DROP_EVENT_WIDTH 1 +#define ESF_DZ_MC_SOFT_LBN 0 +#define ESF_DZ_MC_SOFT_WIDTH 58 + +/* RX_EVENT */ +#define ESF_DZ_RX_CODE_LBN 60 +#define ESF_DZ_RX_CODE_WIDTH 4 +#define ESF_DZ_RX_OVERRIDE_HOLDOFF_LBN 59 +#define ESF_DZ_RX_OVERRIDE_HOLDOFF_WIDTH 1 +#define ESF_DZ_RX_DROP_EVENT_LBN 58 +#define ESF_DZ_RX_DROP_EVENT_WIDTH 1 +#define ESF_DZ_RX_EV_RSVD2_LBN 54 +#define ESF_DZ_RX_EV_RSVD2_WIDTH 4 +#define 
ESF_DZ_RX_EV_SOFT2_LBN 52 +#define ESF_DZ_RX_EV_SOFT2_WIDTH 2 +#define ESF_DZ_RX_DSC_PTR_LBITS_LBN 48 +#define ESF_DZ_RX_DSC_PTR_LBITS_WIDTH 4 +#define ESF_DZ_RX_L4_CLASS_LBN 45 +#define ESF_DZ_RX_L4_CLASS_WIDTH 3 +#define ESE_DZ_L4_CLASS_RSVD7 7 +#define ESE_DZ_L4_CLASS_RSVD6 6 +#define ESE_DZ_L4_CLASS_RSVD5 5 +#define ESE_DZ_L4_CLASS_RSVD4 4 +#define ESE_DZ_L4_CLASS_RSVD3 3 +#define ESE_DZ_L4_CLASS_UDP 2 +#define ESE_DZ_L4_CLASS_TCP 1 +#define ESE_DZ_L4_CLASS_UNKNOWN 0 +#define ESF_DZ_RX_L3_CLASS_LBN 42 +#define ESF_DZ_RX_L3_CLASS_WIDTH 3 +#define ESE_DZ_L3_CLASS_RSVD7 7 +#define ESE_DZ_L3_CLASS_IP6_FRAG 6 +#define ESE_DZ_L3_CLASS_ARP 5 +#define ESE_DZ_L3_CLASS_IP4_FRAG 4 +#define ESE_DZ_L3_CLASS_FCOE 3 +#define ESE_DZ_L3_CLASS_IP6 2 +#define ESE_DZ_L3_CLASS_IP4 1 +#define ESE_DZ_L3_CLASS_UNKNOWN 0 +#define ESF_DZ_RX_ETH_TAG_CLASS_LBN 39 +#define ESF_DZ_RX_ETH_TAG_CLASS_WIDTH 3 +#define ESE_DZ_ETH_TAG_CLASS_RSVD7 7 +#define ESE_DZ_ETH_TAG_CLASS_RSVD6 6 +#define ESE_DZ_ETH_TAG_CLASS_RSVD5 5 +#define ESE_DZ_ETH_TAG_CLASS_RSVD4 4 +#define ESE_DZ_ETH_TAG_CLASS_RSVD3 3 +#define ESE_DZ_ETH_TAG_CLASS_VLAN2 2 +#define ESE_DZ_ETH_TAG_CLASS_VLAN1 1 +#define ESE_DZ_ETH_TAG_CLASS_NONE 0 +#define ESF_DZ_RX_ETH_BASE_CLASS_LBN 36 +#define ESF_DZ_RX_ETH_BASE_CLASS_WIDTH 3 +#define ESE_DZ_ETH_BASE_CLASS_LLC_SNAP 2 +#define ESE_DZ_ETH_BASE_CLASS_LLC 1 +#define ESE_DZ_ETH_BASE_CLASS_ETH2 0 +#define ESF_DZ_RX_MAC_CLASS_LBN 35 +#define ESF_DZ_RX_MAC_CLASS_WIDTH 1 +#define ESE_DZ_MAC_CLASS_MCAST 1 +#define ESE_DZ_MAC_CLASS_UCAST 0 +#define ESF_DZ_RX_EV_SOFT1_LBN 32 +#define ESF_DZ_RX_EV_SOFT1_WIDTH 3 +#define ESF_DZ_RX_EV_RSVD1_LBN 31 +#define ESF_DZ_RX_EV_RSVD1_WIDTH 1 +#define ESF_DZ_RX_ABORT_LBN 30 +#define ESF_DZ_RX_ABORT_WIDTH 1 +#define ESF_DZ_RX_ECC_ERR_LBN 29 +#define ESF_DZ_RX_ECC_ERR_WIDTH 1 +#define ESF_DZ_RX_CRC1_ERR_LBN 28 +#define ESF_DZ_RX_CRC1_ERR_WIDTH 1 +#define ESF_DZ_RX_CRC0_ERR_LBN 27 +#define ESF_DZ_RX_CRC0_ERR_WIDTH 1 +#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN 26 +#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_WIDTH 1 +#define ESF_DZ_RX_IPCKSUM_ERR_LBN 25 +#define ESF_DZ_RX_IPCKSUM_ERR_WIDTH 1 +#define ESF_DZ_RX_ECRC_ERR_LBN 24 +#define ESF_DZ_RX_ECRC_ERR_WIDTH 1 +#define ESF_DZ_RX_QLABEL_LBN 16 +#define ESF_DZ_RX_QLABEL_WIDTH 5 +#define ESF_DZ_RX_PARSE_INCOMPLETE_LBN 15 +#define ESF_DZ_RX_PARSE_INCOMPLETE_WIDTH 1 +#define ESF_DZ_RX_CONT_LBN 14 +#define ESF_DZ_RX_CONT_WIDTH 1 +#define ESF_DZ_RX_BYTES_LBN 0 +#define ESF_DZ_RX_BYTES_WIDTH 14 + +/* RX_KER_DESC */ +#define ESF_DZ_RX_KER_RESERVED_LBN 62 +#define ESF_DZ_RX_KER_RESERVED_WIDTH 2 +#define ESF_DZ_RX_KER_BYTE_CNT_LBN 48 +#define ESF_DZ_RX_KER_BYTE_CNT_WIDTH 14 +#define ESF_DZ_RX_KER_BUF_ADDR_LBN 0 +#define ESF_DZ_RX_KER_BUF_ADDR_WIDTH 48 + +/* RX_USER_DESC */ +#define ESF_DZ_RX_USR_RESERVED_LBN 62 +#define ESF_DZ_RX_USR_RESERVED_WIDTH 2 +#define ESF_DZ_RX_USR_BYTE_CNT_LBN 48 +#define ESF_DZ_RX_USR_BYTE_CNT_WIDTH 14 +#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_LBN 44 +#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_WIDTH 4 +#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10 +#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8 +#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4 +#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0 +#define ESF_DZ_RX_USR_BUF_ID_OFFSET_LBN 0 +#define ESF_DZ_RX_USR_BUF_ID_OFFSET_WIDTH 44 +#define ESF_DZ_RX_USR_4KBPS_BUF_ID_LBN 12 +#define ESF_DZ_RX_USR_4KBPS_BUF_ID_WIDTH 32 +#define ESF_DZ_RX_USR_64KBPS_BUF_ID_LBN 16 +#define ESF_DZ_RX_USR_64KBPS_BUF_ID_WIDTH 28 +#define ESF_DZ_RX_USR_1MBPS_BUF_ID_LBN 20 +#define ESF_DZ_RX_USR_1MBPS_BUF_ID_WIDTH 24 +#define ESF_DZ_RX_USR_4MBPS_BUF_ID_LBN 22 
+#define ESF_DZ_RX_USR_4MBPS_BUF_ID_WIDTH 22 +#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_LBN 0 +#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_WIDTH 22 +#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_LBN 0 +#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_WIDTH 20 +#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_LBN 0 +#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_WIDTH 16 +#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_LBN 0 +#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_WIDTH 12 + +/* TX_CSUM_TSTAMP_DESC */ +#define ESF_DZ_TX_DESC_IS_OPT_LBN 63 +#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1 +#define ESF_DZ_TX_OPTION_TYPE_LBN 60 +#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3 +#define ESE_DZ_TX_OPTION_DESC_TSO 7 +#define ESE_DZ_TX_OPTION_DESC_VLAN 6 +#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0 +#define ESF_DZ_TX_TIMESTAMP_LBN 5 +#define ESF_DZ_TX_TIMESTAMP_WIDTH 1 +#define ESF_DZ_TX_OPTION_CRC_MODE_LBN 2 +#define ESF_DZ_TX_OPTION_CRC_MODE_WIDTH 3 +#define ESE_DZ_TX_OPTION_CRC_FCOIP_MPA 5 +#define ESE_DZ_TX_OPTION_CRC_FCOIP_FCOE 4 +#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR_AND_PYLD 3 +#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR 2 +#define ESE_DZ_TX_OPTION_CRC_FCOE 1 +#define ESE_DZ_TX_OPTION_CRC_OFF 0 +#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_LBN 1 +#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_WIDTH 1 +#define ESF_DZ_TX_OPTION_IP_CSUM_LBN 0 +#define ESF_DZ_TX_OPTION_IP_CSUM_WIDTH 1 + +/* TX_EVENT */ +#define ESF_DZ_TX_CODE_LBN 60 +#define ESF_DZ_TX_CODE_WIDTH 4 +#define ESF_DZ_TX_OVERRIDE_HOLDOFF_LBN 59 +#define ESF_DZ_TX_OVERRIDE_HOLDOFF_WIDTH 1 +#define ESF_DZ_TX_DROP_EVENT_LBN 58 +#define ESF_DZ_TX_DROP_EVENT_WIDTH 1 +#define ESF_DZ_TX_EV_RSVD_LBN 48 +#define ESF_DZ_TX_EV_RSVD_WIDTH 10 +#define ESF_DZ_TX_SOFT2_LBN 32 +#define ESF_DZ_TX_SOFT2_WIDTH 16 +#define ESF_DZ_TX_CAN_MERGE_LBN 31 +#define ESF_DZ_TX_CAN_MERGE_WIDTH 1 +#define ESF_DZ_TX_SOFT1_LBN 24 +#define ESF_DZ_TX_SOFT1_WIDTH 7 +#define ESF_DZ_TX_QLABEL_LBN 16 +#define ESF_DZ_TX_QLABEL_WIDTH 5 +#define ESF_DZ_TX_DESCR_INDX_LBN 0 +#define ESF_DZ_TX_DESCR_INDX_WIDTH 16 + +/* TX_KER_DESC */ +#define ESF_DZ_TX_KER_TYPE_LBN 63 +#define ESF_DZ_TX_KER_TYPE_WIDTH 1 +#define ESF_DZ_TX_KER_CONT_LBN 62 +#define ESF_DZ_TX_KER_CONT_WIDTH 1 +#define ESF_DZ_TX_KER_BYTE_CNT_LBN 48 +#define ESF_DZ_TX_KER_BYTE_CNT_WIDTH 14 +#define ESF_DZ_TX_KER_BUF_ADDR_LBN 0 +#define ESF_DZ_TX_KER_BUF_ADDR_WIDTH 48 + +/* TX_PIO_DESC */ +#define ESF_DZ_TX_PIO_TYPE_LBN 63 +#define ESF_DZ_TX_PIO_TYPE_WIDTH 1 +#define ESF_DZ_TX_PIO_OPT_LBN 60 +#define ESF_DZ_TX_PIO_OPT_WIDTH 3 +#define ESF_DZ_TX_PIO_CONT_LBN 59 +#define ESF_DZ_TX_PIO_CONT_WIDTH 1 +#define ESF_DZ_TX_PIO_BYTE_CNT_LBN 32 +#define ESF_DZ_TX_PIO_BYTE_CNT_WIDTH 12 +#define ESF_DZ_TX_PIO_BUF_ADDR_LBN 0 +#define ESF_DZ_TX_PIO_BUF_ADDR_WIDTH 12 + +/* TX_TSO_DESC */ +#define ESF_DZ_TX_DESC_IS_OPT_LBN 63 +#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1 +#define ESF_DZ_TX_OPTION_TYPE_LBN 60 +#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3 +#define ESE_DZ_TX_OPTION_DESC_TSO 7 +#define ESE_DZ_TX_OPTION_DESC_VLAN 6 +#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0 +#define ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48 +#define ESF_DZ_TX_TSO_TCP_FLAGS_WIDTH 8 +#define ESF_DZ_TX_TSO_IP_ID_LBN 32 +#define ESF_DZ_TX_TSO_IP_ID_WIDTH 16 +#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0 +#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32 + +/* TX_USER_DESC */ +#define ESF_DZ_TX_USR_TYPE_LBN 63 +#define ESF_DZ_TX_USR_TYPE_WIDTH 1 +#define ESF_DZ_TX_USR_CONT_LBN 62 +#define ESF_DZ_TX_USR_CONT_WIDTH 1 +#define ESF_DZ_TX_USR_BYTE_CNT_LBN 48 +#define ESF_DZ_TX_USR_BYTE_CNT_WIDTH 14 +#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_LBN 44 +#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_WIDTH 4 
+#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10 +#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8 +#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4 +#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0 +#define ESF_DZ_TX_USR_BUF_ID_OFFSET_LBN 0 +#define ESF_DZ_TX_USR_BUF_ID_OFFSET_WIDTH 44 +#define ESF_DZ_TX_USR_4KBPS_BUF_ID_LBN 12 +#define ESF_DZ_TX_USR_4KBPS_BUF_ID_WIDTH 32 +#define ESF_DZ_TX_USR_64KBPS_BUF_ID_LBN 16 +#define ESF_DZ_TX_USR_64KBPS_BUF_ID_WIDTH 28 +#define ESF_DZ_TX_USR_1MBPS_BUF_ID_LBN 20 +#define ESF_DZ_TX_USR_1MBPS_BUF_ID_WIDTH 24 +#define ESF_DZ_TX_USR_4MBPS_BUF_ID_LBN 22 +#define ESF_DZ_TX_USR_4MBPS_BUF_ID_WIDTH 22 +#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_LBN 0 +#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_WIDTH 22 +#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_LBN 0 +#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_WIDTH 20 +#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_LBN 0 +#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_WIDTH 16 +#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_LBN 0 +#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_WIDTH 12 +/*************************************************************************/ + +/* TX_DESC_UPD_REG: Transmit descriptor update register. + * We may write just one dword of these registers. + */ +#define ER_DZ_TX_DESC_UPD_DWORD (ER_DZ_TX_DESC_UPD + 2 * 4) +#define ERF_DZ_TX_DESC_WPTR_DWORD_LBN (ERF_DZ_TX_DESC_WPTR_LBN - 2 * 32) +#define ERF_DZ_TX_DESC_WPTR_DWORD_WIDTH ERF_DZ_TX_DESC_WPTR_WIDTH + +/* The workaround for bug 35388 requires multiplexing writes through + * the TX_DESC_UPD_DWORD address. + * TX_DESC_UPD: 0ppppppppppp (bit 11 lost) + * EVQ_RPTR: 1000hhhhhhhh, 1001llllllll (split into high and low bits) + * EVQ_TMR: 11mmvvvvvvvv (bits 8:13 of value lost) + */ +#define ER_DD_EVQ_INDIRECT ER_DZ_TX_DESC_UPD_DWORD +#define ERF_DD_EVQ_IND_RPTR_FLAGS_LBN 8 +#define ERF_DD_EVQ_IND_RPTR_FLAGS_WIDTH 4 +#define EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH 8 +#define EFE_DD_EVQ_IND_RPTR_FLAGS_LOW 9 +#define ERF_DD_EVQ_IND_RPTR_LBN 0 +#define ERF_DD_EVQ_IND_RPTR_WIDTH 8 +#define ERF_DD_EVQ_IND_TIMER_FLAGS_LBN 10 +#define ERF_DD_EVQ_IND_TIMER_FLAGS_WIDTH 2 +#define EFE_DD_EVQ_IND_TIMER_FLAGS 3 +#define ERF_DD_EVQ_IND_TIMER_MODE_LBN 8 +#define ERF_DD_EVQ_IND_TIMER_MODE_WIDTH 2 +#define ERF_DD_EVQ_IND_TIMER_VAL_LBN 0 +#define ERF_DD_EVQ_IND_TIMER_VAL_WIDTH 8 + +/* TX_PIOBUF + * PIO buffer aperture (paged) + */ +#define ER_DZ_TX_PIOBUF 4096 +#define ER_DZ_TX_PIOBUF_SIZE 2048 + +/* RX packet prefix */ +#define ES_DZ_RX_PREFIX_HASH_OFST 0 +#define ES_DZ_RX_PREFIX_VLAN1_OFST 4 +#define ES_DZ_RX_PREFIX_VLAN2_OFST 6 +#define ES_DZ_RX_PREFIX_PKTLEN_OFST 8 +#define ES_DZ_RX_PREFIX_TSTAMP_OFST 10 +#define ES_DZ_RX_PREFIX_SIZE 14 + +#endif /* EFX_EF10_REGS_H */ diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h index 19e8b95..416ee4a 100644 --- a/drivers/net/ethernet/sfc/io.h +++ b/drivers/net/ethernet/sfc/io.h @@ -20,7 +20,7 @@ * ************************************************************************** * - * Notes on locking strategy: + * Notes on locking strategy for the Falcon architecture: * * Many CSRs are very wide and cannot be read or written atomically. * Writes from the host are buffered by the Bus Interface Unit (BIU) @@ -54,6 +54,12 @@ * register while the collector already holds values for some other * register, the write is discarded and the collector maintains its * current state. + * + * The EF10 architecture exposes very few registers to the host and + * most of them are only 32 bits wide. 
The only exceptions are the MC + * doorbell register pair, which has its own latching, and + * TX_DESC_UPD, which works in a similar way to the Falcon + * architecture. */ #if BITS_PER_LONG == 64 @@ -237,8 +243,8 @@ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value, BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \ page) -/* Write a page-mapped 32-bit CSR (EVQ_RPTR or the high bits of - * RX_DESC_UPD or TX_DESC_UPD) +/* Write a page-mapped 32-bit CSR (EVQ_RPTR, EVQ_TMR (EF10), or the + * high bits of RX_DESC_UPD or TX_DESC_UPD) */ static inline void _efx_writed_page(struct efx_nic *efx, const efx_dword_t *value, @@ -249,8 +255,12 @@ _efx_writed_page(struct efx_nic *efx, const efx_dword_t *value, #define efx_writed_page(efx, value, reg, page) \ _efx_writed_page(efx, value, \ reg + \ - BUILD_BUG_ON_ZERO((reg) != 0x400 && (reg) != 0x83c \ - && (reg) != 0xa1c), \ + BUILD_BUG_ON_ZERO((reg) != 0x400 && \ + (reg) != 0x420 && \ + (reg) != 0x830 && \ + (reg) != 0x83c && \ + (reg) != 0xa18 && \ + (reg) != 0xa1c), \ page) /* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug -- cgit v0.10.2 From bedca866f854daa1f6b9a9c3e94949567cdc3409 Mon Sep 17 00:00:00 2001 From: Matthew Slattery Date: Fri, 23 Aug 2013 17:32:55 +0100 Subject: sfc: Allocate NVRAM partition ID range for PHY images Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index 9e824f7..b5cf624 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -3799,6 +3799,10 @@ #define NVRAM_PARTITION_TYPE_DUMP 0x800 /* enum: Application license key storage partition */ #define NVRAM_PARTITION_TYPE_LICENSE 0x900 +/* enum: Start of range used for PHY partitions (low 8 bits are the PHY ID) */ +#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00 +/* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */ +#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff /* enum: Start of reserved value range (firmware may use for any purpose) */ #define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00 /* enum: End of reserved value range (firmware may use for any purpose) */ -- cgit v0.10.2 From 4c75b43a7795671a52a002034d370ea1352f95c8 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Thu, 29 Aug 2013 19:04:03 +0100 Subject: sfc: Make efx_mcdi_{init,fini}() call efx_mcdi_drv_attach() This should be done during MCDI initialisation for any NIC. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 8150781..d8a20f5 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -48,6 +48,8 @@ struct efx_mcdi_async_param { }; static void efx_mcdi_timeout_async(unsigned long context); +static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, + bool *was_attached_out); static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) { @@ -58,6 +60,8 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) int efx_mcdi_init(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi; + bool already_attached; + int rc; efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL); if (!efx->mcdi) @@ -78,12 +82,37 @@ int efx_mcdi_init(struct efx_nic *efx) mcdi->new_epoch = true; /* Recover from a failed assertion before probing */ - return efx_mcdi_handle_assertion(efx); + rc = efx_mcdi_handle_assertion(efx); + if (rc) + return rc; + + /* Let the MC (and BMC, if this is a LOM) know that the driver + * is loaded. 
We should do this before we reset the NIC. + */ + rc = efx_mcdi_drv_attach(efx, true, &already_attached); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "Unable to register driver with MCPU\n"); + return rc; + } + if (already_attached) + /* Not a fatal error */ + netif_err(efx, probe, efx->net_dev, + "Host already registered with MCPU\n"); + + return 0; } void efx_mcdi_fini(struct efx_nic *efx) { - BUG_ON(efx->mcdi && efx->mcdi->iface.state != MCDI_STATE_QUIESCENT); + if (!efx->mcdi) + return; + + BUG_ON(efx->mcdi->iface.state != MCDI_STATE_QUIESCENT); + + /* Relinquish the device (back to the BMC, if this is a LOM) */ + efx_mcdi_drv_attach(efx, false, NULL); + kfree(efx->mcdi); } @@ -889,8 +918,8 @@ fail: buf[0] = 0; } -int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, - bool *was_attached) +static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, + bool *was_attached) { MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN); MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN); diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index e37cf1d..0ca00a6 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -273,8 +273,6 @@ extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field) extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len); -extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, - bool *was_attached_out); extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, u16 *fw_subtype_list, u32 *capabilities); extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 1500405..e8eef63 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -196,7 +196,6 @@ static unsigned int siena_mem_map_size(struct efx_nic *efx) static int siena_probe_nic(struct efx_nic *efx) { struct siena_nic_data *nic_data; - bool already_attached = false; efx_oword_t reg; int rc; @@ -222,19 +221,6 @@ static int siena_probe_nic(struct efx_nic *efx) if (rc) goto fail1; - /* Let the BMC know that the driver is now in charge of link and - * filter settings. 
We must do this before we reset the NIC */ - rc = efx_mcdi_drv_attach(efx, true, &already_attached); - if (rc) { - netif_err(efx, probe, efx->net_dev, - "Unable to register driver with MCPU\n"); - goto fail2; - } - if (already_attached) - /* Not a fatal error */ - netif_err(efx, probe, efx->net_dev, - "Host already registered with MCPU\n"); - /* Now we can reset the NIC */ rc = efx_mcdi_reset(efx, RESET_TYPE_ALL); if (rc) { @@ -281,8 +267,6 @@ fail5: efx_nic_free_buffer(efx, &efx->irq_status); fail4: fail3: - efx_mcdi_drv_attach(efx, false, NULL); -fail2: efx_mcdi_fini(efx); fail1: kfree(efx->nic_data); @@ -371,14 +355,11 @@ static void siena_remove_nic(struct efx_nic *efx) efx_mcdi_reset(efx, RESET_TYPE_ALL); - /* Relinquish the device back to the BMC */ - efx_mcdi_drv_attach(efx, false, NULL); + efx_mcdi_fini(efx); /* Tear down the private nic state */ kfree(efx->nic_data); efx->nic_data = NULL; - - efx_mcdi_fini(efx); } #define SIENA_DMA_STAT(ext_name, mcdi_name) \ -- cgit v0.10.2 From 8127d661e77f5ec410093bce411f540afa34593f Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Thu, 29 Aug 2013 19:19:29 +0100 Subject: sfc: Add support for Solarflare SFC9100 family This adds support for the EF10 network controller architecture and the SFC9100 family, starting with SFC9120 'Farmingdale', and bumps the driver version to 4.0. New features in the SFC9100 family include: - Flexible allocation of internal resources to PCIe physical and virtual functions under firmware control - RX event merging to reduce DMA writes at high packet rates - Integrated RX timestamping - PIO buffers for lower TX latency - Firmware-driven data path that supports additional offload features and filter types - Delivery of packets between functions and to multiple recipients, allowing firmware to implement a vswitch - Multiple RX flow hash (RSS) contexts with their own hash keys and indirection tables - 40G MAC (single port only) ...not all of which are enabled in this initial driver or the initial firmware release. Much of the new code is by Jon Cooper. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig index 4136ccc..8b71525 100644 --- a/drivers/net/ethernet/sfc/Kconfig +++ b/drivers/net/ethernet/sfc/Kconfig @@ -1,5 +1,5 @@ config SFC - tristate "Solarflare SFC4000/SFC9000-family support" + tristate "Solarflare SFC4000/SFC9000/SFC9100-family support" depends on PCI select MDIO select CRC32 @@ -8,12 +8,13 @@ config SFC select PTP_1588_CLOCK ---help--- This driver supports 10-gigabit Ethernet cards based on - the Solarflare SFC4000 and SFC9000-family controllers. + the Solarflare SFC4000, SFC9000-family and SFC9100-family + controllers. To compile this driver as a module, choose M here. The module will be called sfc. config SFC_MTD - bool "Solarflare SFC4000/SFC9000-family MTD support" + bool "Solarflare SFC4000/SFC9000/SFC9100-family MTD support" depends on SFC && MTD && !(SFC=y && MTD=m) default y ---help--- @@ -21,7 +22,7 @@ config SFC_MTD (e.g. /dev/mtd1). This is required to update the firmware or the boot configuration under Linux. 
config SFC_MCDI_MON - bool "Solarflare SFC9000-family hwmon support" + bool "Solarflare SFC9000/SFC9100-family hwmon support" depends on SFC && HWMON && !(SFC=y && HWMON=m) default y ---help--- diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile index a612726..3a83c0d 100644 --- a/drivers/net/ethernet/sfc/Makefile +++ b/drivers/net/ethernet/sfc/Makefile @@ -1,5 +1,5 @@ -sfc-y += efx.o nic.o farch.o falcon.o siena.o tx.o rx.o \ - selftest.o ethtool.o qt202x_phy.o mdio_10g.o \ +sfc-y += efx.o nic.o farch.o falcon.o siena.o ef10.o tx.o \ + rx.o selftest.o ethtool.o qt202x_phy.o mdio_10g.o \ tenxpress.o txc43128_phy.o falcon_boards.o \ mcdi.o mcdi_port.o mcdi_mon.o ptp.o sfc-$(CONFIG_SFC_MTD) += mtd.o diff --git a/drivers/net/ethernet/sfc/bitfield.h b/drivers/net/ethernet/sfc/bitfield.h index 5400a33..f45b0db 100644 --- a/drivers/net/ethernet/sfc/bitfield.h +++ b/drivers/net/ethernet/sfc/bitfield.h @@ -29,6 +29,10 @@ /* Lowest bit numbers and widths */ #define EFX_DUMMY_FIELD_LBN 0 #define EFX_DUMMY_FIELD_WIDTH 0 +#define EFX_WORD_0_LBN 0 +#define EFX_WORD_0_WIDTH 16 +#define EFX_WORD_1_LBN 16 +#define EFX_WORD_1_WIDTH 16 #define EFX_DWORD_0_LBN 0 #define EFX_DWORD_0_WIDTH 32 #define EFX_DWORD_1_LBN 32 diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c new file mode 100644 index 0000000..5f42313 --- /dev/null +++ b/drivers/net/ethernet/sfc/ef10.c @@ -0,0 +1,3043 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2012-2013 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include "net_driver.h" +#include "ef10_regs.h" +#include "io.h" +#include "mcdi.h" +#include "mcdi_pcol.h" +#include "nic.h" +#include "workarounds.h" +#include +#include +#include +#include + +/* Hardware control for EF10 architecture including 'Huntington'. */ + +#define EFX_EF10_DRVGEN_EV 7 +enum { + EFX_EF10_TEST = 1, + EFX_EF10_REFILL, +}; + +/* The reserved RSS context value */ +#define EFX_EF10_RSS_CONTEXT_INVALID 0xffffffff + +/* The filter table(s) are managed by firmware and we have write-only + * access. When removing filters we must identify them to the + * firmware by a 64-bit handle, but this is too wide for Linux kernel + * interfaces (32-bit for RX NFC, 16-bit for RFS). Also, we need to + * be able to tell in advance whether a requested insertion will + * replace an existing filter. Therefore we maintain a software hash + * table, which should be at least as large as the hardware hash + * table. + * + * Huntington has a single 8K filter table shared between all filter + * types and both ports. + */ +#define HUNT_FILTER_TBL_ROWS 8192 + +struct efx_ef10_filter_table { +/* The RX match field masks supported by this fw & hw, in order of priority */ + enum efx_filter_match_flags rx_match_flags[ + MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM]; + unsigned int rx_match_count; + + struct { + unsigned long spec; /* pointer to spec plus flag bits */ +/* BUSY flag indicates that an update is in progress. STACK_OLD is + * used to mark and sweep stack-owned MAC filters. 
+ */ +#define EFX_EF10_FILTER_FLAG_BUSY 1UL +#define EFX_EF10_FILTER_FLAG_STACK_OLD 2UL +#define EFX_EF10_FILTER_FLAGS 3UL + u64 handle; /* firmware handle */ + } *entry; + wait_queue_head_t waitq; +/* Shadow of net_device address lists, guarded by mac_lock */ +#define EFX_EF10_FILTER_STACK_UC_MAX 32 +#define EFX_EF10_FILTER_STACK_MC_MAX 256 + struct { + u8 addr[ETH_ALEN]; + u16 id; + } stack_uc_list[EFX_EF10_FILTER_STACK_UC_MAX], + stack_mc_list[EFX_EF10_FILTER_STACK_MC_MAX]; + int stack_uc_count; /* negative for PROMISC */ + int stack_mc_count; /* negative for PROMISC/ALLMULTI */ +}; + +/* An arbitrary search limit for the software hash table */ +#define EFX_EF10_FILTER_SEARCH_LIMIT 200 + +static void efx_ef10_rx_push_indir_table(struct efx_nic *efx); +static void efx_ef10_rx_free_indir_table(struct efx_nic *efx); +static void efx_ef10_filter_table_remove(struct efx_nic *efx); + +static int efx_ef10_get_warm_boot_count(struct efx_nic *efx) +{ + efx_dword_t reg; + + efx_readd(efx, ®, ER_DZ_BIU_MC_SFT_STATUS); + return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ? + EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO; +} + +static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx) +{ + return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]); +} + +static int efx_ef10_init_capabilities(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + size_t outlen; + int rc; + + BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + + if (outlen >= sizeof(outbuf)) { + nic_data->datapath_caps = + MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1); + if (!(nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) { + netif_err(efx, drv, efx->net_dev, + "Capabilities don't indicate TSO support.\n"); + return -ENODEV; + } + } + + return 0; +} + +static int efx_ef10_get_sysclk_freq(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN); + int rc; + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0, + outbuf, sizeof(outbuf), NULL); + if (rc) + return rc; + rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ); + return rc > 0 ? rc : -ERANGE; +} + +static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN); + size_t outlen; + int rc; + + BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) + return -EIO; + + memcpy(mac_address, + MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE), ETH_ALEN); + return 0; +} + +static int efx_ef10_probe(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data; + int i, rc; + + /* We can have one VI for each 8K region. However we need + * multiple TX queues per channel. + */ + efx->max_channels = + min_t(unsigned int, + EFX_MAX_CHANNELS, + resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) / + (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES)); + BUG_ON(efx->max_channels == 0); + + nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); + if (!nic_data) + return -ENOMEM; + efx->nic_data = nic_data; + + rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf, + 8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL); + if (rc) + goto fail1; + + /* Get the MC's warm boot count. In case it's rebooting right + * now, be prepared to retry. 
+ */ + i = 0; + for (;;) { + rc = efx_ef10_get_warm_boot_count(efx); + if (rc >= 0) + break; + if (++i == 5) + goto fail2; + ssleep(1); + } + nic_data->warm_boot_count = rc; + + nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; + + /* In case we're recovering from a crash (kexec), we want to + * cancel any outstanding request by the previous user of this + * function. We send a special message using the least + * significant bits of the 'high' (doorbell) register. + */ + _efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD); + + rc = efx_mcdi_init(efx); + if (rc) + goto fail2; + + /* Reset (most) configuration for this function */ + rc = efx_mcdi_reset(efx, RESET_TYPE_ALL); + if (rc) + goto fail3; + + /* Enable event logging */ + rc = efx_mcdi_log_ctrl(efx, true, false, 0); + if (rc) + goto fail3; + + rc = efx_ef10_init_capabilities(efx); + if (rc < 0) + goto fail3; + + efx->rx_packet_len_offset = + ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE; + + if (!(nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) { + netif_err(efx, probe, efx->net_dev, + "current firmware does not support an RX prefix\n"); + rc = -ENODEV; + goto fail3; + } + + rc = efx_mcdi_port_get_number(efx); + if (rc < 0) + goto fail3; + efx->port_num = rc; + + rc = efx_ef10_get_mac_address(efx, efx->net_dev->perm_addr); + if (rc) + goto fail3; + + rc = efx_ef10_get_sysclk_freq(efx); + if (rc < 0) + goto fail3; + efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */ + + /* Check whether firmware supports bug 35388 workaround */ + rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true); + if (rc == 0) + nic_data->workaround_35388 = true; + else if (rc != -ENOSYS && rc != -ENOENT) + goto fail3; + netif_dbg(efx, probe, efx->net_dev, + "workaround for bug 35388 is %sabled\n", + nic_data->workaround_35388 ? 
"en" : "dis"); + + rc = efx_mcdi_mon_probe(efx); + if (rc) + goto fail3; + + efx_ptp_probe(efx); + + return 0; + +fail3: + efx_mcdi_fini(efx); +fail2: + efx_nic_free_buffer(efx, &nic_data->mcdi_buf); +fail1: + kfree(nic_data); + efx->nic_data = NULL; + return rc; +} + +static int efx_ef10_free_vis(struct efx_nic *efx) +{ + int rc = efx_mcdi_rpc(efx, MC_CMD_FREE_VIS, NULL, 0, NULL, 0, NULL); + + /* -EALREADY means nothing to free, so ignore */ + if (rc == -EALREADY) + rc = 0; + return rc; +} + +static void efx_ef10_remove(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + int rc; + + efx_mcdi_mon_remove(efx); + + /* This needs to be after efx_ptp_remove_channel() with no filters */ + efx_ef10_rx_free_indir_table(efx); + + rc = efx_ef10_free_vis(efx); + WARN_ON(rc != 0); + + efx_mcdi_fini(efx); + efx_nic_free_buffer(efx, &nic_data->mcdi_buf); + kfree(nic_data); +} + +static int efx_ef10_alloc_vis(struct efx_nic *efx, + unsigned int min_vis, unsigned int max_vis) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis); + MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis); + rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc != 0) + return rc; + + if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN) + return -EIO; + + netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n", + MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE)); + + nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE); + nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT); + return 0; +} + +static int efx_ef10_dimension_resources(struct efx_nic *efx) +{ + unsigned int n_vis = + max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); + + return efx_ef10_alloc_vis(efx, n_vis, n_vis); +} + +static int efx_ef10_init_nic(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + int rc; + + if (nic_data->must_realloc_vis) { + /* We cannot let the number of VIs change now */ + rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis, + nic_data->n_allocated_vis); + if (rc) + return rc; + nic_data->must_realloc_vis = false; + } + + efx_ef10_rx_push_indir_table(efx); + return 0; +} + +static int efx_ef10_map_reset_flags(u32 *flags) +{ + enum { + EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) << + ETH_RESET_SHARED_SHIFT), + EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER | + ETH_RESET_OFFLOAD | ETH_RESET_MAC | + ETH_RESET_PHY | ETH_RESET_MGMT) << + ETH_RESET_SHARED_SHIFT) + }; + + /* We assume for now that our PCI function is permitted to + * reset everything. 
+ */ + + if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) { + *flags &= ~EF10_RESET_MC; + return RESET_TYPE_WORLD; + } + + if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) { + *flags &= ~EF10_RESET_PORT; + return RESET_TYPE_ALL; + } + + /* no invisible reset implemented */ + + return -EINVAL; +} + +#define EF10_DMA_STAT(ext_name, mcdi_name) \ + [EF10_STAT_ ## ext_name] = \ + { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name } +#define EF10_DMA_INVIS_STAT(int_name, mcdi_name) \ + [EF10_STAT_ ## int_name] = \ + { NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name } +#define EF10_OTHER_STAT(ext_name) \ + [EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 } + +static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { + EF10_DMA_STAT(tx_bytes, TX_BYTES), + EF10_DMA_STAT(tx_packets, TX_PKTS), + EF10_DMA_STAT(tx_pause, TX_PAUSE_PKTS), + EF10_DMA_STAT(tx_control, TX_CONTROL_PKTS), + EF10_DMA_STAT(tx_unicast, TX_UNICAST_PKTS), + EF10_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS), + EF10_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS), + EF10_DMA_STAT(tx_lt64, TX_LT64_PKTS), + EF10_DMA_STAT(tx_64, TX_64_PKTS), + EF10_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS), + EF10_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS), + EF10_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS), + EF10_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS), + EF10_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS), + EF10_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS), + EF10_DMA_STAT(rx_bytes, RX_BYTES), + EF10_DMA_INVIS_STAT(rx_bytes_minus_good_bytes, RX_BAD_BYTES), + EF10_OTHER_STAT(rx_good_bytes), + EF10_OTHER_STAT(rx_bad_bytes), + EF10_DMA_STAT(rx_packets, RX_PKTS), + EF10_DMA_STAT(rx_good, RX_GOOD_PKTS), + EF10_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS), + EF10_DMA_STAT(rx_pause, RX_PAUSE_PKTS), + EF10_DMA_STAT(rx_control, RX_CONTROL_PKTS), + EF10_DMA_STAT(rx_unicast, RX_UNICAST_PKTS), + EF10_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS), + EF10_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS), + EF10_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS), + EF10_DMA_STAT(rx_64, RX_64_PKTS), + EF10_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS), + EF10_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS), + EF10_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS), + EF10_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS), + EF10_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS), + EF10_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS), + EF10_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS), + EF10_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS), + EF10_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS), + EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS), + EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS), + EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS), +}; + +#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \ + (1ULL << EF10_STAT_tx_packets) | \ + (1ULL << EF10_STAT_tx_pause) | \ + (1ULL << EF10_STAT_tx_unicast) | \ + (1ULL << EF10_STAT_tx_multicast) | \ + (1ULL << EF10_STAT_tx_broadcast) | \ + (1ULL << EF10_STAT_rx_bytes) | \ + (1ULL << EF10_STAT_rx_bytes_minus_good_bytes) | \ + (1ULL << EF10_STAT_rx_good_bytes) | \ + (1ULL << EF10_STAT_rx_bad_bytes) | \ + (1ULL << EF10_STAT_rx_packets) | \ + (1ULL << EF10_STAT_rx_good) | \ + (1ULL << EF10_STAT_rx_bad) | \ + (1ULL << EF10_STAT_rx_pause) | \ + (1ULL << EF10_STAT_rx_control) | \ + (1ULL << EF10_STAT_rx_unicast) | \ + (1ULL << EF10_STAT_rx_multicast) | \ + (1ULL << EF10_STAT_rx_broadcast) | \ + (1ULL << EF10_STAT_rx_lt64) | \ + (1ULL << EF10_STAT_rx_64) | \ + (1ULL << EF10_STAT_rx_65_to_127) | \ + (1ULL << EF10_STAT_rx_128_to_255) | \ + (1ULL << EF10_STAT_rx_256_to_511) | 
\ + (1ULL << EF10_STAT_rx_512_to_1023) | \ + (1ULL << EF10_STAT_rx_1024_to_15xx) | \ + (1ULL << EF10_STAT_rx_15xx_to_jumbo) | \ + (1ULL << EF10_STAT_rx_gtjumbo) | \ + (1ULL << EF10_STAT_rx_bad_gtjumbo) | \ + (1ULL << EF10_STAT_rx_overflow) | \ + (1ULL << EF10_STAT_rx_nodesc_drops)) + +/* These statistics are only provided by the 10G MAC. For a 10G/40G + * switchable port we do not expose these because they might not + * include all the packets they should. + */ +#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_tx_control) | \ + (1ULL << EF10_STAT_tx_lt64) | \ + (1ULL << EF10_STAT_tx_64) | \ + (1ULL << EF10_STAT_tx_65_to_127) | \ + (1ULL << EF10_STAT_tx_128_to_255) | \ + (1ULL << EF10_STAT_tx_256_to_511) | \ + (1ULL << EF10_STAT_tx_512_to_1023) | \ + (1ULL << EF10_STAT_tx_1024_to_15xx) | \ + (1ULL << EF10_STAT_tx_15xx_to_jumbo)) + +/* These statistics are only provided by the 40G MAC. For a 10G/40G + * switchable port we do expose these because the errors will otherwise + * be silent. + */ +#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \ + (1ULL << EF10_STAT_rx_length_error)) + +#if BITS_PER_LONG == 64 +#define STAT_MASK_BITMAP(bits) (bits) +#else +#define STAT_MASK_BITMAP(bits) (bits) & 0xffffffff, (bits) >> 32 +#endif + +static const unsigned long *efx_ef10_stat_mask(struct efx_nic *efx) +{ + static const unsigned long hunt_40g_stat_mask[] = { + STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK | + HUNT_40G_EXTRA_STAT_MASK) + }; + static const unsigned long hunt_10g_only_stat_mask[] = { + STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK | + HUNT_10G_ONLY_STAT_MASK) + }; + u32 port_caps = efx_mcdi_phy_get_caps(efx); + + if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) + return hunt_40g_stat_mask; + else + return hunt_10g_only_stat_mask; +} + +static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names) +{ + return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, + efx_ef10_stat_mask(efx), names); +} + +static int efx_ef10_try_update_nic_stats(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + const unsigned long *stats_mask = efx_ef10_stat_mask(efx); + __le64 generation_start, generation_end; + u64 *stats = nic_data->stats; + __le64 *dma_stats; + + dma_stats = efx->stats_buffer.addr; + nic_data = efx->nic_data; + + generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; + if (generation_end == EFX_MC_STATS_GENERATION_INVALID) + return 0; + rmb(); + efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, stats_mask, + stats, efx->stats_buffer.addr, false); + generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; + if (generation_end != generation_start) + return -EAGAIN; + + /* Update derived statistics */ + stats[EF10_STAT_rx_good_bytes] = + stats[EF10_STAT_rx_bytes] - + stats[EF10_STAT_rx_bytes_minus_good_bytes]; + efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes], + stats[EF10_STAT_rx_bytes_minus_good_bytes]); + + return 0; +} + + +static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats, + struct rtnl_link_stats64 *core_stats) +{ + const unsigned long *mask = efx_ef10_stat_mask(efx); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + u64 *stats = nic_data->stats; + size_t stats_count = 0, index; + int retry; + + /* If we're unlucky enough to read statistics during the DMA, wait + * up to 10ms for it to finish (typically takes <500us) + */ + for (retry = 0; retry < 100; ++retry) { + if (efx_ef10_try_update_nic_stats(efx) == 0) + break; + udelay(100); + } + + if (full_stats) { + for_each_set_bit(index, 
mask, EF10_STAT_COUNT) { + if (efx_ef10_stat_desc[index].name) { + *full_stats++ = stats[index]; + ++stats_count; + } + } + } + + if (core_stats) { + core_stats->rx_packets = stats[EF10_STAT_rx_packets]; + core_stats->tx_packets = stats[EF10_STAT_tx_packets]; + core_stats->rx_bytes = stats[EF10_STAT_rx_bytes]; + core_stats->tx_bytes = stats[EF10_STAT_tx_bytes]; + core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops]; + core_stats->multicast = stats[EF10_STAT_rx_multicast]; + core_stats->rx_length_errors = + stats[EF10_STAT_rx_gtjumbo] + + stats[EF10_STAT_rx_length_error]; + core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad]; + core_stats->rx_frame_errors = stats[EF10_STAT_rx_align_error]; + core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow]; + core_stats->rx_errors = (core_stats->rx_length_errors + + core_stats->rx_crc_errors + + core_stats->rx_frame_errors); + } + + return stats_count; +} + +static void efx_ef10_push_irq_moderation(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + unsigned int mode, value; + efx_dword_t timer_cmd; + + if (channel->irq_moderation) { + mode = 3; + value = channel->irq_moderation - 1; + } else { + mode = 0; + value = 0; + } + + if (EFX_EF10_WORKAROUND_35388(efx)) { + EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS, + EFE_DD_EVQ_IND_TIMER_FLAGS, + ERF_DD_EVQ_IND_TIMER_MODE, mode, + ERF_DD_EVQ_IND_TIMER_VAL, value); + efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT, + channel->channel); + } else { + EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode, + ERF_DZ_TC_TIMER_VAL, value); + efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR, + channel->channel); + } +} + +static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) +{ + wol->supported = 0; + wol->wolopts = 0; + memset(&wol->sopass, 0, sizeof(wol->sopass)); +} + +static int efx_ef10_set_wol(struct efx_nic *efx, u32 type) +{ + if (type != 0) + return -EINVAL; + return 0; +} + +static void efx_ef10_mcdi_request(struct efx_nic *efx, + const efx_dword_t *hdr, size_t hdr_len, + const efx_dword_t *sdu, size_t sdu_len) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + u8 *pdu = nic_data->mcdi_buf.addr; + + memcpy(pdu, hdr, hdr_len); + memcpy(pdu + hdr_len, sdu, sdu_len); + wmb(); + + /* The hardware provides 'low' and 'high' (doorbell) registers + * for passing the 64-bit address of an MCDI request to + * firmware. However the dwords are swapped by firmware. The + * least significant bits of the doorbell are then 0 for all + * MCDI requests due to alignment. + */ + _efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32), + ER_DZ_MC_DB_LWRD); + _efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr), + ER_DZ_MC_DB_HWRD); +} + +static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr; + + rmb(); + return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE); +} + +static void +efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf, + size_t offset, size_t outlen) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + const u8 *pdu = nic_data->mcdi_buf.addr; + + memcpy(outbuf, pdu + offset, outlen); +} + +static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + int rc; + + rc = efx_ef10_get_warm_boot_count(efx); + if (rc < 0) { + /* The firmware is presumably in the process of + * rebooting. 
However, we are supposed to report each + * reboot just once, so we must only do that once we + * can read and store the updated warm boot count. + */ + return 0; + } + + if (rc == nic_data->warm_boot_count) + return 0; + + nic_data->warm_boot_count = rc; + + /* All our allocations have been reset */ + nic_data->must_realloc_vis = true; + nic_data->must_restore_filters = true; + nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; + + return -EIO; +} + +/* Handle an MSI interrupt + * + * Handle an MSI hardware interrupt. This routine schedules event + * queue processing. No interrupt acknowledgement cycle is necessary. + * Also, we never need to check that the interrupt is for us, since + * MSI interrupts cannot be shared. + */ +static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id) +{ + struct efx_msi_context *context = dev_id; + struct efx_nic *efx = context->efx; + + netif_vdbg(efx, intr, efx->net_dev, + "IRQ %d on CPU %d\n", irq, raw_smp_processor_id()); + + if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) { + /* Note test interrupts */ + if (context->index == efx->irq_level) + efx->last_irq_cpu = raw_smp_processor_id(); + + /* Schedule processing of the channel */ + efx_schedule_channel_irq(efx->channel[context->index]); + } + + return IRQ_HANDLED; +} + +static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id) +{ + struct efx_nic *efx = dev_id; + bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); + struct efx_channel *channel; + efx_dword_t reg; + u32 queues; + + /* Read the ISR which also ACKs the interrupts */ + efx_readd(efx, ®, ER_DZ_BIU_INT_ISR); + queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG); + + if (queues == 0) + return IRQ_NONE; + + if (likely(soft_enabled)) { + /* Note test interrupts */ + if (queues & (1U << efx->irq_level)) + efx->last_irq_cpu = raw_smp_processor_id(); + + efx_for_each_channel(channel, efx) { + if (queues & 1) + efx_schedule_channel_irq(channel); + queues >>= 1; + } + } + + netif_vdbg(efx, intr, efx->net_dev, + "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", + irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); + + return IRQ_HANDLED; +} + +static void efx_ef10_irq_test_generate(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN); + + BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0); + + MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level); + (void) efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT, + inbuf, sizeof(inbuf), NULL, 0, NULL); +} + +static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue) +{ + return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf, + (tx_queue->ptr_mask + 1) * + sizeof(efx_qword_t), + GFP_KERNEL); +} + +/* This writes to the TX_DESC_WPTR and also pushes data */ +static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue, + const efx_qword_t *txd) +{ + unsigned int write_ptr; + efx_oword_t reg; + + write_ptr = tx_queue->write_count & tx_queue->ptr_mask; + EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr); + reg.qword[0] = *txd; + efx_writeo_page(tx_queue->efx, ®, + ER_DZ_TX_DESC_UPD, tx_queue->queue); +} + +static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / + EFX_BUF_SIZE)); + MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_TXQ_OUT_LEN); + bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; + size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE; + struct efx_channel *channel = tx_queue->channel; + struct efx_nic *efx = tx_queue->efx; + size_t 
inlen, outlen; + dma_addr_t dma_addr; + efx_qword_t *txd; + int rc; + int i; + + MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1); + MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel); + MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue); + MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue); + MCDI_POPULATE_DWORD_2(inbuf, INIT_TXQ_IN_FLAGS, + INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload, + INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload); + MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0); + MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); + + dma_addr = tx_queue->txd.buf.dma_addr; + + netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n", + tx_queue->queue, entries, (u64)dma_addr); + + for (i = 0; i < entries; ++i) { + MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr); + dma_addr += EFX_BUF_SIZE; + } + + inlen = MC_CMD_INIT_TXQ_IN_LEN(entries); + + rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + + /* A previous user of this TX queue might have set us up the + * bomb by writing a descriptor to the TX push collector but + * not the doorbell. (Each collector belongs to a port, not a + * queue or function, so cannot easily be reset.) We must + * attempt to push a no-op descriptor in its place. + */ + tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION; + tx_queue->insert_count = 1; + txd = efx_tx_desc(tx_queue, 0); + EFX_POPULATE_QWORD_4(*txd, + ESF_DZ_TX_DESC_IS_OPT, true, + ESF_DZ_TX_OPTION_TYPE, + ESE_DZ_TX_OPTION_DESC_CRC_CSUM, + ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload, + ESF_DZ_TX_OPTION_IP_CSUM, csum_offload); + tx_queue->write_count = 1; + wmb(); + efx_ef10_push_tx_desc(tx_queue, txd); + + return; + +fail: + WARN_ON(true); + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); +} + +static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_TXQ_OUT_LEN); + struct efx_nic *efx = tx_queue->efx; + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE, + tx_queue->queue); + + rc = efx_mcdi_rpc(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + + if (rc && rc != -EALREADY) + goto fail; + + return; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); +} + +static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue) +{ + efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf); +} + +/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ +static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue) +{ + unsigned int write_ptr; + efx_dword_t reg; + + write_ptr = tx_queue->write_count & tx_queue->ptr_mask; + EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr); + efx_writed_page(tx_queue->efx, ®, + ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue); +} + +static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue) +{ + unsigned int old_write_count = tx_queue->write_count; + struct efx_tx_buffer *buffer; + unsigned int write_ptr; + efx_qword_t *txd; + + BUG_ON(tx_queue->write_count == tx_queue->insert_count); + + do { + write_ptr = tx_queue->write_count & tx_queue->ptr_mask; + buffer = &tx_queue->buffer[write_ptr]; + txd = efx_tx_desc(tx_queue, write_ptr); + ++tx_queue->write_count; + + /* Create TX descriptor ring entry */ + if (buffer->flags & EFX_TX_BUF_OPTION) { + *txd = buffer->option; + } else { + 
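+			/* Plain DMA descriptor: CONT flag, byte count and buffer address */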
BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); + EFX_POPULATE_QWORD_3( + *txd, + ESF_DZ_TX_KER_CONT, + buffer->flags & EFX_TX_BUF_CONT, + ESF_DZ_TX_KER_BYTE_CNT, buffer->len, + ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr); + } + } while (tx_queue->write_count != tx_queue->insert_count); + + wmb(); /* Ensure descriptors are written before they are fetched */ + + if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) { + txd = efx_tx_desc(tx_queue, + old_write_count & tx_queue->ptr_mask); + efx_ef10_push_tx_desc(tx_queue, txd); + ++tx_queue->pushes; + } else { + efx_ef10_notify_tx_desc(tx_queue); + } +} + +static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID, + EVB_PORT_ID_ASSIGNED); + MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, + MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE); + MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, + EFX_MAX_CHANNELS); + + rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc != 0) + return rc; + + if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN) + return -EIO; + + *context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID); + + return 0; +} + +static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN); + int rc; + + MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID, + context); + + rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf), + NULL, 0, NULL); + WARN_ON(rc != 0); +} + +static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context) +{ + MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN); + MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN); + int i, rc; + + MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID, + context); + BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != + MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN); + + for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i) + MCDI_PTR(tablebuf, + RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] = + (u8) efx->rx_indir_table[i]; + + rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf, + sizeof(tablebuf), NULL, 0, NULL); + if (rc != 0) + return rc; + + MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID, + context); + BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) != + MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); + for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i) + MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = + efx->rx_hash_key[i]; + + return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf, + sizeof(keybuf), NULL, 0, NULL); +} + +static void efx_ef10_rx_free_indir_table(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID) + efx_ef10_free_rss_context(efx, nic_data->rx_rss_context); + nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; +} + +static void efx_ef10_rx_push_indir_table(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + int rc; + + netif_dbg(efx, drv, efx->net_dev, "pushing RX indirection table\n"); + + if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) { + rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context); + if (rc != 0) + goto fail; + } + + rc = 
efx_ef10_populate_rss_table(efx, nic_data->rx_rss_context); + if (rc != 0) + goto fail; + + return; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); +} + +static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue) +{ + return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf, + (rx_queue->ptr_mask + 1) * + sizeof(efx_qword_t), + GFP_KERNEL); +} + +static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue) +{ + MCDI_DECLARE_BUF(inbuf, + MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / + EFX_BUF_SIZE)); + MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_RXQ_OUT_LEN); + struct efx_channel *channel = efx_rx_queue_channel(rx_queue); + size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE; + struct efx_nic *efx = rx_queue->efx; + size_t inlen, outlen; + dma_addr_t dma_addr; + int rc; + int i; + + rx_queue->scatter_n = 0; + rx_queue->scatter_len = 0; + + MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1); + MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel); + MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue)); + MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE, + efx_rx_queue_index(rx_queue)); + MCDI_POPULATE_DWORD_1(inbuf, INIT_RXQ_IN_FLAGS, + INIT_RXQ_IN_FLAG_PREFIX, 1); + MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0); + MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); + + dma_addr = rx_queue->rxd.buf.dma_addr; + + netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n", + efx_rx_queue_index(rx_queue), entries, (u64)dma_addr); + + for (i = 0; i < entries; ++i) { + MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr); + dma_addr += EFX_BUF_SIZE; + } + + inlen = MC_CMD_INIT_RXQ_IN_LEN(entries); + + rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + + return; + +fail: + WARN_ON(true); + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); +} + +static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_RXQ_OUT_LEN); + struct efx_nic *efx = rx_queue->efx; + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE, + efx_rx_queue_index(rx_queue)); + + rc = efx_mcdi_rpc(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + + if (rc && rc != -EALREADY) + goto fail; + + return; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); +} + +static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue) +{ + efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf); +} + +/* This creates an entry in the RX descriptor queue */ +static inline void +efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) +{ + struct efx_rx_buffer *rx_buf; + efx_qword_t *rxd; + + rxd = efx_rx_desc(rx_queue, index); + rx_buf = efx_rx_buffer(rx_queue, index); + EFX_POPULATE_QWORD_2(*rxd, + ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len, + ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); +} + +static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue) +{ + struct efx_nic *efx = rx_queue->efx; + unsigned int write_count; + efx_dword_t reg; + + /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */ + write_count = rx_queue->added_count & ~7; + if (rx_queue->notified_count == write_count) + return; + + do + efx_ef10_build_rx_desc( + rx_queue, + rx_queue->notified_count & rx_queue->ptr_mask); + while (++rx_queue->notified_count != write_count); + + wmb(); + 
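+	/* Tell the NIC about the new descriptors by advancing RX_DESC_WPTR */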
+	EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR,
+			     write_count & rx_queue->ptr_mask);
+	efx_writed_page(efx, &reg, ER_DZ_RX_DESC_UPD,
+			efx_rx_queue_index(rx_queue));
+}
+
+static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete;
+
+static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue)
+{
+	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
+	efx_qword_t event;
+
+	EFX_POPULATE_QWORD_2(event,
+			     ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
+			     ESF_DZ_EV_DATA, EFX_EF10_REFILL);
+
+	MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
+
+	/* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
+	 * already swapped the data to little-endian order.
+	 */
+	memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
+	       sizeof(efx_qword_t));
+
+	efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT,
+			   inbuf, sizeof(inbuf), 0,
+			   efx_ef10_rx_defer_refill_complete, 0);
+}
+
+static void
+efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
+				  int rc, efx_dword_t *outbuf,
+				  size_t outlen_actual)
+{
+	/* nothing to do */
+}
+
+static int efx_ef10_ev_probe(struct efx_channel *channel)
+{
+	return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
+				    (channel->eventq_mask + 1) *
+				    sizeof(efx_qword_t),
+				    GFP_KERNEL);
+}
+
+static int efx_ef10_ev_init(struct efx_channel *channel)
+{
+	MCDI_DECLARE_BUF(inbuf,
+			 MC_CMD_INIT_EVQ_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
+						EFX_BUF_SIZE));
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN);
+	size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
+	struct efx_nic *efx = channel->efx;
+	struct efx_ef10_nic_data *nic_data;
+	bool supports_rx_merge;
+	size_t inlen, outlen;
+	dma_addr_t dma_addr;
+	int rc;
+	int i;
+
+	nic_data = efx->nic_data;
+	supports_rx_merge =
+		!!(nic_data->datapath_caps &
+		   1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
+
+	/* Fill event queue with all ones (i.e.
empty events) */ + memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len); + + MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1); + MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel); + /* INIT_EVQ expects index in vector table, not absolute */ + MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel); + MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS, + INIT_EVQ_IN_FLAG_INTERRUPTING, 1, + INIT_EVQ_IN_FLAG_RX_MERGE, 1, + INIT_EVQ_IN_FLAG_TX_MERGE, 1, + INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_merge); + MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE, + MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS); + MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0); + MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0); + MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE, + MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS); + MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0); + + dma_addr = channel->eventq.buf.dma_addr; + for (i = 0; i < entries; ++i) { + MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr); + dma_addr += EFX_BUF_SIZE; + } + + inlen = MC_CMD_INIT_EVQ_IN_LEN(entries); + + rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + + /* IRQ return is ignored */ + + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + +static void efx_ef10_ev_fini(struct efx_channel *channel) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_EVQ_OUT_LEN); + struct efx_nic *efx = channel->efx; + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel); + + rc = efx_mcdi_rpc(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + + if (rc && rc != -EALREADY) + goto fail; + + return; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); +} + +static void efx_ef10_ev_remove(struct efx_channel *channel) +{ + efx_nic_free_buffer(channel->efx, &channel->eventq.buf); +} + +static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue, + unsigned int rx_queue_label) +{ + struct efx_nic *efx = rx_queue->efx; + + netif_info(efx, hw, efx->net_dev, + "rx event arrived on queue %d labeled as queue %u\n", + efx_rx_queue_index(rx_queue), rx_queue_label); + + efx_schedule_reset(efx, RESET_TYPE_DISABLE); +} + +static void +efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue, + unsigned int actual, unsigned int expected) +{ + unsigned int dropped = (actual - expected) & rx_queue->ptr_mask; + struct efx_nic *efx = rx_queue->efx; + + netif_info(efx, hw, efx->net_dev, + "dropped %d events (index=%d expected=%d)\n", + dropped, actual, expected); + + efx_schedule_reset(efx, RESET_TYPE_DISABLE); +} + +/* partially received RX was aborted. clean up. 
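+ * The buffers already posted for the frame are handed back with
+ * EFX_RX_PKT_DISCARD and counted as a no-descriptor truncation.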
*/ +static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue) +{ + unsigned int rx_desc_ptr; + + WARN_ON(rx_queue->scatter_n == 0); + + netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev, + "scattered RX aborted (dropping %u buffers)\n", + rx_queue->scatter_n); + + rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask; + + efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n, + 0, EFX_RX_PKT_DISCARD); + + rx_queue->removed_count += rx_queue->scatter_n; + rx_queue->scatter_n = 0; + rx_queue->scatter_len = 0; + ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc; +} + +static int efx_ef10_handle_rx_event(struct efx_channel *channel, + const efx_qword_t *event) +{ + unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class; + unsigned int n_descs, n_packets, i; + struct efx_nic *efx = channel->efx; + struct efx_rx_queue *rx_queue; + bool rx_cont; + u16 flags = 0; + + if (unlikely(ACCESS_ONCE(efx->reset_pending))) + return 0; + + /* Basic packet information */ + rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES); + next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS); + rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL); + rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS); + rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT); + + WARN_ON(EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT)); + + rx_queue = efx_channel_get_rx_queue(channel); + + if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue))) + efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label); + + n_descs = ((next_ptr_lbits - rx_queue->removed_count) & + ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1)); + + if (n_descs != rx_queue->scatter_n + 1) { + /* detect rx abort */ + if (unlikely(n_descs == rx_queue->scatter_n)) { + WARN_ON(rx_bytes != 0); + efx_ef10_handle_rx_abort(rx_queue); + return 0; + } + + if (unlikely(rx_queue->scatter_n != 0)) { + /* Scattered packet completions cannot be + * merged, so something has gone wrong. 
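+			 * Report the descriptor-pointer mismatch and
+			 * disable the NIC.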
+ */ + efx_ef10_handle_rx_bad_lbits( + rx_queue, next_ptr_lbits, + (rx_queue->removed_count + + rx_queue->scatter_n + 1) & + ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1)); + return 0; + } + + /* Merged completion for multiple non-scattered packets */ + rx_queue->scatter_n = 1; + rx_queue->scatter_len = 0; + n_packets = n_descs; + ++channel->n_rx_merge_events; + channel->n_rx_merge_packets += n_packets; + flags |= EFX_RX_PKT_PREFIX_LEN; + } else { + ++rx_queue->scatter_n; + rx_queue->scatter_len += rx_bytes; + if (rx_cont) + return 0; + n_packets = 1; + } + + if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR))) + flags |= EFX_RX_PKT_DISCARD; + + if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR))) { + channel->n_rx_ip_hdr_chksum_err += n_packets; + } else if (unlikely(EFX_QWORD_FIELD(*event, + ESF_DZ_RX_TCPUDP_CKSUM_ERR))) { + channel->n_rx_tcp_udp_chksum_err += n_packets; + } else if (rx_l4_class == ESE_DZ_L4_CLASS_TCP || + rx_l4_class == ESE_DZ_L4_CLASS_UDP) { + flags |= EFX_RX_PKT_CSUMMED; + } + + if (rx_l4_class == ESE_DZ_L4_CLASS_TCP) + flags |= EFX_RX_PKT_TCP; + + channel->irq_mod_score += 2 * n_packets; + + /* Handle received packet(s) */ + for (i = 0; i < n_packets; i++) { + efx_rx_packet(rx_queue, + rx_queue->removed_count & rx_queue->ptr_mask, + rx_queue->scatter_n, rx_queue->scatter_len, + flags); + rx_queue->removed_count += rx_queue->scatter_n; + } + + rx_queue->scatter_n = 0; + rx_queue->scatter_len = 0; + + return n_packets; +} + +static int +efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) +{ + struct efx_nic *efx = channel->efx; + struct efx_tx_queue *tx_queue; + unsigned int tx_ev_desc_ptr; + unsigned int tx_ev_q_label; + int tx_descs = 0; + + if (unlikely(ACCESS_ONCE(efx->reset_pending))) + return 0; + + if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT))) + return 0; + + /* Transmit completion */ + tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX); + tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL); + tx_queue = efx_channel_get_tx_queue(channel, + tx_ev_q_label % EFX_TXQ_TYPES); + tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) & + tx_queue->ptr_mask); + efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask); + + return tx_descs; +} + +static void +efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) +{ + struct efx_nic *efx = channel->efx; + int subcode; + + subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE); + + switch (subcode) { + case ESE_DZ_DRV_TIMER_EV: + case ESE_DZ_DRV_WAKE_UP_EV: + break; + case ESE_DZ_DRV_START_UP_EV: + /* event queue init complete. ok. */ + break; + default: + netif_err(efx, hw, efx->net_dev, + "channel %d unknown driver event type %d" + " (data " EFX_QWORD_FMT ")\n", + channel->channel, subcode, + EFX_QWORD_VAL(*event)); + + } +} + +static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel, + efx_qword_t *event) +{ + struct efx_nic *efx = channel->efx; + u32 subcode; + + subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0); + + switch (subcode) { + case EFX_EF10_TEST: + channel->event_test_cpu = raw_smp_processor_id(); + break; + case EFX_EF10_REFILL: + /* The queue must be empty, so we won't receive any rx + * events, so efx_process_channel() won't refill the + * queue. 
Refill it here + */ + efx_fast_push_rx_descriptors(&channel->rx_queue); + break; + default: + netif_err(efx, hw, efx->net_dev, + "channel %d unknown driver event type %u" + " (data " EFX_QWORD_FMT ")\n", + channel->channel, (unsigned) subcode, + EFX_QWORD_VAL(*event)); + } +} + +static int efx_ef10_ev_process(struct efx_channel *channel, int quota) +{ + struct efx_nic *efx = channel->efx; + efx_qword_t event, *p_event; + unsigned int read_ptr; + int ev_code; + int tx_descs = 0; + int spent = 0; + + read_ptr = channel->eventq_read_ptr; + + for (;;) { + p_event = efx_event(channel, read_ptr); + event = *p_event; + + if (!efx_event_present(&event)) + break; + + EFX_SET_QWORD(*p_event); + + ++read_ptr; + + ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE); + + netif_vdbg(efx, drv, efx->net_dev, + "processing event on %d " EFX_QWORD_FMT "\n", + channel->channel, EFX_QWORD_VAL(event)); + + switch (ev_code) { + case ESE_DZ_EV_CODE_MCDI_EV: + efx_mcdi_process_event(channel, &event); + break; + case ESE_DZ_EV_CODE_RX_EV: + spent += efx_ef10_handle_rx_event(channel, &event); + if (spent >= quota) { + /* XXX can we split a merged event to + * avoid going over-quota? + */ + spent = quota; + goto out; + } + break; + case ESE_DZ_EV_CODE_TX_EV: + tx_descs += efx_ef10_handle_tx_event(channel, &event); + if (tx_descs > efx->txq_entries) { + spent = quota; + goto out; + } else if (++spent == quota) { + goto out; + } + break; + case ESE_DZ_EV_CODE_DRIVER_EV: + efx_ef10_handle_driver_event(channel, &event); + if (++spent == quota) + goto out; + break; + case EFX_EF10_DRVGEN_EV: + efx_ef10_handle_driver_generated_event(channel, &event); + break; + default: + netif_err(efx, hw, efx->net_dev, + "channel %d unknown event type %d" + " (data " EFX_QWORD_FMT ")\n", + channel->channel, ev_code, + EFX_QWORD_VAL(event)); + } + } + +out: + channel->eventq_read_ptr = read_ptr; + return spent; +} + +static void efx_ef10_ev_read_ack(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + efx_dword_t rptr; + + if (EFX_EF10_WORKAROUND_35388(efx)) { + BUILD_BUG_ON(EFX_MIN_EVQ_SIZE < + (1 << ERF_DD_EVQ_IND_RPTR_WIDTH)); + BUILD_BUG_ON(EFX_MAX_EVQ_SIZE > + (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH)); + + EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS, + EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH, + ERF_DD_EVQ_IND_RPTR, + (channel->eventq_read_ptr & + channel->eventq_mask) >> + ERF_DD_EVQ_IND_RPTR_WIDTH); + efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT, + channel->channel); + EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS, + EFE_DD_EVQ_IND_RPTR_FLAGS_LOW, + ERF_DD_EVQ_IND_RPTR, + channel->eventq_read_ptr & + ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1)); + efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT, + channel->channel); + } else { + EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR, + channel->eventq_read_ptr & + channel->eventq_mask); + efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel); + } +} + +static void efx_ef10_ev_test_generate(struct efx_channel *channel) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); + struct efx_nic *efx = channel->efx; + efx_qword_t event; + int rc; + + EFX_POPULATE_QWORD_2(event, + ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV, + ESF_DZ_EV_DATA, EFX_EF10_TEST); + + MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); + + /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has + * already swapped the data to little-endian order. 
+ */ + memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], + sizeof(efx_qword_t)); + + rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf), + NULL, 0, NULL); + if (rc != 0) + goto fail; + + return; + +fail: + WARN_ON(true); + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); +} + +void efx_ef10_handle_drain_event(struct efx_nic *efx) +{ + if (atomic_dec_and_test(&efx->active_queues)) + wake_up(&efx->flush_wq); + + WARN_ON(atomic_read(&efx->active_queues) < 0); +} + +static int efx_ef10_fini_dmaq(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + int pending; + + /* If the MC has just rebooted, the TX/RX queues will have already been + * torn down, but efx->active_queues needs to be set to zero. + */ + if (nic_data->must_realloc_vis) { + atomic_set(&efx->active_queues, 0); + return 0; + } + + /* Do not attempt to write to the NIC during EEH recovery */ + if (efx->state != STATE_RECOVERY) { + efx_for_each_channel(channel, efx) { + efx_for_each_channel_rx_queue(rx_queue, channel) + efx_ef10_rx_fini(rx_queue); + efx_for_each_channel_tx_queue(tx_queue, channel) + efx_ef10_tx_fini(tx_queue); + } + + wait_event_timeout(efx->flush_wq, + atomic_read(&efx->active_queues) == 0, + msecs_to_jiffies(EFX_MAX_FLUSH_TIME)); + pending = atomic_read(&efx->active_queues); + if (pending) { + netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n", + pending); + return -ETIMEDOUT; + } + } + + return 0; +} + +static bool efx_ef10_filter_equal(const struct efx_filter_spec *left, + const struct efx_filter_spec *right) +{ + if ((left->match_flags ^ right->match_flags) | + ((left->flags ^ right->flags) & + (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX))) + return false; + + return memcmp(&left->outer_vid, &right->outer_vid, + sizeof(struct efx_filter_spec) - + offsetof(struct efx_filter_spec, outer_vid)) == 0; +} + +static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec) +{ + BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3); + return jhash2((const u32 *)&spec->outer_vid, + (sizeof(struct efx_filter_spec) - + offsetof(struct efx_filter_spec, outer_vid)) / 4, + 0); + /* XXX should we randomise the initval? */ +} + +/* Decide whether a filter should be exclusive or else should allow + * delivery to additional recipients. Currently we decide that + * filters for specific local unicast MAC and IP addresses are + * exclusive. 
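+ * Exclusive filters are pushed to firmware with the INSERT op; all
+ * others use SUBSCRIBE so that further recipients can be added.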
+ */ +static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec) +{ + if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC && + !is_multicast_ether_addr(spec->loc_mac)) + return true; + + if ((spec->match_flags & + (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) == + (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) { + if (spec->ether_type == htons(ETH_P_IP) && + !ipv4_is_multicast(spec->loc_host[0])) + return true; + if (spec->ether_type == htons(ETH_P_IPV6) && + ((const u8 *)spec->loc_host)[0] != 0xff) + return true; + } + + return false; +} + +static struct efx_filter_spec * +efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table, + unsigned int filter_idx) +{ + return (struct efx_filter_spec *)(table->entry[filter_idx].spec & + ~EFX_EF10_FILTER_FLAGS); +} + +static unsigned int +efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table, + unsigned int filter_idx) +{ + return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS; +} + +static void +efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table, + unsigned int filter_idx, + const struct efx_filter_spec *spec, + unsigned int flags) +{ + table->entry[filter_idx].spec = (unsigned long)spec | flags; +} + +static void efx_ef10_filter_push_prep(struct efx_nic *efx, + const struct efx_filter_spec *spec, + efx_dword_t *inbuf, u64 handle, + bool replacing) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN); + + if (replacing) { + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, + MC_CMD_FILTER_OP_IN_OP_REPLACE); + MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle); + } else { + u32 match_fields = 0; + + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, + efx_ef10_filter_is_exclusive(spec) ? + MC_CMD_FILTER_OP_IN_OP_INSERT : + MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE); + + /* Convert match flags and values. Unlike almost + * everything else in MCDI, these fields are in + * network byte order. + */ + if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) + match_fields |= + is_multicast_ether_addr(spec->loc_mac) ? + 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN : + 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN; +#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \ + if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \ + match_fields |= \ + 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ + mcdi_field ## _LBN; \ + BUILD_BUG_ON( \ + MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \ + sizeof(spec->gen_field)); \ + memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \ + &spec->gen_field, sizeof(spec->gen_field)); \ + } + COPY_FIELD(REM_HOST, rem_host, SRC_IP); + COPY_FIELD(LOC_HOST, loc_host, DST_IP); + COPY_FIELD(REM_MAC, rem_mac, SRC_MAC); + COPY_FIELD(REM_PORT, rem_port, SRC_PORT); + COPY_FIELD(LOC_MAC, loc_mac, DST_MAC); + COPY_FIELD(LOC_PORT, loc_port, DST_PORT); + COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE); + COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN); + COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN); + COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO); +#undef COPY_FIELD + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS, + match_fields); + } + + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST, + spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ? 
+ MC_CMD_FILTER_OP_IN_RX_DEST_DROP : + MC_CMD_FILTER_OP_IN_RX_DEST_HOST); + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST, + MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT); + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, spec->dmaq_id); + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE, + (spec->flags & EFX_FILTER_FLAG_RX_RSS) ? + MC_CMD_FILTER_OP_IN_RX_MODE_RSS : + MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE); + if (spec->flags & EFX_FILTER_FLAG_RX_RSS) + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT, + spec->rss_context != + EFX_FILTER_RSS_CONTEXT_DEFAULT ? + spec->rss_context : nic_data->rx_rss_context); +} + +static int efx_ef10_filter_push(struct efx_nic *efx, + const struct efx_filter_spec *spec, + u64 *handle, bool replacing) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN); + int rc; + + efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing); + rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); + if (rc == 0) + *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE); + return rc; +} + +static int efx_ef10_filter_rx_match_pri(struct efx_ef10_filter_table *table, + enum efx_filter_match_flags match_flags) +{ + unsigned int match_pri; + + for (match_pri = 0; + match_pri < table->rx_match_count; + match_pri++) + if (table->rx_match_flags[match_pri] == match_flags) + return match_pri; + + return -EPROTONOSUPPORT; +} + +static s32 efx_ef10_filter_insert(struct efx_nic *efx, + struct efx_filter_spec *spec, + bool replace_equal) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); + struct efx_filter_spec *saved_spec; + unsigned int match_pri, hash; + unsigned int priv_flags; + bool replacing = false; + int ins_index = -1; + DEFINE_WAIT(wait); + bool is_mc_recip; + s32 rc; + + /* For now, only support RX filters */ + if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) != + EFX_FILTER_FLAG_RX) + return -EINVAL; + + rc = efx_ef10_filter_rx_match_pri(table, spec->match_flags); + if (rc < 0) + return rc; + match_pri = rc; + + hash = efx_ef10_filter_hash(spec); + is_mc_recip = efx_filter_is_mc_recipient(spec); + if (is_mc_recip) + bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); + + /* Find any existing filters with the same match tuple or + * else a free slot to insert at. If any of them are busy, + * we have to wait and retry. 
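+	 * The table is probed linearly from the hash bucket for up to
+	 * EFX_EF10_FILTER_SEARCH_LIMIT slots.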
+ */ + for (;;) { + unsigned int depth = 1; + unsigned int i; + + spin_lock_bh(&efx->filter_lock); + + for (;;) { + i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); + saved_spec = efx_ef10_filter_entry_spec(table, i); + + if (!saved_spec) { + if (ins_index < 0) + ins_index = i; + } else if (efx_ef10_filter_equal(spec, saved_spec)) { + if (table->entry[i].spec & + EFX_EF10_FILTER_FLAG_BUSY) + break; + if (spec->priority < saved_spec->priority && + !(saved_spec->priority == + EFX_FILTER_PRI_REQUIRED && + saved_spec->flags & + EFX_FILTER_FLAG_RX_STACK)) { + rc = -EPERM; + goto out_unlock; + } + if (!is_mc_recip) { + /* This is the only one */ + if (spec->priority == + saved_spec->priority && + !replace_equal) { + rc = -EEXIST; + goto out_unlock; + } + ins_index = i; + goto found; + } else if (spec->priority > + saved_spec->priority || + (spec->priority == + saved_spec->priority && + replace_equal)) { + if (ins_index < 0) + ins_index = i; + else + __set_bit(depth, mc_rem_map); + } + } + + /* Once we reach the maximum search depth, use + * the first suitable slot or return -EBUSY if + * there was none + */ + if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) { + if (ins_index < 0) { + rc = -EBUSY; + goto out_unlock; + } + goto found; + } + + ++depth; + } + + prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE); + spin_unlock_bh(&efx->filter_lock); + schedule(); + } + +found: + /* Create a software table entry if necessary, and mark it + * busy. We might yet fail to insert, but any attempt to + * insert a conflicting filter while we're waiting for the + * firmware must find the busy entry. + */ + saved_spec = efx_ef10_filter_entry_spec(table, ins_index); + if (saved_spec) { + if (spec->flags & EFX_FILTER_FLAG_RX_STACK) { + /* Just make sure it won't be removed */ + saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK; + table->entry[ins_index].spec &= + ~EFX_EF10_FILTER_FLAG_STACK_OLD; + rc = ins_index; + goto out_unlock; + } + replacing = true; + priv_flags = efx_ef10_filter_entry_flags(table, ins_index); + } else { + saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC); + if (!saved_spec) { + rc = -ENOMEM; + goto out_unlock; + } + *saved_spec = *spec; + priv_flags = 0; + } + efx_ef10_filter_set_entry(table, ins_index, saved_spec, + priv_flags | EFX_EF10_FILTER_FLAG_BUSY); + + /* Mark lower-priority multicast recipients busy prior to removal */ + if (is_mc_recip) { + unsigned int depth, i; + + for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) { + i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); + if (test_bit(depth, mc_rem_map)) + table->entry[i].spec |= + EFX_EF10_FILTER_FLAG_BUSY; + } + } + + spin_unlock_bh(&efx->filter_lock); + + rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle, + replacing); + + /* Finalise the software table entry */ + spin_lock_bh(&efx->filter_lock); + if (rc == 0) { + if (replacing) { + /* Update the fields that may differ */ + saved_spec->priority = spec->priority; + saved_spec->flags &= EFX_FILTER_FLAG_RX_STACK; + saved_spec->flags |= spec->flags; + saved_spec->rss_context = spec->rss_context; + saved_spec->dmaq_id = spec->dmaq_id; + } + } else if (!replacing) { + kfree(saved_spec); + saved_spec = NULL; + } + efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags); + + /* Remove and finalise entries for lower-priority multicast + * recipients + */ + if (is_mc_recip) { + MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); + unsigned int depth, i; + + memset(inbuf, 0, sizeof(inbuf)); + + for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; 
depth++) { + if (!test_bit(depth, mc_rem_map)) + continue; + + i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); + saved_spec = efx_ef10_filter_entry_spec(table, i); + priv_flags = efx_ef10_filter_entry_flags(table, i); + + if (rc == 0) { + spin_unlock_bh(&efx->filter_lock); + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, + MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); + MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, + table->entry[i].handle); + rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, + inbuf, sizeof(inbuf), + NULL, 0, NULL); + spin_lock_bh(&efx->filter_lock); + } + + if (rc == 0) { + kfree(saved_spec); + saved_spec = NULL; + priv_flags = 0; + } else { + priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY; + } + efx_ef10_filter_set_entry(table, i, saved_spec, + priv_flags); + } + } + + /* If successful, return the inserted filter ID */ + if (rc == 0) + rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index; + + wake_up_all(&table->waitq); +out_unlock: + spin_unlock_bh(&efx->filter_lock); + finish_wait(&table->waitq, &wait); + return rc; +} + +void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx) +{ + /* no need to do anything here on EF10 */ +} + +/* Remove a filter. + * If !stack_requested, remove by ID + * If stack_requested, remove by index + * Filter ID may come from userland and must be range-checked. + */ +static int efx_ef10_filter_remove_internal(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id, bool stack_requested) +{ + unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS; + struct efx_ef10_filter_table *table = efx->filter_state; + MCDI_DECLARE_BUF(inbuf, + MC_CMD_FILTER_OP_IN_HANDLE_OFST + + MC_CMD_FILTER_OP_IN_HANDLE_LEN); + struct efx_filter_spec *spec; + DEFINE_WAIT(wait); + int rc; + + /* Find the software table entry and mark it busy. Don't + * remove it yet; any attempt to update while we're waiting + * for the firmware must find the busy entry. + */ + for (;;) { + spin_lock_bh(&efx->filter_lock); + if (!(table->entry[filter_idx].spec & + EFX_EF10_FILTER_FLAG_BUSY)) + break; + prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE); + spin_unlock_bh(&efx->filter_lock); + schedule(); + } + spec = efx_ef10_filter_entry_spec(table, filter_idx); + if (!spec || spec->priority > priority || + (!stack_requested && + efx_ef10_filter_rx_match_pri(table, spec->match_flags) != + filter_id / HUNT_FILTER_TBL_ROWS)) { + rc = -ENOENT; + goto out_unlock; + } + table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; + spin_unlock_bh(&efx->filter_lock); + + if (spec->flags & EFX_FILTER_FLAG_RX_STACK && !stack_requested) { + /* Reset steering of a stack-owned filter */ + + struct efx_filter_spec new_spec = *spec; + + new_spec.priority = EFX_FILTER_PRI_REQUIRED; + new_spec.flags = (EFX_FILTER_FLAG_RX | + EFX_FILTER_FLAG_RX_RSS | + EFX_FILTER_FLAG_RX_STACK); + new_spec.dmaq_id = 0; + new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT; + rc = efx_ef10_filter_push(efx, &new_spec, + &table->entry[filter_idx].handle, + true); + + spin_lock_bh(&efx->filter_lock); + if (rc == 0) + *spec = new_spec; + } else { + /* Really remove the filter */ + + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, + efx_ef10_filter_is_exclusive(spec) ? 
+ MC_CMD_FILTER_OP_IN_OP_REMOVE : + MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); + MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, + table->entry[filter_idx].handle); + rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, + inbuf, sizeof(inbuf), NULL, 0, NULL); + + spin_lock_bh(&efx->filter_lock); + if (rc == 0) { + kfree(spec); + efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); + } + } + table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY; + wake_up_all(&table->waitq); +out_unlock: + spin_unlock_bh(&efx->filter_lock); + finish_wait(&table->waitq, &wait); + return rc; +} + +static int efx_ef10_filter_remove_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id) +{ + return efx_ef10_filter_remove_internal(efx, priority, filter_id, false); +} + +static int efx_ef10_filter_get_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id, struct efx_filter_spec *spec) +{ + unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS; + struct efx_ef10_filter_table *table = efx->filter_state; + const struct efx_filter_spec *saved_spec; + int rc; + + spin_lock_bh(&efx->filter_lock); + saved_spec = efx_ef10_filter_entry_spec(table, filter_idx); + if (saved_spec && saved_spec->priority == priority && + efx_ef10_filter_rx_match_pri(table, saved_spec->match_flags) == + filter_id / HUNT_FILTER_TBL_ROWS) { + *spec = *saved_spec; + rc = 0; + } else { + rc = -ENOENT; + } + spin_unlock_bh(&efx->filter_lock); + return rc; +} + +static void efx_ef10_filter_clear_rx(struct efx_nic *efx, + enum efx_filter_priority priority) +{ + /* TODO */ +} + +static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx, + enum efx_filter_priority priority) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + unsigned int filter_idx; + s32 count = 0; + + spin_lock_bh(&efx->filter_lock); + for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { + if (table->entry[filter_idx].spec && + efx_ef10_filter_entry_spec(table, filter_idx)->priority == + priority) + ++count; + } + spin_unlock_bh(&efx->filter_lock); + return count; +} + +static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + + return table->rx_match_count * HUNT_FILTER_TBL_ROWS; +} + +static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 *buf, u32 size) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct efx_filter_spec *spec; + unsigned int filter_idx; + s32 count = 0; + + spin_lock_bh(&efx->filter_lock); + for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { + spec = efx_ef10_filter_entry_spec(table, filter_idx); + if (spec && spec->priority == priority) { + if (count == size) { + count = -EMSGSIZE; + break; + } + buf[count++] = (efx_ef10_filter_rx_match_pri( + table, spec->match_flags) * + HUNT_FILTER_TBL_ROWS + + filter_idx); + } + } + spin_unlock_bh(&efx->filter_lock); + return count; +} + +#ifdef CONFIG_RFS_ACCEL + +static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete; + +static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx, + struct efx_filter_spec *spec) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); + struct efx_filter_spec *saved_spec; + unsigned int hash, i, depth = 1; + bool replacing = false; + int ins_index = -1; + u64 cookie; + s32 rc; + + /* Must be an RX filter without RSS and not for a multicast + * destination address (RFS only works for connected 
sockets). + * These restrictions allow us to pass only a tiny amount of + * data through to the completion function. + */ + EFX_WARN_ON_PARANOID(spec->flags != + (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER)); + EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT); + EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec)); + + hash = efx_ef10_filter_hash(spec); + + spin_lock_bh(&efx->filter_lock); + + /* Find any existing filter with the same match tuple or else + * a free slot to insert at. If an existing filter is busy, + * we have to give up. + */ + for (;;) { + i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); + saved_spec = efx_ef10_filter_entry_spec(table, i); + + if (!saved_spec) { + if (ins_index < 0) + ins_index = i; + } else if (efx_ef10_filter_equal(spec, saved_spec)) { + if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) { + rc = -EBUSY; + goto fail_unlock; + } + EFX_WARN_ON_PARANOID(saved_spec->flags & + EFX_FILTER_FLAG_RX_STACK); + if (spec->priority < saved_spec->priority) { + rc = -EPERM; + goto fail_unlock; + } + ins_index = i; + break; + } + + /* Once we reach the maximum search depth, use the + * first suitable slot or return -EBUSY if there was + * none + */ + if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) { + if (ins_index < 0) { + rc = -EBUSY; + goto fail_unlock; + } + break; + } + + ++depth; + } + + /* Create a software table entry if necessary, and mark it + * busy. We might yet fail to insert, but any attempt to + * insert a conflicting filter while we're waiting for the + * firmware must find the busy entry. + */ + saved_spec = efx_ef10_filter_entry_spec(table, ins_index); + if (saved_spec) { + replacing = true; + } else { + saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC); + if (!saved_spec) { + rc = -ENOMEM; + goto fail_unlock; + } + *saved_spec = *spec; + } + efx_ef10_filter_set_entry(table, ins_index, saved_spec, + EFX_EF10_FILTER_FLAG_BUSY); + + spin_unlock_bh(&efx->filter_lock); + + /* Pack up the variables needed on completion */ + cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id; + + efx_ef10_filter_push_prep(efx, spec, inbuf, + table->entry[ins_index].handle, replacing); + efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), + MC_CMD_FILTER_OP_OUT_LEN, + efx_ef10_filter_rfs_insert_complete, cookie); + + return ins_index; + +fail_unlock: + spin_unlock_bh(&efx->filter_lock); + return rc; +} + +static void +efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie, + int rc, efx_dword_t *outbuf, + size_t outlen_actual) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + unsigned int ins_index, dmaq_id; + struct efx_filter_spec *spec; + bool replacing; + + /* Unpack the cookie */ + replacing = cookie >> 31; + ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1); + dmaq_id = cookie & 0xffff; + + spin_lock_bh(&efx->filter_lock); + spec = efx_ef10_filter_entry_spec(table, ins_index); + if (rc == 0) { + table->entry[ins_index].handle = + MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE); + if (replacing) + spec->dmaq_id = dmaq_id; + } else if (!replacing) { + kfree(spec); + spec = NULL; + } + efx_ef10_filter_set_entry(table, ins_index, spec, 0); + spin_unlock_bh(&efx->filter_lock); + + wake_up_all(&table->waitq); +} + +static void +efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx, + unsigned long filter_idx, + int rc, efx_dword_t *outbuf, + size_t outlen_actual); + +static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, + unsigned int filter_idx) +{ + struct efx_ef10_filter_table 
*table = efx->filter_state; + struct efx_filter_spec *spec = + efx_ef10_filter_entry_spec(table, filter_idx); + MCDI_DECLARE_BUF(inbuf, + MC_CMD_FILTER_OP_IN_HANDLE_OFST + + MC_CMD_FILTER_OP_IN_HANDLE_LEN); + + if (!spec || + (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) || + spec->priority != EFX_FILTER_PRI_HINT || + !rps_may_expire_flow(efx->net_dev, spec->dmaq_id, + flow_id, filter_idx)) + return false; + + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, + MC_CMD_FILTER_OP_IN_OP_REMOVE); + MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, + table->entry[filter_idx].handle); + if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0, + efx_ef10_filter_rfs_expire_complete, filter_idx)) + return false; + + table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; + return true; +} + +static void +efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx, + unsigned long filter_idx, + int rc, efx_dword_t *outbuf, + size_t outlen_actual) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct efx_filter_spec *spec = + efx_ef10_filter_entry_spec(table, filter_idx); + + spin_lock_bh(&efx->filter_lock); + if (rc == 0) { + kfree(spec); + efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); + } + table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY; + wake_up_all(&table->waitq); + spin_unlock_bh(&efx->filter_lock); +} + +#endif /* CONFIG_RFS_ACCEL */ + +static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags) +{ + int match_flags = 0; + +#define MAP_FLAG(gen_flag, mcdi_field) { \ + u32 old_mcdi_flags = mcdi_flags; \ + mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ + mcdi_field ## _LBN); \ + if (mcdi_flags != old_mcdi_flags) \ + match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \ + } + MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST); + MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST); + MAP_FLAG(REM_HOST, SRC_IP); + MAP_FLAG(LOC_HOST, DST_IP); + MAP_FLAG(REM_MAC, SRC_MAC); + MAP_FLAG(REM_PORT, SRC_PORT); + MAP_FLAG(LOC_MAC, DST_MAC); + MAP_FLAG(LOC_PORT, DST_PORT); + MAP_FLAG(ETHER_TYPE, ETHER_TYPE); + MAP_FLAG(INNER_VID, INNER_VLAN); + MAP_FLAG(OUTER_VID, OUTER_VLAN); + MAP_FLAG(IP_PROTO, IP_PROTO); +#undef MAP_FLAG + + /* Did we map them all? 
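+	 * Any MCDI match bits left over are fields this driver does not
+	 * understand, so the whole combination is rejected.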
*/ + if (mcdi_flags) + return -EINVAL; + + return match_flags; +} + +static int efx_ef10_filter_table_probe(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX); + unsigned int pd_match_pri, pd_match_count; + struct efx_ef10_filter_table *table; + size_t outlen; + int rc; + + table = kzalloc(sizeof(*table), GFP_KERNEL); + if (!table) + return -ENOMEM; + + /* Find out which RX filter types are supported, and their priorities */ + MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP, + MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES); + rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO, + inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), + &outlen); + if (rc) + goto fail; + pd_match_count = MCDI_VAR_ARRAY_LEN( + outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES); + table->rx_match_count = 0; + + for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) { + u32 mcdi_flags = + MCDI_ARRAY_DWORD( + outbuf, + GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES, + pd_match_pri); + rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags); + if (rc < 0) { + netif_dbg(efx, probe, efx->net_dev, + "%s: fw flags %#x pri %u not supported in driver\n", + __func__, mcdi_flags, pd_match_pri); + } else { + netif_dbg(efx, probe, efx->net_dev, + "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n", + __func__, mcdi_flags, pd_match_pri, + rc, table->rx_match_count); + table->rx_match_flags[table->rx_match_count++] = rc; + } + } + + table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry)); + if (!table->entry) { + rc = -ENOMEM; + goto fail; + } + + efx->filter_state = table; + init_waitqueue_head(&table->waitq); + return 0; + +fail: + kfree(table); + return rc; +} + +static void efx_ef10_filter_table_restore(struct efx_nic *efx) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct efx_filter_spec *spec; + unsigned int filter_idx; + bool failed = false; + int rc; + + if (!nic_data->must_restore_filters) + return; + + spin_lock_bh(&efx->filter_lock); + + for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { + spec = efx_ef10_filter_entry_spec(table, filter_idx); + if (!spec) + continue; + + table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; + spin_unlock_bh(&efx->filter_lock); + + rc = efx_ef10_filter_push(efx, spec, + &table->entry[filter_idx].handle, + false); + if (rc) + failed = true; + + spin_lock_bh(&efx->filter_lock); + if (rc) { + kfree(spec); + efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); + } else { + table->entry[filter_idx].spec &= + ~EFX_EF10_FILTER_FLAG_BUSY; + } + } + + spin_unlock_bh(&efx->filter_lock); + + if (failed) + netif_err(efx, hw, efx->net_dev, + "unable to restore all filters\n"); + else + nic_data->must_restore_filters = false; +} + +static void efx_ef10_filter_table_remove(struct efx_nic *efx) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); + struct efx_filter_spec *spec; + unsigned int filter_idx; + int rc; + + for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { + spec = efx_ef10_filter_entry_spec(table, filter_idx); + if (!spec) + continue; + + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, + efx_ef10_filter_is_exclusive(spec) ? 
+ MC_CMD_FILTER_OP_IN_OP_REMOVE : + MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); + MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, + table->entry[filter_idx].handle); + rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), + NULL, 0, NULL); + + WARN_ON(rc != 0); + kfree(spec); + } + + vfree(table->entry); + kfree(table); +} + +static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct net_device *net_dev = efx->net_dev; + struct efx_filter_spec spec; + bool remove_failed = false; + struct netdev_hw_addr *uc; + struct netdev_hw_addr *mc; + unsigned int filter_idx; + int i, n, rc; + + if (!efx_dev_registered(efx)) + return; + + /* Mark old filters that may need to be removed */ + spin_lock_bh(&efx->filter_lock); + n = table->stack_uc_count < 0 ? 1 : table->stack_uc_count; + for (i = 0; i < n; i++) { + filter_idx = table->stack_uc_list[i].id % HUNT_FILTER_TBL_ROWS; + table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD; + } + n = table->stack_mc_count < 0 ? 1 : table->stack_mc_count; + for (i = 0; i < n; i++) { + filter_idx = table->stack_mc_list[i].id % HUNT_FILTER_TBL_ROWS; + table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD; + } + spin_unlock_bh(&efx->filter_lock); + + /* Copy/convert the address lists; add the primary station + * address and broadcast address + */ + netif_addr_lock_bh(net_dev); + if (net_dev->flags & IFF_PROMISC || + netdev_uc_count(net_dev) >= EFX_EF10_FILTER_STACK_UC_MAX) { + table->stack_uc_count = -1; + } else { + table->stack_uc_count = 1 + netdev_uc_count(net_dev); + memcpy(table->stack_uc_list[0].addr, net_dev->dev_addr, + ETH_ALEN); + i = 1; + netdev_for_each_uc_addr(uc, net_dev) { + memcpy(table->stack_uc_list[i].addr, + uc->addr, ETH_ALEN); + i++; + } + } + if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) || + netdev_mc_count(net_dev) >= EFX_EF10_FILTER_STACK_MC_MAX) { + table->stack_mc_count = -1; + } else { + table->stack_mc_count = 1 + netdev_mc_count(net_dev); + eth_broadcast_addr(table->stack_mc_list[0].addr); + i = 1; + netdev_for_each_mc_addr(mc, net_dev) { + memcpy(table->stack_mc_list[i].addr, + mc->addr, ETH_ALEN); + i++; + } + } + netif_addr_unlock_bh(net_dev); + + /* Insert/renew unicast filters */ + if (table->stack_uc_count >= 0) { + for (i = 0; i < table->stack_uc_count; i++) { + efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED, + EFX_FILTER_FLAG_RX_RSS | + EFX_FILTER_FLAG_RX_STACK, + 0); + efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, + table->stack_uc_list[i].addr); + rc = efx_ef10_filter_insert(efx, &spec, true); + if (rc < 0) { + /* Fall back to unicast-promisc */ + while (i--) + efx_ef10_filter_remove_safe( + efx, EFX_FILTER_PRI_REQUIRED, + table->stack_uc_list[i].id); + table->stack_uc_count = -1; + break; + } + table->stack_uc_list[i].id = rc; + } + } + if (table->stack_uc_count < 0) { + efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED, + EFX_FILTER_FLAG_RX_RSS | + EFX_FILTER_FLAG_RX_STACK, + 0); + efx_filter_set_uc_def(&spec); + rc = efx_ef10_filter_insert(efx, &spec, true); + if (rc < 0) { + WARN_ON(1); + table->stack_uc_count = 0; + } else { + table->stack_uc_list[0].id = rc; + } + } + + /* Insert/renew multicast filters */ + if (table->stack_mc_count >= 0) { + for (i = 0; i < table->stack_mc_count; i++) { + efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED, + EFX_FILTER_FLAG_RX_RSS | + EFX_FILTER_FLAG_RX_STACK, + 0); + efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, + table->stack_mc_list[i].addr); + rc = 
efx_ef10_filter_insert(efx, &spec, true); + if (rc < 0) { + /* Fall back to multicast-promisc */ + while (i--) + efx_ef10_filter_remove_safe( + efx, EFX_FILTER_PRI_REQUIRED, + table->stack_mc_list[i].id); + table->stack_mc_count = -1; + break; + } + table->stack_mc_list[i].id = rc; + } + } + if (table->stack_mc_count < 0) { + efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED, + EFX_FILTER_FLAG_RX_RSS | + EFX_FILTER_FLAG_RX_STACK, + 0); + efx_filter_set_mc_def(&spec); + rc = efx_ef10_filter_insert(efx, &spec, true); + if (rc < 0) { + WARN_ON(1); + table->stack_mc_count = 0; + } else { + table->stack_mc_list[0].id = rc; + } + } + + /* Remove filters that weren't renewed. Since nothing else + * changes the STACK_OLD flag or removes these filters, we + * don't need to hold the filter_lock while scanning for + * these filters. + */ + for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { + if (ACCESS_ONCE(table->entry[i].spec) & + EFX_EF10_FILTER_FLAG_STACK_OLD) { + if (efx_ef10_filter_remove_internal(efx, + EFX_FILTER_PRI_REQUIRED, + i, true) < 0) + remove_failed = true; + } + } + WARN_ON(remove_failed); +} + +static int efx_ef10_mac_reconfigure(struct efx_nic *efx) +{ + efx_ef10_filter_sync_rx_mode(efx); + + return efx_mcdi_set_mac(efx); +} + +#ifdef CONFIG_SFC_MTD + +struct efx_ef10_nvram_type_info { + u16 type, type_mask; + u8 port; + const char *name; +}; + +static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = { + { NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" }, + { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" }, + { NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" }, + { NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" }, + { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" }, + { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" }, + { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" }, + { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" }, + { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" }, + { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" }, +}; + +static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, + struct efx_mcdi_mtd_partition *part, + unsigned int type) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX); + const struct efx_ef10_nvram_type_info *info; + size_t size, erase_size, outlen; + bool protected; + int rc; + + for (info = efx_ef10_nvram_types; ; info++) { + if (info == + efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types)) + return -ENODEV; + if ((type & ~info->type_mask) == info->type) + break; + } + if (info->port != efx_port_num(efx)) + return -ENODEV; + + rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected); + if (rc) + return rc; + if (protected) + return -ENODEV; /* hide it */ + + part->nvram_type = type; + + MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type); + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN) + return -EIO; + if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) & + (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN)) + part->fw_subtype = MCDI_DWORD(outbuf, + NVRAM_METADATA_OUT_SUBTYPE); + + part->common.dev_type_name = "EF10 NVRAM manager"; + part->common.type_name = info->name; + + part->common.mtd.type = MTD_NORFLASH; + part->common.mtd.flags = MTD_CAP_NORFLASH; + part->common.mtd.size = size; 
+ part->common.mtd.erasesize = erase_size; + + return 0; +} + +static int efx_ef10_mtd_probe(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); + struct efx_mcdi_mtd_partition *parts; + size_t outlen, n_parts_total, i, n_parts; + unsigned int type; + int rc; + + ASSERT_RTNL(); + + BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0); + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN) + return -EIO; + + n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS); + if (n_parts_total > + MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID)) + return -EIO; + + parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL); + if (!parts) + return -ENOMEM; + + n_parts = 0; + for (i = 0; i < n_parts_total; i++) { + type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID, + i); + rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type); + if (rc == 0) + n_parts++; + else if (rc != -ENODEV) + goto fail; + } + + rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); +fail: + if (rc) + kfree(parts); + return rc; +} + +#endif /* CONFIG_SFC_MTD */ + +static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time) +{ + _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD); +} + +const struct efx_nic_type efx_hunt_a0_nic_type = { + .mem_map_size = efx_ef10_mem_map_size, + .probe = efx_ef10_probe, + .remove = efx_ef10_remove, + .dimension_resources = efx_ef10_dimension_resources, + .init = efx_ef10_init_nic, + .fini = efx_port_dummy_op_void, + .map_reset_reason = efx_mcdi_map_reset_reason, + .map_reset_flags = efx_ef10_map_reset_flags, + .reset = efx_mcdi_reset, + .probe_port = efx_mcdi_port_probe, + .remove_port = efx_mcdi_port_remove, + .fini_dmaq = efx_ef10_fini_dmaq, + .describe_stats = efx_ef10_describe_stats, + .update_stats = efx_ef10_update_stats, + .start_stats = efx_mcdi_mac_start_stats, + .stop_stats = efx_mcdi_mac_stop_stats, + .set_id_led = efx_mcdi_set_id_led, + .push_irq_moderation = efx_ef10_push_irq_moderation, + .reconfigure_mac = efx_ef10_mac_reconfigure, + .check_mac_fault = efx_mcdi_mac_check_fault, + .reconfigure_port = efx_mcdi_port_reconfigure, + .get_wol = efx_ef10_get_wol, + .set_wol = efx_ef10_set_wol, + .resume_wol = efx_port_dummy_op_void, + /* TODO: test_chip */ + .test_nvram = efx_mcdi_nvram_test_all, + .mcdi_request = efx_ef10_mcdi_request, + .mcdi_poll_response = efx_ef10_mcdi_poll_response, + .mcdi_read_response = efx_ef10_mcdi_read_response, + .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot, + .irq_enable_master = efx_port_dummy_op_void, + .irq_test_generate = efx_ef10_irq_test_generate, + .irq_disable_non_ev = efx_port_dummy_op_void, + .irq_handle_msi = efx_ef10_msi_interrupt, + .irq_handle_legacy = efx_ef10_legacy_interrupt, + .tx_probe = efx_ef10_tx_probe, + .tx_init = efx_ef10_tx_init, + .tx_remove = efx_ef10_tx_remove, + .tx_write = efx_ef10_tx_write, + .rx_push_indir_table = efx_ef10_rx_push_indir_table, + .rx_probe = efx_ef10_rx_probe, + .rx_init = efx_ef10_rx_init, + .rx_remove = efx_ef10_rx_remove, + .rx_write = efx_ef10_rx_write, + .rx_defer_refill = efx_ef10_rx_defer_refill, + .ev_probe = efx_ef10_ev_probe, + .ev_init = efx_ef10_ev_init, + .ev_fini = efx_ef10_ev_fini, + .ev_remove = efx_ef10_ev_remove, + .ev_process = efx_ef10_ev_process, + .ev_read_ack = efx_ef10_ev_read_ack, + .ev_test_generate = efx_ef10_ev_test_generate, + .filter_table_probe = 
efx_ef10_filter_table_probe, + .filter_table_restore = efx_ef10_filter_table_restore, + .filter_table_remove = efx_ef10_filter_table_remove, + .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter, + .filter_insert = efx_ef10_filter_insert, + .filter_remove_safe = efx_ef10_filter_remove_safe, + .filter_get_safe = efx_ef10_filter_get_safe, + .filter_clear_rx = efx_ef10_filter_clear_rx, + .filter_count_rx_used = efx_ef10_filter_count_rx_used, + .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit, + .filter_get_rx_ids = efx_ef10_filter_get_rx_ids, +#ifdef CONFIG_RFS_ACCEL + .filter_rfs_insert = efx_ef10_filter_rfs_insert, + .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one, +#endif +#ifdef CONFIG_SFC_MTD + .mtd_probe = efx_ef10_mtd_probe, + .mtd_rename = efx_mcdi_mtd_rename, + .mtd_read = efx_mcdi_mtd_read, + .mtd_erase = efx_mcdi_mtd_erase, + .mtd_write = efx_mcdi_mtd_write, + .mtd_sync = efx_mcdi_mtd_sync, +#endif + .ptp_write_host_time = efx_ef10_ptp_write_host_time, + + .revision = EFX_REV_HUNT_A0, + .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), + .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, + .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, + .can_rx_scatter = true, + .always_rx_scatter = true, + .max_interrupt_mode = EFX_INT_MODE_MSIX, + .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, + .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_RXHASH | NETIF_F_NTUPLE), + .mcdi_max_ver = 2, + .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, +}; diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 34788fb..5b66c5e 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -2075,7 +2075,7 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data) return 0; } -static const struct net_device_ops efx_netdev_ops = { +static const struct net_device_ops efx_farch_netdev_ops = { .ndo_open = efx_net_open, .ndo_stop = efx_net_stop, .ndo_get_stats64 = efx_net_stats, @@ -2102,6 +2102,26 @@ static const struct net_device_ops efx_netdev_ops = { #endif }; +static const struct net_device_ops efx_ef10_netdev_ops = { + .ndo_open = efx_net_open, + .ndo_stop = efx_net_stop, + .ndo_get_stats64 = efx_net_stats, + .ndo_tx_timeout = efx_watchdog, + .ndo_start_xmit = efx_hard_start_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = efx_ioctl, + .ndo_change_mtu = efx_change_mtu, + .ndo_set_mac_address = efx_set_mac_address, + .ndo_set_rx_mode = efx_set_rx_mode, + .ndo_set_features = efx_set_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = efx_netpoll, +#endif +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = efx_filter_rfs, +#endif +}; + static void efx_update_name(struct efx_nic *efx) { strcpy(efx->name, efx->net_dev->name); @@ -2114,7 +2134,8 @@ static int efx_netdev_event(struct notifier_block *this, { struct net_device *net_dev = netdev_notifier_info_to_dev(ptr); - if (net_dev->netdev_ops == &efx_netdev_ops && + if ((net_dev->netdev_ops == &efx_farch_netdev_ops || + net_dev->netdev_ops == &efx_ef10_netdev_ops) && event == NETDEV_CHANGENAME) efx_update_name(netdev_priv(net_dev)); @@ -2141,7 +2162,12 @@ static int efx_register_netdev(struct efx_nic *efx) net_dev->watchdog_timeo = 5 * HZ; net_dev->irq = efx->pci_dev->irq; - net_dev->netdev_ops = &efx_netdev_ops; + if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) { + net_dev->netdev_ops = &efx_ef10_netdev_ops; + net_dev->priv_flags |= IFF_UNICAST_FLT; + } else { + net_dev->netdev_ops = &efx_farch_netdev_ops; + } 
SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); net_dev->gso_max_segs = EFX_TSO_MAX_SEGS; @@ -2463,6 +2489,8 @@ static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = { .driver_data = (unsigned long) &siena_a0_nic_type}, {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */ .driver_data = (unsigned long) &siena_a0_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903), /* SFC9120 PF */ + .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, {0} /* end of list */ }; diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 6354693..78ddb48 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -77,6 +77,8 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = { EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_nodesc_trunc), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets), }; #define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc) diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index d8a20f5..272f201 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -852,6 +852,7 @@ void efx_mcdi_process_event(struct efx_channel *channel, "MC Scheduler error address=0x%x\n", data); break; case MCDI_EVENT_CODE_REBOOT: + case MCDI_EVENT_CODE_MC_REBOOT: netif_info(efx, hw, efx->net_dev, "MC Reboot\n"); efx_mcdi_ev_death(efx, -EIO); break; @@ -866,7 +867,19 @@ void efx_mcdi_process_event(struct efx_channel *channel, case MCDI_EVENT_CODE_PTP_PPS: efx_ptp_event(efx, event); break; - + case MCDI_EVENT_CODE_TX_FLUSH: + case MCDI_EVENT_CODE_RX_FLUSH: + /* Two flush events will be sent: one to the same event + * queue as completions, and one to event queue 0. + * In the latter case the {RX,TX}_FLUSH_TO_DRIVER + * flag will be set, and we should ignore the event + * because we want to wait for all completions. + */ + BUILD_BUG_ON(MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN != + MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN); + if (!MCDI_EVENT_FIELD(*event, TX_FLUSH_TO_DRIVER)) + efx_ef10_handle_drain_event(efx); + break; case MCDI_EVENT_CODE_TX_ERR: case MCDI_EVENT_CODE_RX_ERR: netif_err(efx, hw, efx->net_dev, @@ -890,27 +903,55 @@ void efx_mcdi_process_event(struct efx_channel *channel, void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len) { - MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN); + MCDI_DECLARE_BUF(outbuf, + max(MC_CMD_GET_VERSION_OUT_LEN, + MC_CMD_GET_CAPABILITIES_OUT_LEN)); size_t outlength; const __le16 *ver_words; + size_t offset; int rc; BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0); - rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0, outbuf, sizeof(outbuf), &outlength); if (rc) goto fail; - if (outlength < MC_CMD_GET_VERSION_OUT_LEN) { rc = -EIO; goto fail; } ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION); - snprintf(buf, len, "%u.%u.%u.%u", - le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]), - le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3])); + offset = snprintf(buf, len, "%u.%u.%u.%u", + le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]), + le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3])); + + /* EF10 may have multiple datapath firmware variants within a + * single version. Report which variants are running. 
+ */ + if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) { + BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0); + rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0, + outbuf, sizeof(outbuf), &outlength); + if (rc || outlength < MC_CMD_GET_CAPABILITIES_OUT_LEN) + offset += snprintf( + buf + offset, len - offset, " rx? tx?"); + else + offset += snprintf( + buf + offset, len - offset, " rx%x tx%x", + MCDI_WORD(outbuf, + GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID), + MCDI_WORD(outbuf, + GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID)); + + /* It's theoretically possible for the string to exceed 31 + * characters, though in practice the first three version + * components are short enough that this doesn't happen. + */ + if (WARN_ON(offset >= len)) + buf[0] = 0; + } + return; fail: @@ -1430,6 +1471,17 @@ fail: return rc; } +int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN); + + BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0); + MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type); + MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled); + return efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + #ifdef CONFIG_SFC_MTD #define EFX_MCDI_NVRAM_LEN_MAX 128 diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 0ca00a6..5919aca 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -81,7 +81,7 @@ struct efx_mcdi_mon { struct efx_mcdi_mtd_partition { struct efx_mtd_partition common; bool updating; - u8 nvram_type; + u16 nvram_type; u16 fw_subtype; }; @@ -157,6 +157,9 @@ extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); #define _MCDI_DWORD(_buf, _field) \ ((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2)) +#define MCDI_WORD(_buf, _field) \ + ((u16)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \ + le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field))) #define MCDI_SET_DWORD(_buf, _field, _value) \ EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value) #define MCDI_DWORD(_buf, _field) \ @@ -293,6 +296,8 @@ extern int efx_mcdi_flush_rxqs(struct efx_nic *efx); extern int efx_mcdi_port_probe(struct efx_nic *efx); extern void efx_mcdi_port_remove(struct efx_nic *efx); extern int efx_mcdi_port_reconfigure(struct efx_nic *efx); +extern int efx_mcdi_port_get_number(struct efx_nic *efx); +extern u32 efx_mcdi_phy_get_caps(struct efx_nic *efx); extern void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev); extern int efx_mcdi_set_mac(struct efx_nic *efx); #define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1)) @@ -301,6 +306,7 @@ extern void efx_mcdi_mac_stop_stats(struct efx_nic *efx); extern bool efx_mcdi_mac_check_fault(struct efx_nic *efx); extern enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason); extern int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method); +extern int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled); #ifdef CONFIG_SFC_MCDI_MON extern int efx_mcdi_mon_probe(struct efx_nic *efx); diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c index 42d52f3..03faf2f 100644 --- a/drivers/net/ethernet/sfc/mcdi_port.c +++ b/drivers/net/ethernet/sfc/mcdi_port.c @@ -830,6 +830,13 @@ static const struct efx_phy_operations efx_mcdi_phy_ops = { .get_module_info = efx_mcdi_phy_get_module_info, }; +u32 efx_mcdi_phy_get_caps(struct efx_nic *efx) +{ + struct efx_mcdi_phy_data *phy_data = efx->phy_data; + + 
return phy_data->supported_cap; +} + static unsigned int efx_mcdi_event_link_speed[] = { [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100, [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000, @@ -1004,3 +1011,17 @@ void efx_mcdi_port_remove(struct efx_nic *efx) efx->phy_op->remove(efx); efx_nic_free_buffer(efx, &efx->stats_buffer); } + +/* Get physical port number (EF10 only; on Siena it is same as PF number) */ +int efx_mcdi_port_get_number(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN); + int rc; + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_PORT_ASSIGNMENT, NULL, 0, + outbuf, sizeof(outbuf), NULL); + if (rc) + return rc; + + return MCDI_DWORD(outbuf, GET_PORT_ASSIGNMENT_OUT_PORT); +} diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index d1aa5dc..753df15 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -39,7 +39,7 @@ * **************************************************************************/ -#define EFX_DRIVER_VERSION "3.2" +#define EFX_DRIVER_VERSION "4.0" #ifdef DEBUG #define EFX_BUG_ON_PARANOID(x) BUG_ON(x) @@ -389,6 +389,8 @@ enum efx_rx_alloc_method { * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun * @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to * lack of descriptors + * @n_rx_merge_events: Number of RX merged completion events + * @n_rx_merge_packets: Number of RX packets completed by merged events * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by * __efx_rx_packet(), or zero if there is none * @rx_pkt_index: Ring index of first buffer for next packet to be delivered @@ -425,6 +427,8 @@ struct efx_channel { unsigned n_rx_overlength; unsigned n_skbuff_leaks; unsigned int n_rx_nodesc_trunc; + unsigned int n_rx_merge_events; + unsigned int n_rx_merge_packets; unsigned int rx_pkt_n_frags; unsigned int rx_pkt_index; diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 686ce7a..4bd5359 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -17,15 +17,12 @@ #include "efx.h" #include "mcdi.h" -/* - * Falcon hardware control - */ - enum { EFX_REV_FALCON_A0 = 0, EFX_REV_FALCON_A1 = 1, EFX_REV_FALCON_B0 = 2, EFX_REV_SIENA_A0 = 3, + EFX_REV_HUNT_A0 = 4, }; static inline int efx_nic_rev(struct efx_nic *efx) @@ -347,6 +344,78 @@ struct siena_nic_data { u64 stats[SIENA_STAT_COUNT]; }; +enum { + EF10_STAT_tx_bytes, + EF10_STAT_tx_packets, + EF10_STAT_tx_pause, + EF10_STAT_tx_control, + EF10_STAT_tx_unicast, + EF10_STAT_tx_multicast, + EF10_STAT_tx_broadcast, + EF10_STAT_tx_lt64, + EF10_STAT_tx_64, + EF10_STAT_tx_65_to_127, + EF10_STAT_tx_128_to_255, + EF10_STAT_tx_256_to_511, + EF10_STAT_tx_512_to_1023, + EF10_STAT_tx_1024_to_15xx, + EF10_STAT_tx_15xx_to_jumbo, + EF10_STAT_rx_bytes, + EF10_STAT_rx_bytes_minus_good_bytes, + EF10_STAT_rx_good_bytes, + EF10_STAT_rx_bad_bytes, + EF10_STAT_rx_packets, + EF10_STAT_rx_good, + EF10_STAT_rx_bad, + EF10_STAT_rx_pause, + EF10_STAT_rx_control, + EF10_STAT_rx_unicast, + EF10_STAT_rx_multicast, + EF10_STAT_rx_broadcast, + EF10_STAT_rx_lt64, + EF10_STAT_rx_64, + EF10_STAT_rx_65_to_127, + EF10_STAT_rx_128_to_255, + EF10_STAT_rx_256_to_511, + EF10_STAT_rx_512_to_1023, + EF10_STAT_rx_1024_to_15xx, + EF10_STAT_rx_15xx_to_jumbo, + EF10_STAT_rx_gtjumbo, + EF10_STAT_rx_bad_gtjumbo, + EF10_STAT_rx_overflow, + EF10_STAT_rx_align_error, + EF10_STAT_rx_length_error, + EF10_STAT_rx_nodesc_drops, + EF10_STAT_COUNT +}; + +/** + * struct 
efx_ef10_nic_data - EF10 architecture NIC state + * @mcdi_buf: DMA buffer for MCDI + * @warm_boot_count: Last seen MC warm boot count + * @vi_base: Absolute index of first VI in this function + * @n_allocated_vis: Number of VIs allocated to this function + * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot + * @must_restore_filters: Flag: filters have yet to be restored after MC reboot + * @rx_rss_context: Firmware handle for our RSS context + * @stats: Hardware statistics + * @workaround_35388: Flag: firmware supports workaround for bug 35388 + * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of + * %MC_CMD_GET_CAPABILITIES response) + */ +struct efx_ef10_nic_data { + struct efx_buffer mcdi_buf; + u16 warm_boot_count; + unsigned int vi_base; + unsigned int n_allocated_vis; + bool must_realloc_vis; + bool must_restore_filters; + u32 rx_rss_context; + u64 stats[EF10_STAT_COUNT]; + bool workaround_35388; + u32 datapath_caps; +}; + /* * On the SFC9000 family each port is associated with 1 PCI physical * function (PF) handled by sfc and a configurable number of virtual @@ -448,6 +517,7 @@ extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev); extern const struct efx_nic_type falcon_a1_nic_type; extern const struct efx_nic_type falcon_b0_nic_type; extern const struct efx_nic_type siena_a0_nic_type; +extern const struct efx_nic_type efx_hunt_a0_nic_type; /************************************************************************** * @@ -627,6 +697,7 @@ extern void falcon_stop_nic_stats(struct efx_nic *efx); extern int falcon_reset_xaui(struct efx_nic *efx); extern void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw); extern void efx_farch_init_common(struct efx_nic *efx); +extern void efx_ef10_handle_drain_event(struct efx_nic *efx); static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx) { efx->type->rx_push_indir_table(efx); diff --git a/drivers/net/ethernet/sfc/workarounds.h b/drivers/net/ethernet/sfc/workarounds.h index 7e5be1d..8848ec0 100644 --- a/drivers/net/ethernet/sfc/workarounds.h +++ b/drivers/net/ethernet/sfc/workarounds.h @@ -44,4 +44,10 @@ /* Leak overlength packets rather than free */ #define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A +/* Lockup when writing event block registers at gen2/gen3 */ +#define EFX_EF10_WORKAROUND_35388(efx) \ + (((struct efx_ef10_nic_data *)efx->nic_data)->workaround_35388) +#define EFX_WORKAROUND_35388(efx) \ + (efx_nic_rev(efx) == EFX_REV_HUNT_A0 && EFX_EF10_WORKAROUND_35388(efx)) + #endif /* EFX_WORKAROUNDS_H */ -- cgit v0.10.2 From 03536cc3ad611f29a35803d37a44414b823135c3 Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Mon, 26 Aug 2013 21:11:57 +0800 Subject: net: mdio-sun4i: Convert to devm_* api Use devm_ioremap_resource instead of of_iomap() and devm_kzalloc() instead of kmalloc() to make cleanup paths simpler. This patch also fixes the resource leak caused by missing corresponding iounamp() of the of_iomap(). Signed-off-by: Jisheng Zhang Acked-by: Maxime Ripard Signed-off-by: David S. 
Miller diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c index 7f25e49..18969b3 100644 --- a/drivers/net/phy/mdio-sun4i.c +++ b/drivers/net/phy/mdio-sun4i.c @@ -101,6 +101,7 @@ static int sun4i_mdio_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; struct mii_bus *bus; struct sun4i_mdio_data *data; + struct resource *res; int ret, i; bus = mdiobus_alloc_size(sizeof(*data)); @@ -114,7 +115,8 @@ static int sun4i_mdio_probe(struct platform_device *pdev) snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev)); bus->parent = &pdev->dev; - bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR, + GFP_KERNEL); if (!bus->irq) { ret = -ENOMEM; goto err_out_free_mdiobus; @@ -124,10 +126,11 @@ static int sun4i_mdio_probe(struct platform_device *pdev) bus->irq[i] = PHY_POLL; data = bus->priv; - data->membase = of_iomap(np, 0); - if (!data->membase) { - ret = -ENOMEM; - goto err_out_free_mdio_irq; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + data->membase = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(data->membase)) { + ret = PTR_ERR(data->membase); + goto err_out_free_mdiobus; } data->regulator = devm_regulator_get(&pdev->dev, "phy"); @@ -139,7 +142,7 @@ static int sun4i_mdio_probe(struct platform_device *pdev) } else { ret = regulator_enable(data->regulator); if (ret) - goto err_out_free_mdio_irq; + goto err_out_free_mdiobus; } ret = of_mdiobus_register(bus, np); @@ -152,8 +155,6 @@ static int sun4i_mdio_probe(struct platform_device *pdev) err_out_disable_regulator: regulator_disable(data->regulator); -err_out_free_mdio_irq: - kfree(bus->irq); err_out_free_mdiobus: mdiobus_free(bus); return ret; @@ -164,7 +165,6 @@ static int sun4i_mdio_remove(struct platform_device *pdev) struct mii_bus *bus = platform_get_drvdata(pdev); mdiobus_unregister(bus); - kfree(bus->irq); mdiobus_free(bus); return 0; -- cgit v0.10.2 From 76bfd8984430be5b19727d09f6ab5e1d7a247f8c Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Mon, 26 Aug 2013 16:34:00 +0200 Subject: net: sctp: reorder sctp_globals to reduce cacheline usage Reduce cacheline usage from 2 to 1 cacheline for sctp_globals structure. By reordering elements, we can close gaps and simply achieve the following: Current situation: /* size: 80, cachelines: 2, members: 10 */ /* sum members: 57, holes: 4, sum holes: 16 */ /* padding: 7 */ /* last cacheline: 16 bytes */ Afterwards: /* size: 64, cachelines: 1, members: 10 */ /* padding: 7 */ Signed-off-by: Daniel Borkmann Acked-by: Neil Horman Signed-off-by: David S. Miller diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 422db6c..2174d8d 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -113,29 +113,27 @@ struct sctp_hashbucket { /* The SCTP globals structure. */ extern struct sctp_globals { - /* The following variables are implementation specific. */ - - /* Default initialization values to be applied to new associations. */ - __u16 max_instreams; - __u16 max_outstreams; - /* This is a list of groups of functions for each address * family that we support. */ struct list_head address_families; /* This is the hash of all endpoints. */ - int ep_hashsize; struct sctp_hashbucket *ep_hashtable; - /* This is the hash of all associations. */ - int assoc_hashsize; struct sctp_hashbucket *assoc_hashtable; - /* This is the sctp port control hash. 
*/ - int port_hashsize; struct sctp_bind_hashbucket *port_hashtable; + /* Sizes of above hashtables. */ + int ep_hashsize; + int assoc_hashsize; + int port_hashsize; + + /* Default initialization values to be applied to new associations. */ + __u16 max_instreams; + __u16 max_outstreams; + /* Flag to indicate whether computing and verifying checksum * is disabled. */ bool checksum_disable; -- cgit v0.10.2 From fd094808a06e290432fc13c09aae808aea34d2ca Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Tue, 27 Aug 2013 12:03:53 +0100 Subject: bridge: inherit slave devices needed_headroom Some slave devices may have set a dev->needed_headroom value which is different than the default one, most likely in order to prepend a hardware descriptor in front of the Ethernet frame to send. Whenever a new slave is added to a bridge, ensure that we update the needed_headroom value accordingly to account for the slave needed_headroom value. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index aa6c9a8..c41d5fb 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -383,6 +383,9 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) netdev_update_features(br->dev); + if (br->dev->needed_headroom < dev->needed_headroom) + br->dev->needed_headroom = dev->needed_headroom; + spin_lock_bh(&br->lock); changed_addr = br_stp_recalculate_bridge_id(br); -- cgit v0.10.2 From 7daa78e352fec0f638e1e665365779678845fd66 Mon Sep 17 00:00:00 2001 From: Boris BREZILLON Date: Tue, 27 Aug 2013 14:36:14 +0200 Subject: net/cadence/macb: fix invalid 0 return if no phy is discovered on mii init Replace misleading -1 (-EPERM) by a more appropriate return code (-ENXIO) in macb_mii_probe function. Save macb_mii_probe return before branching to err_out_unregister to avoid erronous 0 return. Signed-off-by: Boris BREZILLON Acked-by: Nicolas Ferre Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index fe06ab0..7660c45 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -276,7 +276,7 @@ static int macb_mii_probe(struct net_device *dev) phydev = phy_find_first(bp->mii_bus); if (!phydev) { netdev_err(dev, "no PHY found\n"); - return -1; + return -ENXIO; } pdata = dev_get_platdata(&bp->pdev->dev); @@ -379,9 +379,9 @@ int macb_mii_init(struct macb *bp) if (err) goto err_out_free_mdio_irq; - if (macb_mii_probe(bp->dev) != 0) { + err = macb_mii_probe(bp->dev); + if (err) goto err_out_unregister_bus; - } return 0; -- cgit v0.10.2 From a3a975b1dfe999f3e5d38d38f2387894c4332d96 Mon Sep 17 00:00:00 2001 From: Boris BREZILLON Date: Tue, 27 Aug 2013 14:41:53 +0200 Subject: ARM: at91/dt: fix phy address in sama5xmb to match the reg property Fix phy0 address to match the reg property defined in phy0 node. Signed-off-by: Boris BREZILLON Acked-by: Nicolas Ferre Signed-off-by: David S. 
Miller diff --git a/arch/arm/boot/dts/sama5d3xmb.dtsi b/arch/arm/boot/dts/sama5d3xmb.dtsi index e9521d5..dba739b 100644 --- a/arch/arm/boot/dts/sama5d3xmb.dtsi +++ b/arch/arm/boot/dts/sama5d3xmb.dtsi @@ -84,7 +84,7 @@ #address-cells = <1>; #size-cells = <0>; - phy0: ethernet-phy@0 { + phy0: ethernet-phy@1 { interrupt-parent = <&pioE>; interrupts = <30 IRQ_TYPE_EDGE_FALLING>; reg = <1>; -- cgit v0.10.2 From b800c3b966bcf004bd8592293a49ed5cb7ea67a9 Mon Sep 17 00:00:00 2001 From: Hannes Frederic Sowa Date: Tue, 27 Aug 2013 01:36:51 +0200 Subject: ipv6: drop fragmented ndisc packets by default (RFC 6980) This patch implements RFC6980: Drop fragmented ndisc packets by default. If a fragmented ndisc packet is received the user is informed that it is possible to disable the check. Cc: Fernando Gont Cc: YOSHIFUJI Hideaki Signed-off-by: Hannes Frederic Sowa Signed-off-by: David S. Miller diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index debfe85..a2be556 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -1349,6 +1349,12 @@ mldv2_unsolicited_report_interval - INTEGER MLDv2 report retransmit will take place. Default: 1000 (1 second) +suppress_frag_ndisc - INTEGER + Control RFC 6980 (Security Implications of IPv6 Fragmentation + with IPv6 Neighbor Discovery) behavior: + 1 - (default) discard fragmented neighbor discovery packets + 0 - allow fragmented neighbor discovery packets + icmp/*: ratelimit - INTEGER Limit the maximal rates for sending ICMPv6 packets. diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 9ac5047..28ea384 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -50,6 +50,7 @@ struct ipv6_devconf { __s32 accept_dad; __s32 force_tllao; __s32 ndisc_notify; + __s32 suppress_frag_ndisc; void *sysctl; }; diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h index d07ac69..593b0e3 100644 --- a/include/uapi/linux/ipv6.h +++ b/include/uapi/linux/ipv6.h @@ -162,6 +162,7 @@ enum { DEVCONF_NDISC_NOTIFY, DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL, DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL, + DEVCONF_SUPPRESS_FRAG_NDISC, DEVCONF_MAX }; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 2d6d179..a7183fc 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -204,6 +204,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = { .accept_source_route = 0, /* we do not accept RH0 by default. */ .disable_ipv6 = 0, .accept_dad = 1, + .suppress_frag_ndisc = 1, }; static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { @@ -241,6 +242,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { .accept_source_route = 0, /* we do not accept RH0 by default. 
*/ .disable_ipv6 = 0, .accept_dad = 1, + .suppress_frag_ndisc = 1, }; /* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */ @@ -4188,6 +4190,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf, array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad; array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao; array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify; + array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc; } static inline size_t inet6_ifla6_size(void) @@ -5002,6 +5005,13 @@ static struct addrconf_sysctl_table .proc_handler = proc_dointvec }, { + .procname = "suppress_frag_ndisc", + .data = &ipv6_devconf.suppress_frag_ndisc, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec + }, + { /* sentinel */ } }, diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 04d31c2..41720fe 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -1519,10 +1519,27 @@ static void pndisc_redo(struct sk_buff *skb) kfree_skb(skb); } +static bool ndisc_suppress_frag_ndisc(struct sk_buff *skb) +{ + struct inet6_dev *idev = __in6_dev_get(skb->dev); + + if (!idev) + return true; + if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED && + idev->cnf.suppress_frag_ndisc) { + net_warn_ratelimited("Received fragmented ndisc packet. Carefully consider disabling suppress_frag_ndisc.\n"); + return true; + } + return false; +} + int ndisc_rcv(struct sk_buff *skb) { struct nd_msg *msg; + if (ndisc_suppress_frag_ndisc(skb)) + return 0; + if (skb_linearize(skb)) return 0; -- cgit v0.10.2 From 95bd09eb27507691520d39ee1044d6ad831c1168 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 27 Aug 2013 05:46:32 -0700 Subject: tcp: TSO packets automatic sizing After hearing many people over past years complaining against TSO being bursty or even buggy, we are proud to present automatic sizing of TSO packets. One part of the problem is that tcp_tso_should_defer() uses an heuristic relying on upcoming ACKS instead of a timer, but more generally, having big TSO packets makes little sense for low rates, as it tends to create micro bursts on the network, and general consensus is to reduce the buffering amount. This patch introduces a per socket sk_pacing_rate, that approximates the current sending rate, and allows us to size the TSO packets so that we try to send one packet every ms. This field could be set by other transports. Patch has no impact for high speed flows, where having large TSO packets makes sense to reach line rate. For other flows, this helps better packet scheduling and ACK clocking. This patch increases performance of TCP flows in lossy environments. A new sysctl (tcp_min_tso_segs) is added, to specify the minimal size of a TSO packet (default being 2). A follow-up patch will provide a new packet scheduler (FQ), using sk_pacing_rate as an input to perform optional per flow pacing. This explains why we chose to set sk_pacing_rate to twice the current rate, allowing 'slow start' ramp up. sk_pacing_rate = 2 * cwnd * mss / srtt v2: Neal Cardwell reported a suspect deferring of last two segments on initial write of 10 MSS, I had to change tcp_tso_should_defer() to take into account tp->xmit_size_goal_segs Signed-off-by: Eric Dumazet Cc: Neal Cardwell Cc: Yuchung Cheng Cc: Van Jacobson Cc: Tom Herbert Acked-by: Yuchung Cheng Acked-by: Neal Cardwell Signed-off-by: David S. 
Miller diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index a2be556..1cb3aeb 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -482,6 +482,15 @@ tcp_syn_retries - INTEGER tcp_timestamps - BOOLEAN Enable timestamps as defined in RFC1323. +tcp_min_tso_segs - INTEGER + Minimal number of segments per TSO frame. + Since linux-3.12, TCP does an automatic sizing of TSO frames, + depending on flow rate, instead of filling 64Kbytes packets. + For specific usages, it's possible to force TCP to build big + TSO frames. Note that TCP stack might split too big TSO packets + if available window is too small. + Default: 2 + tcp_tso_win_divisor - INTEGER This allows control over what percentage of the congestion window can be consumed by a single TSO frame. diff --git a/include/net/sock.h b/include/net/sock.h index e4bbcbf..6ba2e7b 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -232,6 +232,7 @@ struct cg_proto; * @sk_napi_id: id of the last napi context to receive data for sk * @sk_ll_usec: usecs to busypoll when there is no data * @sk_allocation: allocation mode + * @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler) * @sk_sndbuf: size of send buffer in bytes * @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, * %SO_OOBINLINE settings, %SO_TIMESTAMPING settings @@ -361,6 +362,7 @@ struct sock { kmemcheck_bitfield_end(flags); int sk_wmem_queued; gfp_t sk_allocation; + u32 sk_pacing_rate; /* bytes per second */ netdev_features_t sk_route_caps; netdev_features_t sk_route_nocaps; int sk_gso_type; diff --git a/include/net/tcp.h b/include/net/tcp.h index dd5e16f..6a6a88d 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -281,6 +281,7 @@ extern int sysctl_tcp_early_retrans; extern int sysctl_tcp_limit_output_bytes; extern int sysctl_tcp_challenge_ack_limit; extern unsigned int sysctl_tcp_notsent_lowat; +extern int sysctl_tcp_min_tso_segs; extern atomic_long_t tcp_memory_allocated; extern struct percpu_counter tcp_sockets_allocated; diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 8ed7c32..540279f 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -29,6 +29,7 @@ static int zero; static int one = 1; static int four = 4; +static int gso_max_segs = GSO_MAX_SEGS; static int tcp_retr1_max = 255; static int ip_local_port_range_min[] = { 1, 1 }; static int ip_local_port_range_max[] = { 65535, 65535 }; @@ -761,6 +762,15 @@ static struct ctl_table ipv4_table[] = { .extra2 = &four, }, { + .procname = "tcp_min_tso_segs", + .data = &sysctl_tcp_min_tso_segs, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &gso_max_segs, + }, + { .procname = "udp_mem", .data = &sysctl_udp_mem, .maxlen = sizeof(sysctl_udp_mem), diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 4e42c03..fdf7409 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -283,6 +283,8 @@ int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT; +int sysctl_tcp_min_tso_segs __read_mostly = 2; + struct percpu_counter tcp_orphan_count; EXPORT_SYMBOL_GPL(tcp_orphan_count); @@ -785,12 +787,28 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now, xmit_size_goal = mss_now; if (large_allowed && sk_can_gso(sk)) { - xmit_size_goal = ((sk->sk_gso_max_size - 1) - - inet_csk(sk)->icsk_af_ops->net_header_len - - inet_csk(sk)->icsk_ext_hdr_len - - tp->tcp_header_len); + u32 gso_size, hlen; + + /* Maybe 
we should/could use sk->sk_prot->max_header here ? */ + hlen = inet_csk(sk)->icsk_af_ops->net_header_len + + inet_csk(sk)->icsk_ext_hdr_len + + tp->tcp_header_len; + + /* Goal is to send at least one packet per ms, + * not one big TSO packet every 100 ms. + * This preserves ACK clocking and is consistent + * with tcp_tso_should_defer() heuristic. + */ + gso_size = sk->sk_pacing_rate / (2 * MSEC_PER_SEC); + gso_size = max_t(u32, gso_size, + sysctl_tcp_min_tso_segs * mss_now); + + xmit_size_goal = min_t(u32, gso_size, + sk->sk_gso_max_size - 1 - hlen); - /* TSQ : try to have two TSO segments in flight */ + /* TSQ : try to have at least two segments in flight + * (one in NIC TX ring, another in Qdisc) + */ xmit_size_goal = min_t(u32, xmit_size_goal, sysctl_tcp_limit_output_bytes >> 1); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index ec492ea..1a84fff 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -688,6 +688,34 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt) } } +/* Set the sk_pacing_rate to allow proper sizing of TSO packets. + * Note: TCP stack does not yet implement pacing. + * FQ packet scheduler can be used to implement cheap but effective + * TCP pacing, to smooth the burst on large writes when packets + * in flight is significantly lower than cwnd (or rwin) + */ +static void tcp_update_pacing_rate(struct sock *sk) +{ + const struct tcp_sock *tp = tcp_sk(sk); + u64 rate; + + /* set sk_pacing_rate to 200 % of current rate (mss * cwnd / srtt) */ + rate = (u64)tp->mss_cache * 2 * (HZ << 3); + + rate *= max(tp->snd_cwnd, tp->packets_out); + + /* Correction for small srtt : minimum srtt being 8 (1 jiffy << 3), + * be conservative and assume srtt = 1 (125 us instead of 1.25 ms) + * We probably need usec resolution in the future. + * Note: This also takes care of possible srtt=0 case, + * when tcp_rtt_estimator() was not yet called. + */ + if (tp->srtt > 8 + 2) + do_div(rate, tp->srtt); + + sk->sk_pacing_rate = min_t(u64, rate, ~0U); +} + /* Calculate rto without backoff. This is the second half of Van Jacobson's * routine referred to above. */ @@ -3278,7 +3306,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) u32 ack_seq = TCP_SKB_CB(skb)->seq; u32 ack = TCP_SKB_CB(skb)->ack_seq; bool is_dupack = false; - u32 prior_in_flight; + u32 prior_in_flight, prior_cwnd = tp->snd_cwnd, prior_rtt = tp->srtt; u32 prior_fackets; int prior_packets = tp->packets_out; const int prior_unsacked = tp->packets_out - tp->sacked_out; @@ -3383,6 +3411,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) if (icsk->icsk_pending == ICSK_TIME_RETRANS) tcp_schedule_loss_probe(sk); + if (tp->srtt != prior_rtt || tp->snd_cwnd != prior_cwnd) + tcp_update_pacing_rate(sk); return 1; no_queue: diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 884efff..e63ae4c 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1631,7 +1631,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) /* If a full-sized TSO skb can be sent, do it. */ if (limit >= min_t(unsigned int, sk->sk_gso_max_size, - sk->sk_gso_max_segs * tp->mss_cache)) + tp->xmit_size_goal_segs * tp->mss_cache)) goto send_now; /* Middle in queue won't get any more data, full sendable already? 
*/ -- cgit v0.10.2 From 7613f5fe11c518c16b6b50dabb4964052766b73b Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 27 Aug 2013 16:53:52 +0200 Subject: net: sctp: sctp_verify_init: clean up mandatory checks and add comment Add a comment related to RFC4960 explaning why we do not check for initial TSN, and while at it, remove yoda notation checks and clean up code from checks of mandatory conditions. That's probably just really minor, but makes reviewing easier. Signed-off-by: Daniel Borkmann Acked-by: Neil Horman Signed-off-by: David S. Miller diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 01e9783..d244a23 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -2240,25 +2240,23 @@ int sctp_verify_init(struct net *net, const struct sctp_association *asoc, struct sctp_chunk **errp) { union sctp_params param; - int has_cookie = 0; + bool has_cookie = false; int result; - /* Verify stream values are non-zero. */ - if ((0 == peer_init->init_hdr.num_outbound_streams) || - (0 == peer_init->init_hdr.num_inbound_streams) || - (0 == peer_init->init_hdr.init_tag) || - (SCTP_DEFAULT_MINWINDOW > ntohl(peer_init->init_hdr.a_rwnd))) { - + /* Check for missing mandatory parameters. Note: Initial TSN is + * also mandatory, but is not checked here since the valid range + * is 0..2**32-1. RFC4960, section 3.3.3. + */ + if (peer_init->init_hdr.num_outbound_streams == 0 || + peer_init->init_hdr.num_inbound_streams == 0 || + peer_init->init_hdr.init_tag == 0 || + ntohl(peer_init->init_hdr.a_rwnd) < SCTP_DEFAULT_MINWINDOW) return sctp_process_inv_mandatory(asoc, chunk, errp); - } - /* Check for missing mandatory parameters. */ sctp_walk_params(param, peer_init, init_hdr.params) { - - if (SCTP_PARAM_STATE_COOKIE == param.p->type) - has_cookie = 1; - - } /* for (loop through all parameters) */ + if (param.p->type == SCTP_PARAM_STATE_COOKIE) + has_cookie = true; + } /* There is a possibility that a parameter length was bad and * in that case we would have stoped walking the parameters. -- cgit v0.10.2 From 322555f52bec948a3aa6b526453f66bb63b6e283 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Tue, 27 Aug 2013 17:35:08 -0300 Subject: fec: Use NAPI_POLL_WEIGHT Instead of using a custom 'FEC_NAPI_WEIGHT', just use the generic 'NAPI_POLL_WEIGHT' definition instead. Signed-off-by: Fabio Estevam Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index fdf9307..c88a129 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -69,7 +69,6 @@ static void set_multicast_list(struct net_device *ndev); #endif #define DRIVER_NAME "fec" -#define FEC_NAPI_WEIGHT 64 /* Pause frame feild and FIFO threshold */ #define FEC_ENET_FCE (1 << 5) @@ -1981,7 +1980,7 @@ static int fec_enet_init(struct net_device *ndev) ndev->ethtool_ops = &fec_enet_ethtool_ops; writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); - netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT); + netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT); if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN) { /* enable hw VLAN support */ -- cgit v0.10.2 From aa9d85605f5ab070b64842b3eba797cf81698ae1 Mon Sep 17 00:00:00 2001 From: Veaceslav Falico Date: Wed, 28 Aug 2013 23:25:04 +0200 Subject: net: rename netdev_upper to netdev_adjacent Rename the structure to reflect the upcoming addition of lower_dev_list. CC: "David S. 
Miller" CC: Eric Dumazet CC: Jiri Pirko CC: Alexander Duyck CC: Cong Wang Signed-off-by: Veaceslav Falico Signed-off-by: David S. Miller diff --git a/net/core/dev.c b/net/core/dev.c index 1ed2b66..5072e2c 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4367,7 +4367,7 @@ softnet_break: goto out; } -struct netdev_upper { +struct netdev_adjacent { struct net_device *dev; bool master; struct list_head list; @@ -4378,7 +4378,7 @@ struct netdev_upper { static void __append_search_uppers(struct list_head *search_list, struct net_device *dev) { - struct netdev_upper *upper; + struct netdev_adjacent *upper; list_for_each_entry(upper, &dev->upper_dev_list, list) { /* check if this upper is not already in search list */ @@ -4391,8 +4391,8 @@ static bool __netdev_search_upper_dev(struct net_device *dev, struct net_device *upper_dev) { LIST_HEAD(search_list); - struct netdev_upper *upper; - struct netdev_upper *tmp; + struct netdev_adjacent *upper; + struct netdev_adjacent *tmp; bool ret = false; __append_search_uppers(&search_list, dev); @@ -4408,10 +4408,10 @@ static bool __netdev_search_upper_dev(struct net_device *dev, return ret; } -static struct netdev_upper *__netdev_find_upper(struct net_device *dev, +static struct netdev_adjacent *__netdev_find_upper(struct net_device *dev, struct net_device *upper_dev) { - struct netdev_upper *upper; + struct netdev_adjacent *upper; list_for_each_entry(upper, &dev->upper_dev_list, list) { if (upper->dev == upper_dev) @@ -4462,7 +4462,7 @@ EXPORT_SYMBOL(netdev_has_any_upper_dev); */ struct net_device *netdev_master_upper_dev_get(struct net_device *dev) { - struct netdev_upper *upper; + struct netdev_adjacent *upper; ASSERT_RTNL(); @@ -4470,7 +4470,7 @@ struct net_device *netdev_master_upper_dev_get(struct net_device *dev) return NULL; upper = list_first_entry(&dev->upper_dev_list, - struct netdev_upper, list); + struct netdev_adjacent, list); if (likely(upper->master)) return upper->dev; return NULL; @@ -4486,10 +4486,10 @@ EXPORT_SYMBOL(netdev_master_upper_dev_get); */ struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev) { - struct netdev_upper *upper; + struct netdev_adjacent *upper; upper = list_first_or_null_rcu(&dev->upper_dev_list, - struct netdev_upper, list); + struct netdev_adjacent, list); if (upper && likely(upper->master)) return upper->dev; return NULL; @@ -4499,7 +4499,7 @@ EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu); static int __netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev, bool master) { - struct netdev_upper *upper; + struct netdev_adjacent *upper; ASSERT_RTNL(); @@ -4580,7 +4580,7 @@ EXPORT_SYMBOL(netdev_master_upper_dev_link); void netdev_upper_dev_unlink(struct net_device *dev, struct net_device *upper_dev) { - struct netdev_upper *upper; + struct netdev_adjacent *upper; ASSERT_RTNL(); -- cgit v0.10.2 From 5d261913ca3daf6c2d21d38924235667b3d07c40 Mon Sep 17 00:00:00 2001 From: Veaceslav Falico Date: Wed, 28 Aug 2013 23:25:05 +0200 Subject: net: add lower_dev_list to net_device and make a full mesh This patch adds lower_dev_list list_head to net_device, which is the same as upper_dev_list, only for lower devices, and begins to use it in the same way as the upper list. It also changes the way the whole adjacent device lists work - now they contain *all* of upper/lower devices, not only the first level. The first level devices are distinguished by the bool neighbour field in netdev_adjacent, also added by this patch. 
There are cases when a device can be added several times to the adjacent list, the simplest would be: /---- eth0.10 ---\ eth0- --- bond0 \---- eth0.20 ---/ where both bond0 and eth0 'see' each other in the adjacent lists two times. To avoid duplication of netdev_adjacent structures ref_nr is being kept as the number of times the device was added to the list. The 'full view' is achieved by adding, on link creation, all of the upper_dev's upper_dev_list devices as upper devices to all of the lower_dev's lower_dev_list devices (and to the lower_dev itself), and vice versa. On unlink they are removed using the same logic. I've tested it with thousands vlans/bonds/bridges, everything works ok and no observable lags even on a huge number of interfaces. Memory footprint for 128 devices interconnected with each other via both upper and lower (which is impossible, but for the comparison) lists would be: 128*128*2*sizeof(netdev_adjacent) = 1.5MB but in the real world we usualy have at most several devices with slaves and a lot of vlans, so the footprint will be much lower. CC: "David S. Miller" CC: Eric Dumazet CC: Jiri Pirko CC: Alexander Duyck CC: Cong Wang Signed-off-by: Veaceslav Falico Signed-off-by: David S. Miller diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 077363d..5ccf5b7 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1125,6 +1125,7 @@ struct net_device { struct list_head napi_list; struct list_head unreg_list; struct list_head upper_dev_list; /* List of upper devices */ + struct list_head lower_dev_list; /* currently active device features */ diff --git a/net/core/dev.c b/net/core/dev.c index 5072e2c..2aa914e 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4369,7 +4369,16 @@ softnet_break: struct netdev_adjacent { struct net_device *dev; + + /* upper master flag, there can only be one master device per list */ bool master; + + /* indicates that this dev is our first-level lower/upper device */ + bool neighbour; + + /* counter for the number of times this device was added to us */ + u16 ref_nr; + struct list_head list; struct rcu_head rcu; struct list_head search_list; @@ -4408,18 +4417,34 @@ static bool __netdev_search_upper_dev(struct net_device *dev, return ret; } -static struct netdev_adjacent *__netdev_find_upper(struct net_device *dev, - struct net_device *upper_dev) +static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev, + struct net_device *adj_dev, + bool upper) { - struct netdev_adjacent *upper; + struct netdev_adjacent *adj; + struct list_head *dev_list; - list_for_each_entry(upper, &dev->upper_dev_list, list) { - if (upper->dev == upper_dev) - return upper; + dev_list = upper ? 
&dev->upper_dev_list : &dev->lower_dev_list; + + list_for_each_entry(adj, dev_list, list) { + if (adj->dev == adj_dev) + return adj; } return NULL; } +static inline struct netdev_adjacent *__netdev_find_upper(struct net_device *dev, + struct net_device *udev) +{ + return __netdev_find_adj(dev, udev, true); +} + +static inline struct netdev_adjacent *__netdev_find_lower(struct net_device *dev, + struct net_device *ldev) +{ + return __netdev_find_adj(dev, ldev, false); +} + /** * netdev_has_upper_dev - Check if device is linked to an upper device * @dev: device @@ -4496,10 +4521,149 @@ struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev) } EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu); +static int __netdev_adjacent_dev_insert(struct net_device *dev, + struct net_device *adj_dev, + bool neighbour, bool master, + bool upper) +{ + struct netdev_adjacent *adj; + + adj = __netdev_find_adj(dev, adj_dev, upper); + + if (adj) { + BUG_ON(neighbour); + adj->ref_nr++; + return 0; + } + + adj = kmalloc(sizeof(*adj), GFP_KERNEL); + if (!adj) + return -ENOMEM; + + adj->dev = adj_dev; + adj->master = master; + adj->neighbour = neighbour; + adj->ref_nr = 1; + INIT_LIST_HEAD(&adj->search_list); + + dev_hold(adj_dev); + pr_debug("dev_hold for %s, because of %s link added from %s to %s\n", + adj_dev->name, upper ? "upper" : "lower", dev->name, + adj_dev->name); + + if (!upper) { + list_add_tail_rcu(&adj->list, &dev->lower_dev_list); + return 0; + } + + /* Ensure that master upper link is always the first item in list. */ + if (master) + list_add_rcu(&adj->list, &dev->upper_dev_list); + else + list_add_tail_rcu(&adj->list, &dev->upper_dev_list); + + return 0; +} + +static inline int __netdev_upper_dev_insert(struct net_device *dev, + struct net_device *udev, + bool master, bool neighbour) +{ + return __netdev_adjacent_dev_insert(dev, udev, neighbour, master, + true); +} + +static inline int __netdev_lower_dev_insert(struct net_device *dev, + struct net_device *ldev, + bool neighbour) +{ + return __netdev_adjacent_dev_insert(dev, ldev, neighbour, false, + false); +} + +void __netdev_adjacent_dev_remove(struct net_device *dev, + struct net_device *adj_dev, bool upper) +{ + struct netdev_adjacent *adj; + + if (upper) + adj = __netdev_find_upper(dev, adj_dev); + else + adj = __netdev_find_lower(dev, adj_dev); + + if (!adj) + BUG(); + + if (adj->ref_nr > 1) { + adj->ref_nr--; + return; + } + + list_del_rcu(&adj->list); + pr_debug("dev_put for %s, because of %s link removed from %s to %s\n", + adj_dev->name, upper ? 
"upper" : "lower", dev->name, + adj_dev->name); + dev_put(adj_dev); + kfree_rcu(adj, rcu); +} + +static inline void __netdev_upper_dev_remove(struct net_device *dev, + struct net_device *udev) +{ + return __netdev_adjacent_dev_remove(dev, udev, true); +} + +static inline void __netdev_lower_dev_remove(struct net_device *dev, + struct net_device *ldev) +{ + return __netdev_adjacent_dev_remove(dev, ldev, false); +} + +int __netdev_adjacent_dev_insert_link(struct net_device *dev, + struct net_device *upper_dev, + bool master, bool neighbour) +{ + int ret; + + ret = __netdev_upper_dev_insert(dev, upper_dev, master, neighbour); + if (ret) + return ret; + + ret = __netdev_lower_dev_insert(upper_dev, dev, neighbour); + if (ret) { + __netdev_upper_dev_remove(dev, upper_dev); + return ret; + } + + return 0; +} + +static inline int __netdev_adjacent_dev_link(struct net_device *dev, + struct net_device *udev) +{ + return __netdev_adjacent_dev_insert_link(dev, udev, false, false); +} + +static inline int __netdev_adjacent_dev_link_neighbour(struct net_device *dev, + struct net_device *udev, + bool master) +{ + return __netdev_adjacent_dev_insert_link(dev, udev, master, true); +} + +void __netdev_adjacent_dev_unlink(struct net_device *dev, + struct net_device *upper_dev) +{ + __netdev_upper_dev_remove(dev, upper_dev); + __netdev_lower_dev_remove(upper_dev, dev); +} + + static int __netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev, bool master) { - struct netdev_adjacent *upper; + struct netdev_adjacent *i, *j, *to_i, *to_j; + int ret = 0; ASSERT_RTNL(); @@ -4516,22 +4680,76 @@ static int __netdev_upper_dev_link(struct net_device *dev, if (master && netdev_master_upper_dev_get(dev)) return -EBUSY; - upper = kmalloc(sizeof(*upper), GFP_KERNEL); - if (!upper) - return -ENOMEM; + ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, master); + if (ret) + return ret; - upper->dev = upper_dev; - upper->master = master; - INIT_LIST_HEAD(&upper->search_list); + /* Now that we linked these devs, make all the upper_dev's + * upper_dev_list visible to every dev's lower_dev_list and vice + * versa, and don't forget the devices itself. All of these + * links are non-neighbours. + */ + list_for_each_entry(i, &upper_dev->upper_dev_list, list) { + list_for_each_entry(j, &dev->lower_dev_list, list) { + ret = __netdev_adjacent_dev_link(i->dev, j->dev); + if (ret) + goto rollback_mesh; + } + } + + /* add dev to every upper_dev's upper device */ + list_for_each_entry(i, &upper_dev->upper_dev_list, list) { + ret = __netdev_adjacent_dev_link(dev, i->dev); + if (ret) + goto rollback_upper_mesh; + } + + /* add upper_dev to every dev's lower device */ + list_for_each_entry(i, &dev->lower_dev_list, list) { + ret = __netdev_adjacent_dev_link(i->dev, upper_dev); + if (ret) + goto rollback_lower_mesh; + } - /* Ensure that master upper link is always the first item in list. 
*/ - if (master) - list_add_rcu(&upper->list, &dev->upper_dev_list); - else - list_add_tail_rcu(&upper->list, &dev->upper_dev_list); - dev_hold(upper_dev); call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev); return 0; + +rollback_lower_mesh: + to_i = i; + list_for_each_entry(i, &dev->lower_dev_list, list) { + if (i == to_i) + break; + __netdev_adjacent_dev_unlink(i->dev, upper_dev); + } + + i = NULL; + +rollback_upper_mesh: + to_i = i; + list_for_each_entry(i, &upper_dev->upper_dev_list, list) { + if (i == to_i) + break; + __netdev_adjacent_dev_unlink(dev, i->dev); + } + + i = j = NULL; + +rollback_mesh: + to_i = i; + to_j = j; + list_for_each_entry(i, &dev->lower_dev_list, list) { + list_for_each_entry(j, &upper_dev->upper_dev_list, list) { + if (i == to_i && j == to_j) + break; + __netdev_adjacent_dev_unlink(i->dev, j->dev); + } + if (i == to_i) + break; + } + + __netdev_adjacent_dev_unlink(dev, upper_dev); + + return ret; } /** @@ -4580,16 +4798,28 @@ EXPORT_SYMBOL(netdev_master_upper_dev_link); void netdev_upper_dev_unlink(struct net_device *dev, struct net_device *upper_dev) { - struct netdev_adjacent *upper; - + struct netdev_adjacent *i, *j; ASSERT_RTNL(); - upper = __netdev_find_upper(dev, upper_dev); - if (!upper) - return; - list_del_rcu(&upper->list); - dev_put(upper_dev); - kfree_rcu(upper, rcu); + __netdev_adjacent_dev_unlink(dev, upper_dev); + + /* Here is the tricky part. We must remove all dev's lower + * devices from all upper_dev's upper devices and vice + * versa, to maintain the graph relationship. + */ + list_for_each_entry(i, &dev->lower_dev_list, list) + list_for_each_entry(j, &upper_dev->upper_dev_list, list) + __netdev_adjacent_dev_unlink(i->dev, j->dev); + + /* remove also the devices itself from lower/upper device + * list + */ + list_for_each_entry(i, &dev->lower_dev_list, list) + __netdev_adjacent_dev_unlink(i->dev, upper_dev); + + list_for_each_entry(i, &upper_dev->upper_dev_list, list) + __netdev_adjacent_dev_unlink(dev, i->dev); + call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev); } EXPORT_SYMBOL(netdev_upper_dev_unlink); @@ -5850,6 +6080,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, INIT_LIST_HEAD(&dev->unreg_list); INIT_LIST_HEAD(&dev->link_watch_list); INIT_LIST_HEAD(&dev->upper_dev_list); + INIT_LIST_HEAD(&dev->lower_dev_list); dev->priv_flags = IFF_XMIT_DST_RELEASE; setup(dev); -- cgit v0.10.2 From 620f3186caa8124e0efaf329751cf51c5d55c731 Mon Sep 17 00:00:00 2001 From: Veaceslav Falico Date: Wed, 28 Aug 2013 23:25:06 +0200 Subject: net: remove search_list from netdev_adjacent We already don't need it cause we see every upper/lower device in the list already. CC: "David S. Miller" CC: Eric Dumazet CC: Jiri Pirko CC: Alexander Duyck CC: Cong Wang Signed-off-by: Veaceslav Falico Signed-off-by: David S. 
Miller diff --git a/net/core/dev.c b/net/core/dev.c index 2aa914e..749925a 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4381,42 +4381,8 @@ struct netdev_adjacent { struct list_head list; struct rcu_head rcu; - struct list_head search_list; }; -static void __append_search_uppers(struct list_head *search_list, - struct net_device *dev) -{ - struct netdev_adjacent *upper; - - list_for_each_entry(upper, &dev->upper_dev_list, list) { - /* check if this upper is not already in search list */ - if (list_empty(&upper->search_list)) - list_add_tail(&upper->search_list, search_list); - } -} - -static bool __netdev_search_upper_dev(struct net_device *dev, - struct net_device *upper_dev) -{ - LIST_HEAD(search_list); - struct netdev_adjacent *upper; - struct netdev_adjacent *tmp; - bool ret = false; - - __append_search_uppers(&search_list, dev); - list_for_each_entry(upper, &search_list, search_list) { - if (upper->dev == upper_dev) { - ret = true; - break; - } - __append_search_uppers(&search_list, upper->dev); - } - list_for_each_entry_safe(upper, tmp, &search_list, search_list) - INIT_LIST_HEAD(&upper->search_list); - return ret; -} - static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev, struct net_device *adj_dev, bool upper) @@ -4544,7 +4510,6 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev, adj->master = master; adj->neighbour = neighbour; adj->ref_nr = 1; - INIT_LIST_HEAD(&adj->search_list); dev_hold(adj_dev); pr_debug("dev_hold for %s, because of %s link added from %s to %s\n", @@ -4671,7 +4636,7 @@ static int __netdev_upper_dev_link(struct net_device *dev, return -EBUSY; /* To prevent loops, check if dev is not upper device to upper_dev. */ - if (__netdev_search_upper_dev(upper_dev, dev)) + if (__netdev_find_upper(upper_dev, dev)) return -EBUSY; if (__netdev_find_upper(dev, upper_dev)) -- cgit v0.10.2 From 48311f46853c0361f9fba7e0e6bb1652d633c049 Mon Sep 17 00:00:00 2001 From: Veaceslav Falico Date: Wed, 28 Aug 2013 23:25:07 +0200 Subject: net: add netdev_upper_get_next_dev_rcu(dev, iter) This function returns the next dev in the dev->upper_dev_list after the struct list_head **iter position, and updates *iter accordingly. Returns NULL if there are no devices left. Caller must hold RCU read lock. CC: "David S. Miller" CC: Eric Dumazet CC: Jiri Pirko CC: Alexander Duyck CC: Cong Wang Signed-off-by: Veaceslav Falico Signed-off-by: David S. Miller diff --git a/net/core/dev.c b/net/core/dev.c index 749925a..6fbb0c9 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4468,6 +4468,31 @@ struct net_device *netdev_master_upper_dev_get(struct net_device *dev) } EXPORT_SYMBOL(netdev_master_upper_dev_get); +/* netdev_upper_get_next_dev_rcu - Get the next dev from upper list + * @dev: device + * @iter: list_head ** of the current position + * + * Gets the next device from the dev's upper list, starting from iter + * position. The caller must hold RCU read lock. 
+ */ +struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, + struct list_head **iter) +{ + struct netdev_adjacent *upper; + + WARN_ON_ONCE(!rcu_read_lock_held()); + + upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); + + if (&upper->list == &dev->upper_dev_list) + return NULL; + + *iter = &upper->list; + + return upper->dev; +} +EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu); + /** * netdev_master_upper_dev_get_rcu - Get master upper device * @dev: device -- cgit v0.10.2 From 8b5be8561b804edf6b58fc27edbccf1d45863e08 Mon Sep 17 00:00:00 2001 From: Veaceslav Falico Date: Wed, 28 Aug 2013 23:25:08 +0200 Subject: net: add netdev_for_each_upper_dev_rcu() The new macro netdev_for_each_upper_dev_rcu(dev, upper, iter) iterates through the dev->upper_dev_list starting from the first element, using the netdev_upper_get_next_dev_rcu(dev, &iter). Must be called under RCU read lock. CC: "David S. Miller" CC: Eric Dumazet CC: Jiri Pirko CC: Alexander Duyck CC: Cong Wang Signed-off-by: Veaceslav Falico Signed-off-by: David S. Miller diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 5ccf5b7..3ad49b8 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2768,6 +2768,16 @@ extern int bpf_jit_enable; extern bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); extern bool netdev_has_any_upper_dev(struct net_device *dev); +extern struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, + struct list_head **iter); + +/* iterate through upper list, must be called under RCU read lock */ +#define netdev_for_each_upper_dev_rcu(dev, upper, iter) \ + for (iter = &(dev)->upper_dev_list, \ + upper = netdev_upper_get_next_dev_rcu(dev, &(iter)); \ + upper; \ + upper = netdev_upper_get_next_dev_rcu(dev, &(iter))) + extern struct net_device *netdev_master_upper_dev_get(struct net_device *dev); extern struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev); extern int netdev_upper_dev_link(struct net_device *dev, -- cgit v0.10.2 From c752af2c555617017dbb3a66f81e974b66ad5e2d Mon Sep 17 00:00:00 2001 From: Veaceslav Falico Date: Wed, 28 Aug 2013 23:25:09 +0200 Subject: bonding: use netdev_upper list in bond_vlan_used Convert bond_vlan_used() to traverse the upper device list to see if we have any vlans above us. It's protected by rcu, and in case we are holding rtnl_lock we should call vlan_uses_dev() instead - it's faster. CC: Jay Vosburgh CC: Andy Gospodarek Signed-off-by: Veaceslav Falico Signed-off-by: David S. 
Miller diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 4bf52d5..230197d 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@ -267,9 +267,22 @@ struct bonding { #endif /* CONFIG_DEBUG_FS */ }; +/* if we hold rtnl_lock() - call vlan_uses_dev() */ static inline bool bond_vlan_used(struct bonding *bond) { - return !list_empty(&bond->vlan_list); + struct net_device *upper; + struct list_head *iter; + + rcu_read_lock(); + netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) { + if (upper->priv_flags & IFF_802_1Q_VLAN) { + rcu_read_unlock(); + return true; + } + } + rcu_read_unlock(); + + return false; } #define bond_slave_get_rcu(dev) \ -- cgit v0.10.2 From 27bc11e63888c7cb0bd6d443e98775254cf7dbdd Mon Sep 17 00:00:00 2001 From: Veaceslav Falico Date: Wed, 28 Aug 2013 23:25:10 +0200 Subject: bonding: make bond_arp_send_all use upper device list Currently, bond_arp_send_all() is aware only of vlans, which breaks configurations like bond <- bridge (or any other 'upper' device) with IP (which is quite a common scenario for virt setups). To fix this we convert the bond_arp_send_all() to first verify if the rt device is the bond itself, and if not - to go through its list of upper vlans and their respectiv upper devices (if the vlan's upper device matches - tag the packet), if still not found - go through all of our upper list devices to see if any of them match the route device for the target. If the match is a vlan device - we also save its vlan_id and tag it in bond_arp_send(). Also, clean the function a bit to be more readable. CC: Vlad Yasevich CC: Jay Vosburgh CC: Andy Gospodarek Signed-off-by: Veaceslav Falico Signed-off-by: David S. Miller diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 7407e65..28f4ad0 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2444,30 +2444,16 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) { - int i, vlan_id; - __be32 *targets = bond->params.arp_targets; - struct vlan_entry *vlan; - struct net_device *vlan_dev = NULL; + struct net_device *upper, *vlan_upper; + struct list_head *iter, *vlan_iter; struct rtable *rt; + __be32 *targets = bond->params.arp_targets, addr; + int i, vlan_id; - for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { - __be32 addr; - if (!targets[i]) - break; + for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) { pr_debug("basa: target %pI4\n", &targets[i]); - if (!bond_vlan_used(bond)) { - pr_debug("basa: empty vlan: arp_send\n"); - addr = bond_confirm_addr(bond->dev, targets[i], 0); - bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], - addr, 0); - continue; - } - /* - * If VLANs are configured, we do a route lookup to - * determine which VLAN interface would be used, so we - * can tag the ARP with the proper VLAN tag. 
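As a worked example of the lookup order described above (device names are hypothetical): for a stack bond0 <- bond0.100 <- br0, i.e. a vlan 100 device on top of the bond and a bridge on top of the vlan, where the route to the ARP target points to br0, the first pass over bond0's upper list finds the vlan device bond0.100, sees br0 in bond0.100's own upper list, and the ARP is sent tagged with VLAN id 100. For a plain bond0 <- br0 stack the second pass matches br0 directly and the ARP goes out untagged. Only when neither pass matches is the target skipped with the "no path to arp_ip_target" message.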
- */ + /* Find out through which dev should the packet go */ rt = ip_route_output(dev_net(bond->dev), targets[i], 0, RTO_ONLINK, 0); if (IS_ERR(rt)) { @@ -2478,47 +2464,61 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) continue; } - /* - * This target is not on a VLAN - */ - if (rt->dst.dev == bond->dev) { - ip_rt_put(rt); - pr_debug("basa: rtdev == bond->dev: arp_send\n"); - addr = bond_confirm_addr(bond->dev, targets[i], 0); - bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], - addr, 0); - continue; - } - vlan_id = 0; - list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { - rcu_read_lock(); - vlan_dev = __vlan_find_dev_deep(bond->dev, - htons(ETH_P_8021Q), - vlan->vlan_id); - rcu_read_unlock(); - if (vlan_dev == rt->dst.dev) { - vlan_id = vlan->vlan_id; - pr_debug("basa: vlan match on %s %d\n", - vlan_dev->name, vlan_id); - break; + + /* bond device itself */ + if (rt->dst.dev == bond->dev) + goto found; + + rcu_read_lock(); + /* first we search only for vlan devices. for every vlan + * found we verify its upper dev list, searching for the + * rt->dst.dev. If found we save the tag of the vlan and + * proceed to send the packet. + * + * TODO: QinQ? + */ + netdev_for_each_upper_dev_rcu(bond->dev, vlan_upper, vlan_iter) { + if (!is_vlan_dev(vlan_upper)) + continue; + netdev_for_each_upper_dev_rcu(vlan_upper, upper, iter) { + if (upper == rt->dst.dev) { + vlan_id = vlan_dev_vlan_id(vlan_upper); + rcu_read_unlock(); + goto found; + } } } - if (vlan_id && vlan_dev) { - ip_rt_put(rt); - addr = bond_confirm_addr(vlan_dev, targets[i], 0); - bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], - addr, vlan_id); - continue; + /* if the device we're looking for is not on top of any of + * our upper vlans, then just search for any dev that + * matches, and in case it's a vlan - save the id + */ + netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) { + if (upper == rt->dst.dev) { + /* if it's a vlan - get its VID */ + if (is_vlan_dev(upper)) + vlan_id = vlan_dev_vlan_id(upper); + + rcu_read_unlock(); + goto found; + } } + rcu_read_unlock(); - if (net_ratelimit()) { + /* Not our device - skip */ + if (net_ratelimit()) pr_warning("%s: no path to arp_ip_target %pI4 via rt.dev %s\n", bond->dev->name, &targets[i], rt->dst.dev ? rt->dst.dev->name : "NULL"); - } ip_rt_put(rt); + continue; + +found: + addr = bond_confirm_addr(rt->dst.dev, targets[i], 0); + ip_rt_put(rt); + bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], + addr, vlan_id); } } -- cgit v0.10.2 From 50223ce4be70367ca5d8135bfd4c976e148bc491 Mon Sep 17 00:00:00 2001 From: Veaceslav Falico Date: Wed, 28 Aug 2013 23:25:11 +0200 Subject: bonding: convert bond_has_this_ip() to use upper devices Currently, bond_has_this_ip() is aware only of vlan upper devices, and thus will return false if the address is associated with the upper bridge or any other device, and thus will break the arp logic. Fix this by using the upper device list. For every upper device we verify if the address associated with it is our address, and if yes - return true. CC: Jay Vosburgh CC: Andy Gospodarek Signed-off-by: Veaceslav Falico Signed-off-by: David S. 
Miller diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 28f4ad0..f38d590 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2392,24 +2392,25 @@ re_arm: } } -static int bond_has_this_ip(struct bonding *bond, __be32 ip) +static bool bond_has_this_ip(struct bonding *bond, __be32 ip) { - struct vlan_entry *vlan; - struct net_device *vlan_dev; + struct net_device *upper; + struct list_head *iter; + bool ret = false; if (ip == bond_confirm_addr(bond->dev, 0, ip)) - return 1; + return true; - list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { - rcu_read_lock(); - vlan_dev = __vlan_find_dev_deep(bond->dev, htons(ETH_P_8021Q), - vlan->vlan_id); - rcu_read_unlock(); - if (vlan_dev && ip == bond_confirm_addr(vlan_dev, 0, ip)) - return 1; + rcu_read_lock(); + netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) { + if (ip == bond_confirm_addr(upper, 0, ip)) { + ret = true; + break; + } } + rcu_read_unlock(); - return 0; + return ret; } /* -- cgit v0.10.2 From a59d3d21ea7636d4cc7fb921104b9b4a59196839 Mon Sep 17 00:00:00 2001 From: Veaceslav Falico Date: Wed, 28 Aug 2013 23:25:12 +0200 Subject: bonding: use vlan_uses_dev() in __bond_release_one() We always hold the rtnl_lock() in __bond_release_one(), so use vlan_uses_dev() instead of bond_vlan_used(). CC: Jay Vosburgh CC: Andy Gospodarek Signed-off-by: Veaceslav Falico Signed-off-by: David S. Miller diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index f38d590..55a48d3 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1954,7 +1954,7 @@ static int __bond_release_one(struct net_device *bond_dev, bond_set_carrier(bond); eth_hw_addr_random(bond_dev); - if (bond_vlan_used(bond)) { + if (vlan_uses_dev(bond_dev)) { pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n", bond_dev->name, bond_dev->name); pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n", -- cgit v0.10.2 From 7aa6498123a67001e950808ce6eb5930a4c5945f Mon Sep 17 00:00:00 2001 From: Veaceslav Falico Date: Wed, 28 Aug 2013 23:25:13 +0200 Subject: bonding: split alb_send_learning_packets() Create alb_send_lp_vid(), which will handle the skb/lp creation, vlan tagging and sending, and use it in alb_send_learning_packets(). This way all the logic remains in alb_send_learning_packets(), which becomes a lot more cleaner and easier to understand. CC: Jay Vosburgh CC: Andy Gospodarek Signed-off-by: Veaceslav Falico Signed-off-by: David S. 
Miller diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 3a5db7b..3ca3c85 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -971,35 +971,53 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id) /*********************** tlb/rlb shared functions *********************/ -static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]) +static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[], + u16 vid) { - struct bonding *bond = bond_get_bond_by_slave(slave); struct learning_pkt pkt; + struct sk_buff *skb; int size = sizeof(struct learning_pkt); - int i; + char *data; memset(&pkt, 0, size); memcpy(pkt.mac_dst, mac_addr, ETH_ALEN); memcpy(pkt.mac_src, mac_addr, ETH_ALEN); pkt.type = cpu_to_be16(ETH_P_LOOP); - for (i = 0; i < MAX_LP_BURST; i++) { - struct sk_buff *skb; - char *data; + skb = dev_alloc_skb(size); + if (!skb) + return; - skb = dev_alloc_skb(size); + data = skb_put(skb, size); + memcpy(data, &pkt, size); + + skb_reset_mac_header(skb); + skb->network_header = skb->mac_header + ETH_HLEN; + skb->protocol = pkt.type; + skb->priority = TC_PRIO_CONTROL; + skb->dev = slave->dev; + + if (vid) { + skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vid); if (!skb) { + pr_err("%s: Error: failed to insert VLAN tag\n", + slave->bond->dev->name); return; } + } - data = skb_put(skb, size); - memcpy(data, &pkt, size); + dev_queue_xmit(skb); +} - skb_reset_mac_header(skb); - skb->network_header = skb->mac_header + ETH_HLEN; - skb->protocol = pkt.type; - skb->priority = TC_PRIO_CONTROL; - skb->dev = slave->dev; + +static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]) +{ + struct bonding *bond = bond_get_bond_by_slave(slave); + u16 vlan_id; + int i; + + for (i = 0; i < MAX_LP_BURST; i++) { + vlan_id = 0; if (bond_vlan_used(bond)) { struct vlan_entry *vlan; @@ -1008,20 +1026,13 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]) bond->alb_info.current_alb_vlan); bond->alb_info.current_alb_vlan = vlan; - if (!vlan) { - kfree_skb(skb); + if (!vlan) continue; - } - skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vlan->vlan_id); - if (!skb) { - pr_err("%s: Error: failed to insert VLAN tag\n", - bond->dev->name); - continue; - } + vlan_id = vlan->vlan_id; } - dev_queue_xmit(skb); + alb_send_lp_vid(slave, mac_addr, vlan_id); } } -- cgit v0.10.2 From 5bf94b839a44aa9d8199607c32b95832f2892ee6 Mon Sep 17 00:00:00 2001 From: Veaceslav Falico Date: Wed, 28 Aug 2013 23:25:14 +0200 Subject: bonding: make alb_send_learning_packets() use upper dev list Currently, if there are vlans on top of bond, alb_send_learning_packets() will never send LPs from the bond itself (i.e. untagged), which might leave untagged clients unupdated. Also, the 'circular vlan' logic (i.e. update only MAX_LP_BURST vlans at a time, and save the last vlan for the next update) is really suboptimal - in case of lots of vlans it will take a lot of time to update every vlan. It is also never called in any hot path and sends only a few small packets - thus the optimization by itself is useless. So remove the whole current_alb_vlan/MAX_LP_BURST logic from alb_send_learning_packets(). Instead, we'll first send a packet untagged and then traverse the upper dev list, sending a tagged packet for each vlan found. Also, remove the MAX_LP_BURST define - we already don't need it. CC: Jay Vosburgh CC: Andy Gospodarek Signed-off-by: Veaceslav Falico Signed-off-by: David S. 
Miller diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 3ca3c85..b6a68b4 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -1013,27 +1013,20 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[], static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]) { struct bonding *bond = bond_get_bond_by_slave(slave); - u16 vlan_id; - int i; - - for (i = 0; i < MAX_LP_BURST; i++) { - vlan_id = 0; - - if (bond_vlan_used(bond)) { - struct vlan_entry *vlan; + struct net_device *upper; + struct list_head *iter; - vlan = bond_next_vlan(bond, - bond->alb_info.current_alb_vlan); - - bond->alb_info.current_alb_vlan = vlan; - if (!vlan) - continue; - - vlan_id = vlan->vlan_id; - } + /* send untagged */ + alb_send_lp_vid(slave, mac_addr, 0); - alb_send_lp_vid(slave, mac_addr, vlan_id); + /* loop through vlans and send one packet for each */ + rcu_read_lock(); + netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) { + if (upper->priv_flags & IFF_802_1Q_VLAN) + alb_send_lp_vid(slave, mac_addr, + vlan_dev_vlan_id(upper)); } + rcu_read_unlock(); } static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[]) diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h index e7a5b8b..e139172 100644 --- a/drivers/net/bonding/bond_alb.h +++ b/drivers/net/bonding/bond_alb.h @@ -53,7 +53,6 @@ struct slave; #define TLB_NULL_INDEX 0xffffffff -#define MAX_LP_BURST 3 /* rlb defs */ #define RLB_HASH_TABLE_SIZE 256 -- cgit v0.10.2 From e868b0c938d9cc0d7ed4bd77d5c37676e833ed95 Mon Sep 17 00:00:00 2001 From: Veaceslav Falico Date: Wed, 28 Aug 2013 23:25:15 +0200 Subject: bonding: remove vlan_list/current_alb_vlan Currently there are no real users of vlan_list/current_alb_vlan, only the helpers which maintain them, so remove them. CC: Jay Vosburgh CC: Andy Gospodarek Signed-off-by: Veaceslav Falico Signed-off-by: David S. Miller diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index b6a68b4..0182352 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -1763,11 +1763,6 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr) void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id) { - if (bond->alb_info.current_alb_vlan && - (bond->alb_info.current_alb_vlan->vlan_id == vlan_id)) { - bond->alb_info.current_alb_vlan = NULL; - } - if (bond->alb_info.rlb_enabled) { rlb_clear_vlan(bond, vlan_id); } diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h index e139172..e02c9c5 100644 --- a/drivers/net/bonding/bond_alb.h +++ b/drivers/net/bonding/bond_alb.h @@ -169,7 +169,6 @@ struct alb_bond_info { * rx traffic should be * rebalanced */ - struct vlan_entry *current_alb_vlan; }; int bond_alb_initialize(struct bonding *bond, int rlb_enabled); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 55a48d3..b87ad76 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -283,116 +283,6 @@ const char *bond_mode_name(int mode) /*---------------------------------- VLAN -----------------------------------*/ /** - * bond_add_vlan - add a new vlan id on bond - * @bond: bond that got the notification - * @vlan_id: the vlan id to add - * - * Returns -ENOMEM if allocation failed. - */ -static int bond_add_vlan(struct bonding *bond, unsigned short vlan_id) -{ - struct vlan_entry *vlan; - - pr_debug("bond: %s, vlan id %d\n", - (bond ? 
bond->dev->name : "None"), vlan_id); - - vlan = kzalloc(sizeof(struct vlan_entry), GFP_KERNEL); - if (!vlan) - return -ENOMEM; - - INIT_LIST_HEAD(&vlan->vlan_list); - vlan->vlan_id = vlan_id; - - write_lock_bh(&bond->lock); - - list_add_tail(&vlan->vlan_list, &bond->vlan_list); - - write_unlock_bh(&bond->lock); - - pr_debug("added VLAN ID %d on bond %s\n", vlan_id, bond->dev->name); - - return 0; -} - -/** - * bond_del_vlan - delete a vlan id from bond - * @bond: bond that got the notification - * @vlan_id: the vlan id to delete - * - * returns -ENODEV if @vlan_id was not found in @bond. - */ -static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id) -{ - struct vlan_entry *vlan; - int res = -ENODEV; - - pr_debug("bond: %s, vlan id %d\n", bond->dev->name, vlan_id); - - block_netpoll_tx(); - write_lock_bh(&bond->lock); - - list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { - if (vlan->vlan_id == vlan_id) { - list_del(&vlan->vlan_list); - - if (bond_is_lb(bond)) - bond_alb_clear_vlan(bond, vlan_id); - - pr_debug("removed VLAN ID %d from bond %s\n", - vlan_id, bond->dev->name); - - kfree(vlan); - - res = 0; - goto out; - } - } - - pr_debug("couldn't find VLAN ID %d in bond %s\n", - vlan_id, bond->dev->name); - -out: - write_unlock_bh(&bond->lock); - unblock_netpoll_tx(); - return res; -} - -/** - * bond_next_vlan - safely skip to the next item in the vlans list. - * @bond: the bond we're working on - * @curr: item we're advancing from - * - * Returns %NULL if list is empty, bond->next_vlan if @curr is %NULL, - * or @curr->next otherwise (even if it is @curr itself again). - * - * Caller must hold bond->lock - */ -struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr) -{ - struct vlan_entry *next, *last; - - if (list_empty(&bond->vlan_list)) - return NULL; - - if (!curr) { - next = list_entry(bond->vlan_list.next, - struct vlan_entry, vlan_list); - } else { - last = list_entry(bond->vlan_list.prev, - struct vlan_entry, vlan_list); - if (last == curr) { - next = list_entry(bond->vlan_list.next, - struct vlan_entry, vlan_list); - } else { - next = list_entry(curr->vlan_list.next, - struct vlan_entry, vlan_list); - } - } - - return next; -} - -/** * bond_dev_queue_xmit - Prepare skb for xmit. * * @bond: bond device that got this skb for tx. 
@@ -451,13 +341,6 @@ static int bond_vlan_rx_add_vid(struct net_device *bond_dev, goto unwind; } - res = bond_add_vlan(bond, vid); - if (res) { - pr_err("%s: Error: Failed to add vlan id %d\n", - bond_dev->name, vid); - goto unwind; - } - return 0; unwind: @@ -478,17 +361,12 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, { struct bonding *bond = netdev_priv(bond_dev); struct slave *slave; - int res; bond_for_each_slave(bond, slave) vlan_vid_del(slave->dev, proto, vid); - res = bond_del_vlan(bond, vid); - if (res) { - pr_err("%s: Error: Failed to remove vlan id %d\n", - bond_dev->name, vid); - return res; - } + if (bond_is_lb(bond)) + bond_alb_clear_vlan(bond, vid); return 0; } @@ -4143,7 +4021,6 @@ static void bond_setup(struct net_device *bond_dev) /* Initialize pointers */ bond->dev = bond_dev; - INIT_LIST_HEAD(&bond->vlan_list); /* Initialize the device entry points */ ether_setup(bond_dev); @@ -4196,7 +4073,6 @@ static void bond_uninit(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); struct slave *slave, *tmp_slave; - struct vlan_entry *vlan, *tmp; bond_netpoll_cleanup(bond_dev); @@ -4208,11 +4084,6 @@ static void bond_uninit(struct net_device *bond_dev) list_del(&bond->bond_list); bond_debug_unregister(bond); - - list_for_each_entry_safe(vlan, tmp, &bond->vlan_list, vlan_list) { - list_del(&vlan->vlan_list); - kfree(vlan); - } } /*------------------------- Module initialization ---------------------------*/ diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 230197d..4abc925 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@ -185,11 +185,6 @@ struct bond_parm_tbl { #define BOND_MAX_MODENAME_LEN 20 -struct vlan_entry { - struct list_head vlan_list; - unsigned short vlan_id; -}; - struct slave { struct net_device *dev; /* first - useful for panic debug */ struct list_head list; @@ -254,7 +249,6 @@ struct bonding { struct ad_bond_info ad_info; struct alb_bond_info alb_info; struct bond_params params; - struct list_head vlan_list; struct workqueue_struct *wq; struct delayed_work mii_work; struct delayed_work arp_work; -- cgit v0.10.2 From 3e32582f7d5b6f364ddd31110bf7277d69fd4e91 Mon Sep 17 00:00:00 2001 From: Veaceslav Falico Date: Wed, 28 Aug 2013 23:25:16 +0200 Subject: bonding: pr_debug instead of pr_warn in bond_arp_send_all They're simply annoying and will spam dmesg constantly if we hit them, so convert to pr_debug so that we still can access them in case of debugging. CC: Jay Vosburgh CC: Andy Gospodarek Signed-off-by: Veaceslav Falico Signed-off-by: David S. Miller diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b87ad76..c50679f 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2336,10 +2336,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) rt = ip_route_output(dev_net(bond->dev), targets[i], 0, RTO_ONLINK, 0); if (IS_ERR(rt)) { - if (net_ratelimit()) { - pr_warning("%s: no route to arp_ip_target %pI4\n", - bond->dev->name, &targets[i]); - } + pr_debug("%s: no route to arp_ip_target %pI4\n", + bond->dev->name, &targets[i]); continue; } @@ -2386,10 +2384,10 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) rcu_read_unlock(); /* Not our device - skip */ - if (net_ratelimit()) - pr_warning("%s: no path to arp_ip_target %pI4 via rt.dev %s\n", - bond->dev->name, &targets[i], - rt->dst.dev ? 
rt->dst.dev->name : "NULL"); + pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n", + bond->dev->name, &targets[i], + rt->dst.dev ? rt->dst.dev->name : "NULL"); + ip_rt_put(rt); continue; -- cgit v0.10.2 From 488594883e25cc6e40df067a9a7b39737ebb18d8 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Fri, 30 Aug 2013 00:24:47 +0400 Subject: sh_eth: no need to call ether_setup() There's no need to call ether_setup() in the driver since prior alloc_etherdev() call already arranges for it. Suggested-by: Denis Kirjanov Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index c3570764..4b47300 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2639,9 +2639,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev) SET_NETDEV_DEV(ndev, &pdev->dev); - /* Fill in the fields of the device structure with ethernet values. */ - ether_setup(ndev); - mdp = netdev_priv(ndev); mdp->num_tx_ring = TX_RING_SIZE; mdp->num_rx_ring = RX_RING_SIZE; -- cgit v0.10.2 From 5df0ddfbc9209ffafc82236509ba0e975120e3c3 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 28 Aug 2013 22:13:09 +0200 Subject: net: packet: add randomized fanout scheduler We currently allow for different fanout scheduling policies in pf_packet such as scheduling by skb's rxhash, round-robin, by cpu, and rollover. Also allow for a random, equidistributed selection of the socket from the fanout process group. Signed-off-by: Daniel Borkmann Signed-off-by: David S. Miller diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h index b950c02..dbf0666 100644 --- a/include/uapi/linux/if_packet.h +++ b/include/uapi/linux/if_packet.h @@ -56,6 +56,7 @@ struct sockaddr_ll { #define PACKET_FANOUT_LB 1 #define PACKET_FANOUT_CPU 2 #define PACKET_FANOUT_ROLLOVER 3 +#define PACKET_FANOUT_RND 4 #define PACKET_FANOUT_FLAG_ROLLOVER 0x1000 #define PACKET_FANOUT_FLAG_DEFRAG 0x8000 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 1fdf9ab..bee9bfd 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -88,7 +88,7 @@ #include #include #include - +#include #ifdef CONFIG_INET #include #endif @@ -1158,6 +1158,13 @@ static unsigned int fanout_demux_cpu(struct packet_fanout *f, return smp_processor_id() % num; } +static unsigned int fanout_demux_rnd(struct packet_fanout *f, + struct sk_buff *skb, + unsigned int num) +{ + return reciprocal_divide(prandom_u32(), num); +} + static unsigned int fanout_demux_rollover(struct packet_fanout *f, struct sk_buff *skb, unsigned int idx, unsigned int skip, @@ -1215,6 +1222,9 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev, case PACKET_FANOUT_CPU: idx = fanout_demux_cpu(f, skb, num); break; + case PACKET_FANOUT_RND: + idx = fanout_demux_rnd(f, skb, num); + break; case PACKET_FANOUT_ROLLOVER: idx = fanout_demux_rollover(f, skb, 0, (unsigned int) -1, num); break; @@ -1284,6 +1294,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) case PACKET_FANOUT_HASH: case PACKET_FANOUT_LB: case PACKET_FANOUT_CPU: + case PACKET_FANOUT_RND: break; default: return -EINVAL; -- cgit v0.10.2 From f55d112e529386af3667fac3a507132a361b2154 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 28 Aug 2013 22:13:10 +0200 Subject: net: packet: use reciprocal_divide in fanout_demux_hash Instead of hard-coding reciprocal_divide function, use the inline function from reciprocal_div.h. 
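For reference, reciprocal_divide(x, num) evaluates to ((u64)x * num) >> 32, i.e. it maps a 32-bit input roughly uniformly onto [0, num) with a multiply and a shift instead of a division; with num = 4, for example, an input of 0x80000000 selects index 2. Feeding it prandom_u32(), as the PACKET_FANOUT_RND mode above does, therefore picks each socket of the fanout group with equal probability. A minimal userspace sketch of joining such a group follows (the group id 42 and the helper name are arbitrary, error handling is trimmed, and the uapi headers must already carry PACKET_FANOUT_RND):

#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

static int join_rnd_fanout(void)
{
	/* one AF_PACKET socket per worker process/thread */
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	/* low 16 bits: fanout group id, high 16 bits: fanout mode */
	int fanout_arg = 42 | (PACKET_FANOUT_RND << 16);

	if (fd < 0)
		return -1;
	if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
		       &fanout_arg, sizeof(fanout_arg)) < 0)
		return -1;
	return fd;
}

Every socket that joins group 42 with the same mode then receives a randomly selected, equal share of the matching packets.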
Signed-off-by: Daniel Borkmann Signed-off-by: David S. Miller diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index bee9bfd..2e8286b 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1135,7 +1135,7 @@ static unsigned int fanout_demux_hash(struct packet_fanout *f, struct sk_buff *skb, unsigned int num) { - return (((u64)skb->rxhash) * num) >> 32; + return reciprocal_divide(skb->rxhash, num); } static unsigned int fanout_demux_lb(struct packet_fanout *f, -- cgit v0.10.2 From 7ec06da81d2b98859b558d8d551a0c4e3d9516a3 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 28 Aug 2013 22:13:11 +0200 Subject: net: packet: document available fanout policies Update documentation to add fanout policies that are available. Signed-off-by: Daniel Borkmann Signed-off-by: David S. Miller diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt index 8572796..c012236 100644 --- a/Documentation/networking/packet_mmap.txt +++ b/Documentation/networking/packet_mmap.txt @@ -543,6 +543,14 @@ TPACKET_V2 --> TPACKET_V3: In the AF_PACKET fanout mode, packet reception can be load balanced among processes. This also works in combination with mmap(2) on packet sockets. +Currently implemented fanout policies are: + + - PACKET_FANOUT_HASH: schedule to socket by skb's rxhash + - PACKET_FANOUT_LB: schedule to socket by round-robin + - PACKET_FANOUT_CPU: schedule to socket by CPU packet arrives on + - PACKET_FANOUT_RND: schedule to socket by random selection + - PACKET_FANOUT_ROLLOVER: if one socket is full, rollover to another + Minimal example code by David S. Miller (try things like "./test eth0 hash", "./test eth0 lb", etc.): -- cgit v0.10.2 From 3a09b12e0c2ef26a0e2230606a6159d3038a1572 Mon Sep 17 00:00:00 2001 From: Gerhard Sittig Date: Fri, 23 Aug 2013 13:09:02 +0200 Subject: can: mscan: add a comment on reg to idx mapping add a comment about the magic of deriving an MSCAN component index from the peripheral's physical address / register offset Cc: linux-can@vger.kernel.org Cc: netdev@vger.kernel.org Signed-off-by: Gerhard Sittig Signed-off-by: Anatolij Gustschin Signed-off-by: Marc Kleine-Budde diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c index 5b0ee8e..bc422ba 100644 --- a/drivers/net/can/mscan/mpc5xxx_can.c +++ b/drivers/net/can/mscan/mpc5xxx_can.c @@ -148,7 +148,10 @@ static u32 mpc512x_can_get_clock(struct platform_device *ofdev, goto exit_put; } - /* Determine the MSCAN device index from the physical address */ + /* Determine the MSCAN device index from the peripheral's + * physical address. Register address offsets against the + * IMMR base are: 0x1300, 0x1380, 0x2300, 0x2380 + */ pval = of_get_property(ofdev->dev.of_node, "reg", &plen); BUG_ON(!pval || plen < sizeof(*pval)); clockidx = (*pval & 0x80) ? 
1 : 0; -- cgit v0.10.2 From 1149108e2fbf98899447d4567901bf07825ee576 Mon Sep 17 00:00:00 2001 From: Gerhard Sittig Date: Fri, 23 Aug 2013 13:09:03 +0200 Subject: can: mscan: improve clock API use the .get_clock() callback is run from probe() and might allocate resources, introduce a .put_clock() callback that is run from remove() to undo any allocation activities prepare and enable the clocks in open(), disable and unprepare the clocks in close() if clocks were acquired during probe(), to not assume knowledge about which activities are done in probe() and remove() use devm_get_clk() to lookup the SYS and REF clocks, to have the clocks put upon device shutdown store pointers to data structures upon successful allocation already instead of deferral until complete setup, such that subroutines in the setup sequence may access those data structures as well to track their resource acquisition since clock allocation remains optional, the release callback as well as the enable/disable calls in open/close are optional as well Cc: linux-can@vger.kernel.org Cc: netdev@vger.kernel.org Signed-off-by: Gerhard Sittig Signed-off-by: Anatolij Gustschin Signed-off-by: Marc Kleine-Budde diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c index bc422ba..e59b3a3 100644 --- a/drivers/net/can/mscan/mpc5xxx_can.c +++ b/drivers/net/can/mscan/mpc5xxx_can.c @@ -40,6 +40,7 @@ struct mpc5xxx_can_data { unsigned int type; u32 (*get_clock)(struct platform_device *ofdev, const char *clock_name, int *mscan_clksrc); + void (*put_clock)(struct platform_device *ofdev); }; #ifdef CONFIG_PPC_MPC52xx @@ -180,7 +181,7 @@ static u32 mpc512x_can_get_clock(struct platform_device *ofdev, clockdiv = 1; if (!clock_name || !strcmp(clock_name, "sys")) { - sys_clk = clk_get(&ofdev->dev, "sys_clk"); + sys_clk = devm_clk_get(&ofdev->dev, "sys_clk"); if (IS_ERR(sys_clk)) { dev_err(&ofdev->dev, "couldn't get sys_clk\n"); goto exit_unmap; @@ -203,7 +204,7 @@ static u32 mpc512x_can_get_clock(struct platform_device *ofdev, } if (clocksrc < 0) { - ref_clk = clk_get(&ofdev->dev, "ref_clk"); + ref_clk = devm_clk_get(&ofdev->dev, "ref_clk"); if (IS_ERR(ref_clk)) { dev_err(&ofdev->dev, "couldn't get ref_clk\n"); goto exit_unmap; @@ -280,6 +281,8 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev) dev = alloc_mscandev(); if (!dev) goto exit_dispose_irq; + platform_set_drvdata(ofdev, dev); + SET_NETDEV_DEV(dev, &ofdev->dev); priv = netdev_priv(dev); priv->reg_base = base; @@ -296,8 +299,6 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev) goto exit_free_mscan; } - SET_NETDEV_DEV(dev, &ofdev->dev); - err = register_mscandev(dev, mscan_clksrc); if (err) { dev_err(&ofdev->dev, "registering %s failed (err=%d)\n", @@ -305,8 +306,6 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev) goto exit_free_mscan; } - platform_set_drvdata(ofdev, dev); - dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n", priv->reg_base, dev->irq, priv->can.clock.freq); @@ -324,10 +323,17 @@ exit_unmap_mem: static int mpc5xxx_can_remove(struct platform_device *ofdev) { + const struct of_device_id *match; + const struct mpc5xxx_can_data *data; struct net_device *dev = platform_get_drvdata(ofdev); struct mscan_priv *priv = netdev_priv(dev); + match = of_match_device(mpc5xxx_can_table, &ofdev->dev); + data = match ? 
match->data : NULL; + unregister_mscandev(dev); + if (data && data->put_clock) + data->put_clock(ofdev); iounmap(priv->reg_base); irq_dispose_mapping(dev->irq); free_candev(dev); diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c index e6b4095..a955ec8 100644 --- a/drivers/net/can/mscan/mscan.c +++ b/drivers/net/can/mscan/mscan.c @@ -573,10 +573,21 @@ static int mscan_open(struct net_device *dev) struct mscan_priv *priv = netdev_priv(dev); struct mscan_regs __iomem *regs = priv->reg_base; + if (priv->clk_ipg) { + ret = clk_prepare_enable(priv->clk_ipg); + if (ret) + goto exit_retcode; + } + if (priv->clk_can) { + ret = clk_prepare_enable(priv->clk_can); + if (ret) + goto exit_dis_ipg_clock; + } + /* common open */ ret = open_candev(dev); if (ret) - return ret; + goto exit_dis_can_clock; napi_enable(&priv->napi); @@ -604,6 +615,13 @@ exit_free_irq: exit_napi_disable: napi_disable(&priv->napi); close_candev(dev); +exit_dis_can_clock: + if (priv->clk_can) + clk_disable_unprepare(priv->clk_can); +exit_dis_ipg_clock: + if (priv->clk_ipg) + clk_disable_unprepare(priv->clk_ipg); +exit_retcode: return ret; } @@ -621,6 +639,11 @@ static int mscan_close(struct net_device *dev) close_candev(dev); free_irq(dev->irq, dev); + if (priv->clk_can) + clk_disable_unprepare(priv->clk_can); + if (priv->clk_ipg) + clk_disable_unprepare(priv->clk_ipg); + return 0; } diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h index af2ed8b..9c24d60 100644 --- a/drivers/net/can/mscan/mscan.h +++ b/drivers/net/can/mscan/mscan.h @@ -21,6 +21,7 @@ #ifndef __MSCAN_H__ #define __MSCAN_H__ +#include #include /* MSCAN control register 0 (CANCTL0) bits */ @@ -283,6 +284,8 @@ struct mscan_priv { unsigned int type; /* MSCAN type variants */ unsigned long flags; void __iomem *reg_base; /* ioremap'ed address to registers */ + struct clk *clk_ipg; /* clock for registers */ + struct clk *clk_can; /* clock for bitrates */ u8 shadow_statflg; u8 shadow_canrier; u8 cur_pri; -- cgit v0.10.2 From 391ac1282dd7ff1cb8245cccc5262e8e4173edc4 Mon Sep 17 00:00:00 2001 From: Oliver Hartkopp Date: Mon, 26 Aug 2013 15:05:36 +0200 Subject: can: gw: add a per rule limitation of frame hops Usually the received CAN frames can be processed/routed as much as 'max_hops' times (which is given at module load time of the can-gw module). Introduce a new configuration option to reduce the number of possible hops for a specific gateway rule to a value smaller then max_hops. Signed-off-by: Oliver Hartkopp Signed-off-by: Marc Kleine-Budde diff --git a/include/uapi/linux/can/gw.h b/include/uapi/linux/can/gw.h index ae07bec..4e27c82 100644 --- a/include/uapi/linux/can/gw.h +++ b/include/uapi/linux/can/gw.h @@ -45,6 +45,7 @@ enum { CGW_DST_IF, /* ifindex of destination network interface */ CGW_FILTER, /* specify struct can_filter on source CAN device */ CGW_DELETED, /* number of deleted CAN frames (see max_hops param) */ + CGW_LIM_HOPS, /* limit the number of hops of this specific rule */ __CGW_MAX }; @@ -116,13 +117,19 @@ enum { * Sets a CAN receive filter for the gateway job specified by the * struct can_filter described in include/linux/can.h * - * CGW_MOD_XXX (length 17 bytes): + * CGW_MOD_(AND|OR|XOR|SET) (length 17 bytes): * Specifies a modification that's done to a received CAN frame before it is * send out to the destination interface. * * data used as operator * affected CAN frame elements * + * CGW_LIM_HOPS (length 1 byte): + * Limit the number of hops of this specific rule. 
Usually the received CAN + * frame can be processed as much as 'max_hops' times (which is given at module + * load time of the can-gw module). This value is used to reduce the number of + * possible hops for this gateway rule to a value smaller then max_hops. + * * CGW_CS_XOR (length 4 bytes): * Set a simple XOR checksum starting with an initial value into * data[result-idx] using data[start-idx] .. data[end-idx] diff --git a/net/can/gw.c b/net/can/gw.c index 2f291f9..3f9b0f3 100644 --- a/net/can/gw.c +++ b/net/can/gw.c @@ -146,6 +146,7 @@ struct cgw_job { /* tbc */ }; u8 gwtype; + u8 limit_hops; u16 flags; }; @@ -402,6 +403,11 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) /* put the incremented hop counter in the cloned skb */ cgw_hops(nskb) = cgw_hops(skb) + 1; + + /* first processing of this CAN frame -> adjust to private hop limit */ + if (gwj->limit_hops && cgw_hops(nskb) == 1) + cgw_hops(nskb) = max_hops - gwj->limit_hops + 1; + nskb->dev = gwj->dst.dev; /* pointer to modifiable CAN frame */ @@ -509,6 +515,11 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type, /* check non default settings of attributes */ + if (gwj->limit_hops) { + if (nla_put_u8(skb, CGW_LIM_HOPS, gwj->limit_hops) < 0) + goto cancel; + } + if (gwj->mod.modtype.and) { memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf)); mb.modtype = gwj->mod.modtype.and; @@ -606,11 +617,12 @@ static const struct nla_policy cgw_policy[CGW_MAX+1] = { [CGW_SRC_IF] = { .type = NLA_U32 }, [CGW_DST_IF] = { .type = NLA_U32 }, [CGW_FILTER] = { .len = sizeof(struct can_filter) }, + [CGW_LIM_HOPS] = { .type = NLA_U8 }, }; /* check for common and gwtype specific attributes */ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod, - u8 gwtype, void *gwtypeattr) + u8 gwtype, void *gwtypeattr, u8 *limhops) { struct nlattr *tb[CGW_MAX+1]; struct cgw_frame_mod mb; @@ -625,6 +637,13 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod, if (err < 0) return err; + if (tb[CGW_LIM_HOPS]) { + *limhops = nla_get_u8(tb[CGW_LIM_HOPS]); + + if (*limhops < 1 || *limhops > max_hops) + return -EINVAL; + } + /* check for AND/OR/XOR/SET modifications */ if (tb[CGW_MOD_AND]) { @@ -782,6 +801,7 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh) { struct rtcanmsg *r; struct cgw_job *gwj; + u8 limhops = 0; int err = 0; if (!capable(CAP_NET_ADMIN)) @@ -808,7 +828,8 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh) gwj->flags = r->flags; gwj->gwtype = r->gwtype; - err = cgw_parse_attr(nlh, &gwj->mod, CGW_TYPE_CAN_CAN, &gwj->ccgw); + err = cgw_parse_attr(nlh, &gwj->mod, CGW_TYPE_CAN_CAN, &gwj->ccgw, + &limhops); if (err < 0) goto out; @@ -836,6 +857,8 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh) if (gwj->dst.dev->type != ARPHRD_CAN || gwj->dst.dev->header_ops) goto put_src_dst_out; + gwj->limit_hops = limhops; + ASSERT_RTNL(); err = cgw_register_filter(gwj); @@ -867,13 +890,14 @@ static void cgw_remove_all_jobs(void) } } -static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh) +static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh) { struct cgw_job *gwj = NULL; struct hlist_node *nx; struct rtcanmsg *r; struct cf_mod mod; struct can_can_gw ccgw; + u8 limhops = 0; int err = 0; if (!capable(CAP_NET_ADMIN)) @@ -890,7 +914,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh) if (r->gwtype != CGW_TYPE_CAN_CAN) return -EINVAL; - err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw); 
+ err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops); if (err < 0) return err; @@ -910,6 +934,9 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh) if (gwj->flags != r->flags) continue; + if (gwj->limit_hops != limhops) + continue; + if (memcmp(&gwj->mod, &mod, sizeof(mod))) continue; -- cgit v0.10.2 From f7a6d2c4427790cc8695401576dc594fcce8fc80 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Thu, 29 Aug 2013 23:32:48 +0100 Subject: sfc: Update copyright banners Update the dates for files that have been added to in 2012-2013. Drop the 'Solarstorm' brand name that's still lingering here. Signed-off-by: Ben Hutchings diff --git a/drivers/net/ethernet/sfc/bitfield.h b/drivers/net/ethernet/sfc/bitfield.h index f45b0db..17d83f3 100644 --- a/drivers/net/ethernet/sfc/bitfield.h +++ b/drivers/net/ethernet/sfc/bitfield.h @@ -1,7 +1,7 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards + * Driver for Solarflare network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2006-2009 Solarflare Communications Inc. + * Copyright 2006-2013 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 5b66c5e..07c9bc4 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1,7 +1,7 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards + * Driver for Solarflare network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2005-2011 Solarflare Communications Inc. + * Copyright 2005-2013 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 9de28f6..34d00f5 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -1,7 +1,7 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards + * Driver for Solarflare network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2006-2010 Solarflare Communications Inc. + * Copyright 2006-2013 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h index 8665921..7fdfee0 100644 --- a/drivers/net/ethernet/sfc/enum.h +++ b/drivers/net/ethernet/sfc/enum.h @@ -1,6 +1,6 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards - * Copyright 2007-2009 Solarflare Communications Inc. + * Driver for Solarflare network controllers and boards + * Copyright 2007-2013 Solarflare Communications Inc. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 78ddb48..5b471cf 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -1,7 +1,7 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards + * Driver for Solarflare network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2006-2010 Solarflare Communications Inc. + * Copyright 2006-2013 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index a7b30ddb..8685f99 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -1,7 +1,7 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards + * Driver for Solarflare network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2006-2010 Solarflare Communications Inc. + * Copyright 2006-2013 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/ethernet/sfc/falcon_boards.c b/drivers/net/ethernet/sfc/falcon_boards.c index ec1e99d..1736f4b 100644 --- a/drivers/net/ethernet/sfc/falcon_boards.c +++ b/drivers/net/ethernet/sfc/falcon_boards.c @@ -1,6 +1,6 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards - * Copyright 2007-2010 Solarflare Communications Inc. + * Driver for Solarflare network controllers and boards + * Copyright 2007-2012 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 65073aa..c0907d8 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -1,7 +1,7 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards + * Driver for Solarflare network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2006-2011 Solarflare Communications Inc. + * Copyright 2006-2013 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/ethernet/sfc/farch_regs.h b/drivers/net/ethernet/sfc/farch_regs.h index 491b103..7019a71 100644 --- a/drivers/net/ethernet/sfc/farch_regs.h +++ b/drivers/net/ethernet/sfc/farch_regs.h @@ -1,7 +1,7 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards + * Driver for Solarflare network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2006-2010 Solarflare Communications Inc. 
+ * Copyright 2006-2012 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h index e459e43..63c77a5 100644 --- a/drivers/net/ethernet/sfc/filter.h +++ b/drivers/net/ethernet/sfc/filter.h @@ -1,6 +1,6 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards - * Copyright 2005-2010 Solarflare Communications Inc. + * Driver for Solarflare network controllers and boards + * Copyright 2005-2013 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h index 416ee4a..96ce507 100644 --- a/drivers/net/ethernet/sfc/io.h +++ b/drivers/net/ethernet/sfc/io.h @@ -1,7 +1,7 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards + * Driver for Solarflare network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2006-2010 Solarflare Communications Inc. + * Copyright 2006-2013 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 272f201..128d7cd 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -1,6 +1,6 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards - * Copyright 2008-2011 Solarflare Communications Inc. + * Driver for Solarflare network controllers and boards + * Copyright 2008-2013 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 5919aca..c34d0d4 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -1,6 +1,6 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards - * Copyright 2008-2010 Solarflare Communications Inc. + * Driver for Solarflare network controllers and boards + * Copyright 2008-2013 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c index e22d938..4cc5d95 100644 --- a/drivers/net/ethernet/sfc/mcdi_mon.c +++ b/drivers/net/ethernet/sfc/mcdi_mon.c @@ -1,6 +1,6 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards - * Copyright 2011 Solarflare Communications Inc. + * Driver for Solarflare network controllers and boards + * Copyright 2011-2013 Solarflare Communications Inc. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c index 03faf2f..8d33da6 100644 --- a/drivers/net/ethernet/sfc/mcdi_port.c +++ b/drivers/net/ethernet/sfc/mcdi_port.c @@ -1,6 +1,6 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards - * Copyright 2009-2010 Solarflare Communications Inc. + * Driver for Solarflare network controllers and boards + * Copyright 2009-2013 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/ethernet/sfc/mdio_10g.c b/drivers/net/ethernet/sfc/mdio_10g.c index 9acfd66..8ff954c 100644 --- a/drivers/net/ethernet/sfc/mdio_10g.c +++ b/drivers/net/ethernet/sfc/mdio_10g.c @@ -1,5 +1,5 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards + * Driver for Solarflare network controllers and boards * Copyright 2006-2011 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it diff --git a/drivers/net/ethernet/sfc/mdio_10g.h b/drivers/net/ethernet/sfc/mdio_10g.h index a97dbbd..16824fe 100644 --- a/drivers/net/ethernet/sfc/mdio_10g.h +++ b/drivers/net/ethernet/sfc/mdio_10g.h @@ -1,5 +1,5 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards + * Driver for Solarflare network controllers and boards * Copyright 2006-2011 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c index 8be9a69..a77a8bd 100644 --- a/drivers/net/ethernet/sfc/mtd.c +++ b/drivers/net/ethernet/sfc/mtd.c @@ -1,7 +1,7 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards + * Driver for Solarflare network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2006-2010 Solarflare Communications Inc. + * Copyright 2006-2013 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 753df15..b172ed1 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -1,7 +1,7 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards + * Driver for Solarflare network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2005-2011 Solarflare Communications Inc. + * Copyright 2005-2013 Solarflare Communications Inc. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index b9b1277..e8e01a1 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -1,7 +1,7 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards + * Driver for Solarflare network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2006-2011 Solarflare Communications Inc. + * Copyright 2006-2013 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 4bd5359..4b1e188 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -1,7 +1,7 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards + * Driver for Solarflare network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2006-2011 Solarflare Communications Inc. + * Copyright 2006-2013 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/ethernet/sfc/phy.h b/drivers/net/ethernet/sfc/phy.h index 4f6eb81..45eeb70 100644 --- a/drivers/net/ethernet/sfc/phy.h +++ b/drivers/net/ethernet/sfc/phy.h @@ -1,5 +1,5 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards + * Driver for Solarflare network controllers and boards * Copyright 2007-2010 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index c60cabb..03acf57 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -1,6 +1,6 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards - * Copyright 2011 Solarflare Communications Inc. + * Driver for Solarflare network controllers and boards + * Copyright 2011-2013 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/ethernet/sfc/qt202x_phy.c b/drivers/net/ethernet/sfc/qt202x_phy.c index 326a286..efa3612 100644 --- a/drivers/net/ethernet/sfc/qt202x_phy.c +++ b/drivers/net/ethernet/sfc/qt202x_phy.c @@ -1,6 +1,6 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards - * Copyright 2006-2010 Solarflare Communications Inc. + * Driver for Solarflare network controllers and boards + * Copyright 2006-2012 Solarflare Communications Inc. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 8c13dd8..4a59672 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -1,7 +1,7 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards + * Driver for Solarflare network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2005-2011 Solarflare Communications Inc. + * Copyright 2005-2013 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c index 716cff9..144bbff 100644 --- a/drivers/net/ethernet/sfc/selftest.c +++ b/drivers/net/ethernet/sfc/selftest.c @@ -1,7 +1,7 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards + * Driver for Solarflare network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2006-2010 Solarflare Communications Inc. + * Copyright 2006-2012 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h index aed24b7..87698ae 100644 --- a/drivers/net/ethernet/sfc/selftest.h +++ b/drivers/net/ethernet/sfc/selftest.h @@ -1,7 +1,7 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards + * Driver for Solarflare network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2006-2010 Solarflare Communications Inc. + * Copyright 2006-2012 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index e8eef63..d034bcd 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -1,7 +1,7 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards + * Driver for Solarflare network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2006-2010 Solarflare Communications Inc. + * Copyright 2006-2013 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c index 4b8eef9..0c38f92 100644 --- a/drivers/net/ethernet/sfc/siena_sriov.c +++ b/drivers/net/ethernet/sfc/siena_sriov.c @@ -1,6 +1,6 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards - * Copyright 2010-2011 Solarflare Communications Inc. + * Driver for Solarflare network controllers and boards + * Copyright 2010-2012 Solarflare Communications Inc. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/ethernet/sfc/tenxpress.c b/drivers/net/ethernet/sfc/tenxpress.c index d37cb50..2c90e6b 100644 --- a/drivers/net/ethernet/sfc/tenxpress.c +++ b/drivers/net/ethernet/sfc/tenxpress.c @@ -1,5 +1,5 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards + * Driver for Solarflare network controllers and boards * Copyright 2007-2011 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 82075f9..2ac91c5 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -1,7 +1,7 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards + * Driver for Solarflare network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2005-2010 Solarflare Communications Inc. + * Copyright 2005-2013 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/ethernet/sfc/txc43128_phy.c b/drivers/net/ethernet/sfc/txc43128_phy.c index 29bb3f9..3d5ee32 100644 --- a/drivers/net/ethernet/sfc/txc43128_phy.c +++ b/drivers/net/ethernet/sfc/txc43128_phy.c @@ -1,5 +1,5 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards + * Driver for Solarflare network controllers and boards * Copyright 2006-2011 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it diff --git a/drivers/net/ethernet/sfc/vfdi.h b/drivers/net/ethernet/sfc/vfdi.h index 225557c..ae044f4 100644 --- a/drivers/net/ethernet/sfc/vfdi.h +++ b/drivers/net/ethernet/sfc/vfdi.h @@ -1,5 +1,5 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards + * Driver for Solarflare network controllers and boards * Copyright 2010-2012 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it diff --git a/drivers/net/ethernet/sfc/workarounds.h b/drivers/net/ethernet/sfc/workarounds.h index 8848ec0..2310b75 100644 --- a/drivers/net/ethernet/sfc/workarounds.h +++ b/drivers/net/ethernet/sfc/workarounds.h @@ -1,6 +1,6 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards - * Copyright 2006-2010 Solarflare Communications Inc. + * Driver for Solarflare network controllers and boards + * Copyright 2006-2013 Solarflare Communications Inc. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published -- cgit v0.10.2 From afe4fd062416b158a8a8538b23adc1930a9b88dc Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 29 Aug 2013 15:49:55 -0700 Subject: pkt_sched: fq: Fair Queue packet scheduler - Uses perfect flow match (not stochastic hash like SFQ/FQ_codel) - Uses the new_flow/old_flow separation from FQ_codel - New flows get an initial credit allowing IW10 without added delay. - Special FIFO queue for high prio packets (no need for PRIO + FQ) - Uses a hash table of RB trees to locate the flows at enqueue() time - Smart on demand gc (at enqueue() time, RB tree lookup evicts old unused flows) - Dynamic memory allocations. - Designed to allow millions of concurrent flows per Qdisc. - Small memory footprint : ~8K per Qdisc, and 104 bytes per flow. - Single high resolution timer for throttled flows (if any). - One RB tree to link throttled flows. - Ability to have a max rate per flow. We might add a socket option to add per socket limitation. Attempts have been made to add TCP pacing in TCP stack, but this seems to add complex code to an already complex stack. TCP pacing is welcomed for flows having idle times, as the cwnd permits TCP stack to queue a possibly large number of packets. This removes the 'slow start after idle' choice, hitting badly large BDP flows, and applications delivering chunks of data as video streams. Nicely spaced packets : Here interface is 10Gbit, but flow bottleneck is ~20Mbit cwin is big, yet FQ avoids the typical bursts generated by TCP (as in netperf TCP_RR -- -r 100000,100000) 15:01:23.545279 IP A > B: . 78193:81089(2896) ack 65248 win 3125 15:01:23.545394 IP B > A: . ack 81089 win 3668 15:01:23.546488 IP A > B: . 81089:83985(2896) ack 65248 win 3125 15:01:23.546565 IP B > A: . ack 83985 win 3668 15:01:23.547713 IP A > B: . 83985:86881(2896) ack 65248 win 3125 15:01:23.547778 IP B > A: . ack 86881 win 3668 15:01:23.548911 IP A > B: . 86881:89777(2896) ack 65248 win 3125 15:01:23.548949 IP B > A: . ack 89777 win 3668 15:01:23.550116 IP A > B: . 89777:92673(2896) ack 65248 win 3125 15:01:23.550182 IP B > A: . ack 92673 win 3668 15:01:23.551333 IP A > B: . 92673:95569(2896) ack 65248 win 3125 15:01:23.551406 IP B > A: . ack 95569 win 3668 15:01:23.552539 IP A > B: . 95569:98465(2896) ack 65248 win 3125 15:01:23.552576 IP B > A: . ack 98465 win 3668 15:01:23.553756 IP A > B: . 98465:99913(1448) ack 65248 win 3125 15:01:23.554138 IP A > B: P 99913:100001(88) ack 65248 win 3125 15:01:23.554204 IP B > A: . ack 100001 win 3668 15:01:23.554234 IP B > A: . 65248:68144(2896) ack 100001 win 3668 15:01:23.555620 IP B > A: . 68144:71040(2896) ack 100001 win 3668 15:01:23.557005 IP B > A: . 71040:73936(2896) ack 100001 win 3668 15:01:23.558390 IP B > A: . 73936:76832(2896) ack 100001 win 3668 15:01:23.559773 IP B > A: . 76832:79728(2896) ack 100001 win 3668 15:01:23.561158 IP B > A: . 79728:82624(2896) ack 100001 win 3668 15:01:23.562543 IP B > A: . 82624:85520(2896) ack 100001 win 3668 15:01:23.563928 IP B > A: . 85520:88416(2896) ack 100001 win 3668 15:01:23.565313 IP B > A: . 88416:91312(2896) ack 100001 win 3668 15:01:23.566698 IP B > A: . 91312:94208(2896) ack 100001 win 3668 15:01:23.568083 IP B > A: . 94208:97104(2896) ack 100001 win 3668 15:01:23.569467 IP B > A: . 97104:100000(2896) ack 100001 win 3668 15:01:23.570852 IP B > A: . 100000:102896(2896) ack 100001 win 3668 15:01:23.572237 IP B > A: . 
102896:105792(2896) ack 100001 win 3668 15:01:23.573639 IP B > A: . 105792:108688(2896) ack 100001 win 3668 15:01:23.575024 IP B > A: . 108688:111584(2896) ack 100001 win 3668 15:01:23.576408 IP B > A: . 111584:114480(2896) ack 100001 win 3668 15:01:23.577793 IP B > A: . 114480:117376(2896) ack 100001 win 3668 TCP timestamps show that most packets from B were queued in the same ms timeframe (TSval 1159799{3,4}), but FQ managed to send them right in time to avoid a big burst. In slow start or steady state, very few packets are throttled [1] FQ gets a bunch of tunables as : limit : max number of packets on whole Qdisc (default 10000) flow_limit : max number of packets per flow (default 100) quantum : the credit per RR round (default is 2 MTU) initial_quantum : initial credit for new flows (default is 10 MTU) maxrate : max per flow rate (default : unlimited) buckets : number of RB trees (default : 1024) in hash table. (consumes 8 bytes per bucket) [no]pacing : disable/enable pacing (default is enable) All of them can be changed on a live qdisc. $ tc qd add dev eth0 root fq help Usage: ... fq [ limit PACKETS ] [ flow_limit PACKETS ] [ quantum BYTES ] [ initial_quantum BYTES ] [ maxrate RATE ] [ buckets NUMBER ] [ [no]pacing ] $ tc -s -d qd qdisc fq 8002: dev eth0 root refcnt 32 limit 10000p flow_limit 100p buckets 256 quantum 3028 initial_quantum 15140 Sent 216532416 bytes 148395 pkt (dropped 0, overlimits 0 requeues 14) backlog 0b 0p requeues 14 511 flows, 511 inactive, 0 throttled 110 gc, 0 highprio, 0 retrans, 1143 throttled, 0 flows_plimit [1] Except if initial srtt is overestimated, as if using cached srtt in tcp metrics. We'll provide a fix for this issue. Signed-off-by: Eric Dumazet Cc: Yuchung Cheng Cc: Neal Cardwell Signed-off-by: David S. Miller diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index 09d62b92..9b82913 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -744,4 +744,45 @@ struct tc_fq_codel_xstats { }; }; +/* FQ */ + +enum { + TCA_FQ_UNSPEC, + + TCA_FQ_PLIMIT, /* limit of total number of packets in queue */ + + TCA_FQ_FLOW_PLIMIT, /* limit of packets per flow */ + + TCA_FQ_QUANTUM, /* RR quantum */ + + TCA_FQ_INITIAL_QUANTUM, /* RR quantum for new flow */ + + TCA_FQ_RATE_ENABLE, /* enable/disable rate limiting */ + + TCA_FQ_FLOW_DEFAULT_RATE,/* for sockets with unspecified sk_rate, + * use the following rate + */ + + TCA_FQ_FLOW_MAX_RATE, /* per flow max rate */ + + TCA_FQ_BUCKETS_LOG, /* log2(number of buckets) */ + __TCA_FQ_MAX +}; + +#define TCA_FQ_MAX (__TCA_FQ_MAX - 1) + +struct tc_fq_qd_stats { + __u64 gc_flows; + __u64 highprio_packets; + __u64 tcp_retrans; + __u64 throttled; + __u64 flows_plimit; + __u64 pkts_too_long; + __u64 allocation_errors; + __s64 time_next_delayed_flow; + __u32 flows; + __u32 inactive_flows; + __u32 throttled_flows; + __u32 pad; +}; #endif diff --git a/net/sched/Kconfig b/net/sched/Kconfig index 235e01a..c03a32a 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -272,6 +272,20 @@ config NET_SCH_FQ_CODEL If unsure, say N. +config NET_SCH_FQ + tristate "Fair Queue" + help + Say Y here if you want to use the FQ packet scheduling algorithm. + + FQ does flow separation, and is able to respect pacing requirements + set by TCP stack into sk->sk_pacing_rate (for localy generated + traffic) + + To compile this driver as a module, choose M here: the module + will be called sch_fq. + + If unsure, say N. 
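
The pacing behaviour this option refers to boils down to a simple per-flow spacing rule: once a flow has used up its credit, fq_dequeue() (added later in this patch) delays the flow's next transmission by pkt_len / rate seconds, where rate is the socket's sk_pacing_rate capped by maxrate, and the delay is clamped to 125 ms. A rough, self-contained userspace sketch of that arithmetic follows; the function name and the example numbers are illustrative, not taken from the patch:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC	1000000000ULL
#define NSEC_PER_MSEC	1000000ULL

/* Gap (in ns) FQ would leave before the flow's next packet, for a packet of
 * pkt_len bytes on a flow paced at rate bytes per second; rate == 0 means
 * the flow is not rate limited at all. */
static uint64_t fq_pacing_gap_ns(uint32_t pkt_len, uint32_t rate)
{
	uint64_t gap;

	if (!rate)
		return 0;

	gap = (uint64_t)pkt_len * NSEC_PER_SEC / rate;

	/* same clamp as in the patch: a very large (GSO) skb must not
	 * stall the flow for more than 125 ms */
	if (gap > 125 * NSEC_PER_MSEC)
		gap = 125 * NSEC_PER_MSEC;

	return gap;
}

int main(void)
{
	/* 2896-byte skbs paced at 2.5 MB/s (~20 Mbit/s) give a gap of
	 * 2896 * 1e9 / 2500000 = 1158400 ns, i.e. roughly one skb every
	 * 1.2 ms, in line with the evenly spaced trace in the changelog. */
	printf("gap = %llu ns\n",
	       (unsigned long long)fq_pacing_gap_ns(2896, 2500000));
	return 0;
}
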
+ config NET_SCH_INGRESS tristate "Ingress Qdisc" depends on NET_CLS_ACT diff --git a/net/sched/Makefile b/net/sched/Makefile index 978cbf0..e5f9abe 100644 --- a/net/sched/Makefile +++ b/net/sched/Makefile @@ -39,6 +39,7 @@ obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o obj-$(CONFIG_NET_SCH_CODEL) += sch_codel.o obj-$(CONFIG_NET_SCH_FQ_CODEL) += sch_fq_codel.o +obj-$(CONFIG_NET_SCH_FQ) += sch_fq.o obj-$(CONFIG_NET_CLS_U32) += cls_u32.o obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c new file mode 100644 index 0000000..91ceca7 --- /dev/null +++ b/net/sched/sch_fq.c @@ -0,0 +1,792 @@ +/* + * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing) + * + * Copyright (C) 2013 Eric Dumazet + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Meant to be mostly used for localy generated traffic : + * Fast classification depends on skb->sk being set before reaching us. + * If not, (router workload), we use rxhash as fallback, with 32 bits wide hash. + * All packets belonging to a socket are considered as a 'flow'. + * + * Flows are dynamically allocated and stored in a hash table of RB trees + * They are also part of one Round Robin 'queues' (new or old flows) + * + * Burst avoidance (aka pacing) capability : + * + * Transport (eg TCP) can set in sk->sk_pacing_rate a rate, enqueue a + * bunch of packets, and this packet scheduler adds delay between + * packets to respect rate limitation. + * + * enqueue() : + * - lookup one RB tree (out of 1024 or more) to find the flow. + * If non existent flow, create it, add it to the tree. + * Add skb to the per flow list of skb (fifo). 
+ * - Use a special fifo for high prio packets + * + * dequeue() : serves flows in Round Robin + * Note : When a flow becomes empty, we do not immediately remove it from + * rb trees, for performance reasons (its expected to send additional packets, + * or SLAB cache will reuse socket for another flow) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Per flow structure, dynamically allocated + */ +struct fq_flow { + struct sk_buff *head; /* list of skbs for this flow : first skb */ + union { + struct sk_buff *tail; /* last skb in the list */ + unsigned long age; /* jiffies when flow was emptied, for gc */ + }; + struct rb_node fq_node; /* anchor in fq_root[] trees */ + struct sock *sk; + int qlen; /* number of packets in flow queue */ + int credit; + u32 socket_hash; /* sk_hash */ + struct fq_flow *next; /* next pointer in RR lists, or &detached */ + + struct rb_node rate_node; /* anchor in q->delayed tree */ + u64 time_next_packet; +}; + +struct fq_flow_head { + struct fq_flow *first; + struct fq_flow *last; +}; + +struct fq_sched_data { + struct fq_flow_head new_flows; + + struct fq_flow_head old_flows; + + struct rb_root delayed; /* for rate limited flows */ + u64 time_next_delayed_flow; + + struct fq_flow internal; /* for non classified or high prio packets */ + u32 quantum; + u32 initial_quantum; + u32 flow_default_rate;/* rate per flow : bytes per second */ + u32 flow_max_rate; /* optional max rate per flow */ + u32 flow_plimit; /* max packets per flow */ + struct rb_root *fq_root; + u8 rate_enable; + u8 fq_trees_log; + + u32 flows; + u32 inactive_flows; + u32 throttled_flows; + + u64 stat_gc_flows; + u64 stat_internal_packets; + u64 stat_tcp_retrans; + u64 stat_throttled; + u64 stat_flows_plimit; + u64 stat_pkts_too_long; + u64 stat_allocation_errors; + struct qdisc_watchdog watchdog; +}; + +/* special value to mark a detached flow (not on old/new list) */ +static struct fq_flow detached, throttled; + +static void fq_flow_set_detached(struct fq_flow *f) +{ + f->next = &detached; +} + +static bool fq_flow_is_detached(const struct fq_flow *f) +{ + return f->next == &detached; +} + +static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f) +{ + struct rb_node **p = &q->delayed.rb_node, *parent = NULL; + + while (*p) { + struct fq_flow *aux; + + parent = *p; + aux = container_of(parent, struct fq_flow, rate_node); + if (f->time_next_packet >= aux->time_next_packet) + p = &parent->rb_right; + else + p = &parent->rb_left; + } + rb_link_node(&f->rate_node, parent, p); + rb_insert_color(&f->rate_node, &q->delayed); + q->throttled_flows++; + q->stat_throttled++; + + f->next = &throttled; + if (q->time_next_delayed_flow > f->time_next_packet) + q->time_next_delayed_flow = f->time_next_packet; +} + + +static struct kmem_cache *fq_flow_cachep __read_mostly; + +static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow) +{ + if (head->first) + head->last->next = flow; + else + head->first = flow; + head->last = flow; + flow->next = NULL; +} + +/* limit number of collected flows per round */ +#define FQ_GC_MAX 8 +#define FQ_GC_AGE (3*HZ) + +static bool fq_gc_candidate(const struct fq_flow *f) +{ + return fq_flow_is_detached(f) && + time_after(jiffies, f->age + FQ_GC_AGE); +} + +static void fq_gc(struct fq_sched_data *q, + struct rb_root *root, + struct sock *sk) +{ + struct fq_flow *f, *tofree[FQ_GC_MAX]; + struct rb_node **p, 
*parent; + int fcnt = 0; + + p = &root->rb_node; + parent = NULL; + while (*p) { + parent = *p; + + f = container_of(parent, struct fq_flow, fq_node); + if (f->sk == sk) + break; + + if (fq_gc_candidate(f)) { + tofree[fcnt++] = f; + if (fcnt == FQ_GC_MAX) + break; + } + + if (f->sk > sk) + p = &parent->rb_right; + else + p = &parent->rb_left; + } + + q->flows -= fcnt; + q->inactive_flows -= fcnt; + q->stat_gc_flows += fcnt; + while (fcnt) { + struct fq_flow *f = tofree[--fcnt]; + + rb_erase(&f->fq_node, root); + kmem_cache_free(fq_flow_cachep, f); + } +} + +static const u8 prio2band[TC_PRIO_MAX + 1] = { + 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 +}; + +static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q) +{ + struct rb_node **p, *parent; + struct sock *sk = skb->sk; + struct rb_root *root; + struct fq_flow *f; + int band; + + /* warning: no starvation prevention... */ + band = prio2band[skb->priority & TC_PRIO_MAX]; + if (unlikely(band == 0)) + return &q->internal; + + if (unlikely(!sk)) { + /* By forcing low order bit to 1, we make sure to not + * collide with a local flow (socket pointers are word aligned) + */ + sk = (struct sock *)(skb_get_rxhash(skb) | 1L); + } + + root = &q->fq_root[hash_32((u32)(long)sk, q->fq_trees_log)]; + + if (q->flows >= (2U << q->fq_trees_log) && + q->inactive_flows > q->flows/2) + fq_gc(q, root, sk); + + p = &root->rb_node; + parent = NULL; + while (*p) { + parent = *p; + + f = container_of(parent, struct fq_flow, fq_node); + if (f->sk == sk) { + /* socket might have been reallocated, so check + * if its sk_hash is the same. + * It not, we need to refill credit with + * initial quantum + */ + if (unlikely(skb->sk && + f->socket_hash != sk->sk_hash)) { + f->credit = q->initial_quantum; + f->socket_hash = sk->sk_hash; + } + return f; + } + if (f->sk > sk) + p = &parent->rb_right; + else + p = &parent->rb_left; + } + + f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!f)) { + q->stat_allocation_errors++; + return &q->internal; + } + fq_flow_set_detached(f); + f->sk = sk; + if (skb->sk) + f->socket_hash = sk->sk_hash; + f->credit = q->initial_quantum; + + rb_link_node(&f->fq_node, parent, p); + rb_insert_color(&f->fq_node, root); + + q->flows++; + q->inactive_flows++; + return f; +} + + +/* remove one skb from head of flow queue */ +static struct sk_buff *fq_dequeue_head(struct fq_flow *flow) +{ + struct sk_buff *skb = flow->head; + + if (skb) { + flow->head = skb->next; + skb->next = NULL; + flow->qlen--; + } + return skb; +} + +/* We might add in the future detection of retransmits + * For the time being, just return false + */ +static bool skb_is_retransmit(struct sk_buff *skb) +{ + return false; +} + +/* add skb to flow queue + * flow queue is a linked list, kind of FIFO, except for TCP retransmits + * We special case tcp retransmits to be transmitted before other packets. + * We rely on fact that TCP retransmits are unlikely, so we do not waste + * a separate queue or a pointer. 
+ * head-> [retrans pkt 1] + * [retrans pkt 2] + * [ normal pkt 1] + * [ normal pkt 2] + * [ normal pkt 3] + * tail-> [ normal pkt 4] + */ +static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb) +{ + struct sk_buff *prev, *head = flow->head; + + skb->next = NULL; + if (!head) { + flow->head = skb; + flow->tail = skb; + return; + } + if (likely(!skb_is_retransmit(skb))) { + flow->tail->next = skb; + flow->tail = skb; + return; + } + + /* This skb is a tcp retransmit, + * find the last retrans packet in the queue + */ + prev = NULL; + while (skb_is_retransmit(head)) { + prev = head; + head = head->next; + if (!head) + break; + } + if (!prev) { /* no rtx packet in queue, become the new head */ + skb->next = flow->head; + flow->head = skb; + } else { + if (prev == flow->tail) + flow->tail = skb; + else + skb->next = prev->next; + prev->next = skb; + } +} + +static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch) +{ + struct fq_sched_data *q = qdisc_priv(sch); + struct fq_flow *f; + + if (unlikely(sch->q.qlen >= sch->limit)) + return qdisc_drop(skb, sch); + + f = fq_classify(skb, q); + if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) { + q->stat_flows_plimit++; + return qdisc_drop(skb, sch); + } + + f->qlen++; + flow_queue_add(f, skb); + if (skb_is_retransmit(skb)) + q->stat_tcp_retrans++; + sch->qstats.backlog += qdisc_pkt_len(skb); + if (fq_flow_is_detached(f)) { + fq_flow_add_tail(&q->new_flows, f); + if (q->quantum > f->credit) + f->credit = q->quantum; + q->inactive_flows--; + qdisc_unthrottled(sch); + } + if (unlikely(f == &q->internal)) { + q->stat_internal_packets++; + qdisc_unthrottled(sch); + } + sch->q.qlen++; + + return NET_XMIT_SUCCESS; +} + +static void fq_check_throttled(struct fq_sched_data *q, u64 now) +{ + struct rb_node *p; + + if (q->time_next_delayed_flow > now) + return; + + q->time_next_delayed_flow = ~0ULL; + while ((p = rb_first(&q->delayed)) != NULL) { + struct fq_flow *f = container_of(p, struct fq_flow, rate_node); + + if (f->time_next_packet > now) { + q->time_next_delayed_flow = f->time_next_packet; + break; + } + rb_erase(p, &q->delayed); + q->throttled_flows--; + fq_flow_add_tail(&q->old_flows, f); + } +} + +static struct sk_buff *fq_dequeue(struct Qdisc *sch) +{ + struct fq_sched_data *q = qdisc_priv(sch); + u64 now = ktime_to_ns(ktime_get()); + struct fq_flow_head *head; + struct sk_buff *skb; + struct fq_flow *f; + + skb = fq_dequeue_head(&q->internal); + if (skb) + goto out; + fq_check_throttled(q, now); +begin: + head = &q->new_flows; + if (!head->first) { + head = &q->old_flows; + if (!head->first) { + if (q->time_next_delayed_flow != ~0ULL) + qdisc_watchdog_schedule_ns(&q->watchdog, + q->time_next_delayed_flow); + return NULL; + } + } + f = head->first; + + if (f->credit <= 0) { + f->credit += q->quantum; + head->first = f->next; + fq_flow_add_tail(&q->old_flows, f); + goto begin; + } + + if (unlikely(f->head && now < f->time_next_packet)) { + head->first = f->next; + fq_flow_set_throttled(q, f); + goto begin; + } + + skb = fq_dequeue_head(f); + if (!skb) { + head->first = f->next; + /* force a pass through old_flows to prevent starvation */ + if ((head == &q->new_flows) && q->old_flows.first) { + fq_flow_add_tail(&q->old_flows, f); + } else { + fq_flow_set_detached(f); + f->age = jiffies; + q->inactive_flows++; + } + goto begin; + } + f->time_next_packet = now; + f->credit -= qdisc_pkt_len(skb); + + if (f->credit <= 0 && + q->rate_enable && + skb->sk && skb->sk->sk_state != TCP_TIME_WAIT) { + u32 rate = 
skb->sk->sk_pacing_rate ?: q->flow_default_rate; + + rate = min(rate, q->flow_max_rate); + if (rate) { + u64 len = (u64)qdisc_pkt_len(skb) * NSEC_PER_SEC; + + do_div(len, rate); + /* Since socket rate can change later, + * clamp the delay to 125 ms. + * TODO: maybe segment the too big skb, as in commit + * e43ac79a4bc ("sch_tbf: segment too big GSO packets") + */ + if (unlikely(len > 125 * NSEC_PER_MSEC)) { + len = 125 * NSEC_PER_MSEC; + q->stat_pkts_too_long++; + } + + f->time_next_packet = now + len; + } + } +out: + prefetch(&skb->end); + sch->qstats.backlog -= qdisc_pkt_len(skb); + qdisc_bstats_update(sch, skb); + sch->q.qlen--; + qdisc_unthrottled(sch); + return skb; +} + +static void fq_reset(struct Qdisc *sch) +{ + struct sk_buff *skb; + + while ((skb = fq_dequeue(sch)) != NULL) + kfree_skb(skb); +} + +static void fq_rehash(struct fq_sched_data *q, + struct rb_root *old_array, u32 old_log, + struct rb_root *new_array, u32 new_log) +{ + struct rb_node *op, **np, *parent; + struct rb_root *oroot, *nroot; + struct fq_flow *of, *nf; + int fcnt = 0; + u32 idx; + + for (idx = 0; idx < (1U << old_log); idx++) { + oroot = &old_array[idx]; + while ((op = rb_first(oroot)) != NULL) { + rb_erase(op, oroot); + of = container_of(op, struct fq_flow, fq_node); + if (fq_gc_candidate(of)) { + fcnt++; + kmem_cache_free(fq_flow_cachep, of); + continue; + } + nroot = &new_array[hash_32((u32)(long)of->sk, new_log)]; + + np = &nroot->rb_node; + parent = NULL; + while (*np) { + parent = *np; + + nf = container_of(parent, struct fq_flow, fq_node); + BUG_ON(nf->sk == of->sk); + + if (nf->sk > of->sk) + np = &parent->rb_right; + else + np = &parent->rb_left; + } + + rb_link_node(&of->fq_node, parent, np); + rb_insert_color(&of->fq_node, nroot); + } + } + q->flows -= fcnt; + q->inactive_flows -= fcnt; + q->stat_gc_flows += fcnt; +} + +static int fq_resize(struct fq_sched_data *q, u32 log) +{ + struct rb_root *array; + u32 idx; + + if (q->fq_root && log == q->fq_trees_log) + return 0; + + array = kmalloc(sizeof(struct rb_root) << log, GFP_KERNEL); + if (!array) + return -ENOMEM; + + for (idx = 0; idx < (1U << log); idx++) + array[idx] = RB_ROOT; + + if (q->fq_root) { + fq_rehash(q, q->fq_root, q->fq_trees_log, array, log); + kfree(q->fq_root); + } + q->fq_root = array; + q->fq_trees_log = log; + + return 0; +} + +static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = { + [TCA_FQ_PLIMIT] = { .type = NLA_U32 }, + [TCA_FQ_FLOW_PLIMIT] = { .type = NLA_U32 }, + [TCA_FQ_QUANTUM] = { .type = NLA_U32 }, + [TCA_FQ_INITIAL_QUANTUM] = { .type = NLA_U32 }, + [TCA_FQ_RATE_ENABLE] = { .type = NLA_U32 }, + [TCA_FQ_FLOW_DEFAULT_RATE] = { .type = NLA_U32 }, + [TCA_FQ_FLOW_MAX_RATE] = { .type = NLA_U32 }, + [TCA_FQ_BUCKETS_LOG] = { .type = NLA_U32 }, +}; + +static int fq_change(struct Qdisc *sch, struct nlattr *opt) +{ + struct fq_sched_data *q = qdisc_priv(sch); + struct nlattr *tb[TCA_FQ_MAX + 1]; + int err, drop_count = 0; + u32 fq_log; + + if (!opt) + return -EINVAL; + + err = nla_parse_nested(tb, TCA_FQ_MAX, opt, fq_policy); + if (err < 0) + return err; + + sch_tree_lock(sch); + + fq_log = q->fq_trees_log; + + if (tb[TCA_FQ_BUCKETS_LOG]) { + u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]); + + if (nval >= 1 && nval <= ilog2(256*1024)) + fq_log = nval; + else + err = -EINVAL; + } + if (tb[TCA_FQ_PLIMIT]) + sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]); + + if (tb[TCA_FQ_FLOW_PLIMIT]) + q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]); + + if (tb[TCA_FQ_QUANTUM]) + q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]); + + if 
(tb[TCA_FQ_INITIAL_QUANTUM]) + q->quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]); + + if (tb[TCA_FQ_FLOW_DEFAULT_RATE]) + q->flow_default_rate = nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]); + + if (tb[TCA_FQ_FLOW_MAX_RATE]) + q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]); + + if (tb[TCA_FQ_RATE_ENABLE]) { + u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]); + + if (enable <= 1) + q->rate_enable = enable; + else + err = -EINVAL; + } + + if (!err) + err = fq_resize(q, fq_log); + + while (sch->q.qlen > sch->limit) { + struct sk_buff *skb = fq_dequeue(sch); + + kfree_skb(skb); + drop_count++; + } + qdisc_tree_decrease_qlen(sch, drop_count); + + sch_tree_unlock(sch); + return err; +} + +static void fq_destroy(struct Qdisc *sch) +{ + struct fq_sched_data *q = qdisc_priv(sch); + struct rb_root *root; + struct rb_node *p; + unsigned int idx; + + if (q->fq_root) { + for (idx = 0; idx < (1U << q->fq_trees_log); idx++) { + root = &q->fq_root[idx]; + while ((p = rb_first(root)) != NULL) { + rb_erase(p, root); + kmem_cache_free(fq_flow_cachep, + container_of(p, struct fq_flow, fq_node)); + } + } + kfree(q->fq_root); + } + qdisc_watchdog_cancel(&q->watchdog); +} + +static int fq_init(struct Qdisc *sch, struct nlattr *opt) +{ + struct fq_sched_data *q = qdisc_priv(sch); + int err; + + sch->limit = 10000; + q->flow_plimit = 100; + q->quantum = 2 * psched_mtu(qdisc_dev(sch)); + q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch)); + q->flow_default_rate = 0; + q->flow_max_rate = ~0U; + q->rate_enable = 1; + q->new_flows.first = NULL; + q->old_flows.first = NULL; + q->delayed = RB_ROOT; + q->fq_root = NULL; + q->fq_trees_log = ilog2(1024); + qdisc_watchdog_init(&q->watchdog, sch); + + if (opt) + err = fq_change(sch, opt); + else + err = fq_resize(q, q->fq_trees_log); + + return err; +} + +static int fq_dump(struct Qdisc *sch, struct sk_buff *skb) +{ + struct fq_sched_data *q = qdisc_priv(sch); + struct nlattr *opts; + + opts = nla_nest_start(skb, TCA_OPTIONS); + if (opts == NULL) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) || + nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) || + nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) || + nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) || + nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) || + nla_put_u32(skb, TCA_FQ_FLOW_DEFAULT_RATE, q->flow_default_rate) || + nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) || + nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log)) + goto nla_put_failure; + + nla_nest_end(skb, opts); + return skb->len; + +nla_put_failure: + return -1; +} + +static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d) +{ + struct fq_sched_data *q = qdisc_priv(sch); + u64 now = ktime_to_ns(ktime_get()); + struct tc_fq_qd_stats st = { + .gc_flows = q->stat_gc_flows, + .highprio_packets = q->stat_internal_packets, + .tcp_retrans = q->stat_tcp_retrans, + .throttled = q->stat_throttled, + .flows_plimit = q->stat_flows_plimit, + .pkts_too_long = q->stat_pkts_too_long, + .allocation_errors = q->stat_allocation_errors, + .flows = q->flows, + .inactive_flows = q->inactive_flows, + .throttled_flows = q->throttled_flows, + .time_next_delayed_flow = q->time_next_delayed_flow - now, + }; + + return gnet_stats_copy_app(d, &st, sizeof(st)); +} + +static struct Qdisc_ops fq_qdisc_ops __read_mostly = { + .id = "fq", + .priv_size = sizeof(struct fq_sched_data), + + .enqueue = fq_enqueue, + .dequeue = fq_dequeue, + .peek = qdisc_peek_dequeued, + .init = fq_init, + .reset = fq_reset, + 
.destroy = fq_destroy, + .change = fq_change, + .dump = fq_dump, + .dump_stats = fq_dump_stats, + .owner = THIS_MODULE, +}; + +static int __init fq_module_init(void) +{ + int ret; + + fq_flow_cachep = kmem_cache_create("fq_flow_cache", + sizeof(struct fq_flow), + 0, 0, NULL); + if (!fq_flow_cachep) + return -ENOMEM; + + ret = register_qdisc(&fq_qdisc_ops); + if (ret) + kmem_cache_destroy(fq_flow_cachep); + return ret; +} + +static void __exit fq_module_exit(void) +{ + unregister_qdisc(&fq_qdisc_ops); + kmem_cache_destroy(fq_flow_cachep); +} + +module_init(fq_module_init) +module_exit(fq_module_exit) +MODULE_AUTHOR("Eric Dumazet"); +MODULE_LICENSE("GPL"); -- cgit v0.10.2 From ede23fa8161c1a04aa1b3bf5447812ca14b3fef1 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Mon, 26 Aug 2013 22:45:23 -0700 Subject: drivers:net: Convert dma_alloc_coherent(...__GFP_ZERO) to dma_zalloc_coherent __GFP_ZERO is an uncommon flag and perhaps is better not used. static inline dma_zalloc_coherent exists so convert the uses of dma_alloc_coherent with __GFP_ZERO to the more common kernel style with zalloc. Remove memset from the static inline dma_zalloc_coherent and add just one use of __GFP_ZERO instead. Trivially reduces the size of the existing uses of dma_zalloc_coherent. Realign arguments as appropriate. Signed-off-by: Joe Perches Acked-by: Neil Horman Acked-by: Jesse Brandeburg Acked-by: Jeff Kirsher Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index 7ff4b30..e066945 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -1464,18 +1464,18 @@ static int greth_of_probe(struct platform_device *ofdev) } /* Allocate TX descriptor ring in coherent memory */ - greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024, - &greth->tx_bd_base_phys, - GFP_KERNEL | __GFP_ZERO); + greth->tx_bd_base = dma_zalloc_coherent(greth->dev, 1024, + &greth->tx_bd_base_phys, + GFP_KERNEL); if (!greth->tx_bd_base) { err = -ENOMEM; goto error3; } /* Allocate RX descriptor ring in coherent memory */ - greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024, - &greth->rx_bd_base_phys, - GFP_KERNEL | __GFP_ZERO); + greth->rx_bd_base = dma_zalloc_coherent(greth->dev, 1024, + &greth->rx_bd_base_phys, + GFP_KERNEL); if (!greth->rx_bd_base) { err = -ENOMEM; goto error4; diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 190219e..98b6870 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -948,8 +948,7 @@ static int bcm_enet_open(struct net_device *dev) /* allocate rx dma ring */ size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); - p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, - GFP_KERNEL | __GFP_ZERO); + p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); if (!p) { ret = -ENOMEM; goto out_freeirq_tx; @@ -960,8 +959,7 @@ static int bcm_enet_open(struct net_device *dev) /* allocate tx dma ring */ size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); - p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, - GFP_KERNEL | __GFP_ZERO); + p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); if (!p) { ret = -ENOMEM; goto out_free_rx_ring; diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 4148058..e838a3f 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -853,9 +853,8 @@ 
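
For reference, the wrapper that callers are being converted to is essentially a one-liner once the memset is dropped in favour of a single __GFP_ZERO, as the changelog above describes. A sketch of its post-patch shape, not a verbatim copy of include/linux/dma-mapping.h:

/* kernel context (include/linux/dma-mapping.h), illustrative sketch only */
static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	/* zeroing is pushed down into the allocator via __GFP_ZERO
	 * rather than done with an explicit memset() in the wrapper */
	return dma_alloc_coherent(dev, size, dma_handle, flag | __GFP_ZERO);
}

So dma_zalloc_coherent(dev, size, &dma, GFP_KERNEL) is equivalent to the dma_alloc_coherent(..., GFP_KERNEL | __GFP_ZERO) calls being replaced throughout this patch.
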
bnx2_alloc_mem(struct bnx2 *bp) bp->status_stats_size = status_blk_size + sizeof(struct statistics_block); - status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size, - &bp->status_blk_mapping, - GFP_KERNEL | __GFP_ZERO); + status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size, + &bp->status_blk_mapping, GFP_KERNEL); if (status_blk == NULL) goto alloc_mem_err; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 12202f8..3e77a1b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -2069,9 +2069,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf); -#define BNX2X_ILT_ZALLOC(x, y, size) \ - x = dma_alloc_coherent(&bp->pdev->dev, size, y, \ - GFP_KERNEL | __GFP_ZERO) +#define BNX2X_ILT_ZALLOC(x, y, size) \ + x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL) #define BNX2X_ILT_FREE(x, y, size) \ do { \ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 38be494..affb764 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -51,8 +51,7 @@ extern int int_mode; #define BNX2X_PCI_ALLOC(x, y, size) \ do { \ - x = dma_alloc_coherent(&bp->pdev->dev, size, y, \ - GFP_KERNEL | __GFP_ZERO); \ + x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ if (x == NULL) \ goto alloc_mem_err; \ DP(NETIF_MSG_HW, "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \ diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 95b8995..2e55ee2 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -8591,10 +8591,10 @@ static int tg3_mem_rx_acquire(struct tg3 *tp) if (!i && tg3_flag(tp, ENABLE_RSS)) continue; - tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev, - TG3_RX_RCB_RING_BYTES(tp), - &tnapi->rx_rcb_mapping, - GFP_KERNEL | __GFP_ZERO); + tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev, + TG3_RX_RCB_RING_BYTES(tp), + &tnapi->rx_rcb_mapping, + GFP_KERNEL); if (!tnapi->rx_rcb) goto err_out; } @@ -8643,10 +8643,9 @@ static int tg3_alloc_consistent(struct tg3 *tp) { int i; - tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev, - sizeof(struct tg3_hw_stats), - &tp->stats_mapping, - GFP_KERNEL | __GFP_ZERO); + tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev, + sizeof(struct tg3_hw_stats), + &tp->stats_mapping, GFP_KERNEL); if (!tp->hw_stats) goto err_out; @@ -8654,10 +8653,10 @@ static int tg3_alloc_consistent(struct tg3 *tp) struct tg3_napi *tnapi = &tp->napi[i]; struct tg3_hw_status *sblk; - tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev, - TG3_HW_STATUS_SIZE, - &tnapi->status_mapping, - GFP_KERNEL | __GFP_ZERO); + tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev, + TG3_HW_STATUS_SIZE, + &tnapi->status_mapping, + GFP_KERNEL); if (!tnapi->hw_status) goto err_out; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 50116f8..39e0a76 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -145,8 +145,8 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q, q->len = len; q->entry_size = entry_size; mem->size = len * entry_size; - mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma, - GFP_KERNEL 
| __GFP_ZERO); + mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma, + GFP_KERNEL); if (!mem->va) return -ENOMEM; return 0; @@ -2642,8 +2642,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable) memset(mac, 0, ETH_ALEN); cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); - cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, - GFP_KERNEL | __GFP_ZERO); + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_KERNEL); if (cmd.va == NULL) return -1; @@ -3946,9 +3946,9 @@ static int be_ctrl_init(struct be_adapter *adapter) memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); rx_filter->size = sizeof(struct be_cmd_req_rx_filter); - rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size, - &rx_filter->dma, - GFP_KERNEL | __GFP_ZERO); + rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev, + rx_filter->size, &rx_filter->dma, + GFP_KERNEL); if (rx_filter->va == NULL) { status = -ENOMEM; goto free_mbox; @@ -3994,8 +3994,8 @@ static int be_stats_init(struct be_adapter *adapter) /* BE3 and Skyhawk */ cmd->size = sizeof(struct be_cmd_req_get_stats_v1); - cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma, - GFP_KERNEL | __GFP_ZERO); + cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma, + GFP_KERNEL); if (cmd->va == NULL) return -1; return 0; diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 934e1ae..212f44b 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -778,10 +778,9 @@ static int ftgmac100_alloc_buffers(struct ftgmac100 *priv) { int i; - priv->descs = dma_alloc_coherent(priv->dev, - sizeof(struct ftgmac100_descs), - &priv->descs_dma_addr, - GFP_KERNEL | __GFP_ZERO); + priv->descs = dma_zalloc_coherent(priv->dev, + sizeof(struct ftgmac100_descs), + &priv->descs_dma_addr, GFP_KERNEL); if (!priv->descs) return -ENOMEM; diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c index 4658f4c..8be5b40 100644 --- a/drivers/net/ethernet/faraday/ftmac100.c +++ b/drivers/net/ethernet/faraday/ftmac100.c @@ -732,10 +732,10 @@ static int ftmac100_alloc_buffers(struct ftmac100 *priv) { int i; - priv->descs = dma_alloc_coherent(priv->dev, - sizeof(struct ftmac100_descs), - &priv->descs_dma_addr, - GFP_KERNEL | __GFP_ZERO); + priv->descs = dma_zalloc_coherent(priv->dev, + sizeof(struct ftmac100_descs), + &priv->descs_dma_addr, + GFP_KERNEL); if (!priv->descs) return -ENOMEM; diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c index 856ea66..dac564c 100644 --- a/drivers/net/ethernet/ibm/emac/mal.c +++ b/drivers/net/ethernet/ibm/emac/mal.c @@ -637,8 +637,8 @@ static int mal_probe(struct platform_device *ofdev) bd_size = sizeof(struct mal_descriptor) * (NUM_TX_BUFF * mal->num_tx_chans + NUM_RX_BUFF * mal->num_rx_chans); - mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma, - GFP_KERNEL | __GFP_ZERO); + mal->bd_virt = dma_zalloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma, + GFP_KERNEL); if (mal->bd_virt == NULL) { err = -ENOMEM; goto fail_unmap; diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 82a967c..73a8aee 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -1019,8 +1019,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter 
*adapter) txdr->size = txdr->count * sizeof(struct e1000_tx_desc); txdr->size = ALIGN(txdr->size, 4096); - txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, - GFP_KERNEL | __GFP_ZERO); + txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma, + GFP_KERNEL); if (!txdr->desc) { ret_val = 2; goto err_nomem; @@ -1077,8 +1077,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) } rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); - rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, - GFP_KERNEL | __GFP_ZERO); + rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, + GFP_KERNEL); if (!rxdr->desc) { ret_val = 6; goto err_nomem; diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index fce3e92..9f6b236 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -718,8 +718,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter) txdr->size = txdr->count * sizeof(struct ixgb_tx_desc); txdr->size = ALIGN(txdr->size, 4096); - txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, - GFP_KERNEL | __GFP_ZERO); + txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma, + GFP_KERNEL); if (!txdr->desc) { vfree(txdr->buffer_info); return -ENOMEM; diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index db48147..3e69feb 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -583,10 +583,9 @@ static int init_hash_table(struct pxa168_eth_private *pep) * table is full. */ if (pep->htpr == NULL) { - pep->htpr = dma_alloc_coherent(pep->dev->dev.parent, - HASH_ADDR_TABLE_SIZE, - &pep->htpr_dma, - GFP_KERNEL | __GFP_ZERO); + pep->htpr = dma_zalloc_coherent(pep->dev->dev.parent, + HASH_ADDR_TABLE_SIZE, + &pep->htpr_dma, GFP_KERNEL); if (pep->htpr == NULL) return -ENOMEM; } else { @@ -1024,9 +1023,9 @@ static int rxq_init(struct net_device *dev) pep->rx_desc_count = 0; size = pep->rx_ring_size * sizeof(struct rx_desc); pep->rx_desc_area_size = size; - pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, - &pep->rx_desc_dma, - GFP_KERNEL | __GFP_ZERO); + pep->p_rx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size, + &pep->rx_desc_dma, + GFP_KERNEL); if (!pep->p_rx_desc_area) goto out; @@ -1085,9 +1084,9 @@ static int txq_init(struct net_device *dev) pep->tx_desc_count = 0; size = pep->tx_ring_size * sizeof(struct tx_desc); pep->tx_desc_area_size = size; - pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, - &pep->tx_desc_dma, - GFP_KERNEL | __GFP_ZERO); + pep->p_tx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size, + &pep->tx_desc_dma, + GFP_KERNEL); if (!pep->p_tx_desc_area) goto out; /* Initialize the next_desc_ptr links in the Tx descriptors ring */ diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 50a1d4a..149355b 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -3783,9 +3783,9 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp) for (i = 0; i < mgp->num_slices; i++) { ss = &mgp->ss[i]; bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry); - ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes, - &ss->rx_done.bus, - GFP_KERNEL | __GFP_ZERO); + ss->rx_done.entry = dma_zalloc_coherent(&pdev->dev, bytes, 
+ &ss->rx_done.bus, + GFP_KERNEL); if (ss->rx_done.entry == NULL) goto abort; bytes = sizeof(*ss->fw_stats); diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index e19f1be..5a0f04c 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -1491,9 +1491,9 @@ pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter, bufsz = adapter->rx_buffer_len; size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY; - rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size, - &rx_ring->rx_buff_pool_logic, - GFP_KERNEL | __GFP_ZERO); + rx_ring->rx_buff_pool = + dma_zalloc_coherent(&pdev->dev, size, + &rx_ring->rx_buff_pool_logic, GFP_KERNEL); if (!rx_ring->rx_buff_pool) return -ENOMEM; @@ -1807,9 +1807,8 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter, tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc); - tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, - &tx_ring->dma, - GFP_KERNEL | __GFP_ZERO); + tx_ring->desc = dma_zalloc_coherent(&pdev->dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); if (!tx_ring->desc) { vfree(tx_ring->buffer_info); return -ENOMEM; @@ -1852,9 +1851,8 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter, return -ENOMEM; rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc); - rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, - &rx_ring->dma, - GFP_KERNEL | __GFP_ZERO); + rx_ring->desc = dma_zalloc_coherent(&pdev->dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); if (!rx_ring->desc) { vfree(rx_ring->buffer_info); return -ENOMEM; diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index f21ae7b..c498181 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -440,10 +440,9 @@ static int pasemi_mac_setup_rx_resources(const struct net_device *dev) if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE)) goto out_ring_desc; - ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev, - RX_RING_SIZE * sizeof(u64), - &ring->buf_dma, - GFP_KERNEL | __GFP_ZERO); + ring->buffers = dma_zalloc_coherent(&mac->dma_pdev->dev, + RX_RING_SIZE * sizeof(u64), + &ring->buf_dma, GFP_KERNEL); if (!ring->buffers) goto out_ring_desc; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c index bf3b17e..86850dd 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c @@ -448,14 +448,14 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter, *(tx_ring->hw_consumer) = 0; rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx); - rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size, - &rq_phys_addr, GFP_KERNEL | __GFP_ZERO); + rq_addr = dma_zalloc_coherent(&adapter->pdev->dev, rq_size, + &rq_phys_addr, GFP_KERNEL); if (!rq_addr) return -ENOMEM; rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx); - rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size, - &rsp_phys_addr, GFP_KERNEL | __GFP_ZERO); + rsp_addr = dma_zalloc_coherent(&adapter->pdev->dev, rsp_size, + &rsp_phys_addr, GFP_KERNEL); if (!rsp_addr) { err = -ENOMEM; goto out_free_rq; @@ -865,8 +865,8 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args cmd; size_t nic_size = sizeof(struct qlcnic_info_le); - nic_info_addr = 
dma_alloc_coherent(&adapter->pdev->dev, nic_size, - &nic_dma_t, GFP_KERNEL | __GFP_ZERO); + nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size, + &nic_dma_t, GFP_KERNEL); if (!nic_info_addr) return -ENOMEM; @@ -919,8 +919,8 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter, if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) return err; - nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, - &nic_dma_t, GFP_KERNEL | __GFP_ZERO); + nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size, + &nic_dma_t, GFP_KERNEL); if (!nic_info_addr) return -ENOMEM; @@ -972,9 +972,8 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter, size_t npar_size = sizeof(struct qlcnic_pci_info_le); size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC; - pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size, - &pci_info_dma_t, - GFP_KERNEL | __GFP_ZERO); + pci_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, pci_size, + &pci_info_dma_t, GFP_KERNEL); if (!pci_info_addr) return -ENOMEM; @@ -1074,8 +1073,8 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func, return -EIO; } - stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size, - &stats_dma_t, GFP_KERNEL | __GFP_ZERO); + stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size, + &stats_dma_t, GFP_KERNEL); if (!stats_addr) return -ENOMEM; @@ -1130,8 +1129,8 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter, if (mac_stats == NULL) return -ENOMEM; - stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size, - &stats_dma_t, GFP_KERNEL | __GFP_ZERO); + stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size, + &stats_dma_t, GFP_KERNEL); if (!stats_addr) return -ENOMEM; diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index b9b1277..78d41331 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -33,9 +33,8 @@ int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, unsigned int len, gfp_t gfp_flags) { - buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, - &buffer->dma_addr, - gfp_flags | __GFP_ZERO); + buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len, + &buffer->dma_addr, gfp_flags); if (!buffer->addr) return -ENOMEM; buffer->len = len; diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c index 9f5f35e..770036b 100644 --- a/drivers/net/ethernet/sgi/meth.c +++ b/drivers/net/ethernet/sgi/meth.c @@ -212,9 +212,8 @@ static void meth_check_link(struct net_device *dev) static int meth_init_tx_ring(struct meth_private *priv) { /* Init TX ring */ - priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE, - &priv->tx_ring_dma, - GFP_ATOMIC | __GFP_ZERO); + priv->tx_ring = dma_zalloc_coherent(NULL, TX_RING_BUFFER_SIZE, + &priv->tx_ring_dma, GFP_ATOMIC); if (!priv->tx_ring) return -ENOMEM; diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index 01bdc6c..f2e76c9 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -1308,13 +1308,13 @@ static int tsi108_open(struct net_device *dev) data->id, dev->irq, dev->name); } - data->rxring = dma_alloc_coherent(NULL, rxring_size, &data->rxdma, - GFP_KERNEL | __GFP_ZERO); + data->rxring = dma_zalloc_coherent(NULL, rxring_size, &data->rxdma, + GFP_KERNEL); if (!data->rxring) return -ENOMEM; - data->txring = dma_alloc_coherent(NULL, txring_size, &data->txdma, - GFP_KERNEL | __GFP_ZERO); + data->txring = 
dma_zalloc_coherent(NULL, txring_size, &data->txdma, + GFP_KERNEL); if (!data->txring) { pci_free_consistent(0, rxring_size, data->rxring, data->rxdma); return -ENOMEM; diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 58eb448..b88121f 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -243,15 +243,15 @@ static int temac_dma_bd_init(struct net_device *ndev) /* allocate the tx and rx ring buffer descriptors. */ /* returns a virtual address and a physical address. */ - lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, - sizeof(*lp->tx_bd_v) * TX_BD_NUM, - &lp->tx_bd_p, GFP_KERNEL | __GFP_ZERO); + lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent, + sizeof(*lp->tx_bd_v) * TX_BD_NUM, + &lp->tx_bd_p, GFP_KERNEL); if (!lp->tx_bd_v) goto out; - lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, - sizeof(*lp->rx_bd_v) * RX_BD_NUM, - &lp->rx_bd_p, GFP_KERNEL | __GFP_ZERO); + lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent, + sizeof(*lp->rx_bd_v) * RX_BD_NUM, + &lp->rx_bd_p, GFP_KERNEL); if (!lp->rx_bd_v) goto out; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index fb7d1c2..b2ff038 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -201,17 +201,15 @@ static int axienet_dma_bd_init(struct net_device *ndev) /* * Allocate the Tx and Rx buffer descriptors. */ - lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, - sizeof(*lp->tx_bd_v) * TX_BD_NUM, - &lp->tx_bd_p, - GFP_KERNEL | __GFP_ZERO); + lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent, + sizeof(*lp->tx_bd_v) * TX_BD_NUM, + &lp->tx_bd_p, GFP_KERNEL); if (!lp->tx_bd_v) goto out; - lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, - sizeof(*lp->rx_bd_v) * RX_BD_NUM, - &lp->rx_bd_p, - GFP_KERNEL | __GFP_ZERO); + lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent, + sizeof(*lp->rx_bd_v) * RX_BD_NUM, + &lp->rx_bd_p, GFP_KERNEL); if (!lp->rx_bd_v) goto out; diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c index 4c8ddc9..0b40e1c 100644 --- a/drivers/net/fddi/defxx.c +++ b/drivers/net/fddi/defxx.c @@ -1068,9 +1068,9 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name, #endif sizeof(PI_CONSUMER_BLOCK) + (PI_ALIGN_K_DESC_BLK - 1); - bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size, - &bp->kmalloced_dma, - GFP_ATOMIC | __GFP_ZERO); + bp->kmalloced = top_v = dma_zalloc_coherent(bp->bus_dev, alloc_size, + &bp->kmalloced_dma, + GFP_ATOMIC); if (top_v == NULL) return DFX_K_FAILURE; diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c index 3adb43c..7bbd318 100644 --- a/drivers/net/irda/ali-ircc.c +++ b/drivers/net/irda/ali-ircc.c @@ -351,16 +351,16 @@ static int ali_ircc_open(int i, chipio_t *info) /* Allocate memory if needed */ self->rx_buff.head = - dma_alloc_coherent(NULL, self->rx_buff.truesize, - &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO); + dma_zalloc_coherent(NULL, self->rx_buff.truesize, + &self->rx_buff_dma, GFP_KERNEL); if (self->rx_buff.head == NULL) { err = -ENOMEM; goto err_out2; } self->tx_buff.head = - dma_alloc_coherent(NULL, self->tx_buff.truesize, - &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO); + dma_zalloc_coherent(NULL, self->tx_buff.truesize, + &self->tx_buff_dma, GFP_KERNEL); if (self->tx_buff.head == NULL) { err = -ENOMEM; goto err_out3; diff --git a/drivers/net/irda/nsc-ircc.c 
b/drivers/net/irda/nsc-ircc.c index 9cf836b..ceeb537 100644 --- a/drivers/net/irda/nsc-ircc.c +++ b/drivers/net/irda/nsc-ircc.c @@ -430,8 +430,8 @@ static int __init nsc_ircc_open(chipio_t *info) /* Allocate memory if needed */ self->rx_buff.head = - dma_alloc_coherent(NULL, self->rx_buff.truesize, - &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO); + dma_zalloc_coherent(NULL, self->rx_buff.truesize, + &self->rx_buff_dma, GFP_KERNEL); if (self->rx_buff.head == NULL) { err = -ENOMEM; goto out2; @@ -439,8 +439,8 @@ static int __init nsc_ircc_open(chipio_t *info) } self->tx_buff.head = - dma_alloc_coherent(NULL, self->tx_buff.truesize, - &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO); + dma_zalloc_coherent(NULL, self->tx_buff.truesize, + &self->tx_buff_dma, GFP_KERNEL); if (self->tx_buff.head == NULL) { err = -ENOMEM; goto out3; diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c index aa05dad..0dcdf15 100644 --- a/drivers/net/irda/smsc-ircc2.c +++ b/drivers/net/irda/smsc-ircc2.c @@ -562,14 +562,14 @@ static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, self->tx_buff.truesize = SMSC_IRCC2_TX_BUFF_TRUESIZE; self->rx_buff.head = - dma_alloc_coherent(NULL, self->rx_buff.truesize, - &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO); + dma_zalloc_coherent(NULL, self->rx_buff.truesize, + &self->rx_buff_dma, GFP_KERNEL); if (self->rx_buff.head == NULL) goto err_out2; self->tx_buff.head = - dma_alloc_coherent(NULL, self->tx_buff.truesize, - &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO); + dma_zalloc_coherent(NULL, self->tx_buff.truesize, + &self->tx_buff_dma, GFP_KERNEL); if (self->tx_buff.head == NULL) goto err_out3; diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c index 2dcc60f..9abaec2 100644 --- a/drivers/net/irda/via-ircc.c +++ b/drivers/net/irda/via-ircc.c @@ -361,16 +361,16 @@ static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id) /* Allocate memory if needed */ self->rx_buff.head = - dma_alloc_coherent(&pdev->dev, self->rx_buff.truesize, - &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO); + dma_zalloc_coherent(&pdev->dev, self->rx_buff.truesize, + &self->rx_buff_dma, GFP_KERNEL); if (self->rx_buff.head == NULL) { err = -ENOMEM; goto err_out2; } self->tx_buff.head = - dma_alloc_coherent(&pdev->dev, self->tx_buff.truesize, - &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO); + dma_zalloc_coherent(&pdev->dev, self->tx_buff.truesize, + &self->tx_buff_dma, GFP_KERNEL); if (self->tx_buff.head == NULL) { err = -ENOMEM; goto err_out3; diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c index bb8857a..e641bb2 100644 --- a/drivers/net/irda/w83977af_ir.c +++ b/drivers/net/irda/w83977af_ir.c @@ -215,16 +215,16 @@ static int w83977af_open(int i, unsigned int iobase, unsigned int irq, /* Allocate memory if needed */ self->rx_buff.head = - dma_alloc_coherent(NULL, self->rx_buff.truesize, - &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO); + dma_zalloc_coherent(NULL, self->rx_buff.truesize, + &self->rx_buff_dma, GFP_KERNEL); if (self->rx_buff.head == NULL) { err = -ENOMEM; goto err_out1; } self->tx_buff.head = - dma_alloc_coherent(NULL, self->tx_buff.truesize, - &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO); + dma_zalloc_coherent(NULL, self->tx_buff.truesize, + &self->tx_buff_dma, GFP_KERNEL); if (self->tx_buff.head == NULL) { err = -ENOMEM; goto err_out2; diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c index f7c70b3..c51d2dc 100644 --- a/drivers/net/wireless/b43/dma.c +++ 
b/drivers/net/wireless/b43/dma.c @@ -431,9 +431,9 @@ static int alloc_ringmemory(struct b43_dmaring *ring) u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ? B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE; - ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev, - ring_mem_size, &(ring->dmabase), - GFP_KERNEL | __GFP_ZERO); + ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev, + ring_mem_size, &(ring->dmabase), + GFP_KERNEL); if (!ring->descbase) return -ENOMEM; diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c index faeafe2..42eb26c 100644 --- a/drivers/net/wireless/b43legacy/dma.c +++ b/drivers/net/wireless/b43legacy/dma.c @@ -331,10 +331,9 @@ void free_descriptor_buffer(struct b43legacy_dmaring *ring, static int alloc_ringmemory(struct b43legacy_dmaring *ring) { /* GFP flags must match the flags in free_ringmemory()! */ - ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev, - B43legacy_DMA_RINGMEMSIZE, - &(ring->dmabase), - GFP_KERNEL | __GFP_ZERO); + ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev, + B43legacy_DMA_RINGMEMSIZE, + &(ring->dmabase), GFP_KERNEL); if (!ring->descbase) return -ENOMEM; diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 94af418..3a8d0a2 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -132,9 +132,8 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask) static inline void *dma_zalloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag) { - void *ret = dma_alloc_coherent(dev, size, dma_handle, flag); - if (ret) - memset(ret, 0, size); + void *ret = dma_alloc_coherent(dev, size, dma_handle, + flag | __GFP_ZERO); return ret; } -- cgit v0.10.2 From 08f89b981b0e6b46058dae2957241d4099129521 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 30 Aug 2013 09:46:43 -0700 Subject: pkt_sched: fq: prefetch() fix kbuild bot reported following m68k build error : net/sched/sch_fq.c: In function 'fq_dequeue': >> net/sched/sch_fq.c:491:2: error: implicit declaration of function 'prefetch' [-Werror=implicit-function-declaration] cc1: some warnings being treated as errors While we are fixing this, move this prefetch() call a bit earlier. Reported-by: Wu Fengguang Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index 91ceca7..32ad015 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -46,6 +46,7 @@ #include #include #include +#include #include #include #include @@ -461,6 +462,7 @@ begin: } goto begin; } + prefetch(&skb->end); f->time_next_packet = now; f->credit -= qdisc_pkt_len(skb); @@ -488,7 +490,6 @@ begin: } } out: - prefetch(&skb->end); sch->qstats.backlog -= qdisc_pkt_len(skb); qdisc_bstats_update(sch, skb); sch->q.qlen--; -- cgit v0.10.2 From 1b7fdd2ab5852717a4fc7d79847759c67065d7e9 Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Fri, 30 Aug 2013 08:35:53 -0700 Subject: tcp: do not use cached RTT for RTT estimation RTT cached in the TCP metrics are valuable for the initial timeout because SYN RTT usually does not account for serialization delays on low BW path. However using it to seed the RTT estimator maybe disruptive because other components (e.g., pacing) require the smooth RTT to be obtained from actual connection. The solution is to use the higher cached RTT to set the first RTO conservatively like tcp_rtt_estimator(), but avoid seeding the other RTT estimator variables such as srtt. 
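As a worked example of that seeding formula (a sketch with illustrative numbers only, ignoring the jiffies conversion and the scaled storage of the cached value):

	crtt     = 200;	/* cached RTT, in ms for illustration */
	rto_min  = 200;	/* tcp_rto_min(sk), in ms for illustration */
	icsk_rto = crtt + max(crtt >> 2, rto_min);	/* 200 + max(50, 200) = 400 ms */

The first retransmission timeout therefore starts out conservative, while srtt itself is still left to be measured from the live connection.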
It is also a good idea to keep RTO conservative to obtain the first RTT sample, and the performance is insured by TCP loss probe if SYN RTT is available. To keep the seeding formula consistent across SYN RTT and cached RTT, the rttvar is twice the cached RTT instead of cached RTTVAR value. The reason is because cached variation may be too small (near min RTO) which defeats the purpose of being conservative on first RTO. However the metrics still keep the RTT variations as they might be useful for user applications (through ip). Signed-off-by: Yuchung Cheng Signed-off-by: Neal Cardwell Signed-off-by: Eric Dumazet Tested-by: Eric Dumazet Signed-off-by: David S. Miller diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index f6a005c..273ed73 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c @@ -443,7 +443,7 @@ void tcp_init_metrics(struct sock *sk) struct dst_entry *dst = __sk_dst_get(sk); struct tcp_sock *tp = tcp_sk(sk); struct tcp_metrics_block *tm; - u32 val; + u32 val, crtt = 0; /* cached RTT scaled by 8 */ if (dst == NULL) goto reset; @@ -478,40 +478,18 @@ void tcp_init_metrics(struct sock *sk) tp->reordering = val; } - val = tcp_metric_get(tm, TCP_METRIC_RTT); - if (val == 0 || tp->srtt == 0) { - rcu_read_unlock(); - goto reset; - } - /* Initial rtt is determined from SYN,SYN-ACK. - * The segment is small and rtt may appear much - * less than real one. Use per-dst memory - * to make it more realistic. - * - * A bit of theory. RTT is time passed after "normal" sized packet - * is sent until it is ACKed. In normal circumstances sending small - * packets force peer to delay ACKs and calculation is correct too. - * The algorithm is adaptive and, provided we follow specs, it - * NEVER underestimate RTT. BUT! If peer tries to make some clever - * tricks sort of "quick acks" for time long enough to decrease RTT - * to low value, and then abruptly stops to do it and starts to delay - * ACKs, wait for troubles. - */ - val = msecs_to_jiffies(val); - if (val > tp->srtt) { - tp->srtt = val; - tp->rtt_seq = tp->snd_nxt; - } - val = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR); - if (val > tp->mdev) { - tp->mdev = val; - tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk)); - } + crtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT); rcu_read_unlock(); - - tcp_set_rto(sk); reset: - if (tp->srtt == 0) { + if (crtt > tp->srtt) { + /* Initial RTT (tp->srtt) from SYN usually don't measure + * serialization delay on low BW links well so RTO may be + * under-estimated. Stay conservative and seed RTO with + * the RTTs from past data exchanges, using the same seeding + * formula in tcp_rtt_estimator(). + */ + inet_csk(sk)->icsk_rto = crtt + max(crtt >> 2, tcp_rto_min(sk)); + } else if (tp->srtt == 0) { /* RFC6298: 5.7 We've failed to get a valid RTT sample from * 3WHS. This is most likely due to retransmission, * including spurious one. Reset the RTO back to 3secs -- cgit v0.10.2 From 816c5b5b016acde51f0547cf1657d87cf22a6d47 Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Wed, 28 Aug 2013 12:05:38 +0200 Subject: ipv6: Remove redundant sk variable A sk variable initialized to ndisc_sk is already available outside of the branch. Signed-off-by: Thomas Graf Signed-off-by: David S. 
Miller diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 41720fe..bb6fd95 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -428,7 +428,6 @@ static void ndisc_send_skb(struct sk_buff *skb, type = icmp6h->icmp6_type; if (!dst) { - struct sock *sk = net->ipv6.ndisc_sk; struct flowi6 fl6; icmpv6_flow_init(sk, &fl6, type, saddr, daddr, skb->dev->ifindex); -- cgit v0.10.2 From e2a240c7d3bcebf90936cc7c22c2729b3a4cec1f Mon Sep 17 00:00:00 2001 From: Sonic Zhang Date: Wed, 28 Aug 2013 18:55:39 +0800 Subject: driver:net:stmmac: Disable DMA store and forward mode if platform data force_thresh_dma_mode is set. Some synopsys ip implementation doesn't support DMA store and forward mode, such as BF60x. So, set force_thresh_dma_mode to use DMA thresholds only. Update document and devicetree as well. Signed-off-by: Sonic Zhang Acked-by: Giuseppe Cavallaro Signed-off-by: David S. Miller diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt index 261c563..eba0e5e 100644 --- a/Documentation/devicetree/bindings/net/stmmac.txt +++ b/Documentation/devicetree/bindings/net/stmmac.txt @@ -22,6 +22,11 @@ Required properties: - snps,pbl Programmable Burst Length - snps,fixed-burst Program the DMA to use the fixed burst mode - snps,mixed-burst Program the DMA to use the mixed burst mode +- snps,force_thresh_dma_mode Force DMA to use the threshold mode for + both tx and rx +- snps,force_sf_dma_mode Force DMA to use the Store and Forward + mode for both tx and rx. This flag is + ignored if force_thresh_dma_mode is set. Optional properties: - mac-address: 6 bytes, mac address diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt index 654d2e5..457b8bb 100644 --- a/Documentation/networking/stmmac.txt +++ b/Documentation/networking/stmmac.txt @@ -123,6 +123,7 @@ struct plat_stmmacenet_data { int bugged_jumbo; int pmt; int force_sf_dma_mode; + int force_thresh_dma_mode; int riwt_off; void (*fix_mac_speed)(void *priv, unsigned int speed); void (*bus_setup)(void __iomem *ioaddr); @@ -159,6 +160,8 @@ Where: o pmt: core has the embedded power module (optional). o force_sf_dma_mode: force DMA to use the Store and Forward mode instead of the Threshold. + o force_thresh_dma_mode: force DMA to use the Shreshold mode other than + the Store and Forward mode. o riwt_off: force to disable the RX watchdog feature and switch to NAPI mode. o fix_mac_speed: this callback is used for modifying some syscfg registers (on ST SoCs) according to the link speed negotiated by the diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index be40691..8d4ccd3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1224,7 +1224,9 @@ static void free_dma_desc_resources(struct stmmac_priv *priv) */ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) { - if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { + if (priv->plat->force_thresh_dma_mode) + priv->hw->dma->dma_mode(priv->ioaddr, tc, tc); + else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { /* * In case of GMAC, SF mode can be enabled * to perform the TX COE in HW. 
This depends on: diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index da8be6e..74a89a4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -79,6 +79,11 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst"); dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst"); + plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode"); + if (plat->force_thresh_dma_mode) { + plat->force_sf_dma_mode = 0; + pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set."); + } return 0; } diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 9e495d31..bb5deb0 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -108,6 +108,7 @@ struct plat_stmmacenet_data { int bugged_jumbo; int pmt; int force_sf_dma_mode; + int force_thresh_dma_mode; int riwt_off; void (*fix_mac_speed)(void *priv, unsigned int speed); void (*bus_setup)(void __iomem *ioaddr); -- cgit v0.10.2 From 7174012955d0e545e5253192e2cd37fedbc405f9 Mon Sep 17 00:00:00 2001 From: Lutz Jaenicke Date: Wed, 28 Aug 2013 13:34:31 +0200 Subject: macvlan: fix typo in assignment commit 3b04ddde02cf1b6f14f2697da5c20eca5715017f "[NET]: Move hardware header operations out of netdevice." moved the handling into macvlan setup adding dev->header_ops = &macvlan_hard_header_ops, At the end of the line the ',' should have been a ';' Signed-off-by: Lutz Jaenicke Signed-off-by: David S. Miller diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 510a9b6..201ef17 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -686,7 +686,7 @@ void macvlan_common_setup(struct net_device *dev) dev->priv_flags |= IFF_UNICAST_FLT; dev->netdev_ops = &macvlan_netdev_ops; dev->destructor = free_netdev; - dev->header_ops = &macvlan_hard_header_ops, + dev->header_ops = &macvlan_hard_header_ops; dev->ethtool_ops = &macvlan_ethtool_ops; } EXPORT_SYMBOL_GPL(macvlan_common_setup); -- cgit v0.10.2 From a3ea280004c5d873a75c860f7d9b0e2a4d45c5f4 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 30 Aug 2013 13:44:34 +0900 Subject: net: ax88796: use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. This is a cosmetic change to make the code simpler and enhance the readability. Signed-off-by: Jingoo Han Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index b7232a9..f92f001 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -840,7 +840,7 @@ static int ax_probe(struct platform_device *pdev) ei_local = netdev_priv(dev); ax = to_ax_dev(dev); - ax->plat = pdev->dev.platform_data; + ax->plat = dev_get_platdata(&pdev->dev); platform_set_drvdata(pdev, dev); ei_local->rxcr_base = ax->plat->rcr_val; -- cgit v0.10.2 From a63b82c44b18860e63016ae24e2bc4d97cefc806 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 30 Aug 2013 13:50:48 +0900 Subject: net: bfin_mac: use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. This is a cosmetic change to make the code simpler and enhance the readability. Signed-off-by: Jingoo Han Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c index e904b38..e66684a 100644 --- a/drivers/net/ethernet/adi/bfin_mac.c +++ b/drivers/net/ethernet/adi/bfin_mac.c @@ -1647,12 +1647,12 @@ static int bfin_mac_probe(struct platform_device *pdev) setup_mac_addr(ndev->dev_addr); - if (!pdev->dev.platform_data) { + if (!dev_get_platdata(&pdev->dev)) { dev_err(&pdev->dev, "Cannot get platform device bfin_mii_bus!\n"); rc = -ENODEV; goto out_err_probe_mac; } - pd = pdev->dev.platform_data; + pd = dev_get_platdata(&pdev->dev); lp->mii_bus = platform_get_drvdata(pd); if (!lp->mii_bus) { dev_err(&pdev->dev, "Cannot get mii_bus!\n"); @@ -1660,7 +1660,7 @@ static int bfin_mac_probe(struct platform_device *pdev) goto out_err_probe_mac; } lp->mii_bus->priv = ndev; - mii_bus_data = pd->dev.platform_data; + mii_bus_data = dev_get_platdata(&pd->dev); rc = mii_probe(ndev, mii_bus_data->phy_mode); if (rc) { -- cgit v0.10.2 From 1fc2c469aeb55418029780bea3ee9a97d9961d6f Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 30 Aug 2013 13:51:45 +0900 Subject: net: au1000_eth: use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. This is a cosmetic change to make the code simpler and enhance the readability. Signed-off-by: Jingoo Han Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index ceb45bc..91d52b4 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -1131,7 +1131,7 @@ static int au1000_probe(struct platform_device *pdev) writel(0, aup->enable); aup->mac_enabled = 0; - pd = pdev->dev.platform_data; + pd = dev_get_platdata(&pdev->dev); if (!pd) { dev_info(&pdev->dev, "no platform_data passed," " PHY search on MAC0\n"); -- cgit v0.10.2 From cf0e7794ba073ae151e4a2316384af0d26d0740f Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 30 Aug 2013 13:52:21 +0900 Subject: net: bcm63xx_enet: use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. This is a cosmetic change to make the code simpler and enhance the readability. Signed-off-by: Jingoo Han Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 98b6870..8ac48fb 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1798,7 +1798,7 @@ static int bcm_enet_probe(struct platform_device *pdev) priv->rx_ring_size = BCMENET_DEF_RX_DESC; priv->tx_ring_size = BCMENET_DEF_TX_DESC; - pd = pdev->dev.platform_data; + pd = dev_get_platdata(&pdev->dev); if (pd) { memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); priv->has_phy = pd->has_phy; @@ -1962,7 +1962,7 @@ static int bcm_enet_remove(struct platform_device *pdev) } else { struct bcm63xx_enet_platform_data *pd; - pd = pdev->dev.platform_data; + pd = dev_get_platdata(&pdev->dev); if (pd && pd->mii_config) pd->mii_config(dev, 0, bcm_enet_mdio_read_mii, bcm_enet_mdio_write_mii); @@ -2740,7 +2740,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev) priv->tx_ring_size = BCMENET_DEF_TX_DESC; priv->dma_maxburst = BCMENETSW_DMA_MAXBURST; - pd = pdev->dev.platform_data; + pd = dev_get_platdata(&pdev->dev); if (pd) { memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); memcpy(priv->used_ports, pd->used_ports, -- cgit v0.10.2 From b96f64dd473416d877cfc0bdf5efeeb705c68734 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 30 Aug 2013 13:54:37 +0900 Subject: net: ep93xx_eth: use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. This is a cosmetic change to make the code simpler and enhance the readability. Signed-off-by: Jingoo Han Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c index e3d4ec8..ec88de4 100644 --- a/drivers/net/ethernet/cirrus/ep93xx_eth.c +++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c @@ -814,7 +814,7 @@ static int ep93xx_eth_probe(struct platform_device *pdev) if (pdev == NULL) return -ENODEV; - data = pdev->dev.platform_data; + data = dev_get_platdata(&pdev->dev); mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); -- cgit v0.10.2 From cd4e2e4b368fd52e29817fe9447c2204b76467f9 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 30 Aug 2013 13:55:14 +0900 Subject: net: dm9000: use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. This is a cosmetic change to make the code simpler and enhance the readability. Signed-off-by: Jingoo Han Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index a13b312..5f5896e 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1384,7 +1384,7 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev) static int dm9000_probe(struct platform_device *pdev) { - struct dm9000_plat_data *pdata = pdev->dev.platform_data; + struct dm9000_plat_data *pdata = dev_get_platdata(&pdev->dev); struct board_info *db; /* Point a board information structure */ struct net_device *ndev; const unsigned char *mac_src; -- cgit v0.10.2 From 420fcd82206ff5e03690247b10a08e585446a517 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 30 Aug 2013 13:55:46 +0900 Subject: net: ethoc: use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. This is a cosmetic change to make the code simpler and enhance the readability. 
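The conversion applied in this and the surrounding drivers is purely mechanical; a minimal sketch of the pattern, with the driver name, probe function and platform-data type invented for illustration:

	#include <linux/platform_device.h>

	/* foo_platform_data and foo_probe() are hypothetical names */
	struct foo_platform_data {
		int reset_gpio;
	};

	static int foo_probe(struct platform_device *pdev)
	{
		/* before: struct foo_platform_data *pd = pdev->dev.platform_data; */
		struct foo_platform_data *pd = dev_get_platdata(&pdev->dev);

		if (!pd)
			return -ENODEV;	/* no board data supplied */

		return 0;
	}

dev_get_platdata() simply returns dev->platform_data, so behaviour is unchanged; the wrapper just keeps drivers from reaching into struct device directly.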
Signed-off-by: Jingoo Han Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index cf579fb..4de8cfd 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -1030,8 +1030,8 @@ static int ethoc_probe(struct platform_device *pdev) } /* Allow the platform setup code to pass in a MAC address. */ - if (pdev->dev.platform_data) { - struct ethoc_platform_data *pdata = pdev->dev.platform_data; + if (dev_get_platdata(&pdev->dev)) { + struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev); memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN); priv->phy_id = pdata->phy_id; } else { -- cgit v0.10.2 From 94660ba09079dd8a4527cb26b6c76724394f0460 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 30 Aug 2013 13:56:26 +0900 Subject: net: fec: use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. This is a cosmetic change to make the code simpler and enhance the readability. Signed-off-by: Jingoo Han Acked-by: Fugang Duan Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index c88a129..46019ba 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1059,7 +1059,7 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget) static void fec_get_mac(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - struct fec_platform_data *pdata = fep->pdev->dev.platform_data; + struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev); unsigned char *iap, tmpaddr[ETH_ALEN]; /* @@ -2088,7 +2088,7 @@ fec_probe(struct platform_device *pdev) ret = of_get_phy_mode(pdev->dev.of_node); if (ret < 0) { - pdata = pdev->dev.platform_data; + pdata = dev_get_platdata(&pdev->dev); if (pdata) fep->phy_interface = pdata->phy; else -- cgit v0.10.2 From bbfa6d0aa501ee7fd476a7bb8e0045b3851295df Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 30 Aug 2013 13:57:13 +0900 Subject: net: mv643xx_eth: use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. This is a cosmetic change to make the code simpler and enhance the readability. Signed-off-by: Jingoo Han Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index c35db73..7fb5677 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2641,7 +2641,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) ret = mv643xx_eth_shared_of_probe(pdev); if (ret) return ret; - pd = pdev->dev.platform_data; + pd = dev_get_platdata(&pdev->dev); msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? pd->tx_csum_limit : 9 * 1024; @@ -2833,7 +2833,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev) struct resource *res; int err; - pd = pdev->dev.platform_data; + pd = dev_get_platdata(&pdev->dev); if (pd == NULL) { dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n"); return -ENODEV; -- cgit v0.10.2 From e19eac0ec5a77fd53cd909a25435f7f6ac81d622 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 30 Aug 2013 13:58:00 +0900 Subject: net: pxa168_eth: use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. 
This is a cosmetic change to make the code simpler and enhance the readability. Signed-off-by: Jingoo Han Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 3e69feb..4ae0c74 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1516,7 +1516,7 @@ static int pxa168_eth_probe(struct platform_device *pdev) printk(KERN_INFO "%s:Using random mac address\n", DRIVER_NAME); eth_hw_addr_random(dev); - pep->pd = pdev->dev.platform_data; + pep->pd = dev_get_platdata(&pdev->dev); pep->rx_ring_size = NUM_RX_DESCS; if (pep->pd->rx_queue_size) pep->rx_ring_size = pep->pd->rx_queue_size; -- cgit v0.10.2 From 0dd14b670e873dc8c19821aca1d2a737c86c6bc7 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 30 Aug 2013 13:58:30 +0900 Subject: net: ks8842: use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. This is a cosmetic change to make the code simpler and enhance the readability. Signed-off-by: Jingoo Han Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c index 94b3bd6..0951f7a 100644 --- a/drivers/net/ethernet/micrel/ks8842.c +++ b/drivers/net/ethernet/micrel/ks8842.c @@ -1148,7 +1148,7 @@ static int ks8842_probe(struct platform_device *pdev) struct resource *iomem; struct net_device *netdev; struct ks8842_adapter *adapter; - struct ks8842_platform_data *pdata = pdev->dev.platform_data; + struct ks8842_platform_data *pdata = dev_get_platdata(&pdev->dev); u16 id; unsigned i; -- cgit v0.10.2 From ee9466124b9aced68c4e90bf8fe77fabd6fb5dd3 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 30 Aug 2013 13:59:01 +0900 Subject: net: ks8851-ml: use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. This is a cosmetic change to make the code simpler and enhance the readability. Signed-off-by: Jingoo Han Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c index 9f3f5db..0fba153 100644 --- a/drivers/net/ethernet/micrel/ks8851_mll.c +++ b/drivers/net/ethernet/micrel/ks8851_mll.c @@ -1636,7 +1636,7 @@ static int ks8851_probe(struct platform_device *pdev) } else { struct ks8851_mll_platform_data *pdata; - pdata = pdev->dev.platform_data; + pdata = dev_get_platdata(&pdev->dev); if (!pdata) { netdev_err(netdev, "No platform data\n"); err = -ENODEV; -- cgit v0.10.2 From 8866f45e57b04214bc1d16125a0c23e80585453e Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 30 Aug 2013 13:59:34 +0900 Subject: net: netx-eth: use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. This is a cosmetic change to make the code simpler and enhance the readability. Signed-off-by: Jingoo Han Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c index dc2c6f5..235fd51 100644 --- a/drivers/net/ethernet/netx-eth.c +++ b/drivers/net/ethernet/netx-eth.c @@ -390,7 +390,7 @@ static int netx_eth_drv_probe(struct platform_device *pdev) priv = netdev_priv(ndev); - pdata = (struct netxeth_platform_data *)pdev->dev.platform_data; + pdata = (struct netxeth_platform_data *)dev_get_platdata(&pdev->dev); priv->xc = request_xc(pdata->xcno, &pdev->dev); if (!priv->xc) { dev_err(&pdev->dev, "unable to request xc engine\n"); -- cgit v0.10.2 From 0b76b86235296efeabe49f79a3e70336cff9664e Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 30 Aug 2013 14:00:11 +0900 Subject: net: sh_eth: use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. This is a cosmetic change to make the code simpler and enhance the readability. Signed-off-by: Jingoo Han Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 4b47300..06d8f62 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2606,7 +2606,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev) struct resource *res; struct net_device *ndev = NULL; struct sh_eth_private *mdp = NULL; - struct sh_eth_plat_data *pd = pdev->dev.platform_data; + struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev); const struct platform_device_id *id = platform_get_device_id(pdev); /* get base addr */ -- cgit v0.10.2 From d776542853a1418457e3fe09b2f1f2955065b375 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 30 Aug 2013 14:00:45 +0900 Subject: net: seeq: use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. This is a cosmetic change to make the code simpler and enhance the readability. Signed-off-by: Jingoo Han Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c index 856e523..c765718 100644 --- a/drivers/net/ethernet/seeq/sgiseeq.c +++ b/drivers/net/ethernet/seeq/sgiseeq.c @@ -721,7 +721,7 @@ static const struct net_device_ops sgiseeq_netdev_ops = { static int sgiseeq_probe(struct platform_device *pdev) { - struct sgiseeq_platform_data *pd = pdev->dev.platform_data; + struct sgiseeq_platform_data *pd = dev_get_platdata(&pdev->dev); struct hpc3_regs *hpcregs = pd->hpc; struct sgiseeq_init_block *sr; unsigned int irq = pd->irq; -- cgit v0.10.2 From f64deacaa3211db89037d1cd4aa9d35718191d0a Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 30 Aug 2013 14:01:35 +0900 Subject: net: smc91x: use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. This is a cosmetic change to make the code simpler and enhance the readability. Signed-off-by: Jingoo Han Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index cde13be..73be7f3 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -2202,7 +2202,7 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device * */ static int smc_drv_probe(struct platform_device *pdev) { - struct smc91x_platdata *pd = pdev->dev.platform_data; + struct smc91x_platdata *pd = dev_get_platdata(&pdev->dev); struct smc_local *lp; struct net_device *ndev; struct resource *res, *ires; -- cgit v0.10.2 From c82e5e571b3f4bebbe908db0748b9a1deb535015 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 30 Aug 2013 14:02:06 +0900 Subject: net: smc911x: use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. This is a cosmetic change to make the code simpler and enhance the readability. Signed-off-by: Jingoo Han Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index 345558f..afe01c4 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -2067,7 +2067,7 @@ static int smc911x_drv_probe(struct platform_device *pdev) lp->netdev = ndev; #ifdef SMC_DYNAMIC_BUS_CONFIG { - struct smc911x_platdata *pd = pdev->dev.platform_data; + struct smc911x_platdata *pd = dev_get_platdata(&pdev->dev); if (!pd) { ret = -EINVAL; goto release_both; -- cgit v0.10.2 From 495c765d7d1ff72b4039bd9a7ba3c627b80ba4d8 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 30 Aug 2013 14:02:57 +0900 Subject: net: smsc911x: use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. This is a cosmetic change to make the code simpler and enhance the readability. Signed-off-by: Jingoo Han Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index a141921..5fdbc26 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -2374,7 +2374,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; struct net_device *dev; struct smsc911x_data *pdata; - struct smsc911x_platform_config *config = pdev->dev.platform_data; + struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev); struct resource *res, *irq_res; unsigned int intcfg = 0; int res_size, irq_flags; -- cgit v0.10.2 From df3f1c39406740712f0a2cacb8c3699bf6413ebe Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 30 Aug 2013 14:04:17 +0900 Subject: net: niu: use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. This is a cosmetic change to make the code simpler and enhance the readability. Signed-off-by: Jingoo Han Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index fa32240..269c08b 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -9360,7 +9360,7 @@ static ssize_t show_port_phy(struct device *dev, struct device_attribute *attr, char *buf) { struct platform_device *plat_dev = to_platform_device(dev); - struct niu_parent *p = plat_dev->dev.platform_data; + struct niu_parent *p = dev_get_platdata(&plat_dev->dev); u32 port_phy = p->port_phy; char *orig_buf = buf; int i; @@ -9390,7 +9390,7 @@ static ssize_t show_plat_type(struct device *dev, struct device_attribute *attr, char *buf) { struct platform_device *plat_dev = to_platform_device(dev); - struct niu_parent *p = plat_dev->dev.platform_data; + struct niu_parent *p = dev_get_platdata(&plat_dev->dev); const char *type_str; switch (p->plat_type) { @@ -9419,7 +9419,7 @@ static ssize_t __show_chan_per_port(struct device *dev, int rx) { struct platform_device *plat_dev = to_platform_device(dev); - struct niu_parent *p = plat_dev->dev.platform_data; + struct niu_parent *p = dev_get_platdata(&plat_dev->dev); char *orig_buf = buf; u8 *arr; int i; @@ -9452,7 +9452,7 @@ static ssize_t show_num_ports(struct device *dev, struct device_attribute *attr, char *buf) { struct platform_device *plat_dev = to_platform_device(dev); - struct niu_parent *p = plat_dev->dev.platform_data; + struct niu_parent *p = dev_get_platdata(&plat_dev->dev); return sprintf(buf, "%d\n", p->num_ports); } -- cgit v0.10.2 From a0ea2ac8976772b9a38bbfddf01a86223d84b863 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 30 Aug 2013 14:05:02 +0900 Subject: net: cpmac: use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. This is a cosmetic change to make the code simpler and enhance the readability. Signed-off-by: Jingoo Han Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c index 31bbbca..2dc16b6 100644 --- a/drivers/net/ethernet/ti/cpmac.c +++ b/drivers/net/ethernet/ti/cpmac.c @@ -636,7 +636,7 @@ static void cpmac_hw_stop(struct net_device *dev) { int i; struct cpmac_priv *priv = netdev_priv(dev); - struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data; + struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev); ar7_device_reset(pdata->reset_bit); cpmac_write(priv->regs, CPMAC_RX_CONTROL, @@ -659,7 +659,7 @@ static void cpmac_hw_start(struct net_device *dev) { int i; struct cpmac_priv *priv = netdev_priv(dev); - struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data; + struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev); ar7_device_reset(pdata->reset_bit); for (i = 0; i < 8; i++) { @@ -1118,7 +1118,7 @@ static int cpmac_probe(struct platform_device *pdev) struct net_device *dev; struct plat_cpmac_data *pdata; - pdata = pdev->dev.platform_data; + pdata = dev_get_platdata(&pdev->dev); if (external_switch || dumb_switch) { strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */ -- cgit v0.10.2 From 20e6f33bb2e8529b97d6bdbbc4a6d35799d43521 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 30 Aug 2013 12:29:57 +0900 Subject: net: davinci_emac: use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. This is a cosmetic change to make the code simpler and enhance the readability. Signed-off-by: Jingoo Han Acked-by: Mugunthan V N Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 1a222bce..67df09e 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1761,7 +1761,7 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv) const u8 *mac_addr; if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node) - return pdev->dev.platform_data; + return dev_get_platdata(&pdev->dev); pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) -- cgit v0.10.2 From 894cdbb0a40388fedde12ec893f6e900f6162ccf Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 30 Aug 2013 14:06:02 +0900 Subject: net: davinci_mdio: use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. This is a cosmetic change to make the code simpler and enhance the readability. Signed-off-by: Jingoo Han Acked-by: Mugunthan V N Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 7f85143..4ec9265 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -314,7 +314,7 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data, static int davinci_mdio_probe(struct platform_device *pdev) { - struct mdio_platform_data *pdata = pdev->dev.platform_data; + struct mdio_platform_data *pdata = dev_get_platdata(&pdev->dev); struct device *dev = &pdev->dev; struct davinci_mdio_data *data; struct resource *res; -- cgit v0.10.2 From 3ae603c40d224fa6a30535b9eaad29a9675ac4c9 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 30 Aug 2013 14:06:41 +0900 Subject: net: tsi108: use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. This is a cosmetic change to make the code simpler and enhance the readability. Signed-off-by: Jingoo Han Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index f2e76c9..c4dbf98 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -1558,7 +1558,7 @@ tsi108_init_one(struct platform_device *pdev) hw_info *einfo; int err = 0; - einfo = pdev->dev.platform_data; + einfo = dev_get_platdata(&pdev->dev); if (NULL == einfo) { printk(KERN_ERR "tsi-eth %d: Missing additional data!\n", -- cgit v0.10.2 From 59f92f49705eb28c44945f42378c2422119a8b57 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 30 Aug 2013 14:07:06 +0900 Subject: net: w5300: use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. This is a cosmetic change to make the code simpler and enhance the readability. Signed-off-by: Jingoo Han Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index e928845..71c27b3 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -542,7 +542,7 @@ static const struct net_device_ops w5300_netdev_ops = { static int w5300_hw_probe(struct platform_device *pdev) { - struct wiznet_platform_data *data = pdev->dev.platform_data; + struct wiznet_platform_data *data = dev_get_platdata(&pdev->dev); struct net_device *ndev = platform_get_drvdata(pdev); struct w5300_priv *priv = netdev_priv(ndev); const char *name = netdev_name(ndev); -- cgit v0.10.2 From 5988aa61088b01f91e9c2e935f2c857c8393d57d Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 30 Aug 2013 14:07:38 +0900 Subject: net: w5100: use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. This is a cosmetic change to make the code simpler and enhance the readability. Signed-off-by: Jingoo Han Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index 30fed08..0df36c6 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -622,7 +622,7 @@ static const struct net_device_ops w5100_netdev_ops = { static int w5100_hw_probe(struct platform_device *pdev) { - struct wiznet_platform_data *data = pdev->dev.platform_data; + struct wiznet_platform_data *data = dev_get_platdata(&pdev->dev); struct net_device *ndev = platform_get_drvdata(pdev); struct w5100_priv *priv = netdev_priv(ndev); const char *name = netdev_name(ndev); -- cgit v0.10.2 From 387d40ac69178e39dde7a08e65b9c54ff243be6d Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 30 Aug 2013 14:08:22 +0900 Subject: net: ixp4xx_eth: use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. This is a cosmetic change to make the code simpler and enhance the readability. Signed-off-by: Jingoo Han Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 3d689fc..e78802e 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -1384,7 +1384,7 @@ static int eth_init_one(struct platform_device *pdev) { struct port *port; struct net_device *dev; - struct eth_plat_info *plat = pdev->dev.platform_data; + struct eth_plat_info *plat = dev_get_platdata(&pdev->dev); u32 regs_phys; char phy_id[MII_BUS_ID_SIZE + 3]; int err; -- cgit v0.10.2 From 9bc7b1cd5ca7a732a198282b6937013df1c6881c Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 30 Aug 2013 14:08:55 +0900 Subject: net: mdio-gpio: use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. This is a cosmetic change to make the code simpler and enhance the readability. Signed-off-by: Jingoo Han Signed-off-by: David S. 
Miller diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index a47f923..8004acb 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -191,7 +191,7 @@ static int mdio_gpio_probe(struct platform_device *pdev) pdata = mdio_gpio_of_get_data(pdev); bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio"); } else { - pdata = pdev->dev.platform_data; + pdata = dev_get_platdata(&pdev->dev); bus_id = pdev->id; } -- cgit v0.10.2 From 1b0e53a8f9e67c5f41d873194af9877d32e19976 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 30 Aug 2013 14:09:26 +0900 Subject: net: mdio-mux-gpio: use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. This is a cosmetic change to make the code simpler and enhance the readability. Signed-off-by: Jingoo Han Signed-off-by: David S. Miller diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c index e91d7d7..d2dd9e4 100644 --- a/drivers/net/phy/mdio-mux-gpio.c +++ b/drivers/net/phy/mdio-mux-gpio.c @@ -106,7 +106,7 @@ err: static int mdio_mux_gpio_remove(struct platform_device *pdev) { - struct mdio_mux_gpio_state *s = pdev->dev.platform_data; + struct mdio_mux_gpio_state *s = dev_get_platdata(&pdev->dev); mdio_mux_uninit(s->mux_handle); return 0; } -- cgit v0.10.2 From 09a274805676239d52198503ad552537c1148aad Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 30 Aug 2013 14:11:53 +0900 Subject: net: at91_ether: use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. This is a cosmetic change to make the code simpler and enhance the readability. Signed-off-by: Jingoo Han Acked-by: Nicolas Ferre Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c index bb5d63f..ce75de9 100644 --- a/drivers/net/ethernet/cadence/at91_ether.c +++ b/drivers/net/ethernet/cadence/at91_ether.c @@ -304,7 +304,7 @@ MODULE_DEVICE_TABLE(of, at91ether_dt_ids); /* Detect MAC & PHY and perform ethernet interface initialization */ static int __init at91ether_probe(struct platform_device *pdev) { - struct macb_platform_data *board_data = pdev->dev.platform_data; + struct macb_platform_data *board_data = dev_get_platdata(&pdev->dev); struct resource *regs; struct net_device *dev; struct phy_device *phydev; -- cgit v0.10.2 From c607a0d99cef3e14a2fe99771348a49e6cb4fa55 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 30 Aug 2013 14:12:21 +0900 Subject: net: macb: use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. This is a cosmetic change to make the code simpler and enhance the readability. Signed-off-by: Jingoo Han Acked-by: Nicolas Ferre Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 7660c45..9257869 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -125,7 +125,7 @@ void macb_get_hwaddr(struct macb *bp) u8 addr[6]; int i; - pdata = bp->pdev->dev.platform_data; + pdata = dev_get_platdata(&bp->pdev->dev); /* Check all 4 address register for vaild address */ for (i = 0; i < 4; i++) { @@ -335,7 +335,7 @@ int macb_mii_init(struct macb *bp) bp->pdev->name, bp->pdev->id); bp->mii_bus->priv = bp; bp->mii_bus->parent = &bp->dev->dev; - pdata = bp->pdev->dev.platform_data; + pdata = dev_get_platdata(&bp->pdev->dev); bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); if (!bp->mii_bus->irq) { @@ -1851,7 +1851,7 @@ static int __init macb_probe(struct platform_device *pdev) err = of_get_phy_mode(pdev->dev.of_node); if (err < 0) { - pdata = pdev->dev.platform_data; + pdata = dev_get_platdata(&pdev->dev); if (pdata && pdata->is_rmii) bp->phy_interface = PHY_INTERFACE_MODE_RMII; else -- cgit v0.10.2 From f91b29f5b8709a3968ce0e155cfeab2bc6f733dd Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 30 Aug 2013 14:13:46 +0900 Subject: net: stmmac: use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. This is a cosmetic change to make the code simpler and enhance the readability. Signed-off-by: Jingoo Han Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 74a89a4..623ebc5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -118,7 +118,7 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) if (IS_ERR(addr)) return PTR_ERR(addr); - plat_dat = pdev->dev.platform_data; + plat_dat = dev_get_platdata(&pdev->dev); if (pdev->dev.of_node) { if (!plat_dat) plat_dat = devm_kzalloc(&pdev->dev, -- cgit v0.10.2 From 7d7628f3719e85e41d3948759ef969707036033d Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 29 Aug 2013 11:25:14 +0300 Subject: net/fec: cleanup types in fec_get_mac() My static checker complains that on some arches unsigned longs can be 8 characters which is larger than the buffer is only 6 chars. Additionally, Ben Hutchings points out that the buffer actually holds big endian data and the buffer we are reading from is CPU endian. Signed-off-by: Dan Carpenter Reviewed-by: Ben Hutchings Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 46019ba..0cd5e4b 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1099,10 +1099,10 @@ static void fec_get_mac(struct net_device *ndev) * 4) FEC mac registers set by bootloader */ if (!is_valid_ether_addr(iap)) { - *((unsigned long *) &tmpaddr[0]) = - be32_to_cpu(readl(fep->hwp + FEC_ADDR_LOW)); - *((unsigned short *) &tmpaddr[4]) = - be16_to_cpu(readl(fep->hwp + FEC_ADDR_HIGH) >> 16); + *((__be32 *) &tmpaddr[0]) = + cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW)); + *((__be16 *) &tmpaddr[4]) = + cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16); iap = &tmpaddr[0]; } -- cgit v0.10.2 From 6da7c8fcbcbdb50ec68c61b40d554c74850fdb91 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 27 Aug 2013 16:19:08 -0700 Subject: qdisc: allow setting default queuing discipline By default, the pfifo_fast queue discipline has been used by default for all devices. But we have better choices now. This patch allow setting the default queueing discipline with sysctl. This allows easy use of better queueing disciplines on all devices without having to use tc qdisc scripts. It is intended to allow an easy path for distributions to make fq_codel or sfq the default qdisc. This patch also makes pfifo_fast more of a first class qdisc, since it is now possible to manually override the default and explicitly use pfifo_fast. The behavior for systems who do not use the sysctl is unchanged, they still get pfifo_fast Also removes leftover random # in sysctl net core. Signed-off-by: Stephen Hemminger Acked-by: Eric Dumazet Signed-off-by: David S. Miller diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt index d569f2a..9a0319a 100644 --- a/Documentation/sysctl/net.txt +++ b/Documentation/sysctl/net.txt @@ -50,6 +50,19 @@ The maximum number of packets that kernel can handle on a NAPI interrupt, it's a Per-CPU variable. Default: 64 +default_qdisc +-------------- + +The default queuing discipline to use for network devices. This allows +overriding the default queue discipline of pfifo_fast with an +alternative. Since the default queuing discipline is created with the +no additional parameters so is best suited to queuing disciplines that +work well without configuration like stochastic fair queue (sfq), +CoDel (codel) or fair queue CoDel (fq_codel). Don't use queuing disciplines +like Hierarchical Token Bucket or Deficit Round Robin which require setting +up classes and bandwidths. +Default: pfifo_fast + busy_read ---------------- Low latency busy poll timeout for socket reads. 
(needs CONFIG_NET_RX_BUSY_POLL) diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index f7c24f8..59ec3cd 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -85,6 +85,9 @@ struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops, int register_qdisc(struct Qdisc_ops *qops); int unregister_qdisc(struct Qdisc_ops *qops); +void qdisc_get_default(char *id, size_t len); +int qdisc_set_default(const char *id); + void qdisc_list_del(struct Qdisc *q); struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle); struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle); diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 76368c9..2e31bf1 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -316,6 +316,7 @@ extern struct Qdisc noop_qdisc; extern struct Qdisc_ops noop_qdisc_ops; extern struct Qdisc_ops pfifo_fast_ops; extern struct Qdisc_ops mq_qdisc_ops; +extern const struct Qdisc_ops *default_qdisc_ops; struct Qdisc_class_common { u32 classid; diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 31107ab..cca4441 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -20,6 +20,7 @@ #include #include #include +#include static int zero = 0; static int one = 1; @@ -193,6 +194,26 @@ static int flow_limit_table_len_sysctl(struct ctl_table *table, int write, } #endif /* CONFIG_NET_FLOW_LIMIT */ +#ifdef CONFIG_NET_SCHED +static int set_default_qdisc(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + char id[IFNAMSIZ]; + struct ctl_table tbl = { + .data = id, + .maxlen = IFNAMSIZ, + }; + int ret; + + qdisc_get_default(id, IFNAMSIZ); + + ret = proc_dostring(&tbl, write, buffer, lenp, ppos); + if (write && ret == 0) + ret = qdisc_set_default(id); + return ret; +} +#endif + static struct ctl_table net_core_table[] = { #ifdef CONFIG_NET { @@ -315,7 +336,14 @@ static struct ctl_table net_core_table[] = { .mode = 0644, .proc_handler = proc_dointvec }, -# +#endif +#ifdef CONFIG_NET_SCHED + { + .procname = "default_qdisc", + .mode = 0644, + .maxlen = IFNAMSIZ, + .proc_handler = set_default_qdisc + }, #endif #endif /* CONFIG_NET */ { diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 51b968d..812e579 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -131,6 +131,11 @@ static DEFINE_RWLOCK(qdisc_mod_lock); ************************************************/ +/* Qdisc to use by default */ + +const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops; +EXPORT_SYMBOL(default_qdisc_ops); + /* The list of all installed queueing disciplines. 
*/ static struct Qdisc_ops *qdisc_base; @@ -200,6 +205,58 @@ int unregister_qdisc(struct Qdisc_ops *qops) } EXPORT_SYMBOL(unregister_qdisc); +/* Get default qdisc if not otherwise specified */ +void qdisc_get_default(char *name, size_t len) +{ + read_lock(&qdisc_mod_lock); + strlcpy(name, default_qdisc_ops->id, len); + read_unlock(&qdisc_mod_lock); +} + +static struct Qdisc_ops *qdisc_lookup_default(const char *name) +{ + struct Qdisc_ops *q = NULL; + + for (q = qdisc_base; q; q = q->next) { + if (!strcmp(name, q->id)) { + if (!try_module_get(q->owner)) + q = NULL; + break; + } + } + + return q; +} + +/* Set new default qdisc to use */ +int qdisc_set_default(const char *name) +{ + const struct Qdisc_ops *ops; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + write_lock(&qdisc_mod_lock); + ops = qdisc_lookup_default(name); + if (!ops) { + /* Not found, drop lock and try to load module */ + write_unlock(&qdisc_mod_lock); + request_module("sch_%s", name); + write_lock(&qdisc_mod_lock); + + ops = qdisc_lookup_default(name); + } + + if (ops) { + /* Set new default */ + module_put(default_qdisc_ops->owner); + default_qdisc_ops = ops; + } + write_unlock(&qdisc_mod_lock); + + return ops ? 0 : -ENOENT; +} + /* We know handle. Find qdisc among all qdisc's attached to device (root qdisc, all its children, children of children etc.) */ @@ -1854,6 +1911,7 @@ static int __init pktsched_init(void) return err; } + register_qdisc(&pfifo_fast_ops); register_qdisc(&pfifo_qdisc_ops); register_qdisc(&bfifo_qdisc_ops); register_qdisc(&pfifo_head_drop_qdisc_ops); diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 48be3d5..5078e0c 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -530,7 +530,6 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = { .dump = pfifo_fast_dump, .owner = THIS_MODULE, }; -EXPORT_SYMBOL(pfifo_fast_ops); static struct lock_class_key qdisc_tx_busylock; @@ -583,6 +582,9 @@ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, { struct Qdisc *sch; + if (!try_module_get(ops->owner)) + goto errout; + sch = qdisc_alloc(dev_queue, ops); if (IS_ERR(sch)) goto errout; @@ -686,7 +688,7 @@ static void attach_one_default_qdisc(struct net_device *dev, if (dev->tx_queue_len) { qdisc = qdisc_create_dflt(dev_queue, - &pfifo_fast_ops, TC_H_ROOT); + default_qdisc_ops, TC_H_ROOT); if (!qdisc) { netdev_info(dev, "activation failed\n"); return; @@ -739,9 +741,8 @@ void dev_activate(struct net_device *dev) int need_watchdog; /* No queueing discipline is attached to device; - create default one i.e. 
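/* Editorial sketch, not part of the patch: qdisc_set_default() above falls
 * back to request_module("sch_%s", name), so any discipline that registers
 * itself and is reachable as a module named or aliased "sch_<id>" becomes a
 * valid value for the new sysctl.  All identifiers below are hypothetical.
 */
#include <linux/module.h>
#include <net/pkt_sched.h>

static struct Qdisc_ops example_qdisc_ops __read_mostly = {
	.id	= "example",
	/* .enqueue/.dequeue/.init and friends omitted from this sketch */
	.owner	= THIS_MODULE,
};

static int __init example_sch_init(void)
{
	return register_qdisc(&example_qdisc_ops);
}

static void __exit example_sch_exit(void)
{
	unregister_qdisc(&example_qdisc_ops);
}

module_init(example_sch_init);
module_exit(example_sch_exit);
MODULE_ALIAS("sch_example");
MODULE_LICENSE("GPL");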
pfifo_fast for devices, - which need queueing and noqueue_qdisc for - virtual interfaces + * create default one for devices, which need queueing + * and noqueue_qdisc for virtual interfaces */ if (dev->qdisc == &noop_qdisc) diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c index 5da78a1..2e56185 100644 --- a/net/sched/sch_mq.c +++ b/net/sched/sch_mq.c @@ -57,7 +57,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt) for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { dev_queue = netdev_get_tx_queue(dev, ntx); - qdisc = qdisc_create_dflt(dev_queue, &pfifo_fast_ops, + qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops, TC_H_MAKE(TC_H_MAJ(sch->handle), TC_H_MIN(ntx + 1))); if (qdisc == NULL) diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c index accec33..d44c868 100644 --- a/net/sched/sch_mqprio.c +++ b/net/sched/sch_mqprio.c @@ -124,7 +124,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt) for (i = 0; i < dev->num_tx_queues; i++) { dev_queue = netdev_get_tx_queue(dev, i); - qdisc = qdisc_create_dflt(dev_queue, &pfifo_fast_ops, + qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops, TC_H_MAKE(TC_H_MAJ(sch->handle), TC_H_MIN(i + 1))); if (qdisc == NULL) { -- cgit v0.10.2 From d2a7f269f91299b8e5f6c7d2a41ba46248d73d24 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Sat, 31 Aug 2013 10:15:50 -0700 Subject: qdisc: make args to qdisc_create_default const Fixes warnings introduced by the qdisc default patch. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 2e31bf1..f4eb365 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -370,9 +370,9 @@ void qdisc_reset(struct Qdisc *qdisc); void qdisc_destroy(struct Qdisc *qdisc); void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n); struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, - struct Qdisc_ops *ops); + const struct Qdisc_ops *ops); struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, - struct Qdisc_ops *ops, u32 parentid); + const struct Qdisc_ops *ops, u32 parentid); void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab); void tcf_destroy(struct tcf_proto *tp); diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 5078e0c..6b02110 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -534,7 +534,7 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = { static struct lock_class_key qdisc_tx_busylock; struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, - struct Qdisc_ops *ops) + const struct Qdisc_ops *ops) { void *p; struct Qdisc *sch; @@ -578,7 +578,8 @@ errout: } struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, - struct Qdisc_ops *ops, unsigned int parentid) + const struct Qdisc_ops *ops, + unsigned int parentid) { struct Qdisc *sch; -- cgit v0.10.2 From 34aedd3f3b289edba118e66450e95790ccab5091 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Sat, 31 Aug 2013 10:15:33 -0700 Subject: qdisc: fix build with !CONFIG_NET_SCHED Multiqueue scheduler refers to default_qdisc_ops; therefore the variable definition needs to be moved to handle case where net scheduler API is not available. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 812e579..2adda7f 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -131,11 +131,6 @@ static DEFINE_RWLOCK(qdisc_mod_lock); ************************************************/ -/* Qdisc to use by default */ - -const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops; -EXPORT_SYMBOL(default_qdisc_ops); - /* The list of all installed queueing disciplines. */ static struct Qdisc_ops *qdisc_base; diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 6b02110..a74e278 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -30,6 +30,10 @@ #include #include +/* Qdisc to use by default */ +const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops; +EXPORT_SYMBOL(default_qdisc_ops); + /* Main transmission queue. */ /* Modifications to data participating in scheduling must be protected with -- cgit v0.10.2 From 3ce9b35ff6de8dfebb0b0045e667c000f632e563 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Sat, 31 Aug 2013 13:44:28 +0800 Subject: ipv6: move ip6_dst_hoplimit() into core kernel It will be used by vxlan, and may not be inlined. Cc: Eric Dumazet Signed-off-by: Cong Wang Signed-off-by: David S. Miller diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index f6672482..f525e70 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -112,8 +112,6 @@ extern struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, const struct in6_addr *addr, bool anycast); -extern int ip6_dst_hoplimit(struct dst_entry *dst); - /* * support functions for ND * diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 7bdff04..bbf1c8f 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -658,6 +658,8 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add extern void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt); +extern int ip6_dst_hoplimit(struct dst_entry *dst); + /* * Header manipulation */ diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c index ab92a36..53a0621 100644 --- a/net/ipv6/output_core.c +++ b/net/ipv6/output_core.c @@ -5,6 +5,7 @@ #include #include #include +#include void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt) { @@ -75,3 +76,24 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) return offset; } EXPORT_SYMBOL(ip6_find_1stfragopt); + +#if IS_ENABLED(CONFIG_IPV6) +int ip6_dst_hoplimit(struct dst_entry *dst) +{ + int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT); + if (hoplimit == 0) { + struct net_device *dev = dst->dev; + struct inet6_dev *idev; + + rcu_read_lock(); + idev = __in6_dev_get(dev); + if (idev) + hoplimit = idev->cnf.hop_limit; + else + hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit; + rcu_read_unlock(); + } + return hoplimit; +} +EXPORT_SYMBOL(ip6_dst_hoplimit); +#endif diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 55236a8..b770085 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1354,25 +1354,6 @@ out: return entries > rt_max_size; } -int ip6_dst_hoplimit(struct dst_entry *dst) -{ - int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT); - if (hoplimit == 0) { - struct net_device *dev = dst->dev; - struct inet6_dev *idev; - - rcu_read_lock(); - idev = __in6_dev_get(dev); - if (idev) - hoplimit = idev->cnf.hop_limit; - else - hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit; - rcu_read_unlock(); - } - return hoplimit; -} -EXPORT_SYMBOL(ip6_dst_hoplimit); - /* * */ -- cgit v0.10.2 From 788787b55913d456b9851d926d9a80ff00892c34 Mon Sep 17 
00:00:00 2001 From: Cong Wang Date: Sat, 31 Aug 2013 13:44:29 +0800 Subject: ipv6: move ip6_local_out into core kernel It will be used the vxlan kernel module. Signed-off-by: Cong Wang Signed-off-by: David S. Miller diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 6e3ddf8..dd08cfd 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -56,31 +56,6 @@ #include #include -int __ip6_local_out(struct sk_buff *skb) -{ - int len; - - len = skb->len - sizeof(struct ipv6hdr); - if (len > IPV6_MAXPLEN) - len = 0; - ipv6_hdr(skb)->payload_len = htons(len); - - return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, - skb_dst(skb)->dev, dst_output); -} - -int ip6_local_out(struct sk_buff *skb) -{ - int err; - - err = __ip6_local_out(skb); - if (likely(err == 1)) - err = dst_output(skb); - - return err; -} -EXPORT_SYMBOL_GPL(ip6_local_out); - static int ip6_finish_output2(struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c index 53a0621..827f795 100644 --- a/net/ipv6/output_core.c +++ b/net/ipv6/output_core.c @@ -97,3 +97,29 @@ int ip6_dst_hoplimit(struct dst_entry *dst) } EXPORT_SYMBOL(ip6_dst_hoplimit); #endif + +int __ip6_local_out(struct sk_buff *skb) +{ + int len; + + len = skb->len - sizeof(struct ipv6hdr); + if (len > IPV6_MAXPLEN) + len = 0; + ipv6_hdr(skb)->payload_len = htons(len); + + return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, + skb_dst(skb)->dev, dst_output); +} +EXPORT_SYMBOL_GPL(__ip6_local_out); + +int ip6_local_out(struct sk_buff *skb) +{ + int err; + + err = __ip6_local_out(skb); + if (likely(err == 1)) + err = dst_output(skb); + + return err; +} +EXPORT_SYMBOL_GPL(ip6_local_out); -- cgit v0.10.2 From 5f81bd2e5d804ca93f3ec8873451b22d2f454721 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Sat, 31 Aug 2013 13:44:30 +0800 Subject: ipv6: export a stub for IPv6 symbols used by vxlan In case IPv6 is compiled as a module, introduce a stub for ipv6_sock_mc_join and ipv6_sock_mc_drop etc.. It will be used by vxlan module. Suggested by Ben. This is an ugly but easy solution for now. Cc: Ben Hutchings Cc: Stephen Hemminger Cc: David S. Miller Signed-off-by: Cong Wang Signed-off-by: David S. Miller diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 43fa31a..5339cab 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -141,6 +141,21 @@ bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group, const struct in6_addr *src_addr); void ipv6_mc_dad_complete(struct inet6_dev *idev); + +/* A stub used by vxlan module. This is ugly, ideally these + * symbols should be built into the core kernel. 
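/* Editorial sketch, not part of the patch: a consumer such as vxlan is
 * expected to reach every IPv6 symbol through this stub pointer, so the
 * module still links when CONFIG_IPV6=m and ipv6.ko is absent.  The helper
 * name below is hypothetical; the call only works once inet6_init() has
 * populated ipv6_stub.
 */
#include <net/addrconf.h>
#include <net/flow.h>

#if IS_ENABLED(CONFIG_IPV6)
static int example_v6_route_lookup(struct sock *sk, struct flowi6 *fl6,
				   struct dst_entry **ndst)
{
	if (!ipv6_stub)			/* IPv6 module not (yet) loaded */
		return -EAFNOSUPPORT;
	return ipv6_stub->ipv6_dst_lookup(sk, ndst, fl6);
}
#endif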
+ */ +struct ipv6_stub { + int (*ipv6_sock_mc_join)(struct sock *sk, int ifindex, + const struct in6_addr *addr); + int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex, + const struct in6_addr *addr); + int (*ipv6_dst_lookup)(struct sock *sk, struct dst_entry **dst, + struct flowi6 *fl6); + void (*udpv6_encap_enable)(void); +}; +extern const struct ipv6_stub *ipv6_stub __read_mostly; + /* * identify MLD packets for MLD filter exceptions */ diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c index d2f8742..93b24c6 100644 --- a/net/ipv6/addrconf_core.c +++ b/net/ipv6/addrconf_core.c @@ -98,3 +98,6 @@ int inet6addr_notifier_call_chain(unsigned long val, void *v) return atomic_notifier_call_chain(&inet6addr_chain, val, v); } EXPORT_SYMBOL(inet6addr_notifier_call_chain); + +const struct ipv6_stub *ipv6_stub __read_mostly; +EXPORT_SYMBOL_GPL(ipv6_stub); diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 0d1a9b1..0c9c22f 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -810,6 +810,13 @@ static struct pernet_operations inet6_net_ops = { .exit = inet6_net_exit, }; +static const struct ipv6_stub ipv6_stub_impl = { + .ipv6_sock_mc_join = ipv6_sock_mc_join, + .ipv6_sock_mc_drop = ipv6_sock_mc_drop, + .ipv6_dst_lookup = ip6_dst_lookup, + .udpv6_encap_enable = udpv6_encap_enable, +}; + static int __init inet6_init(void) { struct list_head *r; @@ -884,6 +891,9 @@ static int __init inet6_init(void) err = igmp6_init(); if (err) goto igmp_fail; + + ipv6_stub = &ipv6_stub_impl; + err = ipv6_netfilter_init(); if (err) goto netfilter_fail; @@ -1040,6 +1050,7 @@ static void __exit inet6_exit(void) raw6_proc_exit(); #endif ipv6_netfilter_fini(); + ipv6_stub = NULL; igmp6_cleanup(); ndisc_cleanup(); ip6_mr_cleanup(); -- cgit v0.10.2 From 034dfc5df99eb8d263211524983b1a737b25c06b Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Sat, 31 Aug 2013 13:44:31 +0800 Subject: ipv6: export in6addr_loopback to modules It is needed by vxlan module. Noticed by Mike. Cc: Mike Rapoport Signed-off-by: Cong Wang Signed-off-by: David S. 
Miller diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index a7183fc..6194513 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -245,15 +245,6 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { .suppress_frag_ndisc = 1, }; -/* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */ -const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT; -const struct in6_addr in6addr_loopback = IN6ADDR_LOOPBACK_INIT; -const struct in6_addr in6addr_linklocal_allnodes = IN6ADDR_LINKLOCAL_ALLNODES_INIT; -const struct in6_addr in6addr_linklocal_allrouters = IN6ADDR_LINKLOCAL_ALLROUTERS_INIT; -const struct in6_addr in6addr_interfacelocal_allnodes = IN6ADDR_INTERFACELOCAL_ALLNODES_INIT; -const struct in6_addr in6addr_interfacelocal_allrouters = IN6ADDR_INTERFACELOCAL_ALLROUTERS_INIT; -const struct in6_addr in6addr_sitelocal_allrouters = IN6ADDR_SITELOCAL_ALLROUTERS_INIT; - /* Check if a valid qdisc is available */ static inline bool addrconf_qdisc_ok(const struct net_device *dev) { diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c index 93b24c6..a864033 100644 --- a/net/ipv6/addrconf_core.c +++ b/net/ipv6/addrconf_core.c @@ -101,3 +101,19 @@ EXPORT_SYMBOL(inet6addr_notifier_call_chain); const struct ipv6_stub *ipv6_stub __read_mostly; EXPORT_SYMBOL_GPL(ipv6_stub); + +/* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */ +const struct in6_addr in6addr_loopback = IN6ADDR_LOOPBACK_INIT; +EXPORT_SYMBOL(in6addr_loopback); +const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT; +EXPORT_SYMBOL(in6addr_any); +const struct in6_addr in6addr_linklocal_allnodes = IN6ADDR_LINKLOCAL_ALLNODES_INIT; +EXPORT_SYMBOL(in6addr_linklocal_allnodes); +const struct in6_addr in6addr_linklocal_allrouters = IN6ADDR_LINKLOCAL_ALLROUTERS_INIT; +EXPORT_SYMBOL(in6addr_linklocal_allrouters); +const struct in6_addr in6addr_interfacelocal_allnodes = IN6ADDR_INTERFACELOCAL_ALLNODES_INIT; +EXPORT_SYMBOL(in6addr_interfacelocal_allnodes); +const struct in6_addr in6addr_interfacelocal_allrouters = IN6ADDR_INTERFACELOCAL_ALLROUTERS_INIT; +EXPORT_SYMBOL(in6addr_interfacelocal_allrouters); +const struct in6_addr in6addr_sitelocal_allrouters = IN6ADDR_SITELOCAL_ALLROUTERS_INIT; +EXPORT_SYMBOL(in6addr_sitelocal_allrouters); -- cgit v0.10.2 From caf92bc4007036cfac8ee06c845fdfe6496e4fb3 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Sat, 31 Aug 2013 13:44:32 +0800 Subject: ipv6: do not call ndisc_send_rs() with write lock Because vxlan module will call ip6_dst_lookup() in TX path, which will hold write lock. So we have to release this write lock before calling ndisc_send_rs(), otherwise could deadlock. Reviewed-by: Hannes Frederic Sowa Signed-off-by: Cong Wang Signed-off-by: David S. 
Miller diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 6194513..baaaead 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3090,6 +3090,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) static void addrconf_rs_timer(unsigned long data) { struct inet6_dev *idev = (struct inet6_dev *)data; + struct net_device *dev = idev->dev; struct in6_addr lladdr; write_lock(&idev->lock); @@ -3104,12 +3105,14 @@ static void addrconf_rs_timer(unsigned long data) goto out; if (idev->rs_probes++ < idev->cnf.rtr_solicits) { - if (!__ipv6_get_lladdr(idev, &lladdr, IFA_F_TENTATIVE)) - ndisc_send_rs(idev->dev, &lladdr, + write_unlock(&idev->lock); + if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE)) + ndisc_send_rs(dev, &lladdr, &in6addr_linklocal_allrouters); else - goto out; + goto put; + write_lock(&idev->lock); /* The wait after the last probe can be shorter */ addrconf_mod_rs_timer(idev, (idev->rs_probes == idev->cnf.rtr_solicits) ? @@ -3125,6 +3128,7 @@ static void addrconf_rs_timer(unsigned long data) out: write_unlock(&idev->lock); +put: in6_dev_put(idev); } -- cgit v0.10.2 From e4c7ed415387cf718ffbec305396c30cee092987 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Sat, 31 Aug 2013 13:44:33 +0800 Subject: vxlan: add ipv6 support This patch adds IPv6 support to vxlan device, as the new version RFC already mentions it: http://tools.ietf.org/html/draft-mahalingam-dutt-dcops-vxlan-03 Cc: David Stevens Cc: Stephen Hemminger Cc: David S. Miller Signed-off-by: Cong Wang Signed-off-by: David S. Miller diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 3b21aca..faf131e 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -6,9 +6,6 @@ * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. - * - * TODO - * - IPv6 (not in RFC) */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -43,6 +40,11 @@ #include #include #include +#if IS_ENABLED(CONFIG_IPV6) +#include +#include +#include +#endif #define VXLAN_VERSION "0.1" @@ -59,6 +61,8 @@ #define VXLAN_VID_MASK (VXLAN_N_VID - 1) /* IP header + UDP + VXLAN + Ethernet header */ #define VXLAN_HEADROOM (20 + 8 + 8 + 14) +/* IPv6 header + UDP + VXLAN + Ethernet header */ +#define VXLAN6_HEADROOM (40 + 8 + 8 + 14) #define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr)) #define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. 
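/* Editorial sanity check, not part of the patch: the IPv6 encapsulation
 * overhead is 40 (IPv6) + 8 (UDP) + 8 (VXLAN) + 14 (Ethernet) = 70 bytes,
 * 20 bytes more than the IPv4 case, which is why the MTU and
 * hard_header_len adjustments later in this patch select VXLAN6_HEADROOM
 * (giving a 1500 - 70 = 1430 byte inner MTU on a standard Ethernet lower
 * device).  A hypothetical compile-time assertion of that arithmetic:
 */
static inline void vxlan6_headroom_sanity(void)
{
	BUILD_BUG_ON(VXLAN6_HEADROOM != 40 + 8 + 8 + 14);
	BUILD_BUG_ON(VXLAN6_HEADROOM - VXLAN_HEADROOM != 20);
}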
*/ @@ -92,8 +96,14 @@ struct vxlan_net { spinlock_t sock_lock; }; +union vxlan_addr { + struct sockaddr_in sin; + struct sockaddr_in6 sin6; + struct sockaddr sa; +}; + struct vxlan_rdst { - __be32 remote_ip; + union vxlan_addr remote_ip; __be16 remote_port; u32 remote_vni; u32 remote_ifindex; @@ -120,7 +130,7 @@ struct vxlan_dev { struct vxlan_sock *vn_sock; /* listening socket */ struct net_device *dev; struct vxlan_rdst default_dst; /* default destination */ - __be32 saddr; /* source address */ + union vxlan_addr saddr; /* source address */ __be16 dst_port; __u16 port_min; /* source port range */ __u16 port_max; @@ -146,6 +156,7 @@ struct vxlan_dev { #define VXLAN_F_RSC 0x04 #define VXLAN_F_L2MISS 0x08 #define VXLAN_F_L3MISS 0x10 +#define VXLAN_F_IPV6 0x20 /* internal flag */ /* salt for hash table */ static u32 vxlan_salt __read_mostly; @@ -153,6 +164,96 @@ static struct workqueue_struct *vxlan_wq; static void vxlan_sock_work(struct work_struct *work); +#if IS_ENABLED(CONFIG_IPV6) +static inline +bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b) +{ + if (a->sa.sa_family != b->sa.sa_family) + return false; + if (a->sa.sa_family == AF_INET6) + return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr); + else + return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr; +} + +static inline bool vxlan_addr_any(const union vxlan_addr *ipa) +{ + if (ipa->sa.sa_family == AF_INET6) + return ipv6_addr_any(&ipa->sin6.sin6_addr); + else + return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY); +} + +static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa) +{ + if (ipa->sa.sa_family == AF_INET6) + return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr); + else + return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr)); +} + +static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla) +{ + if (nla_len(nla) >= sizeof(struct in6_addr)) { + nla_memcpy(&ip->sin6.sin6_addr, nla, sizeof(struct in6_addr)); + ip->sa.sa_family = AF_INET6; + return 0; + } else if (nla_len(nla) >= sizeof(__be32)) { + ip->sin.sin_addr.s_addr = nla_get_be32(nla); + ip->sa.sa_family = AF_INET; + return 0; + } else { + return -EAFNOSUPPORT; + } +} + +static int vxlan_nla_put_addr(struct sk_buff *skb, int attr, + const union vxlan_addr *ip) +{ + if (ip->sa.sa_family == AF_INET6) + return nla_put(skb, attr, sizeof(struct in6_addr), &ip->sin6.sin6_addr); + else + return nla_put_be32(skb, attr, ip->sin.sin_addr.s_addr); +} + +#else /* !CONFIG_IPV6 */ + +static inline +bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b) +{ + return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr; +} + +static inline bool vxlan_addr_any(const union vxlan_addr *ipa) +{ + return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY); +} + +static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa) +{ + return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr)); +} + +static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla) +{ + if (nla_len(nla) >= sizeof(struct in6_addr)) { + return -EAFNOSUPPORT; + } else if (nla_len(nla) >= sizeof(__be32)) { + ip->sin.sin_addr.s_addr = nla_get_be32(nla); + ip->sa.sa_family = AF_INET; + return 0; + } else { + return -EAFNOSUPPORT; + } +} + +static int vxlan_nla_put_addr(struct sk_buff *skb, int attr, + const union vxlan_addr *ip) +{ + return nla_put_be32(skb, attr, ip->sin.sin_addr.s_addr); +} +#endif + /* Virtual Network hash table head */ static inline struct hlist_head *vni_head(struct vxlan_sock *vs, u32 id) { @@ -239,7 +340,7 @@ static int 
vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan, if (type == RTM_GETNEIGH) { ndm->ndm_family = AF_INET; - send_ip = rdst->remote_ip != htonl(INADDR_ANY); + send_ip = !vxlan_addr_any(&rdst->remote_ip); send_eth = !is_zero_ether_addr(fdb->eth_addr); } else ndm->ndm_family = AF_BRIDGE; @@ -251,7 +352,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan, if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr)) goto nla_put_failure; - if (send_ip && nla_put_be32(skb, NDA_DST, rdst->remote_ip)) + if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip)) goto nla_put_failure; if (rdst->remote_port && rdst->remote_port != vxlan->dst_port && @@ -283,7 +384,7 @@ static inline size_t vxlan_nlmsg_size(void) { return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN) /* NDA_LLADDR */ - + nla_total_size(sizeof(__be32)) /* NDA_DST */ + + nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */ + nla_total_size(sizeof(__be16)) /* NDA_PORT */ + nla_total_size(sizeof(__be32)) /* NDA_VNI */ + nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */ @@ -317,14 +418,14 @@ errout: rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); } -static void vxlan_ip_miss(struct net_device *dev, __be32 ipa) +static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa) { struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_fdb f = { .state = NUD_STALE, }; struct vxlan_rdst remote = { - .remote_ip = ipa, /* goes to NDA_DST */ + .remote_ip = *ipa, /* goes to NDA_DST */ .remote_vni = VXLAN_N_VID, }; @@ -397,13 +498,13 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan, /* caller should hold vxlan->hash_lock */ static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f, - __be32 ip, __be16 port, + union vxlan_addr *ip, __be16 port, __u32 vni, __u32 ifindex) { struct vxlan_rdst *rd; list_for_each_entry(rd, &f->remotes, list) { - if (rd->remote_ip == ip && + if (vxlan_addr_equal(&rd->remote_ip, ip) && rd->remote_port == port && rd->remote_vni == vni && rd->remote_ifindex == ifindex) @@ -415,7 +516,7 @@ static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f, /* Replace destination of unicast mac */ static int vxlan_fdb_replace(struct vxlan_fdb *f, - __be32 ip, __be16 port, __u32 vni, __u32 ifindex) + union vxlan_addr *ip, __be16 port, __u32 vni, __u32 ifindex) { struct vxlan_rdst *rd; @@ -426,7 +527,7 @@ static int vxlan_fdb_replace(struct vxlan_fdb *f, rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list); if (!rd) return 0; - rd->remote_ip = ip; + rd->remote_ip = *ip; rd->remote_port = port; rd->remote_vni = vni; rd->remote_ifindex = ifindex; @@ -435,7 +536,7 @@ static int vxlan_fdb_replace(struct vxlan_fdb *f, /* Add/update destinations for multicast */ static int vxlan_fdb_append(struct vxlan_fdb *f, - __be32 ip, __be16 port, __u32 vni, __u32 ifindex) + union vxlan_addr *ip, __be16 port, __u32 vni, __u32 ifindex) { struct vxlan_rdst *rd; @@ -446,7 +547,7 @@ static int vxlan_fdb_append(struct vxlan_fdb *f, rd = kmalloc(sizeof(*rd), GFP_ATOMIC); if (rd == NULL) return -ENOBUFS; - rd->remote_ip = ip; + rd->remote_ip = *ip; rd->remote_port = port; rd->remote_vni = vni; rd->remote_ifindex = ifindex; @@ -458,7 +559,7 @@ static int vxlan_fdb_append(struct vxlan_fdb *f, /* Add new entry to forwarding table -- assumes lock held */ static int vxlan_fdb_create(struct vxlan_dev *vxlan, - const u8 *mac, __be32 ip, + const u8 *mac, union vxlan_addr *ip, __u16 state, __u16 flags, __be16 port, __u32 vni, __u32 ifindex, __u8 ndm_flags) @@ 
-517,7 +618,7 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan, (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac))) return -EOPNOTSUPP; - netdev_dbg(vxlan->dev, "add %pM -> %pI4\n", mac, &ip); + netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); f = kmalloc(sizeof(*f), GFP_ATOMIC); if (!f) return -ENOMEM; @@ -565,17 +666,26 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f) } static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, - __be32 *ip, __be16 *port, u32 *vni, u32 *ifindex) + union vxlan_addr *ip, __be16 *port, u32 *vni, u32 *ifindex) { struct net *net = dev_net(vxlan->dev); + int err; if (tb[NDA_DST]) { - if (nla_len(tb[NDA_DST]) != sizeof(__be32)) - return -EAFNOSUPPORT; - - *ip = nla_get_be32(tb[NDA_DST]); + err = vxlan_nla_get_addr(ip, tb[NDA_DST]); + if (err) + return err; } else { - *ip = htonl(INADDR_ANY); + union vxlan_addr *remote = &vxlan->default_dst.remote_ip; + if (remote->sa.sa_family == AF_INET) { + ip->sin.sin_addr.s_addr = htonl(INADDR_ANY); + ip->sa.sa_family = AF_INET; +#if IS_ENABLED(CONFIG_IPV6) + } else { + ip->sin6.sin6_addr = in6addr_any; + ip->sa.sa_family = AF_INET6; +#endif + } } if (tb[NDA_PORT]) { @@ -618,7 +728,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], { struct vxlan_dev *vxlan = netdev_priv(dev); /* struct net *net = dev_net(vxlan->dev); */ - __be32 ip; + union vxlan_addr ip; __be16 port; u32 vni, ifindex; int err; @@ -637,7 +747,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return err; spin_lock_bh(&vxlan->hash_lock); - err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags, + err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags, port, vni, ifindex, ndm->ndm_flags); spin_unlock_bh(&vxlan->hash_lock); @@ -652,7 +762,7 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_fdb *f; struct vxlan_rdst *rd = NULL; - __be32 ip; + union vxlan_addr ip; __be16 port; u32 vni, ifindex; int err; @@ -668,8 +778,8 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], if (!f) goto out; - if (ip != htonl(INADDR_ANY)) { - rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex); + if (!vxlan_addr_any(&ip)) { + rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex); if (!rd) goto out; } @@ -732,7 +842,7 @@ out: * Return true if packet is bogus and should be droppped. 
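/* Editorial aside, not part of the patch: the debug messages in this area
 * move from the IPv4-only "%pI4" specifier to "%pIS", which takes a pointer
 * to a struct sockaddr and prints whichever address family it carries, so a
 * single format string now covers both members of union vxlan_addr.
 * Hypothetical usage:
 */
static void example_print_remote(const union vxlan_addr *a)
{
	pr_debug("remote %pIS\n", &a->sa);	/* 127.0.0.1, fe80::1, ... */
}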
*/ static bool vxlan_snoop(struct net_device *dev, - __be32 src_ip, const u8 *src_mac) + union vxlan_addr *src_ip, const u8 *src_mac) { struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_fdb *f; @@ -741,7 +851,7 @@ static bool vxlan_snoop(struct net_device *dev, if (likely(f)) { struct vxlan_rdst *rdst = first_remote_rcu(f); - if (likely(rdst->remote_ip == src_ip)) + if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip))) return false; /* Don't migrate static entries, drop packets */ @@ -750,10 +860,10 @@ static bool vxlan_snoop(struct net_device *dev, if (net_ratelimit()) netdev_info(dev, - "%pM migrated from %pI4 to %pI4\n", + "%pM migrated from %pIS to %pIS\n", src_mac, &rdst->remote_ip, &src_ip); - rdst->remote_ip = src_ip; + rdst->remote_ip = *src_ip; f->updated = jiffies; vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH); } else { @@ -775,7 +885,7 @@ static bool vxlan_snoop(struct net_device *dev, } /* See if multicast group is already in use by other ID */ -static bool vxlan_group_used(struct vxlan_net *vn, __be32 remote_ip) +static bool vxlan_group_used(struct vxlan_net *vn, union vxlan_addr *remote_ip) { struct vxlan_dev *vxlan; @@ -783,7 +893,8 @@ static bool vxlan_group_used(struct vxlan_net *vn, __be32 remote_ip) if (!netif_running(vxlan->dev)) continue; - if (vxlan->default_dst.remote_ip == remote_ip) + if (vxlan_addr_equal(&vxlan->default_dst.remote_ip, + remote_ip)) return true; } @@ -819,13 +930,23 @@ static void vxlan_igmp_join(struct work_struct *work) struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_join); struct vxlan_sock *vs = vxlan->vn_sock; struct sock *sk = vs->sock->sk; - struct ip_mreqn mreq = { - .imr_multiaddr.s_addr = vxlan->default_dst.remote_ip, - .imr_ifindex = vxlan->default_dst.remote_ifindex, - }; + union vxlan_addr *ip = &vxlan->default_dst.remote_ip; + int ifindex = vxlan->default_dst.remote_ifindex; lock_sock(sk); - ip_mc_join_group(sk, &mreq); + if (ip->sa.sa_family == AF_INET) { + struct ip_mreqn mreq = { + .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr, + .imr_ifindex = ifindex, + }; + + ip_mc_join_group(sk, &mreq); +#if IS_ENABLED(CONFIG_IPV6) + } else { + ipv6_stub->ipv6_sock_mc_join(sk, ifindex, + &ip->sin6.sin6_addr); +#endif + } release_sock(sk); vxlan_sock_release(vs); @@ -838,13 +959,24 @@ static void vxlan_igmp_leave(struct work_struct *work) struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_leave); struct vxlan_sock *vs = vxlan->vn_sock; struct sock *sk = vs->sock->sk; - struct ip_mreqn mreq = { - .imr_multiaddr.s_addr = vxlan->default_dst.remote_ip, - .imr_ifindex = vxlan->default_dst.remote_ifindex, - }; + union vxlan_addr *ip = &vxlan->default_dst.remote_ip; + int ifindex = vxlan->default_dst.remote_ifindex; lock_sock(sk); - ip_mc_leave_group(sk, &mreq); + if (ip->sa.sa_family == AF_INET) { + struct ip_mreqn mreq = { + .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr, + .imr_ifindex = ifindex, + }; + + ip_mc_leave_group(sk, &mreq); +#if IS_ENABLED(CONFIG_IPV6) + } else { + ipv6_stub->ipv6_sock_mc_drop(sk, ifindex, + &ip->sin6.sin6_addr); +#endif + } + release_sock(sk); vxlan_sock_release(vs); @@ -896,11 +1028,14 @@ error: static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, __be32 vx_vni) { - struct iphdr *oip; + struct iphdr *oip = NULL; + struct ipv6hdr *oip6 = NULL; struct vxlan_dev *vxlan; struct pcpu_tstats *stats; + union vxlan_addr saddr; __u32 vni; - int err; + int err = 0; + union vxlan_addr *remote_ip; vni = ntohl(vx_vni) >> 8; /* Is this VNI defined? 
*/ @@ -908,6 +1043,7 @@ static void vxlan_rcv(struct vxlan_sock *vs, if (!vxlan) goto drop; + remote_ip = &vxlan->default_dst.remote_ip; skb_reset_mac_header(skb); skb->protocol = eth_type_trans(skb, vxlan->dev); @@ -917,9 +1053,20 @@ static void vxlan_rcv(struct vxlan_sock *vs, goto drop; /* Re-examine inner Ethernet packet */ - oip = ip_hdr(skb); + if (remote_ip->sa.sa_family == AF_INET) { + oip = ip_hdr(skb); + saddr.sin.sin_addr.s_addr = oip->saddr; + saddr.sa.sa_family = AF_INET; +#if IS_ENABLED(CONFIG_IPV6) + } else { + oip6 = ipv6_hdr(skb); + saddr.sin6.sin6_addr = oip6->saddr; + saddr.sa.sa_family = AF_INET6; +#endif + } + if ((vxlan->flags & VXLAN_F_LEARN) && - vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source)) + vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source)) goto drop; skb_reset_network_header(skb); @@ -935,11 +1082,20 @@ static void vxlan_rcv(struct vxlan_sock *vs, skb->encapsulation = 0; - err = IP_ECN_decapsulate(oip, skb); + if (oip6) + err = IP6_ECN_decapsulate(oip6, skb); + if (oip) + err = IP_ECN_decapsulate(oip, skb); + if (unlikely(err)) { - if (log_ecn_error) - net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n", - &oip->saddr, oip->tos); + if (log_ecn_error) { + if (oip6) + net_info_ratelimited("non-ECT from %pI6\n", + &oip6->saddr); + if (oip) + net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n", + &oip->saddr, oip->tos); + } if (err > 1) { ++vxlan->dev->stats.rx_frame_errors; ++vxlan->dev->stats.rx_errors; @@ -1009,7 +1165,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb) } f = vxlan_find_mac(vxlan, n->ha); - if (f && first_remote_rcu(f)->remote_ip == htonl(INADDR_ANY)) { + if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) { /* bridge-local neighbor */ neigh_release(n); goto out; @@ -1027,8 +1183,14 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb) if (netif_rx_ni(reply) == NET_RX_DROP) dev->stats.rx_dropped++; - } else if (vxlan->flags & VXLAN_F_L3MISS) - vxlan_ip_miss(dev, tip); + } else if (vxlan->flags & VXLAN_F_L3MISS) { + union vxlan_addr ipa = { + .sin.sin_addr.s_addr = tip, + .sa.sa_family = AF_INET, + }; + + vxlan_ip_miss(dev, &ipa); + } out: consume_skb(skb); return NETDEV_TX_OK; @@ -1050,6 +1212,16 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb) return false; pip = ip_hdr(skb); n = neigh_lookup(&arp_tbl, &pip->daddr, dev); + if (!n && (vxlan->flags & VXLAN_F_L3MISS)) { + union vxlan_addr ipa = { + .sin.sin_addr.s_addr = pip->daddr, + .sa.sa_family = AF_INET, + }; + + vxlan_ip_miss(dev, &ipa); + return false; + } + break; default: return false; @@ -1066,8 +1238,8 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb) } neigh_release(n); return diff; - } else if (vxlan->flags & VXLAN_F_L3MISS) - vxlan_ip_miss(dev, pip->daddr); + } + return false; } @@ -1118,6 +1290,102 @@ static int handle_offloads(struct sk_buff *skb) return 0; } +#if IS_ENABLED(CONFIG_IPV6) +static int vxlan6_xmit_skb(struct net *net, struct vxlan_sock *vs, + struct dst_entry *dst, struct sk_buff *skb, + struct net_device *dev, struct in6_addr *saddr, + struct in6_addr *daddr, __u8 prio, __u8 ttl, + __be16 src_port, __be16 dst_port, __be32 vni) +{ + struct ipv6hdr *ip6h; + struct vxlanhdr *vxh; + struct udphdr *uh; + int min_headroom; + int err; + + if (!skb->encapsulation) { + skb_reset_inner_headers(skb); + skb->encapsulation = 1; + } + + min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len + + VXLAN_HLEN + sizeof(struct ipv6hdr) + + 
(vlan_tx_tag_present(skb) ? VLAN_HLEN : 0); + + /* Need space for new headers (invalidates iph ptr) */ + err = skb_cow_head(skb, min_headroom); + if (unlikely(err)) + return err; + + if (vlan_tx_tag_present(skb)) { + if (WARN_ON(!__vlan_put_tag(skb, + skb->vlan_proto, + vlan_tx_tag_get(skb)))) + return -ENOMEM; + + skb->vlan_tci = 0; + } + + vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); + vxh->vx_flags = htonl(VXLAN_FLAGS); + vxh->vx_vni = vni; + + __skb_push(skb, sizeof(*uh)); + skb_reset_transport_header(skb); + uh = udp_hdr(skb); + + uh->dest = dst_port; + uh->source = src_port; + + uh->len = htons(skb->len); + uh->check = 0; + + memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); + IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | + IPSKB_REROUTED); + skb_dst_drop(skb); + skb_dst_set(skb, dst); + + if (!skb_is_gso(skb) && !(dst->dev->features & NETIF_F_IPV6_CSUM)) { + __wsum csum = skb_checksum(skb, 0, skb->len, 0); + skb->ip_summed = CHECKSUM_UNNECESSARY; + uh->check = csum_ipv6_magic(saddr, daddr, skb->len, + IPPROTO_UDP, csum); + if (uh->check == 0) + uh->check = CSUM_MANGLED_0; + } else { + skb->ip_summed = CHECKSUM_PARTIAL; + skb->csum_start = skb_transport_header(skb) - skb->head; + skb->csum_offset = offsetof(struct udphdr, check); + uh->check = ~csum_ipv6_magic(saddr, daddr, + skb->len, IPPROTO_UDP, 0); + } + + __skb_push(skb, sizeof(*ip6h)); + skb_reset_network_header(skb); + ip6h = ipv6_hdr(skb); + ip6h->version = 6; + ip6h->priority = prio; + ip6h->flow_lbl[0] = 0; + ip6h->flow_lbl[1] = 0; + ip6h->flow_lbl[2] = 0; + ip6h->payload_len = htons(skb->len); + ip6h->nexthdr = IPPROTO_UDP; + ip6h->hop_limit = ttl; + ip6h->daddr = *daddr; + ip6h->saddr = *saddr; + + vxlan_set_owner(vs->sock->sk, skb); + + err = handle_offloads(skb); + if (err) + return err; + + ip6tunnel_xmit(skb, dev); + return 0; +} +#endif + int vxlan_xmit_skb(struct net *net, struct vxlan_sock *vs, struct rtable *rt, struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, @@ -1182,15 +1450,26 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, { struct pcpu_tstats *tx_stats = this_cpu_ptr(src_vxlan->dev->tstats); struct pcpu_tstats *rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats); + union vxlan_addr loopback; + union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip; skb->pkt_type = PACKET_HOST; skb->encapsulation = 0; skb->dev = dst_vxlan->dev; __skb_pull(skb, skb_network_offset(skb)); + if (remote_ip->sa.sa_family == AF_INET) { + loopback.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK); + loopback.sa.sa_family = AF_INET; +#if IS_ENABLED(CONFIG_IPV6) + } else { + loopback.sin6.sin6_addr = in6addr_loopback; + loopback.sa.sa_family = AF_INET6; +#endif + } + if (dst_vxlan->flags & VXLAN_F_LEARN) - vxlan_snoop(skb->dev, htonl(INADDR_LOOPBACK), - eth_hdr(skb)->h_source); + vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source); u64_stats_update_begin(&tx_stats->syncp); tx_stats->tx_packets++; @@ -1211,11 +1490,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, struct vxlan_rdst *rdst, bool did_rsc) { struct vxlan_dev *vxlan = netdev_priv(dev); - struct rtable *rt; + struct rtable *rt = NULL; const struct iphdr *old_iph; struct flowi4 fl4; - __be32 dst; - __be16 src_port, dst_port; + union vxlan_addr *dst; + __be16 src_port = 0, dst_port; u32 vni; __be16 df = 0; __u8 tos, ttl; @@ -1223,9 +1502,9 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, dst_port = rdst->remote_port 
? rdst->remote_port : vxlan->dst_port; vni = rdst->remote_vni; - dst = rdst->remote_ip; + dst = &rdst->remote_ip; - if (!dst) { + if (vxlan_addr_any(dst)) { if (did_rsc) { /* short-circuited back to local bridge */ vxlan_encap_bypass(skb, vxlan, vxlan); @@ -1237,7 +1516,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, old_iph = ip_hdr(skb); ttl = vxlan->ttl; - if (!ttl && IN_MULTICAST(ntohl(dst))) + if (!ttl && vxlan_addr_multicast(dst)) ttl = 1; tos = vxlan->tos; @@ -1246,48 +1525,101 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, src_port = vxlan_src_port(vxlan->port_min, vxlan->port_max, skb); - memset(&fl4, 0, sizeof(fl4)); - fl4.flowi4_oif = rdst->remote_ifindex; - fl4.flowi4_tos = RT_TOS(tos); - fl4.daddr = dst; - fl4.saddr = vxlan->saddr; - - rt = ip_route_output_key(dev_net(dev), &fl4); - if (IS_ERR(rt)) { - netdev_dbg(dev, "no route to %pI4\n", &dst); - dev->stats.tx_carrier_errors++; - goto tx_error; - } + if (dst->sa.sa_family == AF_INET) { + memset(&fl4, 0, sizeof(fl4)); + fl4.flowi4_oif = rdst->remote_ifindex; + fl4.flowi4_tos = RT_TOS(tos); + fl4.daddr = dst->sin.sin_addr.s_addr; + fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr; + + rt = ip_route_output_key(dev_net(dev), &fl4); + if (IS_ERR(rt)) { + netdev_dbg(dev, "no route to %pI4\n", + &dst->sin.sin_addr.s_addr); + dev->stats.tx_carrier_errors++; + goto tx_error; + } - if (rt->dst.dev == dev) { - netdev_dbg(dev, "circular route to %pI4\n", &dst); - dev->stats.collisions++; - goto rt_tx_error; - } + if (rt->dst.dev == dev) { + netdev_dbg(dev, "circular route to %pI4\n", + &dst->sin.sin_addr.s_addr); + dev->stats.collisions++; + goto tx_error; + } + + /* Bypass encapsulation if the destination is local */ + if (rt->rt_flags & RTCF_LOCAL && + !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { + struct vxlan_dev *dst_vxlan; + + ip_rt_put(rt); + dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port); + if (!dst_vxlan) + goto tx_error; + vxlan_encap_bypass(skb, vxlan, dst_vxlan); + return; + } - /* Bypass encapsulation if the destination is local */ - if (rt->rt_flags & RTCF_LOCAL && - !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { - struct vxlan_dev *dst_vxlan; + tos = ip_tunnel_ecn_encap(tos, old_iph, skb); + ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); - ip_rt_put(rt); - dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port); - if (!dst_vxlan) + err = vxlan_xmit_skb(dev_net(dev), vxlan->vn_sock, rt, skb, + fl4.saddr, dst->sin.sin_addr.s_addr, + tos, ttl, df, src_port, dst_port, + htonl(vni << 8)); + + if (err < 0) + goto rt_tx_error; + iptunnel_xmit_stats(err, &dev->stats, dev->tstats); +#if IS_ENABLED(CONFIG_IPV6) + } else { + struct sock *sk = vxlan->vn_sock->sock->sk; + struct dst_entry *ndst; + struct flowi6 fl6; + u32 flags; + + memset(&fl6, 0, sizeof(fl6)); + fl6.flowi6_oif = rdst->remote_ifindex; + fl6.daddr = dst->sin6.sin6_addr; + fl6.saddr = vxlan->saddr.sin6.sin6_addr; + fl6.flowi6_proto = skb->protocol; + + if (ipv6_stub->ipv6_dst_lookup(sk, &ndst, &fl6)) { + netdev_dbg(dev, "no route to %pI6\n", + &dst->sin6.sin6_addr); + dev->stats.tx_carrier_errors++; goto tx_error; - vxlan_encap_bypass(skb, vxlan, dst_vxlan); - return; - } + } - tos = ip_tunnel_ecn_encap(tos, old_iph, skb); - ttl = ttl ? 
: ip4_dst_hoplimit(&rt->dst); + if (ndst->dev == dev) { + netdev_dbg(dev, "circular route to %pI6\n", + &dst->sin6.sin6_addr); + dst_release(ndst); + dev->stats.collisions++; + goto tx_error; + } - err = vxlan_xmit_skb(dev_net(dev), vxlan->vn_sock, rt, skb, - fl4.saddr, dst, tos, ttl, df, - src_port, dst_port, htonl(vni << 8)); + /* Bypass encapsulation if the destination is local */ + flags = ((struct rt6_info *)ndst)->rt6i_flags; + if (flags & RTF_LOCAL && + !(flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { + struct vxlan_dev *dst_vxlan; + + dst_release(ndst); + dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port); + if (!dst_vxlan) + goto tx_error; + vxlan_encap_bypass(skb, vxlan, dst_vxlan); + return; + } - if (err < 0) - goto rt_tx_error; - iptunnel_xmit_stats(err, &dev->stats, dev->tstats); + ttl = ttl ? : ip6_dst_hoplimit(ndst); + + err = vxlan6_xmit_skb(dev_net(dev), vxlan->vn_sock, ndst, skb, + dev, &fl6.saddr, &fl6.daddr, 0, ttl, + src_port, dst_port, htonl(vni << 8)); +#endif + } return; @@ -1464,8 +1796,8 @@ static int vxlan_open(struct net_device *dev) if (!vs) return -ENOTCONN; - if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) && - vxlan_group_used(vn, vxlan->default_dst.remote_ip)) { + if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) && + vxlan_group_used(vn, &vxlan->default_dst.remote_ip)) { vxlan_sock_hold(vs); dev_hold(dev); queue_work(vxlan_wq, &vxlan->igmp_join); @@ -1503,8 +1835,8 @@ static int vxlan_stop(struct net_device *dev) struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_sock *vs = vxlan->vn_sock; - if (vs && IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) && - ! vxlan_group_used(vn, vxlan->default_dst.remote_ip)) { + if (vs && vxlan_addr_multicast(&vxlan->default_dst.remote_ip) && + ! vxlan_group_used(vn, &vxlan->default_dst.remote_ip)) { vxlan_sock_hold(vs); dev_hold(dev); queue_work(vxlan_wq, &vxlan->igmp_leave); @@ -1552,7 +1884,10 @@ static void vxlan_setup(struct net_device *dev) eth_hw_addr_random(dev); ether_setup(dev); - dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM; + if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6) + dev->hard_header_len = ETH_HLEN + VXLAN6_HEADROOM; + else + dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM; dev->netdev_ops = &vxlan_netdev_ops; dev->destructor = free_netdev; @@ -1597,8 +1932,10 @@ static void vxlan_setup(struct net_device *dev) static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = { [IFLA_VXLAN_ID] = { .type = NLA_U32 }, [IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) }, + [IFLA_VXLAN_GROUP6] = { .len = sizeof(struct in6_addr) }, [IFLA_VXLAN_LINK] = { .type = NLA_U32 }, [IFLA_VXLAN_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) }, + [IFLA_VXLAN_LOCAL6] = { .len = sizeof(struct in6_addr) }, [IFLA_VXLAN_TOS] = { .type = NLA_U8 }, [IFLA_VXLAN_TTL] = { .type = NLA_U8 }, [IFLA_VXLAN_LEARNING] = { .type = NLA_U8 }, @@ -1669,58 +2006,132 @@ static void vxlan_del_work(struct work_struct *work) kfree_rcu(vs, rcu); } -static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port, - vxlan_rcv_t *rcv, void *data) +#if IS_ENABLED(CONFIG_IPV6) +/* Create UDP socket for encapsulation receive. AF_INET6 socket + * could be used for both IPv4 and IPv6 communications, but + * users may set bindv6only=1. 
+ */ +static int create_v6_sock(struct net *net, __be16 port, struct socket **psock) +{ + struct sock *sk; + struct socket *sock; + struct sockaddr_in6 vxlan_addr = { + .sin6_family = AF_INET6, + .sin6_port = port, + }; + int rc, val = 1; + + rc = sock_create_kern(AF_INET6, SOCK_DGRAM, IPPROTO_UDP, &sock); + if (rc < 0) { + pr_debug("UDPv6 socket create failed\n"); + return rc; + } + + /* Put in proper namespace */ + sk = sock->sk; + sk_change_net(sk, net); + + kernel_setsockopt(sock, SOL_IPV6, IPV6_V6ONLY, + (char *)&val, sizeof(val)); + rc = kernel_bind(sock, (struct sockaddr *)&vxlan_addr, + sizeof(struct sockaddr_in6)); + if (rc < 0) { + pr_debug("bind for UDPv6 socket %pI6:%u (%d)\n", + &vxlan_addr.sin6_addr, ntohs(vxlan_addr.sin6_port), rc); + sk_release_kernel(sk); + return rc; + } + /* At this point, IPv6 module should have been loaded in + * sock_create_kern(). + */ + BUG_ON(!ipv6_stub); + + *psock = sock; + /* Disable multicast loopback */ + inet_sk(sk)->mc_loop = 0; + return 0; +} + +#else + +static int create_v6_sock(struct net *net, __be16 port, struct socket **psock) +{ + return -EPFNOSUPPORT; +} +#endif + +static int create_v4_sock(struct net *net, __be16 port, struct socket **psock) { - struct vxlan_net *vn = net_generic(net, vxlan_net_id); - struct vxlan_sock *vs; struct sock *sk; + struct socket *sock; struct sockaddr_in vxlan_addr = { .sin_family = AF_INET, .sin_addr.s_addr = htonl(INADDR_ANY), .sin_port = port, }; int rc; - unsigned int h; - - vs = kmalloc(sizeof(*vs), GFP_KERNEL); - if (!vs) { - pr_debug("memory alocation failure\n"); - return ERR_PTR(-ENOMEM); - } - - for (h = 0; h < VNI_HASH_SIZE; ++h) - INIT_HLIST_HEAD(&vs->vni_list[h]); - - INIT_WORK(&vs->del_work, vxlan_del_work); /* Create UDP socket for encapsulation receive. 
*/ - rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vs->sock); + rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock); if (rc < 0) { pr_debug("UDP socket create failed\n"); - kfree(vs); - return ERR_PTR(rc); + return rc; } /* Put in proper namespace */ - sk = vs->sock->sk; + sk = sock->sk; sk_change_net(sk, net); - rc = kernel_bind(vs->sock, (struct sockaddr *) &vxlan_addr, + rc = kernel_bind(sock, (struct sockaddr *) &vxlan_addr, sizeof(vxlan_addr)); if (rc < 0) { pr_debug("bind for UDP socket %pI4:%u (%d)\n", &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc); sk_release_kernel(sk); + return rc; + } + + *psock = sock; + /* Disable multicast loopback */ + inet_sk(sk)->mc_loop = 0; + return 0; +} + +/* Create new listen socket if needed */ +static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port, + vxlan_rcv_t *rcv, void *data, bool ipv6) +{ + struct vxlan_net *vn = net_generic(net, vxlan_net_id); + struct vxlan_sock *vs; + struct socket *sock; + struct sock *sk; + int rc = 0; + unsigned int h; + + vs = kmalloc(sizeof(*vs), GFP_KERNEL); + if (!vs) + return ERR_PTR(-ENOMEM); + + for (h = 0; h < VNI_HASH_SIZE; ++h) + INIT_HLIST_HEAD(&vs->vni_list[h]); + + INIT_WORK(&vs->del_work, vxlan_del_work); + + if (ipv6) + rc = create_v6_sock(net, port, &sock); + else + rc = create_v4_sock(net, port, &sock); + if (rc < 0) { kfree(vs); return ERR_PTR(rc); } + + vs->sock = sock; + sk = sock->sk; atomic_set(&vs->refcnt, 1); vs->rcv = rcv; vs->data = data; - /* Disable multicast loopback */ - inet_sk(sk)->mc_loop = 0; spin_lock(&vn->sock_lock); hlist_add_head_rcu(&vs->hlist, vs_head(net, port)); spin_unlock(&vn->sock_lock); @@ -1728,18 +2139,24 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port, /* Mark socket as an encapsulation socket. 
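/* Editorial sketch, not part of the patch: udp_sk(sk)->encap_rcv, installed
 * just below, is called by the UDP receive path for every datagram on this
 * socket.  A hypothetical handler follows the same convention as
 * vxlan_udp_encap_recv -- return 0 once ownership of the skb has been taken
 * (or the skb freed), return 1 to hand a non-VXLAN packet back to normal
 * UDP processing.  VXLAN_HLEN is the UDP-plus-VXLAN header length defined
 * earlier in this file.
 */
static int example_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, VXLAN_HLEN))
		return 1;		/* not ours, let UDP see it */

	/* ... validate the VXLAN header and hand off to the overlay ... */
	consume_skb(skb);
	return 0;			/* consumed */
}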
*/ udp_sk(sk)->encap_type = 1; udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv; - udp_encap_enable(); +#if IS_ENABLED(CONFIG_IPV6) + if (ipv6) + ipv6_stub->udpv6_encap_enable(); + else +#endif + udp_encap_enable(); + return vs; } struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, vxlan_rcv_t *rcv, void *data, - bool no_share) + bool no_share, bool ipv6) { struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_sock *vs; - vs = vxlan_socket_create(net, port, rcv, data); + vs = vxlan_socket_create(net, port, rcv, data, ipv6); if (!IS_ERR(vs)) return vs; @@ -1772,7 +2189,7 @@ static void vxlan_sock_work(struct work_struct *work) __be16 port = vxlan->dst_port; struct vxlan_sock *nvs; - nvs = vxlan_sock_add(net, port, vxlan_rcv, NULL, false); + nvs = vxlan_sock_add(net, port, vxlan_rcv, NULL, false, vxlan->flags & VXLAN_F_IPV6); spin_lock(&vn->sock_lock); if (!IS_ERR(nvs)) vxlan_vs_add_dev(nvs, vxlan); @@ -1789,6 +2206,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, struct vxlan_rdst *dst = &vxlan->default_dst; __u32 vni; int err; + bool use_ipv6 = false; if (!data[IFLA_VXLAN_ID]) return -EINVAL; @@ -1796,11 +2214,32 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, vni = nla_get_u32(data[IFLA_VXLAN_ID]); dst->remote_vni = vni; - if (data[IFLA_VXLAN_GROUP]) - dst->remote_ip = nla_get_be32(data[IFLA_VXLAN_GROUP]); + if (data[IFLA_VXLAN_GROUP]) { + dst->remote_ip.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_GROUP]); + dst->remote_ip.sa.sa_family = AF_INET; + } else if (data[IFLA_VXLAN_GROUP6]) { + if (!IS_ENABLED(CONFIG_IPV6)) + return -EPFNOSUPPORT; + + nla_memcpy(&dst->remote_ip.sin6.sin6_addr, data[IFLA_VXLAN_GROUP6], + sizeof(struct in6_addr)); + dst->remote_ip.sa.sa_family = AF_INET6; + use_ipv6 = true; + } - if (data[IFLA_VXLAN_LOCAL]) - vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]); + if (data[IFLA_VXLAN_LOCAL]) { + vxlan->saddr.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_LOCAL]); + vxlan->saddr.sa.sa_family = AF_INET; + } else if (data[IFLA_VXLAN_LOCAL6]) { + if (!IS_ENABLED(CONFIG_IPV6)) + return -EPFNOSUPPORT; + + /* TODO: respect scope id */ + nla_memcpy(&vxlan->saddr.sin6.sin6_addr, data[IFLA_VXLAN_LOCAL6], + sizeof(struct in6_addr)); + vxlan->saddr.sa.sa_family = AF_INET6; + use_ipv6 = true; + } if (data[IFLA_VXLAN_LINK] && (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) { @@ -1812,12 +2251,23 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, return -ENODEV; } +#if IS_ENABLED(CONFIG_IPV6) + if (use_ipv6) { + struct inet6_dev *idev = __in6_dev_get(lowerdev); + if (idev && idev->cnf.disable_ipv6) { + pr_info("IPv6 is disabled via sysctl\n"); + return -EPERM; + } + vxlan->flags |= VXLAN_F_IPV6; + } +#endif + if (!tb[IFLA_MTU]) - dev->mtu = lowerdev->mtu - VXLAN_HEADROOM; + dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); /* update header length based on lower device */ dev->hard_header_len = lowerdev->hard_header_len + - VXLAN_HEADROOM; + (use_ipv6 ? 
VXLAN6_HEADROOM : VXLAN_HEADROOM); } if (data[IFLA_VXLAN_TOS]) @@ -1868,7 +2318,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, /* create an fdb entry for default destination */ err = vxlan_fdb_create(vxlan, all_zeros_mac, - vxlan->default_dst.remote_ip, + &vxlan->default_dst.remote_ip, NUD_REACHABLE|NUD_PERMANENT, NLM_F_EXCL|NLM_F_CREATE, vxlan->dst_port, vxlan->default_dst.remote_vni, @@ -1905,9 +2355,9 @@ static size_t vxlan_get_size(const struct net_device *dev) { return nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_ID */ - nla_total_size(sizeof(__be32)) +/* IFLA_VXLAN_GROUP */ + nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */ nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */ - nla_total_size(sizeof(__be32))+ /* IFLA_VXLAN_LOCAL */ + nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */ @@ -1934,14 +2384,36 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni)) goto nla_put_failure; - if (dst->remote_ip && nla_put_be32(skb, IFLA_VXLAN_GROUP, dst->remote_ip)) - goto nla_put_failure; + if (!vxlan_addr_any(&dst->remote_ip)) { + if (dst->remote_ip.sa.sa_family == AF_INET) { + if (nla_put_be32(skb, IFLA_VXLAN_GROUP, + dst->remote_ip.sin.sin_addr.s_addr)) + goto nla_put_failure; +#if IS_ENABLED(CONFIG_IPV6) + } else { + if (nla_put(skb, IFLA_VXLAN_GROUP6, sizeof(struct in6_addr), + &dst->remote_ip.sin6.sin6_addr)) + goto nla_put_failure; +#endif + } + } if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex)) goto nla_put_failure; - if (vxlan->saddr && nla_put_be32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr)) - goto nla_put_failure; + if (!vxlan_addr_any(&vxlan->saddr)) { + if (vxlan->saddr.sa.sa_family == AF_INET) { + if (nla_put_be32(skb, IFLA_VXLAN_LOCAL, + vxlan->saddr.sin.sin_addr.s_addr)) + goto nla_put_failure; +#if IS_ENABLED(CONFIG_IPV6) + } else { + if (nla_put(skb, IFLA_VXLAN_LOCAL6, sizeof(struct in6_addr), + &vxlan->saddr.sin6.sin6_addr)) + goto nla_put_failure; +#endif + } + } if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) || nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) || diff --git a/include/net/vxlan.h b/include/net/vxlan.h index ad342e3..d2b88ca 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -25,7 +25,7 @@ struct vxlan_sock { struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, vxlan_rcv_t *rcv, void *data, - bool no_share); + bool no_share, bool ipv6); void vxlan_sock_release(struct vxlan_sock *vs); diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 04c0e7a..80394e8 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -314,6 +314,8 @@ enum { IFLA_VXLAN_L2MISS, IFLA_VXLAN_L3MISS, IFLA_VXLAN_PORT, /* destination port */ + IFLA_VXLAN_GROUP6, + IFLA_VXLAN_LOCAL6, __IFLA_VXLAN_MAX }; #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1) diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c index 36848bd..a006024 100644 --- a/net/openvswitch/vport-vxlan.c +++ b/net/openvswitch/vport-vxlan.c @@ -123,7 +123,7 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms) vxlan_port = vxlan_vport(vport); strncpy(vxlan_port->name, parms->name, IFNAMSIZ); - vs = vxlan_sock_add(net, htons(dst_port), vxlan_rcv, vport, true); + vs = vxlan_sock_add(net, htons(dst_port), 
vxlan_rcv, vport, true, false); if (IS_ERR(vs)) { ovs_vport_free(vport); return (void *)vs; -- cgit v0.10.2 From e15a00aafa4b7953ad717d3cb1ad7acf4ff76945 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Sat, 31 Aug 2013 13:44:34 +0800 Subject: vxlan: add ipv6 route short circuit support route short circuit only has IPv4 part, this patch adds the IPv6 part. nd_tbl will be needed. Cc: David S. Miller Cc: David Stevens Signed-off-by: Cong Wang Signed-off-by: David S. Miller diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index faf131e..c833763 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1200,7 +1200,6 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb) { struct vxlan_dev *vxlan = netdev_priv(dev); struct neighbour *n; - struct iphdr *pip; if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) return false; @@ -1208,6 +1207,9 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb) n = NULL; switch (ntohs(eth_hdr(skb)->h_proto)) { case ETH_P_IP: + { + struct iphdr *pip; + if (!pskb_may_pull(skb, sizeof(struct iphdr))) return false; pip = ip_hdr(skb); @@ -1223,6 +1225,29 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb) } break; + } +#if IS_ENABLED(CONFIG_IPV6) + case ETH_P_IPV6: + { + struct ipv6hdr *pip6; + + if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) + return false; + pip6 = ipv6_hdr(skb); + n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev); + if (!n && (vxlan->flags & VXLAN_F_L3MISS)) { + union vxlan_addr ipa = { + .sin6.sin6_addr = pip6->daddr, + .sa.sa_family = AF_INET6, + }; + + vxlan_ip_miss(dev, &ipa); + return false; + } + + break; + } +#endif default: return false; } @@ -1659,7 +1684,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) did_rsc = false; if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) && - ntohs(eth->h_proto) == ETH_P_IP) { + (ntohs(eth->h_proto) == ETH_P_IP || + ntohs(eth->h_proto) == ETH_P_IPV6)) { did_rsc = route_shortcircuit(dev, skb); if (did_rsc) f = vxlan_find_mac(vxlan, eth->h_dest); diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 5339cab..bcf9573 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -153,6 +153,7 @@ struct ipv6_stub { int (*ipv6_dst_lookup)(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6); void (*udpv6_encap_enable)(void); + struct neigh_table *nd_tbl; }; extern const struct ipv6_stub *ipv6_stub __read_mostly; diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 0c9c22f..1996a7c 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -815,6 +815,7 @@ static const struct ipv6_stub ipv6_stub_impl = { .ipv6_sock_mc_drop = ipv6_sock_mc_drop, .ipv6_dst_lookup = ip6_dst_lookup, .udpv6_encap_enable = udpv6_encap_enable, + .nd_tbl = &nd_tbl, }; static int __init inet6_init(void) -- cgit v0.10.2 From f39dc1023d6b9933528638a0c2dd618b4fdf664e Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Sat, 31 Aug 2013 13:44:35 +0800 Subject: ipv6: move in6_dev_finish_destroy() into core kernel in6_dev_put() will be needed by vxlan module, so is in6_dev_finish_destroy(). Cc: David S. Miller Signed-off-by: Cong Wang Signed-off-by: David S. 
Miller diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index baaaead..2a66eaa 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -304,36 +304,6 @@ err_ip: return -ENOMEM; } -static void snmp6_free_dev(struct inet6_dev *idev) -{ - kfree(idev->stats.icmpv6msgdev); - kfree(idev->stats.icmpv6dev); - snmp_mib_free((void __percpu **)idev->stats.ipv6); -} - -/* Nobody refers to this device, we may destroy it. */ - -void in6_dev_finish_destroy(struct inet6_dev *idev) -{ - struct net_device *dev = idev->dev; - - WARN_ON(!list_empty(&idev->addr_list)); - WARN_ON(idev->mc_list != NULL); - WARN_ON(timer_pending(&idev->rs_timer)); - -#ifdef NET_REFCNT_DEBUG - pr_debug("%s: %s\n", __func__, dev ? dev->name : "NIL"); -#endif - dev_put(dev); - if (!idev->dead) { - pr_warn("Freeing alive inet6 device %p\n", idev); - return; - } - snmp6_free_dev(idev); - kfree_rcu(idev, rcu); -} -EXPORT_SYMBOL(in6_dev_finish_destroy); - static struct inet6_dev *ipv6_add_dev(struct net_device *dev) { struct inet6_dev *ndev; diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c index a864033..4c11cbc 100644 --- a/net/ipv6/addrconf_core.c +++ b/net/ipv6/addrconf_core.c @@ -6,6 +6,7 @@ #include #include #include +#include #define IPV6_ADDR_SCOPE_TYPE(scope) ((scope) << 16) @@ -117,3 +118,33 @@ const struct in6_addr in6addr_interfacelocal_allrouters = IN6ADDR_INTERFACELOCAL EXPORT_SYMBOL(in6addr_interfacelocal_allrouters); const struct in6_addr in6addr_sitelocal_allrouters = IN6ADDR_SITELOCAL_ALLROUTERS_INIT; EXPORT_SYMBOL(in6addr_sitelocal_allrouters); + +static void snmp6_free_dev(struct inet6_dev *idev) +{ + kfree(idev->stats.icmpv6msgdev); + kfree(idev->stats.icmpv6dev); + snmp_mib_free((void __percpu **)idev->stats.ipv6); +} + +/* Nobody refers to this device, we may destroy it. */ + +void in6_dev_finish_destroy(struct inet6_dev *idev) +{ + struct net_device *dev = idev->dev; + + WARN_ON(!list_empty(&idev->addr_list)); + WARN_ON(idev->mc_list != NULL); + WARN_ON(timer_pending(&idev->rs_timer)); + +#ifdef NET_REFCNT_DEBUG + pr_debug("%s: %s\n", __func__, dev ? dev->name : "NIL"); +#endif + dev_put(dev); + if (!idev->dead) { + pr_warn("Freeing alive inet6 device %p\n", idev); + return; + } + snmp6_free_dev(idev); + kfree_rcu(idev, rcu); +} +EXPORT_SYMBOL(in6_dev_finish_destroy); -- cgit v0.10.2 From f564f45c451809aa3b74f577754528520d315ac1 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Sat, 31 Aug 2013 13:44:36 +0800 Subject: vxlan: add ipv6 proxy support This patch adds the IPv6 version of "arp_reduce", ndisc_send_na() will be needed. Cc: David S. Miller Cc: David Stevens Signed-off-by: Cong Wang Signed-off-by: David S. 
Miller diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index c833763..3ffb22d 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1196,6 +1196,70 @@ out: return NETDEV_TX_OK; } +#if IS_ENABLED(CONFIG_IPV6) +static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) +{ + struct vxlan_dev *vxlan = netdev_priv(dev); + struct neighbour *n; + union vxlan_addr ipa; + const struct ipv6hdr *iphdr; + const struct in6_addr *saddr, *daddr; + struct nd_msg *msg; + struct inet6_dev *in6_dev = NULL; + + in6_dev = __in6_dev_get(dev); + if (!in6_dev) + goto out; + + if (!pskb_may_pull(skb, skb->len)) + goto out; + + iphdr = ipv6_hdr(skb); + saddr = &iphdr->saddr; + daddr = &iphdr->daddr; + + if (ipv6_addr_loopback(daddr) || + ipv6_addr_is_multicast(daddr)) + goto out; + + msg = (struct nd_msg *)skb_transport_header(skb); + if (msg->icmph.icmp6_code != 0 || + msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION) + goto out; + + n = neigh_lookup(ipv6_stub->nd_tbl, daddr, dev); + + if (n) { + struct vxlan_fdb *f; + + if (!(n->nud_state & NUD_CONNECTED)) { + neigh_release(n); + goto out; + } + + f = vxlan_find_mac(vxlan, n->ha); + if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) { + /* bridge-local neighbor */ + neigh_release(n); + goto out; + } + + ipv6_stub->ndisc_send_na(dev, n, saddr, &msg->target, + !!in6_dev->cnf.forwarding, + true, false, false); + neigh_release(n); + } else if (vxlan->flags & VXLAN_F_L3MISS) { + ipa.sin6.sin6_addr = *daddr; + ipa.sa.sa_family = AF_INET6; + vxlan_ip_miss(dev, &ipa); + } + +out: + consume_skb(skb); + return NETDEV_TX_OK; +} +#endif + static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb) { struct vxlan_dev *vxlan = netdev_priv(dev); @@ -1677,8 +1741,22 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) skb_reset_mac_header(skb); eth = eth_hdr(skb); - if ((vxlan->flags & VXLAN_F_PROXY) && ntohs(eth->h_proto) == ETH_P_ARP) - return arp_reduce(dev, skb); + if ((vxlan->flags & VXLAN_F_PROXY)) { + if (ntohs(eth->h_proto) == ETH_P_ARP) + return arp_reduce(dev, skb); +#if IS_ENABLED(CONFIG_IPV6) + else if (ntohs(eth->h_proto) == ETH_P_IPV6 && + skb->len >= sizeof(struct ipv6hdr) + sizeof(struct nd_msg) && + ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) { + struct nd_msg *msg; + + msg = (struct nd_msg *)skb_transport_header(skb); + if (msg->icmph.icmp6_code == 0 && + msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) + return neigh_reduce(dev, skb); + } +#endif + } f = vxlan_find_mac(vxlan, eth->h_dest); did_rsc = false; diff --git a/include/net/addrconf.h b/include/net/addrconf.h index bcf9573..fb314de 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -153,6 +153,10 @@ struct ipv6_stub { int (*ipv6_dst_lookup)(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6); void (*udpv6_encap_enable)(void); + void (*ndisc_send_na)(struct net_device *dev, struct neighbour *neigh, + const struct in6_addr *daddr, + const struct in6_addr *solicited_addr, + bool router, bool solicited, bool override, bool inc_opt); struct neigh_table *nd_tbl; }; extern const struct ipv6_stub *ipv6_stub __read_mostly; diff --git a/include/net/ndisc.h b/include/net/ndisc.h index 6fea323..3c4211f 100644 --- a/include/net/ndisc.h +++ b/include/net/ndisc.h @@ -204,6 +204,11 @@ extern void ndisc_send_ns(struct net_device *dev, extern void ndisc_send_rs(struct net_device *dev, const struct in6_addr *saddr, const struct in6_addr *daddr); +extern void ndisc_send_na(struct net_device *dev, struct neighbour 
*neigh, + const struct in6_addr *daddr, + const struct in6_addr *solicited_addr, + bool router, bool solicited, bool override, + bool inc_opt); extern void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target); diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 1996a7c..136fe55 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -56,6 +56,7 @@ #include #include #include +#include #ifdef CONFIG_IPV6_TUNNEL #include #endif @@ -815,6 +816,7 @@ static const struct ipv6_stub ipv6_stub_impl = { .ipv6_sock_mc_drop = ipv6_sock_mc_drop, .ipv6_dst_lookup = ip6_dst_lookup, .udpv6_encap_enable = udpv6_encap_enable, + .ndisc_send_na = ndisc_send_na, .nd_tbl = &nd_tbl, }; diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index bb6fd95..14bd2f9 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -461,10 +461,10 @@ static void ndisc_send_skb(struct sk_buff *skb, rcu_read_unlock(); } -static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh, - const struct in6_addr *daddr, - const struct in6_addr *solicited_addr, - bool router, bool solicited, bool override, bool inc_opt) +void ndisc_send_na(struct net_device *dev, struct neighbour *neigh, + const struct in6_addr *daddr, + const struct in6_addr *solicited_addr, + bool router, bool solicited, bool override, bool inc_opt) { struct sk_buff *skb; struct in6_addr tmpaddr; -- cgit v0.10.2 From d949d826c09fb65e230f55868ff70dc581ec06fa Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Sat, 31 Aug 2013 13:44:37 +0800 Subject: ipv6: Add generic UDP Tunnel segmentation Similar to commit 731362674580cb0c696cd1b1a03d8461a10cf90a (tunneling: Add generic Tunnel segmentation) This patch adds generic tunneling offloading support for IPv6-UDP based tunnels. This can be used by tunneling protocols like VXLAN. Cc: Jesse Gross Cc: Pravin B Shelar Cc: Stephen Hemminger Cc: David S. Miller Signed-off-by: Cong Wang Signed-off-by: David S. Miller diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index a263b99..d82de72 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -91,6 +91,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, unsigned int unfrag_ip6hlen; u8 *prevhdr; int offset = 0; + bool tunnel; if (unlikely(skb_shinfo(skb)->gso_type & ~(SKB_GSO_UDP | @@ -106,6 +107,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) goto out; + tunnel = skb->encapsulation; ipv6h = ipv6_hdr(skb); __skb_pull(skb, sizeof(*ipv6h)); segs = ERR_PTR(-EPROTONOSUPPORT); @@ -126,7 +128,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, ipv6h = ipv6_hdr(skb); ipv6h->payload_len = htons(skb->len - skb->mac_len - sizeof(*ipv6h)); - if (proto == IPPROTO_UDP) { + if (!tunnel && proto == IPPROTO_UDP) { unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index 5d1b8d7..7e5e5ac 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c @@ -21,26 +21,79 @@ static int udp6_ufo_send_check(struct sk_buff *skb) const struct ipv6hdr *ipv6h; struct udphdr *uh; - /* UDP Tunnel offload on ipv6 is not yet supported. 
*/ - if (skb->encapsulation) - return -EINVAL; - if (!pskb_may_pull(skb, sizeof(*uh))) return -EINVAL; - ipv6h = ipv6_hdr(skb); - uh = udp_hdr(skb); + if (likely(!skb->encapsulation)) { + ipv6h = ipv6_hdr(skb); + uh = udp_hdr(skb); + + uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len, + IPPROTO_UDP, 0); + skb->csum_start = skb_transport_header(skb) - skb->head; + skb->csum_offset = offsetof(struct udphdr, check); + skb->ip_summed = CHECKSUM_PARTIAL; + } - uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len, - IPPROTO_UDP, 0); - skb->csum_start = skb_transport_header(skb) - skb->head; - skb->csum_offset = offsetof(struct udphdr, check); - skb->ip_summed = CHECKSUM_PARTIAL; return 0; } +static struct sk_buff *skb_udp6_tunnel_segment(struct sk_buff *skb, + netdev_features_t features) +{ + struct sk_buff *segs = ERR_PTR(-EINVAL); + int mac_len = skb->mac_len; + int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); + int outer_hlen; + netdev_features_t enc_features; + + if (unlikely(!pskb_may_pull(skb, tnl_hlen))) + goto out; + + skb->encapsulation = 0; + __skb_pull(skb, tnl_hlen); + skb_reset_mac_header(skb); + skb_set_network_header(skb, skb_inner_network_offset(skb)); + skb->mac_len = skb_inner_network_offset(skb); + + /* segment inner packet. */ + enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); + segs = skb_mac_gso_segment(skb, enc_features); + if (!segs || IS_ERR(segs)) + goto out; + + outer_hlen = skb_tnl_header_len(skb); + skb = segs; + do { + struct udphdr *uh; + struct ipv6hdr *ipv6h; + int udp_offset = outer_hlen - tnl_hlen; + u32 len; + + skb->mac_len = mac_len; + + skb_push(skb, outer_hlen); + skb_reset_mac_header(skb); + skb_set_network_header(skb, mac_len); + skb_set_transport_header(skb, udp_offset); + uh = udp_hdr(skb); + uh->len = htons(skb->len - udp_offset); + ipv6h = ipv6_hdr(skb); + len = skb->len - udp_offset; + + uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, + len, IPPROTO_UDP, 0); + uh->check = csum_fold(skb_checksum(skb, udp_offset, len, 0)); + if (uh->check == 0) + uh->check = CSUM_MANGLED_0; + skb->ip_summed = CHECKSUM_NONE; + } while ((skb = skb->next)); +out: + return segs; +} + static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, - netdev_features_t features) + netdev_features_t features) { struct sk_buff *segs = ERR_PTR(-EINVAL); unsigned int mss; @@ -75,47 +128,51 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, goto out; } - /* Do software UFO. Complete and fill in the UDP checksum as HW cannot - * do checksum of UDP packets sent as multiple IP fragments. - */ - offset = skb_checksum_start_offset(skb); - csum = skb_checksum(skb, offset, skb->len - offset, 0); - offset += skb->csum_offset; - *(__sum16 *)(skb->data + offset) = csum_fold(csum); - skb->ip_summed = CHECKSUM_NONE; - - /* Check if there is enough headroom to insert fragment header. */ - tnl_hlen = skb_tnl_header_len(skb); - if (skb_headroom(skb) < (tnl_hlen + frag_hdr_sz)) { - if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz)) - goto out; + if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) + segs = skb_udp6_tunnel_segment(skb, features); + else { + /* Do software UFO. Complete and fill in the UDP checksum as HW cannot + * do checksum of UDP packets sent as multiple IP fragments. 
+ */ + offset = skb_checksum_start_offset(skb); + csum = skb_checksum(skb, offset, skb->len - offset, 0); + offset += skb->csum_offset; + *(__sum16 *)(skb->data + offset) = csum_fold(csum); + skb->ip_summed = CHECKSUM_NONE; + + /* Check if there is enough headroom to insert fragment header. */ + tnl_hlen = skb_tnl_header_len(skb); + if (skb_headroom(skb) < (tnl_hlen + frag_hdr_sz)) { + if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz)) + goto out; + } + + /* Find the unfragmentable header and shift it left by frag_hdr_sz + * bytes to insert fragment header. + */ + unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); + nexthdr = *prevhdr; + *prevhdr = NEXTHDR_FRAGMENT; + unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) + + unfrag_ip6hlen + tnl_hlen; + packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset; + memmove(packet_start-frag_hdr_sz, packet_start, unfrag_len); + + SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz; + skb->mac_header -= frag_hdr_sz; + skb->network_header -= frag_hdr_sz; + + fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); + fptr->nexthdr = nexthdr; + fptr->reserved = 0; + ipv6_select_ident(fptr, (struct rt6_info *)skb_dst(skb)); + + /* Fragment the skb. ipv6 header and the remaining fields of the + * fragment header are updated in ipv6_gso_segment() + */ + segs = skb_segment(skb, features); } - /* Find the unfragmentable header and shift it left by frag_hdr_sz - * bytes to insert fragment header. - */ - unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); - nexthdr = *prevhdr; - *prevhdr = NEXTHDR_FRAGMENT; - unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) + - unfrag_ip6hlen + tnl_hlen; - packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset; - memmove(packet_start-frag_hdr_sz, packet_start, unfrag_len); - - SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz; - skb->mac_header -= frag_hdr_sz; - skb->network_header -= frag_hdr_sz; - - fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); - fptr->nexthdr = nexthdr; - fptr->reserved = 0; - ipv6_select_ident(fptr, (struct rt6_info *)skb_dst(skb)); - - /* Fragment the skb. ipv6 header and the remaining fields of the - * fragment header are updated in ipv6_gso_segment() - */ - segs = skb_segment(skb, features); - out: return segs; } -- cgit v0.10.2 From eb3c0d83cc78361a28e52e514a7095fdbf771e7e Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Sat, 31 Aug 2013 13:44:38 +0800 Subject: net: unify skb_udp_tunnel_segment() and skb_udp6_tunnel_segment() As suggested by Pravin, we can unify the code in case of duplicated code. Cc: Pravin Shelar Signed-off-by: Cong Wang Signed-off-by: David S. Miller diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 0b24508..74d2c95 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2337,7 +2337,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, uh->len = htons(skb->len - udp_offset); /* csum segment if tunnel sets skb with csum. 
*/ - if (unlikely(uh->check)) { + if (protocol == htons(ETH_P_IP) && unlikely(uh->check)) { struct iphdr *iph = ip_hdr(skb); uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, @@ -2348,7 +2348,18 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, if (uh->check == 0) uh->check = CSUM_MANGLED_0; + } else if (protocol == htons(ETH_P_IPV6)) { + struct ipv6hdr *ipv6h = ipv6_hdr(skb); + u32 len = skb->len - udp_offset; + + uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, + len, IPPROTO_UDP, 0); + uh->check = csum_fold(skb_checksum(skb, udp_offset, len, 0)); + if (uh->check == 0) + uh->check = CSUM_MANGLED_0; + skb->ip_summed = CHECKSUM_NONE; } + skb->protocol = protocol; } while ((skb = skb->next)); out: diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index 7e5e5ac..6055951 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c @@ -38,60 +38,6 @@ static int udp6_ufo_send_check(struct sk_buff *skb) return 0; } -static struct sk_buff *skb_udp6_tunnel_segment(struct sk_buff *skb, - netdev_features_t features) -{ - struct sk_buff *segs = ERR_PTR(-EINVAL); - int mac_len = skb->mac_len; - int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); - int outer_hlen; - netdev_features_t enc_features; - - if (unlikely(!pskb_may_pull(skb, tnl_hlen))) - goto out; - - skb->encapsulation = 0; - __skb_pull(skb, tnl_hlen); - skb_reset_mac_header(skb); - skb_set_network_header(skb, skb_inner_network_offset(skb)); - skb->mac_len = skb_inner_network_offset(skb); - - /* segment inner packet. */ - enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); - segs = skb_mac_gso_segment(skb, enc_features); - if (!segs || IS_ERR(segs)) - goto out; - - outer_hlen = skb_tnl_header_len(skb); - skb = segs; - do { - struct udphdr *uh; - struct ipv6hdr *ipv6h; - int udp_offset = outer_hlen - tnl_hlen; - u32 len; - - skb->mac_len = mac_len; - - skb_push(skb, outer_hlen); - skb_reset_mac_header(skb); - skb_set_network_header(skb, mac_len); - skb_set_transport_header(skb, udp_offset); - uh = udp_hdr(skb); - uh->len = htons(skb->len - udp_offset); - ipv6h = ipv6_hdr(skb); - len = skb->len - udp_offset; - - uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, - len, IPPROTO_UDP, 0); - uh->check = csum_fold(skb_checksum(skb, udp_offset, len, 0)); - if (uh->check == 0) - uh->check = CSUM_MANGLED_0; - skb->ip_summed = CHECKSUM_NONE; - } while ((skb = skb->next)); -out: - return segs; -} - static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, netdev_features_t features) { @@ -129,7 +75,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, } if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) - segs = skb_udp6_tunnel_segment(skb, features); + segs = skb_udp_tunnel_segment(skb, features); else { /* Do software UFO. Complete and fill in the UDP checksum as HW cannot * do checksum of UDP packets sent as multiple IP fragments. -- cgit v0.10.2 From 991ca269ed71b2b7aee27f5cb8bf64b689cc20bf Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Fri, 30 Aug 2013 13:51:16 -0400 Subject: qlcnic: Enhance PVID handling for 84xx adapters o PF driver should not indicate PVID configuration to VF driver. As adapter supports VLAN stripping, VF driver should stay agnostic to any PVID configuration. o Return "QLC_NO_VLAN_MODE(= 0)" to VFD when PVID is configured. VF driver should be in no VLAN configuration mode. Signed-off-by: Manish Chopra Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 22bd425..16df5fe 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -2069,6 +2069,14 @@ static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter) return (device == PCI_DEVICE_ID_QLOGIC_QLE824X) ? true : false; } +static inline bool qlcnic_84xx_check(struct qlcnic_adapter *adapter) +{ + unsigned short device = adapter->pdev->device; + + return ((device == PCI_DEVICE_ID_QLOGIC_QLE844X) || + (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X)) ? true : false; +} + static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter) { unsigned short device = adapter->pdev->device; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 26f9aa6..652cc13 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -398,14 +398,10 @@ int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *adapter, } static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter, - struct qlcnic_cmd_args *cmd, u32 cap) + struct qlcnic_cmd_args *cmd) { - if (cap & QLC_83XX_PVID_STRIP_CAPABILITY) { - adapter->rx_pvid = 0; - } else { - adapter->rx_pvid = (cmd->rsp.arg[1] >> 16) & 0xffff; - adapter->flags &= ~QLCNIC_TAGGING_ENABLED; - } + adapter->rx_pvid = MSW(cmd->rsp.arg[1]) & 0xffff; + adapter->flags &= ~QLCNIC_TAGGING_ENABLED; return 0; } @@ -441,9 +437,8 @@ static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter, { struct qlcnic_sriov *sriov = adapter->ahw->sriov; struct qlcnic_cmd_args cmd; - int ret, cap; + int ret = 0; - cap = info->capabilities; ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL); if (ret) return ret; @@ -459,7 +454,7 @@ static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter, ret = qlcnic_sriov_set_guest_vlan_mode(adapter, &cmd); break; case QLC_PVID_MODE: - ret = qlcnic_sriov_set_pvid_mode(adapter, &cmd, cap); + ret = qlcnic_sriov_set_pvid_mode(adapter, &cmd); break; } } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c index 2d6faf0..95b7710 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c @@ -1183,10 +1183,19 @@ static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans, struct qlcnic_vf_info *vf = trans->vf; struct qlcnic_vport *vp = vf->vp; u8 cmd_op, mode = vp->vlan_mode; + struct qlcnic_adapter *adapter; + + adapter = vf->adapter; cmd_op = trans->req_hdr->cmd_op; cmd->rsp.arg[0] |= 1 << 25; + /* For 84xx adapter in case of PVID , PFD should send vlan mode as + * QLC_NO_VLAN_MODE to VFD which is zero in mailbox response + */ + if (qlcnic_84xx_check(adapter) && mode == QLC_PVID_MODE) + return 0; + switch (mode) { case QLC_GUEST_VLAN_MODE: cmd->rsp.arg[1] = mode | 1 << 8; -- cgit v0.10.2 From 60dcbcb02da4982351732fe2449093a11659ce31 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Fri, 30 Aug 2013 13:51:17 -0400 Subject: qlcnic: Remove inline keyword o Remove inline keyword from function prototypes wherever it is not appropriate. Signed-off-by: Manish Chopra Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 8fce1d3..36223a9 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -723,9 +723,8 @@ void qlcnic_dump_mbx(struct qlcnic_adapter *adapter, pr_info("\n"); } -static inline void -qlcnic_83xx_poll_for_mbx_completion(struct qlcnic_adapter *adapter, - struct qlcnic_cmd_args *cmd) +static void qlcnic_83xx_poll_for_mbx_completion(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) { struct qlcnic_hardware_context *ahw = adapter->ahw; int opcode = LSW(cmd->req.arg[0]); @@ -3544,7 +3543,7 @@ qlcnic_83xx_notify_cmd_completion(struct qlcnic_adapter *adapter, complete(&cmd->completion); } -static inline void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter) +static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter) { struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; struct list_head *head = &mbx->cmd_q; @@ -3564,7 +3563,7 @@ static inline void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter) spin_unlock(&mbx->queue_lock); } -static inline int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter) +static int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_mailbox *mbx = ahw->mailbox; @@ -3592,8 +3591,8 @@ static inline void qlcnic_83xx_signal_mbx_cmd(struct qlcnic_adapter *adapter, QLCWRX(adapter->ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER); } -static inline void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter, - struct qlcnic_cmd_args *cmd) +static void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) { struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; @@ -3653,9 +3652,9 @@ void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *adapter) qlcnic_83xx_flush_mbx_queue(adapter); } -static inline int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter, - struct qlcnic_cmd_args *cmd, - unsigned long *timeout) +static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd, + unsigned long *timeout) { struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; @@ -3680,8 +3679,8 @@ static inline int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter, return -EBUSY; } -static inline int qlcnic_83xx_check_mac_rcode(struct qlcnic_adapter *adapter, - struct qlcnic_cmd_args *cmd) +static int qlcnic_83xx_check_mac_rcode(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) { u8 mac_cmd_rcode; u32 fw_data; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index a969ac2..6fac393 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -1950,8 +1950,8 @@ static void qlcnic_83xx_init_hw(struct qlcnic_adapter *p_dev) dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__); } -static inline void qlcnic_83xx_get_fw_file_name(struct qlcnic_adapter *adapter, - char *file_name) +static void qlcnic_83xx_get_fw_file_name(struct qlcnic_adapter *adapter, + char *file_name) { struct pci_dev *pdev = adapter->pdev; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 7dde3ba..d1ed4e3 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ 
b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -969,8 +969,8 @@ static int qlcnic_setup_pci_map(struct pci_dev *pdev, return 0; } -static inline bool qlcnic_validate_subsystem_id(struct qlcnic_adapter *adapter, - int index) +static bool qlcnic_validate_subsystem_id(struct qlcnic_adapter *adapter, + int index) { struct pci_dev *pdev = adapter->pdev; unsigned short subsystem_vendor; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c index 95b7710..a9bc651 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c @@ -1781,8 +1781,8 @@ int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf, return 0; } -static inline __u32 qlcnic_sriov_get_vf_vlan(struct qlcnic_adapter *adapter, - struct qlcnic_vport *vp, int vf) +static __u32 qlcnic_sriov_get_vf_vlan(struct qlcnic_adapter *adapter, + struct qlcnic_vport *vp, int vf) { __u32 vlan = 0; -- cgit v0.10.2 From 7010bb65ce01278abe6709fe90407183abc6cbef Mon Sep 17 00:00:00 2001 From: Shahed Shaikh Date: Fri, 30 Aug 2013 13:51:18 -0400 Subject: qlcnic: Use firmware recommended dump capture mask as default Signed-off-by: Shahed Shaikh Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 16df5fe..4e74376 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -1403,7 +1403,6 @@ struct qlcnic_esw_statistics { struct __qlcnic_esw_statistics tx; }; -#define QLCNIC_DUMP_MASK_DEF 0x1f #define QLCNIC_FORCE_FW_DUMP_KEY 0xdeadfeed #define QLCNIC_ENABLE_FW_DUMP 0xaddfeed #define QLCNIC_DISABLE_FW_DUMP 0xbadfeed diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c index 79e54ef..b7871fe 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c @@ -1082,7 +1082,10 @@ flash_temp: } tmpl_hdr = ahw->fw_dump.tmpl_hdr; - tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF; + tmpl_hdr->drv_cap_mask = tmpl_hdr->cap_mask; + dev_info(&adapter->pdev->dev, + "Default minidump capture mask 0x%x\n", + tmpl_hdr->cap_mask); if ((tmpl_hdr->version & 0xfffff) >= 0x20001) ahw->fw_dump.use_pex_dma = true; -- cgit v0.10.2 From 890b6e023bd7ff9b5fc89750d9ab2cd414fa302e Mon Sep 17 00:00:00 2001 From: Shahed Shaikh Date: Fri, 30 Aug 2013 13:51:19 -0400 Subject: qlcnic: Store firmware dump state in CAMRAM register -Use CAMRAM register to store firmware dump state in adapter instead of maintaining it in each function driver separately. -Return appropriate error code on failure Signed-off-by: Shahed Shaikh Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 4e74376..f9cd274 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -392,7 +392,7 @@ struct qlcnic_dump_template_hdr { struct qlcnic_fw_dump { u8 clr; /* flag to indicate if dump is cleared */ - u8 enable; /* enable/disable dump */ + bool enable; /* enable/disable dump */ u32 size; /* total size of the dump */ void *data; /* dump data area */ struct qlcnic_dump_template_hdr *tmpl_hdr; @@ -1477,6 +1477,8 @@ int qlcnic_wol_supported(struct qlcnic_adapter *adapter); void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter); void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter); int qlcnic_dump_fw(struct qlcnic_adapter *); +int qlcnic_enable_fw_dump_state(struct qlcnic_adapter *); +bool qlcnic_check_fw_dump_state(struct qlcnic_adapter *); /* Functions from qlcnic_init.c */ void qlcnic_schedule_work(struct qlcnic_adapter *, work_func_t, int); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index 0fc5616..053a3a1 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -297,6 +297,7 @@ struct qlc_83xx_reset { #define QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY 0x1 #define QLC_83XX_IDC_GRACEFULL_RESET 0x2 +#define QLC_83XX_IDC_DISABLE_FW_DUMP 0x4 #define QLC_83XX_IDC_TIMESTAMP 0 #define QLC_83XX_IDC_DURATION 1 #define QLC_83XX_IDC_INIT_TIMEOUT_SECS 30 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 7b0c90e..332aa71 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -1509,6 +1509,68 @@ static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl) adapter->ahw->msg_enable = msglvl; } +int qlcnic_enable_fw_dump_state(struct qlcnic_adapter *adapter) +{ + struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + u32 val; + + if (qlcnic_84xx_check(adapter)) { + if (qlcnic_83xx_lock_driver(adapter)) + return -EBUSY; + + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); + val &= ~QLC_83XX_IDC_DISABLE_FW_DUMP; + QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val); + + qlcnic_83xx_unlock_driver(adapter); + } else { + fw_dump->enable = true; + } + + dev_info(&adapter->pdev->dev, "FW dump enabled\n"); + + return 0; +} + +static int qlcnic_disable_fw_dump_state(struct qlcnic_adapter *adapter) +{ + struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + u32 val; + + if (qlcnic_84xx_check(adapter)) { + if (qlcnic_83xx_lock_driver(adapter)) + return -EBUSY; + + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); + val |= QLC_83XX_IDC_DISABLE_FW_DUMP; + QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val); + + qlcnic_83xx_unlock_driver(adapter); + } else { + fw_dump->enable = false; + } + + dev_info(&adapter->pdev->dev, "FW dump disabled\n"); + + return 0; +} + +bool qlcnic_check_fw_dump_state(struct qlcnic_adapter *adapter) +{ + struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + bool state; + u32 val; + + if (qlcnic_84xx_check(adapter)) { + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); + state = (val & QLC_83XX_IDC_DISABLE_FW_DUMP) ? 
false : true; + } else { + state = fw_dump->enable; + } + + return state; +} + static int qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump) { @@ -1525,7 +1587,7 @@ qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump) else dump->len = 0; - if (!fw_dump->enable) + if (!qlcnic_check_fw_dump_state(adapter)) dump->flag = ETH_FW_DUMP_DISABLE; else dump->flag = fw_dump->tmpl_hdr->drv_cap_mask; @@ -1573,77 +1635,111 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, return 0; } +static int qlcnic_set_dump_mask(struct qlcnic_adapter *adapter, u32 mask) +{ + struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + struct net_device *netdev = adapter->netdev; + + if (!qlcnic_check_fw_dump_state(adapter)) { + netdev_info(netdev, + "Can not change driver mask to 0x%x. FW dump not enabled\n", + mask); + return -EOPNOTSUPP; + } + + fw_dump->tmpl_hdr->drv_cap_mask = mask; + netdev_info(netdev, "Driver mask changed to: 0x%x\n", mask); + return 0; +} + static int qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val) { - int i; struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + bool valid_mask = false; + int i, ret = 0; u32 state; switch (val->flag) { case QLCNIC_FORCE_FW_DUMP_KEY: if (!fw_dump->tmpl_hdr) { netdev_err(netdev, "FW dump not supported\n"); - return -ENOTSUPP; + ret = -EOPNOTSUPP; + break; } - if (!fw_dump->enable) { + + if (!qlcnic_check_fw_dump_state(adapter)) { netdev_info(netdev, "FW dump not enabled\n"); - return 0; + ret = -EOPNOTSUPP; + break; } + if (fw_dump->clr) { netdev_info(netdev, - "Previous dump not cleared, not forcing dump\n"); - return 0; + "Previous dump not cleared, not forcing dump\n"); + break; } + netdev_info(netdev, "Forcing a FW dump\n"); qlcnic_dev_request_reset(adapter, val->flag); break; case QLCNIC_DISABLE_FW_DUMP: - if (fw_dump->enable && fw_dump->tmpl_hdr) { - netdev_info(netdev, "Disabling FW dump\n"); - fw_dump->enable = 0; + if (!fw_dump->tmpl_hdr) { + netdev_err(netdev, "FW dump not supported\n"); + ret = -EOPNOTSUPP; + break; } - return 0; + + ret = qlcnic_disable_fw_dump_state(adapter); + break; + case QLCNIC_ENABLE_FW_DUMP: if (!fw_dump->tmpl_hdr) { netdev_err(netdev, "FW dump not supported\n"); - return -ENOTSUPP; - } - if (!fw_dump->enable) { - netdev_info(netdev, "Enabling FW dump\n"); - fw_dump->enable = 1; + ret = -EOPNOTSUPP; + break; } - return 0; + + ret = qlcnic_enable_fw_dump_state(adapter); + break; + case QLCNIC_FORCE_FW_RESET: netdev_info(netdev, "Forcing a FW reset\n"); qlcnic_dev_request_reset(adapter, val->flag); adapter->flags &= ~QLCNIC_FW_RESET_OWNER; - return 0; + break; +; case QLCNIC_SET_QUIESCENT: case QLCNIC_RESET_QUIESCENT: state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) netdev_info(netdev, "Device in FAILED state\n"); - return 0; + break; + default: if (!fw_dump->tmpl_hdr) { netdev_err(netdev, "FW dump not supported\n"); - return -ENOTSUPP; + ret = -EOPNOTSUPP; + break; } + for (i = 0; i < ARRAY_SIZE(qlcnic_fw_dump_level); i++) { if (val->flag == qlcnic_fw_dump_level[i]) { - fw_dump->tmpl_hdr->drv_cap_mask = - val->flag; - netdev_info(netdev, "Driver mask changed to: 0x%x\n", - fw_dump->tmpl_hdr->drv_cap_mask); - return 0; + valid_mask = true; + break; } } - netdev_info(netdev, "Invalid dump level: 0x%x\n", val->flag); - return -EINVAL; + + if (valid_mask) { + ret = qlcnic_set_dump_mask(adapter, val->flag); 
+ } else { + netdev_info(netdev, "Invalid dump level: 0x%x\n", + val->flag); + ret = -EINVAL; + } } - return 0; + return ret; } const struct ethtool_ops qlcnic_ethtool_ops = { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index d1ed4e3..e380f03 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -3045,7 +3045,7 @@ skip_ack_check: qlcnic_api_unlock(adapter); rtnl_lock(); - if (adapter->ahw->fw_dump.enable && + if (qlcnic_check_fw_dump_state(adapter) && (adapter->flags & QLCNIC_FW_RESET_OWNER)) { QLCDB(adapter, DRV, "Take FW dump\n"); qlcnic_dump_fw(adapter); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c index b7871fe..1551360 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c @@ -1092,7 +1092,7 @@ flash_temp: else ahw->fw_dump.use_pex_dma = false; - ahw->fw_dump.enable = 1; + qlcnic_enable_fw_dump_state(adapter); return 0; } @@ -1115,7 +1115,11 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter) ahw = adapter->ahw; - if (!fw_dump->enable) { + /* Return if we don't have firmware dump template header */ + if (!tmpl_hdr) + return -EIO; + + if (!qlcnic_check_fw_dump_state(adapter)) { dev_info(&adapter->pdev->dev, "Dump not enabled\n"); return -EIO; } -- cgit v0.10.2 From 4460f2e83c61e21c2e78a28a327b716252b13069 Mon Sep 17 00:00:00 2001 From: Pratik Pujar Date: Fri, 30 Aug 2013 13:51:20 -0400 Subject: qlcnic: Add AER callback handlers. o Generic AER callback handlers will make use of qlcnic_hardware_ops structure to call adapter specific handlers. Signed-off-by: Pratik Pujar Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index f9cd274..318663f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -1479,6 +1479,10 @@ void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter); int qlcnic_dump_fw(struct qlcnic_adapter *); int qlcnic_enable_fw_dump_state(struct qlcnic_adapter *); bool qlcnic_check_fw_dump_state(struct qlcnic_adapter *); +pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *, + pci_channel_state_t); +pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *); +void qlcnic_82xx_io_resume(struct pci_dev *); /* Functions from qlcnic_init.c */ void qlcnic_schedule_work(struct qlcnic_adapter *, work_func_t, int); @@ -1724,6 +1728,10 @@ struct qlcnic_hardware_ops { void (*set_mac_filter_count) (struct qlcnic_adapter *); void (*free_mac_list) (struct qlcnic_adapter *); int (*read_phys_port_id) (struct qlcnic_adapter *); + pci_ers_result_t (*io_error_detected) (struct pci_dev *, + pci_channel_state_t); + pci_ers_result_t (*io_slot_reset) (struct pci_dev *); + void (*io_resume) (struct pci_dev *); }; extern struct qlcnic_nic_template qlcnic_vf_ops; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index e380f03..b1046c3 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -540,6 +540,9 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = { .set_mac_filter_count = qlcnic_82xx_set_mac_filter_count, .free_mac_list = qlcnic_82xx_free_mac_list, .read_phys_port_id = qlcnic_82xx_read_phys_port_id, + .io_error_detected = qlcnic_82xx_io_error_detected, + .io_slot_reset = qlcnic_82xx_io_slot_reset, + .io_resume = qlcnic_82xx_io_resume, }; static void qlcnic_get_multiq_capability(struct qlcnic_adapter *adapter) @@ -3431,19 +3434,6 @@ static int qlcnic_attach_func(struct pci_dev *pdev) return err; } - if (qlcnic_83xx_check(adapter)) { - /* register for NIC IDC AEN Events */ - qlcnic_83xx_register_nic_idc_func(adapter, 1); - err = qlcnic_83xx_setup_mbx_intr(adapter); - if (err) { - dev_err(&adapter->pdev->dev, - "failed to setup mbx interrupt\n"); - qlcnic_clr_all_drv_state(adapter, 1); - clear_bit(__QLCNIC_AER, &adapter->state); - goto done; - } - } - if (netif_running(netdev)) { err = qlcnic_attach(adapter); if (err) { @@ -3464,8 +3454,8 @@ static int qlcnic_attach_func(struct pci_dev *pdev) return err; } -static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) +pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) { struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; @@ -3484,12 +3474,6 @@ static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev, if (netif_running(netdev)) qlcnic_down(adapter, netdev); - if (qlcnic_83xx_check(adapter)) { - qlcnic_83xx_free_mbx_intr(adapter); - qlcnic_83xx_register_nic_idc_func(adapter, 0); - cancel_delayed_work_sync(&adapter->idc_aen_work); - } - qlcnic_detach(adapter); qlcnic_teardown_intr(adapter); @@ -3501,13 +3485,13 @@ static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev, return PCI_ERS_RESULT_NEED_RESET; } -static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev) +pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *pdev) { return qlcnic_attach_func(pdev) ? 
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; } -static void qlcnic_io_resume(struct pci_dev *pdev) +void qlcnic_82xx_io_resume(struct pci_dev *pdev) { u32 state; struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); @@ -3517,9 +3501,48 @@ static void qlcnic_io_resume(struct pci_dev *pdev) if (state == QLCNIC_DEV_READY && test_and_clear_bit(__QLCNIC_AER, &adapter->state)) qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, - FW_POLL_DELAY); + FW_POLL_DELAY); } +static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops; + + if (hw_ops->io_error_detected) { + return hw_ops->io_error_detected(pdev, state); + } else { + dev_err(&pdev->dev, "AER error_detected handler not registered.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } +} + +static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops; + + if (hw_ops->io_slot_reset) { + return hw_ops->io_slot_reset(pdev); + } else { + dev_err(&pdev->dev, "AER slot_reset handler not registered.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } +} + +static void qlcnic_io_resume(struct pci_dev *pdev) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops; + + if (hw_ops->io_resume) + hw_ops->io_resume(pdev); + else + dev_err(&pdev->dev, "AER resume handler not registered.\n"); +} + + static int qlcnicvf_start_firmware(struct qlcnic_adapter *adapter) { -- cgit v0.10.2 From 9ce226fa2352eac89ce79abdea8e465c680d1db0 Mon Sep 17 00:00:00 2001 From: Pratik Pujar Date: Fri, 30 Aug 2013 13:51:21 -0400 Subject: qlcnic: Add AER support for 83xx adapter Signed-off-by: Pratik Pujar Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 36223a9..3ef5437 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -11,6 +11,7 @@ #include #include #include +#include #define QLCNIC_MAX_TX_QUEUES 1 #define RSS_HASHTYPE_IP_TCP 0x3 @@ -177,6 +178,10 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = { .get_board_info = qlcnic_83xx_get_port_info, .set_mac_filter_count = qlcnic_83xx_set_mac_filter_count, .free_mac_list = qlcnic_82xx_free_mac_list, + .io_error_detected = qlcnic_83xx_io_error_detected, + .io_slot_reset = qlcnic_83xx_io_slot_reset, + .io_resume = qlcnic_83xx_io_resume, + }; static struct qlcnic_nic_template qlcnic_83xx_ops = { @@ -3819,3 +3824,57 @@ int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *adapter) set_bit(QLC_83XX_MBX_READY, &mbx->status); return 0; } + +pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (state == pci_channel_io_normal) + return PCI_ERS_RESULT_RECOVERED; + + set_bit(__QLCNIC_AER, &adapter->state); + set_bit(__QLCNIC_RESETTING, &adapter->state); + + qlcnic_83xx_aer_stop_poll_work(adapter); + + pci_save_state(pdev); + pci_disable_device(pdev); + + return PCI_ERS_RESULT_NEED_RESET; +} + +pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *pdev) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + int err = 0; + + pdev->error_state = pci_channel_io_normal; + err = pci_enable_device(pdev); + if (err) + goto disconnect; + + pci_set_power_state(pdev, PCI_D0); + pci_set_master(pdev); + pci_restore_state(pdev); + + err = qlcnic_83xx_aer_reset(adapter); + if (err == 0) + return PCI_ERS_RESULT_RECOVERED; +disconnect: + clear_bit(__QLCNIC_AER, &adapter->state); + clear_bit(__QLCNIC_RESETTING, &adapter->state); + return PCI_ERS_RESULT_DISCONNECT; +} + +void qlcnic_83xx_io_resume(struct pci_dev *pdev) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + + pci_cleanup_aer_uncorrect_error_status(pdev); + if (test_and_clear_bit(__QLCNIC_AER, &adapter->state)) + qlcnic_83xx_aer_start_poll_work(adapter); +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index 053a3a1..6752d58 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -657,4 +657,11 @@ int qlcnic_83xx_idc_init(struct qlcnic_adapter *); int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *); int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *); int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *); +void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *); +int qlcnic_83xx_aer_reset(struct qlcnic_adapter *); +void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *); +pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *, + pci_channel_state_t); +pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *); +void qlcnic_83xx_io_resume(struct pci_dev *); #endif diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 6fac393..b7eddfe 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -797,7 +797,6 @@ 
static int qlcnic_83xx_idc_init_state(struct qlcnic_adapter *adapter) ret = qlcnic_83xx_idc_restart_hw(adapter, 1); } else { ret = qlcnic_83xx_idc_check_timeout(adapter, timeout); - return ret; } return ret; @@ -2249,3 +2248,58 @@ detach_mbx: exit: return err; } + +void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlc_83xx_idc *idc = &ahw->idc; + + clear_bit(QLC_83XX_MBX_READY, &idc->status); + cancel_delayed_work_sync(&adapter->fw_work); + + if (ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE) + qlcnic_83xx_disable_vnic_mode(adapter, 1); + + qlcnic_83xx_idc_detach_driver(adapter); + qlcnic_83xx_register_nic_idc_func(adapter, 0); + + cancel_delayed_work_sync(&adapter->idc_aen_work); +} + +int qlcnic_83xx_aer_reset(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlc_83xx_idc *idc = &ahw->idc; + int ret = 0; + u32 owner; + + /* Mark the previous IDC state as NEED_RESET so + * that state_entry() will perform the reattachment + * and bringup the device + */ + idc->prev_state = QLC_83XX_IDC_DEV_NEED_RESET; + owner = qlcnic_83xx_idc_find_reset_owner_id(adapter); + if (ahw->pci_func == owner) { + ret = qlcnic_83xx_restart_hw(adapter); + if (ret < 0) + return ret; + qlcnic_83xx_idc_clear_registers(adapter, 0); + } + + ret = idc->state_entry(adapter); + return ret; +} + +void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlc_83xx_idc *idc = &ahw->idc; + u32 owner; + + idc->prev_state = QLC_83XX_IDC_DEV_READY; + owner = qlcnic_83xx_idc_find_reset_owner_id(adapter); + if (ahw->pci_func == owner) + qlcnic_83xx_idc_enter_ready_state(adapter, 0); + + qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, 0); +} -- cgit v0.10.2 From 7000078aabd662fd7f5da0ae09b4b02387a83ba6 Mon Sep 17 00:00:00 2001 From: Pratik Pujar Date: Fri, 30 Aug 2013 13:51:22 -0400 Subject: qlcnic: Restructuring of qlc_83xx_fw_info structure. o Removed unused and unnecessary members from qlc_83xx_fw_info structure. o Made fw_info member of qlcnic_hardware_context as a pointer to qlc_83xx_fw_info structure. o Added a member fw_file_name to qlc_83xx_fw_info structure which will hold the name of firmware image file name. Signed-off-by: Pratik Pujar Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 318663f..204d6e1 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -463,7 +463,7 @@ struct qlcnic_hardware_context { struct qlcnic_fdt fdt; struct qlc_83xx_reset reset; struct qlc_83xx_idc idc; - struct qlc_83xx_fw_info fw_info; + struct qlc_83xx_fw_info *fw_info; struct qlcnic_intrpt_config *intr_tbl; struct qlcnic_sriov *sriov; u32 *reg_tbl; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index 6752d58..2a2ab6b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -274,11 +274,7 @@ struct qlcnic_macvlan_mbx { struct qlc_83xx_fw_info { const struct firmware *fw; - u16 major_fw_version; - u8 minor_fw_version; - u8 sub_fw_version; - u8 fw_build_num; - u8 load_from_file; + char fw_file_name[QLC_FW_FILE_NAME_LEN]; }; struct qlc_83xx_reset { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index b7eddfe..c24c2a4 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -1267,31 +1267,33 @@ static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter) static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter) { + struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info; + const struct firmware *fw = fw_info->fw; u32 dest, *p_cache; - u64 addr; + int i, ret = -EIO; u8 data[16]; size_t size; - int i, ret = -EIO; + u64 addr; dest = QLCRDX(adapter->ahw, QLCNIC_FW_IMAGE_ADDR); - size = (adapter->ahw->fw_info.fw->size & ~0xF); - p_cache = (u32 *)adapter->ahw->fw_info.fw->data; + size = (fw->size & ~0xF); + p_cache = (u32 *)fw->data; addr = (u64)dest; ret = qlcnic_83xx_ms_mem_write128(adapter, addr, (u32 *)p_cache, size / 16); if (ret) { dev_err(&adapter->pdev->dev, "MS memory write failed\n"); - release_firmware(adapter->ahw->fw_info.fw); - adapter->ahw->fw_info.fw = NULL; + release_firmware(fw); + fw_info->fw = NULL; return -EIO; } /* alignment check */ - if (adapter->ahw->fw_info.fw->size & 0xF) { + if (fw->size & 0xF) { addr = dest + size; - for (i = 0; i < (adapter->ahw->fw_info.fw->size & 0xF); i++) - data[i] = adapter->ahw->fw_info.fw->data[size + i]; + for (i = 0; i < (fw->size & 0xF); i++) + data[i] = fw->data[size + i]; for (; i < 16; i++) data[i] = 0; ret = qlcnic_83xx_ms_mem_write128(adapter, addr, @@ -1299,13 +1301,13 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter) if (ret) { dev_err(&adapter->pdev->dev, "MS memory write failed\n"); - release_firmware(adapter->ahw->fw_info.fw); - adapter->ahw->fw_info.fw = NULL; + release_firmware(fw); + fw_info->fw = NULL; return -EIO; } } - release_firmware(adapter->ahw->fw_info.fw); - adapter->ahw->fw_info.fw = NULL; + release_firmware(fw); + fw_info->fw = NULL; return 0; } @@ -1949,35 +1951,12 @@ static void qlcnic_83xx_init_hw(struct qlcnic_adapter *p_dev) dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__); } -static void qlcnic_83xx_get_fw_file_name(struct qlcnic_adapter *adapter, - char *file_name) -{ - struct pci_dev *pdev = adapter->pdev; - - memset(file_name, 0, QLC_FW_FILE_NAME_LEN); - - switch (pdev->device) { - case PCI_DEVICE_ID_QLOGIC_QLE834X: - strncpy(file_name, QLC_83XX_FW_FILE_NAME, - QLC_FW_FILE_NAME_LEN); - break; - case 
PCI_DEVICE_ID_QLOGIC_QLE844X: - strncpy(file_name, QLC_84XX_FW_FILE_NAME, - QLC_FW_FILE_NAME_LEN); - break; - default: - dev_err(&pdev->dev, "%s: Invalid device id\n", - __func__); - } -} - static int qlcnic_83xx_load_fw_image_from_host(struct qlcnic_adapter *adapter) { - char fw_file_name[QLC_FW_FILE_NAME_LEN]; + struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info; int err = -EIO; - qlcnic_83xx_get_fw_file_name(adapter, fw_file_name); - if (request_firmware(&adapter->ahw->fw_info.fw, fw_file_name, + if (request_firmware(&fw_info->fw, fw_info->fw_file_name, &(adapter->pdev->dev))) { dev_err(&adapter->pdev->dev, "No file FW image, loading flash FW image.\n"); @@ -2173,6 +2152,39 @@ static void qlcnic_83xx_clear_function_resources(struct qlcnic_adapter *adapter) } } +static int qlcnic_83xx_get_fw_info(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct pci_dev *pdev = adapter->pdev; + struct qlc_83xx_fw_info *fw_info; + int err = 0; + + ahw->fw_info = kzalloc(sizeof(*fw_info), GFP_KERNEL); + if (!ahw->fw_info) { + err = -ENOMEM; + } else { + fw_info = ahw->fw_info; + switch (pdev->device) { + case PCI_DEVICE_ID_QLOGIC_QLE834X: + strncpy(fw_info->fw_file_name, QLC_83XX_FW_FILE_NAME, + QLC_FW_FILE_NAME_LEN); + break; + case PCI_DEVICE_ID_QLOGIC_QLE844X: + strncpy(fw_info->fw_file_name, QLC_84XX_FW_FILE_NAME, + QLC_FW_FILE_NAME_LEN); + break; + default: + dev_err(&pdev->dev, "%s: Invalid device id\n", + __func__); + err = -EINVAL; + break; + } + } + + return err; +} + + int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac) { struct qlcnic_hardware_context *ahw = adapter->ahw; @@ -2198,10 +2210,14 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac) if (!qlcnic_83xx_read_flash_descriptor_table(adapter)) qlcnic_83xx_read_flash_mfg_id(adapter); - err = qlcnic_83xx_idc_init(adapter); + err = qlcnic_83xx_get_fw_info(adapter); if (err) goto detach_mbx; + err = qlcnic_83xx_idc_init(adapter); + if (err) + goto clear_fw_info; + err = qlcnic_setup_intr(adapter, 0, 0); if (err) { dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n"); @@ -2242,6 +2258,9 @@ disable_mbx_intr: disable_intr: qlcnic_teardown_intr(adapter); +clear_fw_info: + kfree(ahw->fw_info); + detach_mbx: qlcnic_83xx_detach_mailbox_work(adapter); qlcnic_83xx_free_mailbox(ahw->mailbox); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index b1046c3..2097d34 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -2402,6 +2402,7 @@ static void qlcnic_remove(struct pci_dev *pdev) qlcnic_83xx_free_mbx_intr(adapter); qlcnic_83xx_detach_mailbox_work(adapter); qlcnic_83xx_free_mailbox(ahw->mailbox); + kfree(ahw->fw_info); } qlcnic_detach(adapter); -- cgit v0.10.2 From 35dafcb0a993cce00ed875db377a372459fa76e0 Mon Sep 17 00:00:00 2001 From: Sony Chacko Date: Fri, 30 Aug 2013 13:51:23 -0400 Subject: qlcnic: Add support for per port eswitch configuration There is an embedded switch per physical port on the adapter. Add support for enabling and disabling the embedded switch on per port basis. Signed-off-by: Sony Chacko Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 204d6e1..46d50f0 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -837,6 +837,7 @@ struct qlcnic_mac_list_s { #define QLCNIC_FW_CAP2_HW_LRO_IPV6 BIT_3 #define QLCNIC_FW_CAPABILITY_SET_DRV_VER BIT_5 #define QLCNIC_FW_CAPABILITY_2_BEACON BIT_7 +#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG BIT_8 /* module types */ #define LINKEVENT_MODULE_NOT_PRESENT 1 @@ -1184,6 +1185,7 @@ struct qlcnic_pci_info { }; struct qlcnic_npar_info { + bool eswitch_status; u16 pvid; u16 min_bw; u16 max_bw; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 3ef5437..a1818da 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -2331,7 +2331,7 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter, pci_info->tx_max_bw, pci_info->mac); } if (ahw->op_mode == QLCNIC_MGMT_FUNC) - dev_info(dev, "Max vNIC functions = %d, active vNIC functions = %d\n", + dev_info(dev, "Max functions = %d, active functions = %d\n", ahw->max_pci_func, ahw->act_pci_func); } else { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index 2a2ab6b..533e150 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -411,6 +411,7 @@ enum qlcnic_83xx_states { #define QLC_83XX_GET_HW_LRO_CAPABILITY(val) (val & 0x400) #define QLC_83XX_GET_VLAN_ALIGN_CAPABILITY(val) (val & 0x4000) #define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000) +#define QLC_83XX_ESWITCH_CAPABILITY BIT_23 #define QLC_83XX_VIRTUAL_NIC_MODE 0xFF #define QLC_83XX_DEFAULT_MODE 0x0 #define QLC_83XX_SRIOV_MODE 0x1 @@ -625,6 +626,7 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *); int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *, struct qlcnic_info *, u8); int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *); +int qlcnic_83xx_enable_port_eswitch(struct qlcnic_adapter *, int); void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *); void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index c24c2a4..f09e787 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -2003,36 +2003,6 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter) return 0; } -/** -* qlcnic_83xx_config_default_opmode -* -* @adapter: adapter structure -* -* Configure default driver operating mode -* -* Returns: Error code or Success(0) -* */ -int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *adapter) -{ - u32 op_mode; - struct qlcnic_hardware_context *ahw = adapter->ahw; - - qlcnic_get_func_no(adapter); - op_mode = QLCRDX(ahw, QLC_83XX_DRV_OP_MODE); - - if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state)) - op_mode = QLC_83XX_DEFAULT_OPMODE; - - if (op_mode == QLC_83XX_DEFAULT_OPMODE) { - adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver; - ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; - } else { - return -EIO; - } - - return 0; -} - int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter) { int err; @@ -2052,26 
+2022,26 @@ int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter) ahw->max_mac_filters = nic_info.max_mac_filters; ahw->max_mtu = nic_info.max_mtu; - /* VNIC mode is detected by BIT_23 in capabilities. This bit is also - * set in case device is SRIOV capable. VNIC and SRIOV are mutually - * exclusive. So in case of sriov capable device load driver in - * default mode + /* eSwitch capability indicates vNIC mode. + * vNIC and SRIOV are mutually exclusive operational modes. + * If SR-IOV capability is detected, SR-IOV physical function + * will get initialized in default mode. + * SR-IOV virtual function initialization follows a + * different code path and opmode. + * SRIOV mode has precedence over vNIC mode. */ - if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state)) { - ahw->nic_mode = QLC_83XX_DEFAULT_MODE; - return ahw->nic_mode; - } + if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state)) + return QLC_83XX_DEFAULT_OPMODE; - if (ahw->capabilities & BIT_23) - ahw->nic_mode = QLC_83XX_VIRTUAL_NIC_MODE; - else - ahw->nic_mode = QLC_83XX_DEFAULT_MODE; + if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY) + return QLC_83XX_VIRTUAL_NIC_MODE; - return ahw->nic_mode; + return QLC_83XX_DEFAULT_OPMODE; } int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter) { + struct qlcnic_hardware_context *ahw = adapter->ahw; int ret; ret = qlcnic_83xx_get_nic_configuration(adapter); @@ -2079,11 +2049,16 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter) return -EIO; if (ret == QLC_83XX_VIRTUAL_NIC_MODE) { + ahw->nic_mode = QLC_83XX_VIRTUAL_NIC_MODE; if (qlcnic_83xx_config_vnic_opmode(adapter)) return -EIO; - } else if (ret == QLC_83XX_DEFAULT_MODE) { - if (qlcnic_83xx_config_default_opmode(adapter)) - return -EIO; + + } else if (ret == QLC_83XX_DEFAULT_OPMODE) { + ahw->nic_mode = QLC_83XX_DEFAULT_MODE; + adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver; + ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; + } else { + return -EIO; } return 0; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c index 599d1fd..0248a4c 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c @@ -208,7 +208,7 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter) return -EIO; } - if (ahw->capabilities & BIT_23) + if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY) adapter->flags |= QLCNIC_ESWITCH_ENABLED; else adapter->flags &= ~QLCNIC_ESWITCH_ENABLED; @@ -239,3 +239,41 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter) return 0; } + +static int qlcnic_83xx_get_eswitch_port_info(struct qlcnic_adapter *adapter, + int func, int *port_id) +{ + struct qlcnic_info nic_info; + int err = 0; + + memset(&nic_info, 0, sizeof(struct qlcnic_info)); + + err = qlcnic_get_nic_info(adapter, &nic_info, func); + if (err) + return err; + + if (nic_info.capabilities & QLC_83XX_ESWITCH_CAPABILITY) + *port_id = nic_info.phys_port; + else + err = -EIO; + + return err; +} + +int qlcnic_83xx_enable_port_eswitch(struct qlcnic_adapter *adapter, int func) +{ + int id, err = 0; + + err = qlcnic_83xx_get_eswitch_port_info(adapter, func, &id); + if (err) + return err; + + if (!(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) { + if (!qlcnic_enable_eswitch(adapter, id, 1)) + adapter->eswitch[id].flags |= QLCNIC_SWITCH_ENABLE; + else + err = -EIO; + } + + return err; +} diff --git 
a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 2097d34..c4c5023 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -796,6 +796,23 @@ static int qlcnic_get_act_pci_func(struct qlcnic_adapter *adapter) return ret; } +static bool qlcnic_port_eswitch_cfg_capability(struct qlcnic_adapter *adapter) +{ + bool ret = false; + + if (qlcnic_84xx_check(adapter)) { + ret = true; + } else if (qlcnic_83xx_check(adapter)) { + if (adapter->ahw->extra_capability[0] & + QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG) + ret = true; + else + ret = false; + } + + return ret; +} + int qlcnic_init_pci_info(struct qlcnic_adapter *adapter) { struct qlcnic_pci_info *pci_info; @@ -839,18 +856,30 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter) (pci_info[i].type != QLCNIC_TYPE_NIC)) continue; + if (qlcnic_port_eswitch_cfg_capability(adapter)) { + if (!qlcnic_83xx_enable_port_eswitch(adapter, pfn)) + adapter->npars[j].eswitch_status = true; + else + continue; + } else { + adapter->npars[j].eswitch_status = true; + } + adapter->npars[j].pci_func = pfn; adapter->npars[j].active = (u8)pci_info[i].active; adapter->npars[j].type = (u8)pci_info[i].type; adapter->npars[j].phy_port = (u8)pci_info[i].default_port; adapter->npars[j].min_bw = pci_info[i].tx_min_bw; adapter->npars[j].max_bw = pci_info[i].tx_max_bw; + j++; } - for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++) { - adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE; - if (qlcnic_83xx_check(adapter)) + if (qlcnic_82xx_check(adapter)) { + for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++) + adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE; + } else if (!qlcnic_port_eswitch_cfg_capability(adapter)) { + for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++) qlcnic_enable_eswitch(adapter, i, 1); } @@ -1275,6 +1304,9 @@ int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter) return 0; for (i = 0; i < adapter->ahw->act_pci_func; i++) { + if (!adapter->npars[i].eswitch_status) + continue; + memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg)); esw_cfg.pci_func = adapter->npars[i].pci_func; esw_cfg.mac_override = BIT_0; @@ -1337,6 +1369,9 @@ int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter) for (i = 0; i < adapter->ahw->act_pci_func; i++) { npar = &adapter->npars[i]; pci_func = npar->pci_func; + if (!adapter->npars[i].eswitch_status) + continue; + memset(&nic_info, 0, sizeof(struct qlcnic_info)); err = qlcnic_get_nic_info(adapter, &nic_info, pci_func); if (err) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 660c3f5..c6165d0 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -465,8 +465,14 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp, memset(&pm_cfg, 0, sizeof(struct qlcnic_pm_func_cfg) * QLCNIC_MAX_PCI_FUNC); - for (i = 0; i < adapter->ahw->act_pci_func; i++) { + for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { pci_func = adapter->npars[i].pci_func; + if (!adapter->npars[i].active) + continue; + + if (!adapter->npars[i].eswitch_status) + continue; + pm_cfg[pci_func].action = adapter->npars[i].enable_pm; pm_cfg[pci_func].dest_npar = 0; pm_cfg[pci_func].pci_func = i; @@ -632,8 +638,14 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file, memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg) * QLCNIC_MAX_PCI_FUNC); - for (i = 0; i < adapter->ahw->act_pci_func; 
i++) { + for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { pci_func = adapter->npars[i].pci_func; + if (!adapter->npars[i].active) + continue; + + if (!adapter->npars[i].eswitch_status) + continue; + esw_cfg[pci_func].pci_func = pci_func; if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func])) return QL_STATUS_INVALID_PARAM; @@ -732,6 +744,9 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file, if (ret) return ret; + if (!adapter->npars[i].eswitch_status) + continue; + np_cfg[i].pci_func = i; np_cfg[i].op_mode = (u8)nic_info.op_mode; np_cfg[i].port_num = nic_info.phys_port; -- cgit v0.10.2 From 693dcd2fb23a19b29ebff786b3283cc7eeb60edb Mon Sep 17 00:00:00 2001 From: Shahed Shaikh Date: Fri, 30 Aug 2013 13:51:24 -0400 Subject: qlcnic: Update version to 5.3.50 Signed-off-by: Shahed Shaikh Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 46d50f0..4615960 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -38,8 +38,8 @@ #define _QLCNIC_LINUX_MAJOR 5 #define _QLCNIC_LINUX_MINOR 3 -#define _QLCNIC_LINUX_SUBVERSION 49 -#define QLCNIC_LINUX_VERSIONID "5.3.49" +#define _QLCNIC_LINUX_SUBVERSION 50 +#define QLCNIC_LINUX_VERSIONID "5.3.50" #define QLCNIC_DRV_IDC_VER 0x01 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) -- cgit v0.10.2 From 8c1bb79fde8117cc36db075c2da238af703717d7 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Mon, 2 Sep 2013 10:06:51 +0800 Subject: vxlan: fix flowi6_proto value It should be IPPROTO_UDP. Signed-off-by: Cong Wang Signed-off-by: David S. Miller diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 3ffb22d..0fcf3f7 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1671,7 +1671,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, fl6.flowi6_oif = rdst->remote_ifindex; fl6.daddr = dst->sin6.sin6_addr; fl6.saddr = vxlan->saddr.sin6.sin6_addr; - fl6.flowi6_proto = skb->protocol; + fl6.flowi6_proto = IPPROTO_UDP; if (ipv6_stub->ipv6_dst_lookup(sk, &ndst, &fl6)) { netdev_dbg(dev, "no route to %pI6\n", -- cgit v0.10.2 From 660d98cae0a474887bb5d66e60422addb4c6532c Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Mon, 2 Sep 2013 10:06:52 +0800 Subject: vxlan: include net/ip6_checksum.h for csum_ipv6_magic() Fengguang reported a compile warning: drivers/net/vxlan.c: In function 'vxlan6_xmit_skb': drivers/net/vxlan.c:1352:3: error: implicit declaration of function 'csum_ipv6_magic' [-Werror=implicit-function-declaration] cc1: some warnings being treated as errors this patch fixes it. Reported-by: kbuild test robot Signed-off-by: Cong Wang Signed-off-by: David S. Miller diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 0fcf3f7..6b560f3 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -44,6 +44,7 @@ #include #include #include +#include #endif #define VXLAN_VERSION "0.1" -- cgit v0.10.2 From 5a17a390de7bdbcfff9b8f344273a886ca4cf8bf Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Mon, 2 Sep 2013 10:06:53 +0800 Subject: net: make snmp_mib_free static inline Fengguang reported: net/built-in.o: In function `in6_dev_finish_destroy': (.text+0x4ca7d): undefined reference to `snmp_mib_free' this is due to snmp_mib_free() is defined when CONFIG_INET is enabled, but in6_dev_finish_destroy() is now moved to core kernel. 
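(For context, a minimal usage sketch of the helper pair; illustrative only, the function and array names below are not from this patch or the tree. Every caller already pairs snmp_mib_init() with the small free loop, which is why moving that loop into an inline function in ip.h changes nothing at the call sites.)

/* Illustrative sketch, assuming a caller-owned percpu MIB array. */
static void __percpu *example_mib[SNMP_ARRAY_SZ];

static int example_mib_alloc(void)
{
	/* allocates one zeroed percpu block per array slot */
	return snmp_mib_init(example_mib, sizeof(struct icmp_mib),
			     __alignof__(struct icmp_mib));
}

static void example_mib_release(void)
{
	/* with this change, expands inline to the free_percpu() loop */
	snmp_mib_free(example_mib);
}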
I think snmp_mib_free() is small enough to be inlined, so just make it static inline. Reported-by: kbuild test robot Signed-off-by: Cong Wang Signed-off-by: David S. Miller diff --git a/include/net/ip.h b/include/net/ip.h index a68f838..48f5597 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -194,7 +194,17 @@ static inline u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp } #endif extern int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align); -extern void snmp_mib_free(void __percpu *ptr[2]); + +static inline void snmp_mib_free(void __percpu *ptr[SNMP_ARRAY_SZ]) +{ + int i; + + BUG_ON(ptr == NULL); + for (i = 0; i < SNMP_ARRAY_SZ; i++) { + free_percpu(ptr[i]); + ptr[i] = NULL; + } +} extern struct local_ports { seqlock_t lock; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index b4d0be2..7a1874b 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1532,18 +1532,6 @@ int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align) } EXPORT_SYMBOL_GPL(snmp_mib_init); -void snmp_mib_free(void __percpu *ptr[SNMP_ARRAY_SZ]) -{ - int i; - - BUG_ON(ptr == NULL); - for (i = 0; i < SNMP_ARRAY_SZ; i++) { - free_percpu(ptr[i]); - ptr[i] = NULL; - } -} -EXPORT_SYMBOL_GPL(snmp_mib_free); - #ifdef CONFIG_IP_MULTICAST static const struct net_protocol igmp_protocol = { .handler = igmp_rcv, -- cgit v0.10.2 From 83a093b486ecfd25e7b04b82f6a388f24bd274b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= Date: Fri, 30 Aug 2013 18:08:44 +0200 Subject: net: etherdevice: add address inherit helper MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some etherdevices inherit their address from a parent or master device. The addr_assign_type should be updated along with the address in these cases. Adding a helper function to simplify this. Signed-off-by: Bjørn Mork Signed-off-by: David S. Miller diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index c623861..d8b5124 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -199,6 +199,21 @@ static inline void eth_hw_addr_random(struct net_device *dev) } /** + * eth_hw_addr_inherit - Copy dev_addr from another net_device + * @dst: pointer to net_device to copy dev_addr to + * @src: pointer to net_device to copy dev_addr from + * + * Copy the Ethernet address from one net_device to another along with + * the address attributes (addr_assign_type). + */ +static inline void eth_hw_addr_inherit(struct net_device *dst, + struct net_device *src) +{ + dst->addr_assign_type = src->addr_assign_type; + memcpy(dst->dev_addr, src->dev_addr, ETH_ALEN); +} + +/** * compare_ether_addr - Compare two Ethernet addresses * @addr1: Pointer to a six-byte array containing the Ethernet address * @addr2: Pointer other six-byte array containing the Ethernet address -- cgit v0.10.2 From 6b93f4a1f2f22b23a2e1e4cca1e1aecffee2193e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= Date: Fri, 30 Aug 2013 18:08:45 +0200 Subject: net: vlan: inherit addr_assign_type along with dev_addr MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A device inheriting a random or set address should reflect this in its addr_assign_type. Cc: Patrick McHardy Signed-off-by: Bjørn Mork Signed-off-by: David S. 
Miller diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 9ab8a7e..09bf1c3 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -582,7 +582,7 @@ static int vlan_dev_init(struct net_device *dev) dev->dev_id = real_dev->dev_id; if (is_zero_ether_addr(dev->dev_addr)) - memcpy(dev->dev_addr, real_dev->dev_addr, dev->addr_len); + eth_hw_addr_inherit(dev, real_dev); if (is_zero_ether_addr(dev->broadcast)) memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len); -- cgit v0.10.2 From 2fcc8005834a390e999ede2a8933012b92db73ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= Date: Fri, 30 Aug 2013 18:08:46 +0200 Subject: net: dsa: inherit addr_assign_type along with dev_addr MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A device inheriting a random or set address should reflect this in its addr_assign_type. Signed-off-by: Bjørn Mork Signed-off-by: David S. Miller diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 6ebd8fb..29d684e 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -347,7 +347,7 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent, slave_dev->features = master->vlan_features; SET_ETHTOOL_OPS(slave_dev, &dsa_slave_ethtool_ops); - memcpy(slave_dev->dev_addr, master->dev_addr, ETH_ALEN); + eth_hw_addr_inherit(slave_dev, master); slave_dev->tx_queue_len = 0; switch (ds->dst->tag_protocol) { -- cgit v0.10.2 From 8b98604e398418b9f1a1e44ac79fbbc134818f50 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= Date: Fri, 30 Aug 2013 18:08:47 +0200 Subject: net: macvlan: inherit addr_assign_type along with dev_addr MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A device inheriting a random or set address should reflect this in its addr_assign_type. Cc: Patrick McHardy Signed-off-by: Bjørn Mork Signed-off-by: David S. Miller diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 201ef17..64dfaa3 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -823,7 +823,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, if (port->count) return -EINVAL; port->passthru = true; - memcpy(dev->dev_addr, lowerdev->dev_addr, ETH_ALEN); + eth_hw_addr_inherit(dev, lowerdev); } err = netdev_upper_dev_link(lowerdev, dev); -- cgit v0.10.2 From 93af735736d161f4383cda2de44a0d7c2fd2ad45 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= Date: Fri, 30 Aug 2013 18:08:48 +0200 Subject: net: team: inherit addr_assign_type along with dev_addr MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A device inheriting a random or set address should reflect this in its addr_assign_type. Cc: Jiri Pirko Signed-off-by: Bjørn Mork Signed-off-by: David S. 
Miller diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 9ccccd4..50e43e6 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1974,7 +1974,7 @@ static void team_setup_by_port(struct net_device *dev, dev->addr_len = port_dev->addr_len; dev->mtu = port_dev->mtu; memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len); - memcpy(dev->dev_addr, port_dev->dev_addr, port_dev->addr_len); + eth_hw_addr_inherit(dev, port_dev); } static int team_dev_type_check_change(struct net_device *dev, -- cgit v0.10.2 From 252352cb5eece744456863c264bc02a43f4d2366 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= Date: Fri, 30 Aug 2013 18:08:49 +0200 Subject: net: airo: inherit addr_assign_type along with dev_addr MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A device inheriting a random or set address should reflect this in its addr_assign_type. Acked-by: John W. Linville Signed-off-by: Bjørn Mork Signed-off-by: David S. Miller diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index d0adbaf..7fe1964 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c @@ -2693,7 +2693,7 @@ static struct net_device *init_wifidev(struct airo_info *ai, dev->base_addr = ethdev->base_addr; dev->wireless_data = ethdev->wireless_data; SET_NETDEV_DEV(dev, ethdev->dev.parent); - memcpy(dev->dev_addr, ethdev->dev_addr, dev->addr_len); + eth_hw_addr_inherit(dev, ethdev); err = register_netdev(dev); if (err<0) { free_netdev(dev); -- cgit v0.10.2 From db181347e838ec87e31e2488697b6f0eea418e5a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= Date: Fri, 30 Aug 2013 18:08:50 +0200 Subject: net: hostap: inherit addr_assign_type along with dev_addr MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A device inheriting a random or set address should reflect this in its addr_assign_type. Cc: Jouni Malinen Signed-off-by: Bjørn Mork Signed-off-by: David S. 
Miller diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c index 6307a4e..c275dc1 100644 --- a/drivers/net/wireless/hostap/hostap_hw.c +++ b/drivers/net/wireless/hostap/hostap_hw.c @@ -1425,7 +1425,7 @@ static int prism2_hw_init2(struct net_device *dev, int initial) } list_for_each(ptr, &local->hostap_interfaces) { iface = list_entry(ptr, struct hostap_interface, list); - memcpy(iface->dev->dev_addr, dev->dev_addr, ETH_ALEN); + eth_hw_addr_inherit(iface->dev, dev); } } else if (local->fw_ap) prism2_check_sta_fw_version(local); diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c index e4f56ad..a1257c9 100644 --- a/drivers/net/wireless/hostap/hostap_main.c +++ b/drivers/net/wireless/hostap/hostap_main.c @@ -66,7 +66,7 @@ struct net_device * hostap_add_interface(struct local_info *local, list_add(&iface->list, &local->hostap_interfaces); mdev = local->dev; - memcpy(dev->dev_addr, mdev->dev_addr, ETH_ALEN); + eth_hw_addr_inherit(dev, mdev); dev->base_addr = mdev->base_addr; dev->irq = mdev->irq; dev->mem_start = mdev->mem_start; -- cgit v0.10.2 From d32a96e26acce17c565480363a1078f0c9e677f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= Date: Fri, 30 Aug 2013 18:08:51 +0200 Subject: net: libertas: inherit addr_assign_type along with dev_addr MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A device inheriting a random or set address should reflect this in its addr_assign_type. Acked-by: John W. Linville Signed-off-by: Bjørn Mork Signed-off-by: David S. Miller diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c index efae07e..6fef746 100644 --- a/drivers/net/wireless/libertas/mesh.c +++ b/drivers/net/wireless/libertas/mesh.c @@ -1017,7 +1017,7 @@ static int lbs_add_mesh(struct lbs_private *priv) mesh_dev->netdev_ops = &mesh_netdev_ops; mesh_dev->ethtool_ops = &lbs_ethtool_ops; - memcpy(mesh_dev->dev_addr, priv->dev->dev_addr, ETH_ALEN); + eth_hw_addr_inherit(mesh_dev, priv->dev); SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent); -- cgit v0.10.2 From 314cb11b493e7b8cfc7e2db01ad8e4e8bc0a259d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= Date: Fri, 30 Aug 2013 18:08:52 +0200 Subject: staging: vt6655: inherit addr_assign_type along with dev_addr MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A device inheriting a random or set address should reflect this in its addr_assign_type. Cc: Forest Bond Signed-off-by: Bjørn Mork Acked-by: Greg Kroah-Hartman Signed-off-by: David S. 
Miller diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c index 57a08c5..8acff44 100644 --- a/drivers/staging/vt6655/hostap.c +++ b/drivers/staging/vt6655/hostap.c @@ -86,7 +86,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked) apdev_priv = netdev_priv(pDevice->apdev); *apdev_priv = *pDevice; - memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN); + eth_hw_addr_inherit(pDevice->apdev, dev); pDevice->apdev->netdev_ops = &apdev_netdev_ops; diff --git a/drivers/staging/vt6655/ioctl.c b/drivers/staging/vt6655/ioctl.c index 46e0e41..b5cd2e4 100644 --- a/drivers/staging/vt6655/ioctl.c +++ b/drivers/staging/vt6655/ioctl.c @@ -460,7 +460,7 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) } if (sValue.dwValue == 1) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "up wpadev\n"); - memcpy(pDevice->wpadev->dev_addr, pDevice->dev->dev_addr, ETH_ALEN); + eth_hw_addr_inherit(pDevice->wpadev, pDevice->dev); pDevice->bWPADEVUp = true; } else { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "close wpadev\n"); diff --git a/drivers/staging/vt6655/wpactl.c b/drivers/staging/vt6655/wpactl.c index 869f62c..e8d9ecd 100644 --- a/drivers/staging/vt6655/wpactl.c +++ b/drivers/staging/vt6655/wpactl.c @@ -96,7 +96,7 @@ static int wpa_init_wpadev(PSDevice pDevice) wpadev_priv = netdev_priv(pDevice->wpadev); *wpadev_priv = *pDevice; - memcpy(pDevice->wpadev->dev_addr, dev->dev_addr, ETH_ALEN); + eth_hw_addr_inherit(pDevice->wpadev, dev); pDevice->wpadev->base_addr = dev->base_addr; pDevice->wpadev->irq = dev->irq; pDevice->wpadev->mem_start = dev->mem_start; -- cgit v0.10.2 From 4f49129be6fa9b41d1b406ed911da07ce15a7ea5 Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Tue, 27 Aug 2013 17:09:02 +0200 Subject: virtio-net: Set RXCSUM feature if GUEST_CSUM is available If the VIRTIO_NET_F_GUEST_CSUM virtio feature is available, the guest does not have to calculate the checksums on all received packets. This is pretty much the same feature as RX checksum offloading on real network cards, so the virtio-net driver should report this by setting the NETIF_F_RXCSUM flag. When the user now runs "ethtool -k", he or she can see whether the virtio-net interface has to calculate RX checksums or not. Signed-off-by: Thomas Huth Acked-by: Rusty Russell Signed-off-by: David S. Miller diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index f216002..defec2b 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1538,6 +1538,8 @@ static int virtnet_probe(struct virtio_device *vdev) dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO); /* (!csum && gso) case will be fixed by register_netdev() */ } + if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) + dev->features |= NETIF_F_RXCSUM; dev->vlan_features = dev->features; -- cgit v0.10.2 From 3e25c65ed085b361cc91a8f02e028f1158c9f255 Mon Sep 17 00:00:00 2001 From: Tim Gardner Date: Thu, 29 Aug 2013 06:38:47 -0600 Subject: net: neighbour: Remove CONFIG_ARPD This config option is superfluous in that it only guards a call to neigh_app_ns(). Enabling CONFIG_ARPD by default has no change in behavior. There will now be call to __neigh_notify() for each ARP resolution, which has no impact unless there is a user space daemon waiting to receive the notification, i.e., the case for which CONFIG_ARPD was designed anyways. Suggested-by: Eric W. Biederman Cc: "David S. Miller" Cc: Alexey Kuznetsov Cc: James Morris Cc: Hideaki YOSHIFUJI Cc: Patrick McHardy Cc: "Eric W. 
Biederman" Cc: Gao feng Cc: Joe Perches Cc: Veaceslav Falico Signed-off-by: Tim Gardner Reviewed-by: "Eric W. Biederman" Signed-off-by: David S. Miller diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 60533db..6072610 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -2759,13 +2759,11 @@ errout: rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); } -#ifdef CONFIG_ARPD void neigh_app_ns(struct neighbour *n) { __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST); } EXPORT_SYMBOL(neigh_app_ns); -#endif /* CONFIG_ARPD */ #ifdef CONFIG_SYSCTL static int zero; diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 37cf1a6..05c57f0 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -259,22 +259,6 @@ config IP_PIMSM_V2 gated-5). This routing protocol is not used widely, so say N unless you want to play with it. -config ARPD - bool "IP: ARP daemon support" - ---help--- - The kernel maintains an internal cache which maps IP addresses to - hardware addresses on the local network, so that Ethernet - frames are sent to the proper address on the physical networking - layer. Normally, kernel uses the ARP protocol to resolve these - mappings. - - Saying Y here adds support to have an user space daemon to do this - resolution instead. This is useful for implementing an alternate - address resolution protocol (e.g. NHRP on mGRE tunnels) and also for - testing purposes. - - If unsure, say N. - config SYN_COOKIES bool "IP: TCP syncookie support" ---help--- diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 4429b01..7808093 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -368,9 +368,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb) } else { probes -= neigh->parms->app_probes; if (probes < 0) { -#ifdef CONFIG_ARPD neigh_app_ns(neigh); -#endif return; } } diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 14bd2f9..2221065 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -662,9 +662,7 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb) } ndisc_send_ns(dev, neigh, target, target, saddr); } else if ((probes -= neigh->parms->app_probes) < 0) { -#ifdef CONFIG_ARPD neigh_app_ns(neigh); -#endif } else { addrconf_addr_solict_mult(target, &mcaddr); ndisc_send_ns(dev, NULL, target, &mcaddr, saddr); -- cgit v0.10.2 From b1b72076b90d631637497b35e36bc64254df199d Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 29 Aug 2013 18:54:49 +0200 Subject: net: sctp: probe: allow more advanced ingress filtering by mark This is a follow-up commit for commit b1dcdc68b1f4 ("net: tcp_probe: allow more advanced ingress filtering by mark") that allows for advanced SCTP probe module filtering based on skb mark (for a more detailed description and advantages using mark, refer to b1dcdc68b1f4). The current option to filter by a given port is still being preserved. Signed-off-by: Daniel Borkmann Acked-by: Neil Horman Signed-off-by: David S. 
Miller diff --git a/net/sctp/probe.c b/net/sctp/probe.c index cd72ae5..53c452e 100644 --- a/net/sctp/probe.c +++ b/net/sctp/probe.c @@ -46,6 +46,10 @@ static int port __read_mostly = 0; MODULE_PARM_DESC(port, "Port to match (0=all)"); module_param(port, int, 0); +static unsigned int fwmark __read_mostly = 0; +MODULE_PARM_DESC(fwmark, "skb mark to match (0=no mark)"); +module_param(fwmark, uint, 0); + static int bufsize __read_mostly = 64 * 1024; MODULE_PARM_DESC(bufsize, "Log buffer size (default 64k)"); module_param(bufsize, int, 0); @@ -129,15 +133,19 @@ static sctp_disposition_t jsctp_sf_eat_sack(struct net *net, void *arg, sctp_cmd_seq_t *commands) { + struct sctp_chunk *chunk = arg; + struct sk_buff *skb = chunk->skb; struct sctp_transport *sp; static __u32 lcwnd = 0; struct timespec now; sp = asoc->peer.primary_path; - if ((full || sp->cwnd != lcwnd) && - (!port || asoc->peer.port == port || - ep->base.bind_addr.port == port)) { + if (((port == 0 && fwmark == 0) || + asoc->peer.port == port || + ep->base.bind_addr.port == port || + (fwmark > 0 && skb->mark == fwmark)) && + (full || sp->cwnd != lcwnd)) { lcwnd = sp->cwnd; getnstimeofday(&now); @@ -198,8 +206,8 @@ static __init int sctpprobe_init(void) if (ret) goto remove_proc; - pr_info("probe registered (port=%d)\n", port); - + pr_info("probe registered (port=%d/fwmark=%u) bufsize=%u\n", + port, fwmark, bufsize); return 0; remove_proc: -- cgit v0.10.2 From 6f477d420183f7cacc73f46a93e12961cb234e91 Mon Sep 17 00:00:00 2001 From: Veaceslav Falico Date: Thu, 29 Aug 2013 23:38:56 +0200 Subject: bonding: remove bond_vlan_used() We're using it currently to verify if we have vlans before getting the tag from the skb we're about to send. It's useless because the vlan_get_tag() verifies if the skb has the tag (and returns an error if not), and we can receive tagged skbs only if we *already* have vlans. Plus, the current RCUed implementation is kind of useless anyway - the we can remove the last vlan in the moment we return from the function. So remove the only usage of it and the whole function. CC: Jay Vosburgh CC: Andy Gospodarek Signed-off-by: Veaceslav Falico Signed-off-by: David S. 
Miller diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 0182352..27b03fa 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -694,10 +694,8 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon client_info->ntt = 0; } - if (bond_vlan_used(bond)) { - if (!vlan_get_tag(skb, &client_info->vlan_id)) - client_info->tag = 1; - } + if (!vlan_get_tag(skb, &client_info->vlan_id)) + client_info->tag = 1; if (!client_info->assigned) { u32 prev_tbl_head = bond_info->rx_hashtbl_used_head; diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 4abc925..f7ab161 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@ -261,24 +261,6 @@ struct bonding { #endif /* CONFIG_DEBUG_FS */ }; -/* if we hold rtnl_lock() - call vlan_uses_dev() */ -static inline bool bond_vlan_used(struct bonding *bond) -{ - struct net_device *upper; - struct list_head *iter; - - rcu_read_lock(); - netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) { - if (upper->priv_flags & IFF_802_1Q_VLAN) { - rcu_read_unlock(); - return true; - } - } - rcu_read_unlock(); - - return false; -} - #define bond_slave_get_rcu(dev) \ ((struct slave *) rcu_dereference(dev->rx_handler_data)) -- cgit v0.10.2 From d3ab3ffd1d728d7ee77340e7e7e2c7cfe6a4013e Mon Sep 17 00:00:00 2001 From: Veaceslav Falico Date: Thu, 29 Aug 2013 23:38:57 +0200 Subject: bonding: use rlb_client_info->vlan_id instead of ->tag Store VID in ->vlan_id (if any), and remove the useless ->tag. CC: Jay Vosburgh CC: Andy Gospodarek Signed-off-by: Veaceslav Falico Signed-off-by: David S. Miller diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 27b03fa..91f179d 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -512,7 +512,7 @@ static void rlb_update_client(struct rlb_client_info *client_info) skb->dev = client_info->slave->dev; - if (client_info->tag) { + if (client_info->vlan_id) { skb = vlan_put_tag(skb, htons(ETH_P_8021Q), client_info->vlan_id); if (!skb) { pr_err("%s: Error: failed to insert VLAN tag\n", @@ -695,7 +695,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon } if (!vlan_get_tag(skb, &client_info->vlan_id)) - client_info->tag = 1; + client_info->vlan_id = 0; if (!client_info->assigned) { u32 prev_tbl_head = bond_info->rx_hashtbl_used_head; @@ -801,7 +801,7 @@ static void rlb_init_table_entry_dst(struct rlb_client_info *entry) entry->used_prev = RLB_NULL_INDEX; entry->assigned = 0; entry->slave = NULL; - entry->tag = 0; + entry->vlan_id = 0; } static void rlb_init_table_entry_src(struct rlb_client_info *entry) { @@ -958,7 +958,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id) struct rlb_client_info *curr = &(bond_info->rx_hashtbl[curr_index]); u32 next_index = bond_info->rx_hashtbl[curr_index].used_next; - if (curr->tag && (curr->vlan_id == vlan_id)) + if (curr->vlan_id == vlan_id) rlb_delete_table_entry(bond, curr_index); curr_index = next_index; diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h index e02c9c5..28d8e4c 100644 --- a/drivers/net/bonding/bond_alb.h +++ b/drivers/net/bonding/bond_alb.h @@ -125,7 +125,6 @@ struct rlb_client_info { u8 assigned; /* checking whether this entry is assigned */ u8 ntt; /* flag - need to transmit client info */ struct slave *slave; /* the slave assigned to this client */ - u8 tag; /* flag - need to tag skb */ unsigned short vlan_id; /* 
VLAN tag associated with IP address */ }; -- cgit v0.10.2 From 7db8e0c14c9cb4adb667b4c558d8ffec8d5b40f2 Mon Sep 17 00:00:00 2001 From: Kouei Abe Date: Fri, 30 Aug 2013 12:41:07 +0900 Subject: sh_eth: Fix cache invalidation omission of receive buffer Signed-off-by: Kouei Abe Signed-off-by: Simon Horman Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 06d8f62..60ec0f7 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1342,6 +1342,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) mdp->rx_skbuff[entry] = NULL; if (mdp->cd->rpadir) skb_reserve(skb, NET_IP_ALIGN); + dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr, + mdp->rx_buf_sz, + DMA_FROM_DEVICE); skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, ndev); netif_rx(skb); -- cgit v0.10.2 From fd9af07c3404ac9ecbd0d859563360f51ce1ffde Mon Sep 17 00:00:00 2001 From: Kouei Abe Date: Fri, 30 Aug 2013 12:41:08 +0900 Subject: sh_eth: Enable Rx descriptor word 0 shift for r8a7790 This corrects an oversight when r8a7790 support was added to sh_eth. Signed-off-by: Kouei Abe Signed-off-by: Simon Horman Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 60ec0f7..474c8a8 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -416,6 +416,7 @@ static struct sh_eth_cpu_data r8a7790_data = { .tpauser = 1, .hw_swap = 1, .rmiimode = 1, + .shift_rd0 = 1, }; static void sh_eth_set_rate_sh7724(struct net_device *ndev) -- cgit v0.10.2 From 7a163bfb7ce50895bbe67300ea610d31b9c09230 Mon Sep 17 00:00:00 2001 From: Dan Aloni Date: Fri, 30 Aug 2013 13:59:46 +0300 Subject: netconsole: avoid a crash with multiple sysfs writers When my 'ifup eth' script was fired multiple times and ran concurrent on my laptop, for some obscure /etc scripting reason, it was revealed that the store_enabled() function in netconsole doesn't handle it nicely, as recorded by the Oops below (a syslog paste, but not mangled too much to prevent from discerning the traceback). On Linux 3.10.4, this patch seeks to remedy the problem, and it has been running stable on my laptop for a few days. [52608.609325] BUG: unable to handle kernel NULL pointer dereference at 00000000000003e0 [52608.609331] IP: [] __netpoll_cleanup+0x27/0xe0 [52608.609339] PGD 15e51a067 PUD 15433e067 PMD 0 [52608.609343] Oops: 0000 [#1] SMP re firewire_ohci firewire_core crc_itu_t [last unloaded: kvm_intel] [52608.609347] Modules linked in: kvm_intel tun vfat fat ppdev parport_pc parport fuse ipt_MASQUERADE usb_storage nf_conntrack_netbios_ns nf_conn [..garbled..] 
[52608.609433] RAX: 0000000000000000 RBX: ffff880210bbcc68 RCX: 0000000000000000 [52608.609435] RDX: 0000000000000000 RSI: ffff8801ba447da0 RDI: ffff880210bbcc68 [52608.609437] RBP: ffff8801ba447e18 R08: 0000000000000000 R09: 0000000000000001 [52608.609439] R10: 000000000000000a R11: f000000000000000 R12: ffff880210bbcc68 [52608.609441] R13: ffff88020bc41000 R14: 0000000000000002 R15: 000000000000000200000000000 [52608.609443] FS: 00007f38d7bff740(0000) GS:ffff88021dc40000(0000) knlGS:0000000000000000 [52608.609446] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003300000000001427e0 [52608.609448] CR2: 00000000000003e0 CR3: 0000000154103000 CR4: 00000000001427e0 [52608.609450] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [52608.609452] netpoll: netconsole: local port 6665ess 10.0.0.27 [52608.609454] netpoll: netconsole: local IPv4 address 10.0.0.27 [52608.609456] netpoll: netconsole: interface 'em1' [52608.609457] netpoll: netconsole: remote port 514ress 10.0.0.15 [52608.609459] netpoll: netconsole: remote IPv4 address 10.0.0.15:65:a8:9a:c7 [52608.609461] netpoll: netconsole: remote ethernet address 1c:6f:65:a8:9a:c7 [52608.609463] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400 [52608.609464] Stack:801ba447e08 ffff880210bbcc68 ffffffffffffffea ffff88020bc41000 [52608.609466] ffff8801ba447e08 ffff880210bbcc68 ffffffffffffffea ffff88020bc41000 [52608.609471] 0000000000000002 0000000000000002 ffff8801ba447e38 ffffffff81532af4 [52608.609475] 0000000000000000 ffff880210bbcc00 ffff8801ba447e78 ffffffff81420e7c [52608.609479] Call Trace: [52608.609484] [] netpoll_cleanup+0x24/0x50 [52608.609489] [] store_enabled+0x5c/0xe0 [52608.609492] [] netconsole_target_attr_store+0x2e/0x40 [52608.609498] [] configfs_write_file+0xd2/0x130 [52608.609503] [] vfs_write+0xc5/0x1f0 [52608.609506] [] SyS_write+0x52/0xa0/0x10 [52608.609511] [] ? do_page_fault+0xe/0x10 [52608.609516] [] system_call_fastpath+0x16/0x1b [52608.609517] Code: 1f 44 00 00 0f 1f 44 00 00 55 48 89 e5 48 83 ec 30 4c 89 65 e0 48 89 5d d8 49 89 fc 4c 89 6d e8 4c 89 75 f0 4c 89 7d f8 48 8 [..garbled..] [52608.609559] RIP [] __netpoll_cleanup+0x27/0xe0 [52608.609563] RSP [52608.609564] CR2: 00000000000003e0 [52608.609567] ---[ end trace d25ec343349b61d2 ]--- Signed-off-by: Dan Aloni Signed-off-by: Neil Horman CC: David S. Miller Signed-off-by: David S. 
Miller diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index 4822aaf..dcb2134 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -102,6 +102,7 @@ struct netconsole_target { struct config_item item; #endif int enabled; + struct mutex mutex; struct netpoll np; }; @@ -181,6 +182,7 @@ static struct netconsole_target *alloc_param_target(char *target_config) strlcpy(nt->np.dev_name, "eth0", IFNAMSIZ); nt->np.local_port = 6665; nt->np.remote_port = 6666; + mutex_init(&nt->mutex); memset(nt->np.remote_mac, 0xff, ETH_ALEN); /* Parse parameters and setup netpoll */ @@ -322,6 +324,7 @@ static ssize_t store_enabled(struct netconsole_target *nt, return -EINVAL; } + mutex_lock(&nt->mutex); if (enabled) { /* 1 */ /* @@ -331,8 +334,10 @@ static ssize_t store_enabled(struct netconsole_target *nt, netpoll_print_options(&nt->np); err = netpoll_setup(&nt->np); - if (err) + if (err) { + mutex_unlock(&nt->mutex); return err; + } printk(KERN_INFO "netconsole: network logging started\n"); @@ -341,6 +346,7 @@ static ssize_t store_enabled(struct netconsole_target *nt, } nt->enabled = enabled; + mutex_unlock(&nt->mutex); return strnlen(buf, count); } @@ -597,6 +603,7 @@ static struct config_item *make_netconsole_target(struct config_group *group, strlcpy(nt->np.dev_name, "eth0", IFNAMSIZ); nt->np.local_port = 6665; nt->np.remote_port = 6666; + mutex_init(&nt->mutex); memset(nt->np.remote_mac, 0xff, ETH_ALEN); /* Initialize the config_item member */ @@ -682,7 +689,11 @@ restart: * we might sleep in __netpoll_cleanup() */ spin_unlock_irqrestore(&target_list_lock, flags); + + mutex_lock(&nt->mutex); __netpoll_cleanup(&nt->np); + mutex_unlock(&nt->mutex); + spin_lock_irqsave(&target_list_lock, flags); dev_put(nt->np.dev); nt->np.dev = NULL; -- cgit v0.10.2 From 61e76b178dbe7145e8d6afa84bb4ccea71918994 Mon Sep 17 00:00:00 2001 From: Jiri Bohac Date: Fri, 30 Aug 2013 11:18:45 +0200 Subject: ICMPv6: treat dest unreachable codes 5 and 6 as EACCES, not EPROTO RFC 4443 has defined two additional codes for ICMPv6 type 1 (destination unreachable) messages: 5 - Source address failed ingress/egress policy 6 - Reject route to destination Now they are treated as protocol error and icmpv6_err_convert() converts them to EPROTO. RFC 4443 says: "Codes 5 and 6 are more informative subsets of code 1." Treat codes 5 and 6 as code 1 (EACCES) Btw, connect() returning -EPROTO confuses firefox, so that fallback to other/IPv4 addresses does not work: https://bugzilla.mozilla.org/show_bug.cgi?id=910773 Signed-off-by: Jiri Bohac Acked-by: Hannes Frederic Sowa Signed-off-by: David S. 
Miller diff --git a/include/uapi/linux/icmpv6.h b/include/uapi/linux/icmpv6.h index e0133c7..590beda 100644 --- a/include/uapi/linux/icmpv6.h +++ b/include/uapi/linux/icmpv6.h @@ -115,6 +115,8 @@ struct icmp6hdr { #define ICMPV6_NOT_NEIGHBOUR 2 #define ICMPV6_ADDR_UNREACH 3 #define ICMPV6_PORT_UNREACH 4 +#define ICMPV6_POLICY_FAIL 5 +#define ICMPV6_REJECT_ROUTE 6 /* * Codes for Time Exceeded diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 7cfc8d2..67ae4e0 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -940,6 +940,14 @@ static const struct icmp6_err { .err = ECONNREFUSED, .fatal = 1, }, + { /* POLICY_FAIL */ + .err = EACCES, + .fatal = 1, + }, + { /* REJECT_ROUTE */ + .err = EACCES, + .fatal = 1, + }, }; int icmpv6_err_convert(u8 type, u8 code, int *err) @@ -951,7 +959,7 @@ int icmpv6_err_convert(u8 type, u8 code, int *err) switch (type) { case ICMPV6_DEST_UNREACH: fatal = 1; - if (code <= ICMPV6_PORT_UNREACH) { + if (code < ARRAY_SIZE(tab_unreach)) { *err = tab_unreach[code].err; fatal = tab_unreach[code].fatal; } -- cgit v0.10.2 From 50ad076ba43d88956707cd9d6849715de5e282bf Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Fri, 30 Aug 2013 15:01:15 +0300 Subject: gianfar: Fix reported number of sent bytes to BQL Fix the amount of sent bytes reported to BQL by reporting the number of bytes on wire in the xmit routine, and recording that value for each skb in order to be correctly confirmed on Tx confirmation cleanup. Reporting skb->len to BQL just before exiting xmit is not correct due to possible insertions of TOE block and alignment bytes in the skb->data, which are being stripped off by the controller before transmission on wire. This led to mismatch of (incorrectly) reported bytes to BQL b/w xmit and Tx confirmation, resulting in Tx timeout firing, for the h/w tx timestamping acceleration case. There's no easy way to obtain the number of bytes on wire in the Tx confirmation routine, so skb->cb is used to convey that information from xmit to Tx confirmation, for now (as proposed by Eric). Revived the currently unused GFAR_CB() construct for that purpose. Signed-off-by: Claudiu Manoil Cc: Eric Dumazet Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index b2c91dc..c4eaade 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -2092,7 +2092,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) int do_tstamp, do_csum, do_vlan; u32 bufaddr; unsigned long flags; - unsigned int nr_frags, nr_txbds, length, fcb_len = 0; + unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0; rq = skb->queue_mapping; tx_queue = priv->tx_queue[rq]; @@ -2147,7 +2147,10 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) } /* Update transmit stats */ - tx_queue->stats.tx_bytes += skb->len; + bytes_sent = skb->len; + tx_queue->stats.tx_bytes += bytes_sent; + /* keep Tx bytes on wire for BQL accounting */ + GFAR_CB(skb)->bytes_sent = bytes_sent; tx_queue->stats.tx_packets++; txbdp = txbdp_start = tx_queue->cur_tx; @@ -2167,12 +2170,13 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) } else { /* Place the fragment addresses and lengths into the TxBDs */ for (i = 0; i < nr_frags; i++) { + unsigned int frag_len; /* Point at the next BD, wrapping as needed */ txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); - length = skb_shinfo(skb)->frags[i].size; + frag_len = skb_shinfo(skb)->frags[i].size; - lstatus = txbdp->lstatus | length | + lstatus = txbdp->lstatus | frag_len | BD_LFLAG(TXBD_READY); /* Handle the last BD specially */ @@ -2182,7 +2186,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) bufaddr = skb_frag_dma_map(priv->dev, &skb_shinfo(skb)->frags[i], 0, - length, + frag_len, DMA_TO_DEVICE); /* set the TxBD length and buffer pointer */ @@ -2250,7 +2254,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); } - netdev_tx_sent_queue(txq, skb->len); + netdev_tx_sent_queue(txq, bytes_sent); /* We can work in parallel with gfar_clean_tx_ring(), except * when modifying num_txbdfree. Note that we didn't grab the lock @@ -2570,7 +2574,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) bdp = next_txbd(bdp, base, tx_ring_size); } - bytes_sent += skb->len; + bytes_sent += GFAR_CB(skb)->bytes_sent; dev_kfree_skb_any(skb); diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index 46f56f3..04112b9 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -575,7 +575,7 @@ struct rxfcb { }; struct gianfar_skb_cb { - int alignamount; + unsigned int bytes_sent; /* bytes-on-wire (i.e. no FCB) */ }; #define GFAR_CB(skb) ((struct gianfar_skb_cb *)((skb)->cb)) -- cgit v0.10.2 From 13c7bf0871509723a2b3c054d3d6e9e506464e71 Mon Sep 17 00:00:00 2001 From: Petr Holasek Date: Fri, 30 Aug 2013 17:02:38 +0200 Subject: ipv6: ipv6_create_tempaddr cleanup This two-liner removes max_addresses variable which is now unecessary related to patch [ipv6: remove max_addresses check from ipv6_create_tempaddr]. Signed-off-by: Petr Holasek Acked-by: Hannes Frederic Sowa Acked-by: Ding Tianhong Signed-off-by: David S. 
Miller diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 498ea99..05c83dd 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1054,7 +1054,6 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i unsigned long regen_advance; int tmp_plen; int ret = 0; - int max_addresses; u32 addr_flags; unsigned long now = jiffies; @@ -1100,7 +1099,6 @@ retry: idev->cnf.temp_prefered_lft + age - idev->cnf.max_desync_factor); tmp_plen = ifp->prefix_len; - max_addresses = idev->cnf.max_addresses; tmp_tstamp = ifp->tstamp; spin_unlock_bh(&ifp->lock); -- cgit v0.10.2 From bc6fc9fa0e1686d8a06aaa14005d3dbf7978d81f Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 30 Aug 2013 15:36:14 +0100 Subject: net: fix comment typo for __skb_alloc_pages() The name of the function in the comment is __skb_alloc_page() while we are actually commenting __skb_alloc_pages(). Fix this typo and make it a valid kernel doc comment. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 5ac96f3..6f1330a 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1905,8 +1905,8 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev, return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC); } -/* - * __skb_alloc_page - allocate pages for ps-rx on a skb and preserve pfmemalloc data +/** + * __skb_alloc_pages - allocate pages for ps-rx on a skb and preserve pfmemalloc data * @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used * @order: size of the allocation -- cgit v0.10.2 From a77dcb8c8ff0b3d65bb3c90f71eabad37e89bc73 Mon Sep 17 00:00:00 2001 From: Ajit Khaparde Date: Fri, 30 Aug 2013 15:01:16 -0500 Subject: be2net: set and query VEB/VEPA mode of the PF interface SkyHawk-R can support VEB or VEPA mode. This patch will allow the user to set/query this switch setting. Signed-off-by: Ajit Khaparde Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 52c9085..1ab5dab 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -2876,7 +2876,7 @@ int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom) } int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, - u32 domain, u16 intf_id) + u32 domain, u16 intf_id, u16 hsw_mode) { struct be_mcc_wrb *wrb; struct be_cmd_req_set_hsw_config *req; @@ -2903,6 +2903,13 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1); AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid); } + if (!BEx_chip(adapter) && hsw_mode) { + AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, + ctxt, adapter->hba_port_num); + AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1); + AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type, + ctxt, hsw_mode); + } be_dws_cpu_to_le(req->context, sizeof(req->context)); status = be_mcc_notify_wait(adapter); @@ -2914,7 +2921,7 @@ err: /* Get Hyper switch config */ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, - u32 domain, u16 intf_id) + u32 domain, u16 intf_id, u8 *mode) { struct be_mcc_wrb *wrb; struct be_cmd_req_get_hsw_config *req; @@ -2937,9 +2944,15 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL); req->hdr.domain = domain; - AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt, - intf_id); + AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, + ctxt, intf_id); AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1); + + if (!BEx_chip(adapter)) { + AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, + ctxt, adapter->hba_port_num); + AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1); + } be_dws_cpu_to_le(req->context, sizeof(req->context)); status = be_mcc_notify_wait(adapter); @@ -2950,7 +2963,11 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, sizeof(resp->context)); vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context, pvid, &resp->context); - *pvid = le16_to_cpu(vid); + if (pvid) + *pvid = le16_to_cpu(vid); + if (mode) + *mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context, + port_fwd_type, &resp->context); } err: diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 52f3d4c..d026226 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -1533,12 +1533,17 @@ struct be_cmd_req_set_mac_list { } __packed; /*********************** HSW Config ***********************/ +#define PORT_FWD_TYPE_VEPA 0x3 +#define PORT_FWD_TYPE_VEB 0x2 + struct amap_set_hsw_context { u8 interface_id[16]; u8 rsvd0[14]; u8 pvid_valid; - u8 rsvd1; - u8 rsvd2[16]; + u8 pport; + u8 rsvd1[6]; + u8 port_fwd_type[3]; + u8 rsvd2[7]; u8 pvid[16]; u8 rsvd3[32]; u8 rsvd4[32]; @@ -1563,7 +1568,9 @@ struct amap_get_hsw_req_context { } __packed; struct amap_get_hsw_resp_context { - u8 rsvd1[16]; + u8 rsvd0[6]; + u8 port_fwd_type[3]; + u8 rsvd1[7]; u8 pvid[16]; u8 rsvd2[32]; u8 rsvd3[32]; @@ -1965,9 +1972,9 @@ extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, extern int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom); extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, - u32 domain, u16 
intf_id); + u32 domain, u16 intf_id, u16 hsw_mode); extern int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, - u32 domain, u16 intf_id); + u32 domain, u16 intf_id, u8 *mode); extern int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter); extern int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter, struct be_dma_mem *cmd); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 39e0a76..e104db7 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -21,6 +21,7 @@ #include "be_cmds.h" #include #include +#include MODULE_VERSION(DRV_VER); MODULE_DEVICE_TABLE(pci, be_dev_ids); @@ -1212,14 +1213,14 @@ static int be_set_vf_vlan(struct net_device *netdev, adapter->vf_cfg[vf].vlan_tag = vlan; status = be_cmd_set_hsw_config(adapter, vlan, - vf + 1, adapter->vf_cfg[vf].if_handle); + vf + 1, adapter->vf_cfg[vf].if_handle, 0); } } else { /* Reset Transparent Vlan Tagging. */ adapter->vf_cfg[vf].vlan_tag = 0; vlan = adapter->vf_cfg[vf].def_vid; status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, - adapter->vf_cfg[vf].if_handle); + adapter->vf_cfg[vf].if_handle, 0); } @@ -2917,7 +2918,7 @@ static int be_vf_setup(struct be_adapter *adapter) vf_cfg->tx_rate = lnk_speed; status = be_cmd_get_hsw_config(adapter, &def_vlan, - vf + 1, vf_cfg->if_handle); + vf + 1, vf_cfg->if_handle, NULL); if (status) goto err; vf_cfg->def_vid = def_vlan; @@ -3795,6 +3796,74 @@ fw_exit: return status; } +static int be_ndo_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh) +{ + struct be_adapter *adapter = netdev_priv(dev); + struct nlattr *attr, *br_spec; + int rem; + int status = 0; + u16 mode = 0; + + if (!sriov_enabled(adapter)) + return -EOPNOTSUPP; + + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + + nla_for_each_nested(attr, br_spec, rem) { + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + + mode = nla_get_u16(attr); + if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) + return -EINVAL; + + status = be_cmd_set_hsw_config(adapter, 0, 0, + adapter->if_handle, + mode == BRIDGE_MODE_VEPA ? + PORT_FWD_TYPE_VEPA : + PORT_FWD_TYPE_VEB); + if (status) + goto err; + + dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n", + mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); + + return status; + } +err: + dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n", + mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); + + return status; +} + +static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, + u32 filter_mask) +{ + struct be_adapter *adapter = netdev_priv(dev); + int status = 0; + u8 hsw_mode; + + if (!sriov_enabled(adapter)) + return 0; + + /* BE and Lancer chips support VEB mode only */ + if (BEx_chip(adapter) || lancer_chip(adapter)) { + hsw_mode = PORT_FWD_TYPE_VEB; + } else { + status = be_cmd_get_hsw_config(adapter, NULL, 0, + adapter->if_handle, &hsw_mode); + if (status) + return 0; + } + + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, + hsw_mode == PORT_FWD_TYPE_VEPA ? 
+ BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB); +} + static const struct net_device_ops be_netdev_ops = { .ndo_open = be_open, .ndo_stop = be_close, @@ -3813,6 +3882,8 @@ static const struct net_device_ops be_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = be_netpoll, #endif + .ndo_bridge_setlink = be_ndo_bridge_setlink, + .ndo_bridge_getlink = be_ndo_bridge_getlink, }; static void be_netdev_init(struct net_device *netdev) -- cgit v0.10.2 From 50ae3c22763b77d55c59fb00274d4b9800e0c857 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Fri, 30 Aug 2013 16:49:19 -0500 Subject: net: calxedaxgmac: remove NETIF_F_FRAGLIST setting The xgmac does not actually handle frag lists, so this option should not be set. It does not appear to have had any impact though. Reported-by: Lennert Buytenhek Signed-off-by: Rob Herring Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index 7cb148c..71f6720 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -1759,7 +1759,7 @@ static int xgmac_probe(struct platform_device *pdev) if (device_can_wakeup(priv->device)) priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ - ndev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA; + ndev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA; if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL) ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; -- cgit v0.10.2 From ef07387faf33a95e011993200902d490b605407d Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Fri, 30 Aug 2013 16:49:20 -0500 Subject: net: calxedaxgmac: read correct field in xgmac_desc_get_buf_len xgmac_desc_get_buf_len appears to have a copy/paste error. flags is the wrong field to read. We should be reading buf_size field. cpu_to_le32 should also be le32_to_cpu. This never really mattered as this function is only used for DMA mapping calls which happen to be nops with coherent DMA. Signed-off-by: Rob Herring Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index 71f6720..d41af68 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -421,7 +421,7 @@ static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz) static inline int desc_get_buf_len(struct xgmac_dma_desc *p) { - u32 len = cpu_to_le32(p->flags); + u32 len = le32_to_cpu(p->buf_size); return (len & DESC_BUFFER1_SZ_MASK) + ((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET); } -- cgit v0.10.2 From 8746f671ef04114ab25f5a35ec6219efbdf3703e Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Fri, 30 Aug 2013 16:49:21 -0500 Subject: net: calxedaxgmac: fix race between xgmac_tx_complete and xgmac_tx_err It is possible for the xgmac_tx_complete to run concurrently with xgmac_tx_err since there are no locks. Fix this by moving the tx error handling to a workqueue so we can disable napi while we reset the transmitter. Reported-by: Andreas Herrmann Signed-off-by: Rob Herring Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index d41af68..df7e3e25 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -393,6 +393,7 @@ struct xgmac_priv { char rx_pause; char tx_pause; int wolopts; + struct work_struct tx_timeout_work; }; /* XGMAC Configuration Settings */ @@ -897,21 +898,18 @@ static void xgmac_tx_complete(struct xgmac_priv *priv) netif_wake_queue(priv->dev); } -/** - * xgmac_tx_err: - * @priv: pointer to the private device structure - * Description: it cleans the descriptors and restarts the transmission - * in case of errors. - */ -static void xgmac_tx_err(struct xgmac_priv *priv) +static void xgmac_tx_timeout_work(struct work_struct *work) { - u32 reg, value, inten; + u32 reg, value; + struct xgmac_priv *priv = + container_of(work, struct xgmac_priv, tx_timeout_work); - netif_stop_queue(priv->dev); + napi_disable(&priv->napi); - inten = readl(priv->base + XGMAC_DMA_INTR_ENA); writel(0, priv->base + XGMAC_DMA_INTR_ENA); + netif_tx_lock(priv->dev); + reg = readl(priv->base + XGMAC_DMA_CONTROL); writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL); do { @@ -927,9 +925,15 @@ static void xgmac_tx_err(struct xgmac_priv *priv) writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS, priv->base + XGMAC_DMA_STATUS); - writel(inten, priv->base + XGMAC_DMA_INTR_ENA); + netif_tx_unlock(priv->dev); netif_wake_queue(priv->dev); + + napi_enable(&priv->napi); + + /* Enable interrupts */ + writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_STATUS); + writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA); } static int xgmac_hw_init(struct net_device *dev) @@ -1225,9 +1229,7 @@ static int xgmac_poll(struct napi_struct *napi, int budget) static void xgmac_tx_timeout(struct net_device *dev) { struct xgmac_priv *priv = netdev_priv(dev); - - /* Clear Tx resources and restart transmitting again */ - xgmac_tx_err(priv); + schedule_work(&priv->tx_timeout_work); } /** @@ -1366,7 +1368,6 @@ static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id) static irqreturn_t xgmac_interrupt(int irq, void *dev_id) { u32 intr_status; - bool tx_err = false; struct net_device *dev = (struct net_device *)dev_id; struct xgmac_priv *priv = netdev_priv(dev); struct xgmac_extra_stats *x = &priv->xstats; @@ -1396,16 +1397,12 @@ static irqreturn_t xgmac_interrupt(int irq, void *dev_id) if (intr_status & DMA_STATUS_TPS) { netdev_err(priv->dev, "transmit process stopped\n"); x->tx_process_stopped++; - tx_err = true; + schedule_work(&priv->tx_timeout_work); } if (intr_status & DMA_STATUS_FBI) { netdev_err(priv->dev, "fatal bus error\n"); x->fatal_bus_error++; - tx_err = true; } - - if (tx_err) - xgmac_tx_err(priv); } /* TX/RX NORMAL interrupts */ @@ -1708,6 +1705,7 @@ static int xgmac_probe(struct platform_device *pdev) ndev->netdev_ops = &xgmac_netdev_ops; SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops); spin_lock_init(&priv->stats_lock); + INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work); priv->device = &pdev->dev; priv->dev = ndev; -- cgit v0.10.2 From 1a1d4d2f30a3c7e91bb7876df95baae363be5434 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Fri, 30 Aug 2013 16:49:22 -0500 Subject: net: calxedaxgmac: fix possible skb free before tx complete The TX completion code may have freed an skb before the entire sg list was transmitted. The DMA unmap calls for the fragments could also get skipped. Now set the skb pointer on every entry in the ring, not just the head of the sg list. 
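As a rough illustration of the bookkeeping this describes (and which the next sentence refines), a minimal sketch of the reworked completion loop follows. It borrows the helper names visible in the diff below (desc_get_owner, desc_get_tx_fs, desc_get_tx_ls, dma_ring_incr, ...) and leaves out the statistics, debug output and queue-wake handling, so it is an outline of the idea rather than the patch itself:

    /* Sketch only: every tx ring slot now records the owning skb, so each
     * completed slot can be unmapped on its own; the skb itself is freed
     * exactly once, when the slot carrying the last segment completes.
     */
    static void xgmac_tx_complete_sketch(struct xgmac_priv *priv)
    {
            while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
                    unsigned int entry = priv->tx_tail;
                    struct xgmac_dma_desc *p = priv->dma_tx + entry;
                    struct sk_buff *skb = priv->tx_skbuff[entry];

                    if (desc_get_owner(p))
                            break;          /* still owned by the DMA engine */

                    if (desc_get_tx_fs(p))  /* head buffer: mapped with dma_map_single() */
                            dma_unmap_single(priv->device, desc_get_buf_addr(p),
                                             desc_get_buf_len(p), DMA_TO_DEVICE);
                    else                    /* fragment: mapped with skb_frag_dma_map() */
                            dma_unmap_page(priv->device, desc_get_buf_addr(p),
                                           desc_get_buf_len(p), DMA_TO_DEVICE);

                    if (desc_get_tx_ls(p))  /* last segment: now safe to free the skb */
                            dev_kfree_skb(skb);

                    priv->tx_skbuff[entry] = NULL;
                    priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
            }
    }
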
We then use the FS (first segment) bit in the descriptors to determine skb head vs. fragment. This also fixes similar bug in xgmac_free_tx_skbufs where clean-up of a sg list that wraps at the end of the ring buffer would not get unmapped. Signed-off-by: Rob Herring Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index df7e3e25..64854ad 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -470,6 +470,11 @@ static inline int desc_get_tx_ls(struct xgmac_dma_desc *p) return le32_to_cpu(p->flags) & TXDESC_LAST_SEG; } +static inline int desc_get_tx_fs(struct xgmac_dma_desc *p) +{ + return le32_to_cpu(p->flags) & TXDESC_FIRST_SEG; +} + static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p) { return le32_to_cpu(p->buf1_addr); @@ -796,7 +801,7 @@ static void xgmac_free_rx_skbufs(struct xgmac_priv *priv) static void xgmac_free_tx_skbufs(struct xgmac_priv *priv) { - int i, f; + int i; struct xgmac_dma_desc *p; if (!priv->tx_skbuff) @@ -807,16 +812,15 @@ static void xgmac_free_tx_skbufs(struct xgmac_priv *priv) continue; p = priv->dma_tx + i; - dma_unmap_single(priv->device, desc_get_buf_addr(p), - desc_get_buf_len(p), DMA_TO_DEVICE); - - for (f = 0; f < skb_shinfo(priv->tx_skbuff[i])->nr_frags; f++) { - p = priv->dma_tx + i++; + if (desc_get_tx_fs(p)) + dma_unmap_single(priv->device, desc_get_buf_addr(p), + desc_get_buf_len(p), DMA_TO_DEVICE); + else dma_unmap_page(priv->device, desc_get_buf_addr(p), desc_get_buf_len(p), DMA_TO_DEVICE); - } - dev_kfree_skb_any(priv->tx_skbuff[i]); + if (desc_get_tx_ls(p)) + dev_kfree_skb_any(priv->tx_skbuff[i]); priv->tx_skbuff[i] = NULL; } } @@ -853,8 +857,6 @@ static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv) */ static void xgmac_tx_complete(struct xgmac_priv *priv) { - int i; - while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) { unsigned int entry = priv->tx_tail; struct sk_buff *skb = priv->tx_skbuff[entry]; @@ -864,33 +866,24 @@ static void xgmac_tx_complete(struct xgmac_priv *priv) if (desc_get_owner(p)) break; - /* Verify tx error by looking at the last segment */ - if (desc_get_tx_ls(p)) - desc_get_tx_status(priv, p); - netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n", priv->tx_head, priv->tx_tail); - dma_unmap_single(priv->device, desc_get_buf_addr(p), - desc_get_buf_len(p), DMA_TO_DEVICE); - - priv->tx_skbuff[entry] = NULL; - priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ); - - if (!skb) { - continue; - } - - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - entry = priv->tx_tail = dma_ring_incr(priv->tx_tail, - DMA_TX_RING_SZ); - p = priv->dma_tx + priv->tx_tail; - + if (desc_get_tx_fs(p)) + dma_unmap_single(priv->device, desc_get_buf_addr(p), + desc_get_buf_len(p), DMA_TO_DEVICE); + else dma_unmap_page(priv->device, desc_get_buf_addr(p), desc_get_buf_len(p), DMA_TO_DEVICE); + + /* Check tx error on the last segment */ + if (desc_get_tx_ls(p)) { + desc_get_tx_status(priv, p); + dev_kfree_skb(skb); } - dev_kfree_skb(skb); + priv->tx_skbuff[entry] = NULL; + priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ); } if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) > @@ -1110,7 +1103,7 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev) entry = dma_ring_incr(entry, DMA_TX_RING_SZ); desc = priv->dma_tx + entry; - priv->tx_skbuff[entry] = NULL; + priv->tx_skbuff[entry] = skb; desc_set_buf_addr_and_size(desc, paddr, len); if (i < (nfrags - 1)) -- cgit v0.10.2 From 
ca32723afedd65d612029705446bf44bde5eab14 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Fri, 30 Aug 2013 16:49:23 -0500 Subject: net: calxedaxgmac: update ring buffer tx_head after barriers Ensure that the descriptor writes are visible before the ring buffer head is updated. Since writel is a barrier, we can simply update the head after the writel. Reported-by: Lennert Buytenhek Signed-off-by: Rob Herring Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index 64854ad..f630855 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -1121,9 +1121,10 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev) wmb(); desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG); + writel(1, priv->base + XGMAC_DMA_TX_POLL); + priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ); - writel(1, priv->base + XGMAC_DMA_TX_POLL); if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) < MAX_SKB_FRAGS) netif_stop_queue(dev); -- cgit v0.10.2 From cbe157b60c8e70d4b4dc937dfdd39525d8f47b46 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Fri, 30 Aug 2013 16:49:24 -0500 Subject: net: calxedaxgmac: fix race with tx queue stop/wake Since the xgmac transmit start and completion work locklessly, it is possible for xgmac_xmit to stop the tx queue after the xgmac_tx_complete has run resulting in the tx queue never being woken up. Fix this by ensuring that ring buffer index updates are visible and recheck the ring space after stopping the queue. Also fix an off-by-one bug where we need to stop the queue when the ring buffer space is equal to MAX_SKB_FRAGS. The implementation used here was copied from drivers/net/ethernet/broadcom/tg3.c. Signed-off-by: Rob Herring Reviewed-by: Ben Hutchings Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index f630855..5d0b61a 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -410,6 +410,9 @@ struct xgmac_priv { #define dma_ring_space(h, t, s) CIRC_SPACE(h, t, s) #define dma_ring_cnt(h, t, s) CIRC_CNT(h, t, s) +#define tx_dma_ring_space(p) \ + dma_ring_space((p)->tx_head, (p)->tx_tail, DMA_TX_RING_SZ) + /* XGMAC Descriptor Access Helpers */ static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz) { @@ -886,8 +889,10 @@ static void xgmac_tx_complete(struct xgmac_priv *priv) priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ); } - if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) > - MAX_SKB_FRAGS) + /* Ensure tx_tail is visible to xgmac_xmit */ + smp_mb(); + if (unlikely(netif_queue_stopped(priv->dev) && + (tx_dma_ring_space(priv) > MAX_SKB_FRAGS))) netif_wake_queue(priv->dev); } @@ -1125,10 +1130,15 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev) priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ); - if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) < - MAX_SKB_FRAGS) + /* Ensure tx_head update is visible to tx completion */ + smp_mb(); + if (unlikely(tx_dma_ring_space(priv) <= MAX_SKB_FRAGS)) { netif_stop_queue(dev); - + /* Ensure netif_stop_queue is visible to tx completion */ + smp_mb(); + if (tx_dma_ring_space(priv) > MAX_SKB_FRAGS) + netif_start_queue(dev); + } return NETDEV_TX_OK; } -- cgit v0.10.2 From f7ea10520d7dacbc416d130c4b10505c66bf4c36 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Fri, 30 Aug 2013 16:49:25 -0500 Subject: net: calxedaxgmac: enable interrupts after napi_enable Fix a race condition where the interrupt handler may have called napi_schedule before napi_enable is called. This would disable interrupts and never actually schedule napi to run. Reported-by: Lennert Buytenhek Signed-off-by: Rob Herring Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index 5d0b61a..73d3f83 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -959,9 +959,7 @@ static int xgmac_hw_init(struct net_device *dev) DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL; writel(value, ioaddr + XGMAC_DMA_BUS_MODE); - /* Enable interrupts */ - writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS); - writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA); + writel(0, ioaddr + XGMAC_DMA_INTR_ENA); /* Mask power mgt interrupt */ writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT); @@ -1029,6 +1027,10 @@ static int xgmac_open(struct net_device *dev) napi_enable(&priv->napi); netif_start_queue(dev); + /* Enable interrupts */ + writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS); + writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA); + return 0; } -- cgit v0.10.2 From 2ee68f621af280c37a6bd72fe75e47de0750f301 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Fri, 30 Aug 2013 16:49:26 -0500 Subject: net: calxedaxgmac: fix various errors in xgmac_set_rx_mode Fix xgmac_set_rx_mode to handle several conditions that were not handled correctly as Lennert Buytenhek describes: If we have, say, 100 unicast addresses, and 5 multicast addresses, the unicast address count check will evaluate to true, and set use_hash to true. 
The multicast address check will however evaluate to false, and use_hash won't be set to true again, and XGMAC_FRAME_FILTER_HMC won't be OR'd into XGMAC_FRAME_FILTER, but since use_hash was still true from the unicast check, netdev_for_each_mc_addr() will program the multicast addresses into the hash table instead of using the MAC address registers, but since the HMC bit wasn't set, the hash table won't be checked for multicast addresses on receive, and we'll stop receiving multicast packets entirely. Also, there is no code that zeroes out MAC address registers reg..31 at the end of this function, meaning that under the right conditions, unicast/multicast addresses that were previously in the filter but were then deleted won't be cleared out. Reported-by: Lennert Buytenhek Signed-off-by: Rob Herring Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index 73d3f83..0cff9e3 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -618,10 +618,15 @@ static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr, { u32 data; - data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0); - writel(data, ioaddr + XGMAC_ADDR_HIGH(num)); - data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; - writel(data, ioaddr + XGMAC_ADDR_LOW(num)); + if (addr) { + data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0); + writel(data, ioaddr + XGMAC_ADDR_HIGH(num)); + data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; + writel(data, ioaddr + XGMAC_ADDR_LOW(num)); + } else { + writel(0, ioaddr + XGMAC_ADDR_HIGH(num)); + writel(0, ioaddr + XGMAC_ADDR_LOW(num)); + } } static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, @@ -1294,6 +1299,8 @@ static void xgmac_set_rx_mode(struct net_device *dev) if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) { use_hash = true; value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF; + } else { + use_hash = false; } netdev_for_each_mc_addr(ha, dev) { if (use_hash) { @@ -1310,6 +1317,8 @@ static void xgmac_set_rx_mode(struct net_device *dev) } out: + for (i = reg; i < XGMAC_MAX_FILTER_ADDR; i++) + xgmac_set_mac_addr(ioaddr, NULL, reg); for (i = 0; i < XGMAC_NUM_HASH; i++) writel(hash_filter[i], ioaddr + XGMAC_HASH(i)); -- cgit v0.10.2 From d2a5128fbc30480763bbb8a43ec7c8161c1bf055 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Fri, 30 Aug 2013 16:49:27 -0500 Subject: net: calxedaxgmac: remove some unused statistic counters rx_sa_filter_fail and tx_undeflow events are unused and impossible to occur based on how the h/w is used. We never filter on source MAC address and TX store and forward mode prevents underflow events. Reported-by: Lennert Buytenhek Signed-off-by: Rob Herring Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index 0cff9e3..04c585c 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -353,11 +353,9 @@ struct xgmac_extra_stats { /* Receive errors */ unsigned long rx_watchdog; unsigned long rx_da_filter_fail; - unsigned long rx_sa_filter_fail; unsigned long rx_payload_error; unsigned long rx_ip_header_error; /* Tx/Rx IRQ errors */ - unsigned long tx_undeflow; unsigned long tx_process_stopped; unsigned long rx_buf_unav; unsigned long rx_process_stopped; @@ -1581,7 +1579,6 @@ static const struct xgmac_stats xgmac_gstrings_stats[] = { XGMAC_STAT(rx_payload_error), XGMAC_STAT(rx_ip_header_error), XGMAC_STAT(rx_da_filter_fail), - XGMAC_STAT(rx_sa_filter_fail), XGMAC_STAT(fatal_bus_error), XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG), XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME), -- cgit v0.10.2 From 531cda20cd44d5a7a77debaaeaa407d4802d7e05 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Fri, 30 Aug 2013 16:49:28 -0500 Subject: net: calxedaxgmac: fix rx DMA mapping API size mismatches Fix the mismatch in the DMA mapping and unmapping sizes for receive. The unmap size must be equal to the map size and should not be the actual received frame length. The map size should also be adjusted by the NET_IP_ALIGN size since the h/w buffer size (dma_buf_sz) includes this offset. Also, add a missing dma_mapping_error check in xgmac_rx_refill. Reported-by: Lennert Buytenhek Signed-off-by: Rob Herring Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index 04c585c..cd5010b 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -695,9 +695,14 @@ static void xgmac_rx_refill(struct xgmac_priv *priv) if (unlikely(skb == NULL)) break; - priv->rx_skbuff[entry] = skb; paddr = dma_map_single(priv->device, skb->data, - bufsz, DMA_FROM_DEVICE); + priv->dma_buf_sz - NET_IP_ALIGN, + DMA_FROM_DEVICE); + if (dma_mapping_error(priv->device, paddr)) { + dev_kfree_skb_any(skb); + break; + } + priv->rx_skbuff[entry] = skb; desc_set_buf_addr(p, paddr, priv->dma_buf_sz); } @@ -794,13 +799,14 @@ static void xgmac_free_rx_skbufs(struct xgmac_priv *priv) return; for (i = 0; i < DMA_RX_RING_SZ; i++) { - if (priv->rx_skbuff[i] == NULL) + struct sk_buff *skb = priv->rx_skbuff[i]; + if (skb == NULL) continue; p = priv->dma_rx + i; dma_unmap_single(priv->device, desc_get_buf_addr(p), - priv->dma_buf_sz, DMA_FROM_DEVICE); - dev_kfree_skb_any(priv->rx_skbuff[i]); + priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); priv->rx_skbuff[i] = NULL; } } @@ -1187,7 +1193,7 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit) skb_put(skb, frame_len); dma_unmap_single(priv->device, desc_get_buf_addr(p), - frame_len, DMA_FROM_DEVICE); + priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE); skb->protocol = eth_type_trans(skb, priv->dev); skb->ip_summed = ip_checksum; -- cgit v0.10.2 From 92cd4253e553e87a715243a36623945edcbf1ec7 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Fri, 30 Aug 2013 16:49:29 -0500 Subject: net: calxedaxgmac: fix xgmac_xmit DMA mapping error handling On a DMA mapping error in xgmac_xmit, we should simply free the skb and return NETDEV_TX_OK rather than -EIO. In the case of errors in mapping frags, we need to undo everything that has been setup. Reported-by: Andreas Herrmann Signed-off-by: Rob Herring Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index cd5010b..78d6d6b 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -466,6 +466,13 @@ static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags) p->flags = cpu_to_le32(tmpflags); } +static inline void desc_clear_tx_owner(struct xgmac_dma_desc *p) +{ + u32 tmpflags = le32_to_cpu(p->flags); + tmpflags &= TXDESC_END_RING; + p->flags = cpu_to_le32(tmpflags); +} + static inline int desc_get_tx_ls(struct xgmac_dma_desc *p) { return le32_to_cpu(p->flags) & TXDESC_LAST_SEG; @@ -1100,7 +1107,7 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev) paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE); if (dma_mapping_error(priv->device, paddr)) { dev_kfree_skb(skb); - return -EIO; + return NETDEV_TX_OK; } priv->tx_skbuff[entry] = skb; desc_set_buf_addr_and_size(desc, paddr, len); @@ -1112,10 +1119,8 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev) paddr = skb_frag_dma_map(priv->device, frag, 0, len, DMA_TO_DEVICE); - if (dma_mapping_error(priv->device, paddr)) { - dev_kfree_skb(skb); - return -EIO; - } + if (dma_mapping_error(priv->device, paddr)) + goto dma_err; entry = dma_ring_incr(entry, DMA_TX_RING_SZ); desc = priv->dma_tx + entry; @@ -1151,6 +1156,22 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev) netif_start_queue(dev); } return NETDEV_TX_OK; + +dma_err: + entry = priv->tx_head; + for ( ; i > 0; i--) { + entry = dma_ring_incr(entry, DMA_TX_RING_SZ); + desc = priv->dma_tx + entry; + priv->tx_skbuff[entry] = NULL; + dma_unmap_page(priv->device, desc_get_buf_addr(desc), + desc_get_buf_len(desc), DMA_TO_DEVICE); + desc_clear_tx_owner(desc); + } + desc = first; + dma_unmap_single(priv->device, desc_get_buf_addr(desc), + desc_get_buf_len(desc), DMA_TO_DEVICE); + dev_kfree_skb(skb); + return NETDEV_TX_OK; } static int xgmac_rx(struct xgmac_priv *priv, int limit) -- cgit v0.10.2 From 989038e217e94161862a959e82f9a1ecf8dda152 Mon Sep 17 00:00:00 2001 From: Nithin Sujir Date: Fri, 30 Aug 2013 17:01:36 -0700 Subject: tg3: Don't turn off led on 5719 serdes port 0 Turning off led on port 0 of the 5719 serdes causes all other ports to lose power and stop functioning. Add tg3_phy_led_bug() function to check for this condition. We use a switch() in tg3_phy_led_bug() for consistency with the tg3_phy_power_bug() function. Signed-off-by: Nithin Nayak Sujir Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 0da2214..9d289d3 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -3030,6 +3030,19 @@ static bool tg3_phy_power_bug(struct tg3 *tp) return false; } +static bool tg3_phy_led_bug(struct tg3 *tp) +{ + switch (tg3_asic_rev(tp)) { + case ASIC_REV_5719: + if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && + !tp->pci_fn) + return true; + return false; + } + + return false; +} + static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) { u32 val; @@ -3077,8 +3090,9 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) } return; } else if (do_low_power) { - tg3_writephy(tp, MII_TG3_EXT_CTRL, - MII_TG3_EXT_CTRL_FORCE_LED_OFF); + if (!tg3_phy_led_bug(tp)) + tg3_writephy(tp, MII_TG3_EXT_CTRL, + MII_TG3_EXT_CTRL_FORCE_LED_OFF); val = MII_TG3_AUXCTL_PCTL_100TX_LPWR | MII_TG3_AUXCTL_PCTL_SPR_ISOLATE | -- cgit v0.10.2 From c9b37458e95629b1d1171457afdcc1bf1eb7881d Mon Sep 17 00:00:00 2001 From: Liu Junliang Date: Sun, 1 Sep 2013 19:38:08 +0800 Subject: USB2NET : SR9700 : One chip USB 1.1 USB2NET SR9700Device Driver Support Signed-off-by: Liu Junliang Signed-off-by: David S. Miller diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index d84bfd4..40db312 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig @@ -268,6 +268,14 @@ config USB_NET_DM9601 This option adds support for Davicom DM9601 based USB 1.1 10/100 Ethernet adapters. +config USB_NET_SR9700 + tristate "CoreChip-sz SR9700 based USB 1.1 10/100 ethernet devices" + depends on USB_USBNET + select CRC32 + help + This option adds support for CoreChip-sz SR9700 based USB 1.1 + 10/100 Ethernet adapters. + config USB_NET_SMSC75XX tristate "SMSC LAN75XX based USB 2.0 gigabit ethernet devices" depends on USB_USBNET diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile index e817178..8b342cf 100644 --- a/drivers/net/usb/Makefile +++ b/drivers/net/usb/Makefile @@ -14,6 +14,7 @@ obj-$(CONFIG_USB_NET_AX88179_178A) += ax88179_178a.o obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o r815x.o obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o obj-$(CONFIG_USB_NET_DM9601) += dm9601.o +obj-$(CONFIG_USB_NET_SR9700) += sr9700.o obj-$(CONFIG_USB_NET_SMSC75XX) += smsc75xx.o obj-$(CONFIG_USB_NET_SMSC95XX) += smsc95xx.o obj-$(CONFIG_USB_NET_GL620A) += gl620a.o diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c new file mode 100644 index 0000000..7ec3e0e --- /dev/null +++ b/drivers/net/usb/sr9700.c @@ -0,0 +1,560 @@ +/* + * CoreChip-sz SR9700 one chip USB 1.1 Ethernet Devices + * + * Author : Liu Junliang + * + * Based on dm9601.c + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sr9700.h" + +static int sr_read(struct usbnet *dev, u8 reg, u16 length, void *data) +{ + int err; + + err = usbnet_read_cmd(dev, SR_RD_REGS, SR_REQ_RD_REG, 0, reg, data, + length); + if ((err != length) && (err >= 0)) + err = -EINVAL; + return err; +} + +static int sr_write(struct usbnet *dev, u8 reg, u16 length, void *data) +{ + int err; + + err = usbnet_write_cmd(dev, SR_WR_REGS, SR_REQ_WR_REG, 0, reg, data, + length); + if ((err >= 0) && (err < length)) + err = -EINVAL; + return err; +} + +static int sr_read_reg(struct usbnet *dev, u8 reg, u8 *value) +{ + return sr_read(dev, reg, 1, value); +} + +static int sr_write_reg(struct usbnet *dev, u8 reg, u8 value) +{ + return usbnet_write_cmd(dev, SR_WR_REGS, SR_REQ_WR_REG, + value, reg, NULL, 0); +} + +static void sr_write_async(struct usbnet *dev, u8 reg, u16 length, void *data) +{ + usbnet_write_cmd_async(dev, SR_WR_REGS, SR_REQ_WR_REG, + 0, reg, data, length); +} + +static void sr_write_reg_async(struct usbnet *dev, u8 reg, u8 value) +{ + usbnet_write_cmd_async(dev, SR_WR_REGS, SR_REQ_WR_REG, + value, reg, NULL, 0); +} + +static int wait_phy_eeprom_ready(struct usbnet *dev, int phy) +{ + int i; + + for (i = 0; i < SR_SHARE_TIMEOUT; i++) { + u8 tmp = 0; + int ret; + + udelay(1); + ret = sr_read_reg(dev, EPCR, &tmp); + if (ret < 0) + return ret; + + /* ready */ + if (!(tmp & EPCR_ERRE)) + return 0; + } + + netdev_err(dev->net, "%s write timed out!\n", phy ? "phy" : "eeprom"); + + return -EIO; +} + +static int sr_share_read_word(struct usbnet *dev, int phy, u8 reg, + __le16 *value) +{ + int ret; + + mutex_lock(&dev->phy_mutex); + + sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg); + sr_write_reg(dev, EPCR, phy ? (EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR); + + ret = wait_phy_eeprom_ready(dev, phy); + if (ret < 0) + goto out_unlock; + + sr_write_reg(dev, EPCR, 0x0); + ret = sr_read(dev, EPDR, 2, value); + + netdev_dbg(dev->net, "read shared %d 0x%02x returned 0x%04x, %d\n", + phy, reg, *value, ret); + +out_unlock: + mutex_unlock(&dev->phy_mutex); + return ret; +} + +static int sr_share_write_word(struct usbnet *dev, int phy, u8 reg, + __le16 value) +{ + int ret; + + mutex_lock(&dev->phy_mutex); + + ret = sr_write(dev, EPDR, 2, &value); + if (ret < 0) + goto out_unlock; + + sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg); + sr_write_reg(dev, EPCR, phy ? 
(EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) : + (EPCR_WEP | EPCR_ERPRW)); + + ret = wait_phy_eeprom_ready(dev, phy); + if (ret < 0) + goto out_unlock; + + sr_write_reg(dev, EPCR, 0x0); + +out_unlock: + mutex_unlock(&dev->phy_mutex); + return ret; +} + +static int sr_read_eeprom_word(struct usbnet *dev, u8 offset, void *value) +{ + return sr_share_read_word(dev, 0, offset, value); +} + +static int sr9700_get_eeprom_len(struct net_device *netdev) +{ + return SR_EEPROM_LEN; +} + +static int sr9700_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct usbnet *dev = netdev_priv(netdev); + __le16 *buf = (__le16 *)data; + int ret = 0; + int i; + + /* access is 16bit */ + if ((eeprom->offset & 0x01) || (eeprom->len & 0x01)) + return -EINVAL; + + for (i = 0; i < eeprom->len / 2; i++) { + ret = sr_read_eeprom_word(dev, eeprom->offset / 2 + i, buf + i); + if (ret < 0) + break; + } + + return ret; +} + +static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc) +{ + struct usbnet *dev = netdev_priv(netdev); + __le16 res; + int rc = 0; + + if (phy_id) { + netdev_dbg(netdev, "Only internal phy supported\n"); + return 0; + } + + /* Access NSR_LINKST bit for link status instead of MII_BMSR */ + if (loc == MII_BMSR) { + u8 value; + + sr_read_reg(dev, NSR, &value); + if (value & NSR_LINKST) + rc = 1; + } + sr_share_read_word(dev, 1, loc, &res); + if (rc == 1) + res = le16_to_cpu(res) | BMSR_LSTATUS; + else + res = le16_to_cpu(res) & ~BMSR_LSTATUS; + + netdev_dbg(netdev, "sr_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n", + phy_id, loc, res); + + return res; +} + +static void sr_mdio_write(struct net_device *netdev, int phy_id, int loc, + int val) +{ + struct usbnet *dev = netdev_priv(netdev); + __le16 res = cpu_to_le16(val); + + if (phy_id) { + netdev_dbg(netdev, "Only internal phy supported\n"); + return; + } + + netdev_dbg(netdev, "sr_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n", + phy_id, loc, val); + + sr_share_write_word(dev, 1, loc, res); +} + +static u32 sr9700_get_link(struct net_device *netdev) +{ + struct usbnet *dev = netdev_priv(netdev); + u8 value = 0; + int rc = 0; + + /* Get the Link Status directly */ + sr_read_reg(dev, NSR, &value); + if (value & NSR_LINKST) + rc = 1; + + return rc; +} + +static int sr9700_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) +{ + struct usbnet *dev = netdev_priv(netdev); + + return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL); +} + +static const struct ethtool_ops sr9700_ethtool_ops = { + .get_drvinfo = usbnet_get_drvinfo, + .get_link = sr9700_get_link, + .get_msglevel = usbnet_get_msglevel, + .set_msglevel = usbnet_set_msglevel, + .get_eeprom_len = sr9700_get_eeprom_len, + .get_eeprom = sr9700_get_eeprom, + .get_settings = usbnet_get_settings, + .set_settings = usbnet_set_settings, + .nway_reset = usbnet_nway_reset, +}; + +static void sr9700_set_multicast(struct net_device *netdev) +{ + struct usbnet *dev = netdev_priv(netdev); + /* We use the 20 byte dev->data for our 8 byte filter buffer + * to avoid allocating memory that is tricky to free later + */ + u8 *hashes = (u8 *)&dev->data; + /* rx_ctl setting : enable, disable_long, disable_crc */ + u8 rx_ctl = RCR_RXEN | RCR_DIS_CRC | RCR_DIS_LONG; + + memset(hashes, 0x00, SR_MCAST_SIZE); + /* broadcast address */ + hashes[SR_MCAST_SIZE - 1] |= SR_MCAST_ADDR_FLAG; + if (netdev->flags & IFF_PROMISC) { + rx_ctl |= RCR_PRMSC; + } else if (netdev->flags & IFF_ALLMULTI || + netdev_mc_count(netdev) > SR_MCAST_MAX) { + rx_ctl |= RCR_RUNT; + 
} else if (!netdev_mc_empty(netdev)) { + struct netdev_hw_addr *ha; + + netdev_for_each_mc_addr(ha, netdev) { + u32 crc = ether_crc(ETH_ALEN, ha->addr) >> 26; + hashes[crc >> 3] |= 1 << (crc & 0x7); + } + } + + sr_write_async(dev, MAR, SR_MCAST_SIZE, hashes); + sr_write_reg_async(dev, RCR, rx_ctl); +} + +static int sr9700_set_mac_address(struct net_device *netdev, void *p) +{ + struct usbnet *dev = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) { + netdev_err(netdev, "not setting invalid mac address %pM\n", + addr->sa_data); + return -EINVAL; + } + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + sr_write_async(dev, PAR, 6, netdev->dev_addr); + + return 0; +} + +static const struct net_device_ops sr9700_netdev_ops = { + .ndo_open = usbnet_open, + .ndo_stop = usbnet_stop, + .ndo_start_xmit = usbnet_start_xmit, + .ndo_tx_timeout = usbnet_tx_timeout, + .ndo_change_mtu = usbnet_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = sr9700_ioctl, + .ndo_set_rx_mode = sr9700_set_multicast, + .ndo_set_mac_address = sr9700_set_mac_address, +}; + +static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf) +{ + struct net_device *netdev; + struct mii_if_info *mii; + int ret; + + ret = usbnet_get_endpoints(dev, intf); + if (ret) + goto out; + + netdev = dev->net; + + netdev->netdev_ops = &sr9700_netdev_ops; + netdev->ethtool_ops = &sr9700_ethtool_ops; + netdev->hard_header_len += SR_TX_OVERHEAD; + dev->hard_mtu = netdev->mtu + netdev->hard_header_len; + /* bulkin buffer is preferably not less than 3K */ + dev->rx_urb_size = 3072; + + mii = &dev->mii; + mii->dev = netdev; + mii->mdio_read = sr_mdio_read; + mii->mdio_write = sr_mdio_write; + mii->phy_id_mask = 0x1f; + mii->reg_num_mask = 0x1f; + + sr_write_reg(dev, NCR, NCR_RST); + udelay(20); + + /* read MAC + * After Chip Power on, the Chip will reload the MAC from + * EEPROM automatically to PAR. In case there is no EEPROM externally, + * a default MAC address is stored in PAR for making chip work properly. + */ + if (sr_read(dev, PAR, ETH_ALEN, netdev->dev_addr) < 0) { + netdev_err(netdev, "Error reading MAC address\n"); + ret = -ENODEV; + goto out; + } + + /* power up and reset phy */ + sr_write_reg(dev, PRR, PRR_PHY_RST); + /* at least 10ms, here 20ms for safe */ + mdelay(20); + sr_write_reg(dev, PRR, 0); + /* at least 1ms, here 2ms for reading right register */ + udelay(2 * 1000); + + /* receive broadcast packets */ + sr9700_set_multicast(netdev); + + sr_mdio_write(netdev, mii->phy_id, MII_BMCR, BMCR_RESET); + sr_mdio_write(netdev, mii->phy_id, MII_ADVERTISE, ADVERTISE_ALL | + ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP); + mii_nway_restart(mii); + +out: + return ret; +} + +static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb) +{ + struct sk_buff *sr_skb; + int len; + + /* skb content (packets) format : + * p0 p1 p2 ...... pm + * / \ + * / \ + * / \ + * / \ + * p0b0 p0b1 p0b2 p0b3 ...... 
p0b(n-4) p0b(n-3)...p0bn + * + * p0 : packet 0 + * p0b0 : packet 0 byte 0 + * + * b0: rx status + * b1: packet length (incl crc) low + * b2: packet length (incl crc) high + * b3..n-4: packet data + * bn-3..bn: ethernet packet crc + */ + if (unlikely(skb->len < SR_RX_OVERHEAD)) { + netdev_err(dev->net, "unexpected tiny rx frame\n"); + return 0; + } + + /* one skb may contains multiple packets */ + while (skb->len > SR_RX_OVERHEAD) { + if (skb->data[0] != 0x40) + return 0; + + /* ignore the CRC length */ + len = (skb->data[1] | (skb->data[2] << 8)) - 4; + + if (len > ETH_FRAME_LEN) + return 0; + + /* the last packet of current skb */ + if (skb->len == (len + SR_RX_OVERHEAD)) { + skb_pull(skb, 3); + skb->len = len; + skb_set_tail_pointer(skb, len); + skb->truesize = len + sizeof(struct sk_buff); + return 2; + } + + /* skb_clone is used for address align */ + sr_skb = skb_clone(skb, GFP_ATOMIC); + if (!sr_skb) + return 0; + + sr_skb->len = len; + sr_skb->data = skb->data + 3; + skb_set_tail_pointer(sr_skb, len); + sr_skb->truesize = len + sizeof(struct sk_buff); + usbnet_skb_return(dev, sr_skb); + + skb_pull(skb, len + SR_RX_OVERHEAD); + }; + + return 0; +} + +static struct sk_buff *sr9700_tx_fixup(struct usbnet *dev, struct sk_buff *skb, + gfp_t flags) +{ + int len; + + /* SR9700 can only send out one ethernet packet at once. + * + * b0 b1 b2 b3 ...... b(n-4) b(n-3)...bn + * + * b0: rx status + * b1: packet length (incl crc) low + * b2: packet length (incl crc) high + * b3..n-4: packet data + * bn-3..bn: ethernet packet crc + */ + + len = skb->len; + + if (skb_headroom(skb) < SR_TX_OVERHEAD) { + struct sk_buff *skb2; + + skb2 = skb_copy_expand(skb, SR_TX_OVERHEAD, 0, flags); + dev_kfree_skb_any(skb); + skb = skb2; + if (!skb) + return NULL; + } + + __skb_push(skb, SR_TX_OVERHEAD); + + /* usbnet adds padding if length is a multiple of packet size + * if so, adjust length value in header + */ + if ((skb->len % dev->maxpacket) == 0) + len++; + + skb->data[0] = len; + skb->data[1] = len >> 8; + + return skb; +} + +static void sr9700_status(struct usbnet *dev, struct urb *urb) +{ + int link; + u8 *buf; + + /* format: + b0: net status + b1: tx status 1 + b2: tx status 2 + b3: rx status + b4: rx overflow + b5: rx count + b6: tx count + b7: gpr + */ + + if (urb->actual_length < 8) + return; + + buf = urb->transfer_buffer; + + link = !!(buf[0] & 0x40); + if (netif_carrier_ok(dev->net) != link) { + usbnet_link_change(dev, link, 1); + netdev_dbg(dev->net, "Link Status is: %d\n", link); + } +} + +static int sr9700_link_reset(struct usbnet *dev) +{ + struct ethtool_cmd ecmd; + + mii_check_media(&dev->mii, 1, 1); + mii_ethtool_gset(&dev->mii, &ecmd); + + netdev_dbg(dev->net, "link_reset() speed: %d duplex: %d\n", + ecmd.speed, ecmd.duplex); + + return 0; +} + +static const struct driver_info sr9700_driver_info = { + .description = "CoreChip SR9700 USB Ethernet", + .flags = FLAG_ETHER, + .bind = sr9700_bind, + .rx_fixup = sr9700_rx_fixup, + .tx_fixup = sr9700_tx_fixup, + .status = sr9700_status, + .link_reset = sr9700_link_reset, + .reset = sr9700_link_reset, +}; + +static const struct usb_device_id products[] = { + { + USB_DEVICE(0x0fe6, 0x9700), /* SR9700 device */ + .driver_info = (unsigned long)&sr9700_driver_info, + }, + {}, /* END */ +}; + +MODULE_DEVICE_TABLE(usb, products); + +static struct usb_driver sr9700_usb_driver = { + .name = "sr9700", + .id_table = products, + .probe = usbnet_probe, + .disconnect = usbnet_disconnect, + .suspend = usbnet_suspend, + .resume = usbnet_resume, + 
.disable_hub_initiated_lpm = 1, +}; + +module_usb_driver(sr9700_usb_driver); + +MODULE_AUTHOR("liujl "); +MODULE_DESCRIPTION("SR9700 one chip USB 1.1 USB to Ethernet device from http://www.corechip-sz.com/"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/usb/sr9700.h b/drivers/net/usb/sr9700.h new file mode 100644 index 0000000..fd687c5 --- /dev/null +++ b/drivers/net/usb/sr9700.h @@ -0,0 +1,173 @@ +/* + * CoreChip-sz SR9700 one chip USB 1.1 Ethernet Devices + * + * Author : Liu Junliang + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + */ + +#ifndef _SR9700_H +#define _SR9700_H + +/* sr9700 spec. register table on Linux platform */ + +/* Network Control Reg */ +#define NCR 0x00 +#define NCR_RST (1 << 0) +#define NCR_LBK (3 << 1) +#define NCR_FDX (1 << 3) +#define NCR_WAKEEN (1 << 6) +/* Network Status Reg */ +#define NSR 0x01 +#define NSR_RXRDY (1 << 0) +#define NSR_RXOV (1 << 1) +#define NSR_TX1END (1 << 2) +#define NSR_TX2END (1 << 3) +#define NSR_TXFULL (1 << 4) +#define NSR_WAKEST (1 << 5) +#define NSR_LINKST (1 << 6) +#define NSR_SPEED (1 << 7) +/* Tx Control Reg */ +#define TCR 0x02 +#define TCR_CRC_DIS (1 << 1) +#define TCR_PAD_DIS (1 << 2) +#define TCR_LC_CARE (1 << 3) +#define TCR_CRS_CARE (1 << 4) +#define TCR_EXCECM (1 << 5) +#define TCR_LF_EN (1 << 6) +/* Tx Status Reg for Packet Index 1 */ +#define TSR1 0x03 +#define TSR1_EC (1 << 2) +#define TSR1_COL (1 << 3) +#define TSR1_LC (1 << 4) +#define TSR1_NC (1 << 5) +#define TSR1_LOC (1 << 6) +#define TSR1_TLF (1 << 7) +/* Tx Status Reg for Packet Index 2 */ +#define TSR2 0x04 +#define TSR2_EC (1 << 2) +#define TSR2_COL (1 << 3) +#define TSR2_LC (1 << 4) +#define TSR2_NC (1 << 5) +#define TSR2_LOC (1 << 6) +#define TSR2_TLF (1 << 7) +/* Rx Control Reg*/ +#define RCR 0x05 +#define RCR_RXEN (1 << 0) +#define RCR_PRMSC (1 << 1) +#define RCR_RUNT (1 << 2) +#define RCR_ALL (1 << 3) +#define RCR_DIS_CRC (1 << 4) +#define RCR_DIS_LONG (1 << 5) +/* Rx Status Reg */ +#define RSR 0x06 +#define RSR_AE (1 << 2) +#define RSR_MF (1 << 6) +#define RSR_RF (1 << 7) +/* Rx Overflow Counter Reg */ +#define ROCR 0x07 +#define ROCR_ROC (0x7F << 0) +#define ROCR_RXFU (1 << 7) +/* Back Pressure Threshold Reg */ +#define BPTR 0x08 +#define BPTR_JPT (0x0F << 0) +#define BPTR_BPHW (0x0F << 4) +/* Flow Control Threshold Reg */ +#define FCTR 0x09 +#define FCTR_LWOT (0x0F << 0) +#define FCTR_HWOT (0x0F << 4) +/* rx/tx Flow Control Reg */ +#define FCR 0x0A +#define FCR_FLCE (1 << 0) +#define FCR_BKPA (1 << 4) +#define FCR_TXPEN (1 << 5) +#define FCR_TXPF (1 << 6) +#define FCR_TXP0 (1 << 7) +/* Eeprom & Phy Control Reg */ +#define EPCR 0x0B +#define EPCR_ERRE (1 << 0) +#define EPCR_ERPRW (1 << 1) +#define EPCR_ERPRR (1 << 2) +#define EPCR_EPOS (1 << 3) +#define EPCR_WEP (1 << 4) +/* Eeprom & Phy Address Reg */ +#define EPAR 0x0C +#define EPAR_EROA (0x3F << 0) +#define EPAR_PHY_ADR_MASK (0x03 << 6) +#define EPAR_PHY_ADR (0x01 << 6) +/* Eeprom & Phy Data Reg */ +#define EPDR 0x0D /* 0x0D ~ 0x0E for Data Reg Low & High */ +/* Wakeup Control Reg */ +#define WCR 0x0F +#define WCR_MAGICST (1 << 0) +#define WCR_LINKST (1 << 2) +#define WCR_MAGICEN (1 << 3) +#define WCR_LINKEN (1 << 5) +/* Physical Address Reg */ +#define PAR 0x10 /* 0x10 ~ 0x15 6 bytes for PAR */ +/* Multicast Address Reg */ +#define MAR 0x16 /* 0x16 ~ 0x1D 8 bytes for MAR */ +/* 0x1e unused */ +/* Phy Reset Reg */ +#define PRR 0x1F +#define PRR_PHY_RST 
(1 << 0) +/* Tx sdram Write Pointer Address Low */ +#define TWPAL 0x20 +/* Tx sdram Write Pointer Address High */ +#define TWPAH 0x21 +/* Tx sdram Read Pointer Address Low */ +#define TRPAL 0x22 +/* Tx sdram Read Pointer Address High */ +#define TRPAH 0x23 +/* Rx sdram Write Pointer Address Low */ +#define RWPAL 0x24 +/* Rx sdram Write Pointer Address High */ +#define RWPAH 0x25 +/* Rx sdram Read Pointer Address Low */ +#define RRPAL 0x26 +/* Rx sdram Read Pointer Address High */ +#define RRPAH 0x27 +/* Vendor ID register */ +#define VID 0x28 /* 0x28 ~ 0x29 2 bytes for VID */ +/* Product ID register */ +#define PID 0x2A /* 0x2A ~ 0x2B 2 bytes for PID */ +/* CHIP Revision register */ +#define CHIPR 0x2C +/* 0x2D --> 0xEF unused */ +/* USB Device Address */ +#define USBDA 0xF0 +#define USBDA_USBFA (0x7F << 0) +/* RX packet Counter Reg */ +#define RXC 0xF1 +/* Tx packet Counter & USB Status Reg */ +#define TXC_USBS 0xF2 +#define TXC_USBS_TXC0 (1 << 0) +#define TXC_USBS_TXC1 (1 << 1) +#define TXC_USBS_TXC2 (1 << 2) +#define TXC_USBS_EP1RDY (1 << 5) +#define TXC_USBS_SUSFLAG (1 << 6) +#define TXC_USBS_RXFAULT (1 << 7) +/* USB Control register */ +#define USBC 0xF4 +#define USBC_EP3NAK (1 << 4) +#define USBC_EP3ACK (1 << 5) + +/* Register access commands and flags */ +#define SR_RD_REGS 0x00 +#define SR_WR_REGS 0x01 +#define SR_WR_REG 0x03 +#define SR_REQ_RD_REG (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE) +#define SR_REQ_WR_REG (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE) + +/* parameters */ +#define SR_SHARE_TIMEOUT 1000 +#define SR_EEPROM_LEN 256 +#define SR_MCAST_SIZE 8 +#define SR_MCAST_ADDR_FLAG 0x80 +#define SR_MCAST_MAX 64 +#define SR_TX_OVERHEAD 2 /* 2bytes header */ +#define SR_RX_OVERHEAD 7 /* 3bytes header + 4crc tail */ + +#endif /* _SR9700_H */ -- cgit v0.10.2 From 7367d0b573d149550d2ae25c402984b98f8f422e Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 1 Sep 2013 11:51:23 -0700 Subject: drivers/net: Convert uses of compare_ether_addr to ether_addr_equal Use the new bool function ether_addr_equal to add some clarity and reduce the likelihood for misuse of compare_ether_addr for sorting. Done via cocci script: (and a little typing) $ cat compare_ether_addr.cocci @@ expression a,b; @@ - !compare_ether_addr(a, b) + ether_addr_equal(a, b) @@ expression a,b; @@ - compare_ether_addr(a, b) + !ether_addr_equal(a, b) @@ expression a,b; @@ - !ether_addr_equal(a, b) == 0 + ether_addr_equal(a, b) @@ expression a,b; @@ - !ether_addr_equal(a, b) != 0 + !ether_addr_equal(a, b) @@ expression a,b; @@ - ether_addr_equal(a, b) == 0 + !ether_addr_equal(a, b) @@ expression a,b; @@ - ether_addr_equal(a, b) != 0 + ether_addr_equal(a, b) @@ expression a,b; @@ - !!ether_addr_equal(a, b) + ether_addr_equal(a, b) Signed-off-by: Joe Perches Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 1f5166a..59a62bb 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -488,8 +488,8 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, * source pruning. 
*/ if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) && - !(compare_ether_addr(adapter->netdev->dev_addr, - eth_hdr(skb)->h_source))) { + ether_addr_equal(adapter->netdev->dev_addr, + eth_hdr(skb)->h_source)) { dev_kfree_skb_irq(skb); goto next_desc; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c index a9bc651..330d9a8 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c @@ -1652,14 +1652,14 @@ int qlcnic_sriov_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) if (!is_valid_ether_addr(mac) || vf >= num_vfs) return -EINVAL; - if (!compare_ether_addr(adapter->mac_addr, mac)) { + if (ether_addr_equal(adapter->mac_addr, mac)) { netdev_err(netdev, "MAC address is already in use by the PF\n"); return -EINVAL; } for (i = 0; i < num_vfs; i++) { vf_info = &sriov->vf_info[i]; - if (!compare_ether_addr(vf_info->vp->mac, mac)) { + if (ether_addr_equal(vf_info->vp->mac, mac)) { netdev_err(netdev, "MAC address is already in use by VF %d\n", i); diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 5d2a719..949076f 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -544,7 +544,7 @@ static inline bool filter_packet(struct net_device *dev, void *buf) /* Filter out packets that aren't for us. */ if (!(dev->flags & IFF_PROMISC) && !is_multicast_ether_addr(buf) && - compare_ether_addr(dev->dev_addr, buf) != 0) + !ether_addr_equal(dev->dev_addr, buf)) return true; return false; diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 606eba2..3a81315 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -323,7 +323,7 @@ next_desc: /* Never use the same address on both ends of the link, even * if the buggy firmware told us to. 
*/ - if (!compare_ether_addr(dev->net->dev_addr, default_modem_addr)) + if (ether_addr_equal(dev->net->dev_addr, default_modem_addr)) eth_hw_addr_random(dev->net); /* make MAC addr easily distinguishable from an IP header */ diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 6b560f3..8f6d6c1 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -478,7 +478,7 @@ static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan, struct vxlan_fdb *f; hlist_for_each_entry_rcu(f, head, hlist) { - if (compare_ether_addr(mac, f->eth_addr) == 0) + if (ether_addr_equal(mac, f->eth_addr)) return f; } @@ -1049,8 +1049,7 @@ static void vxlan_rcv(struct vxlan_sock *vs, skb->protocol = eth_type_trans(skb, vxlan->dev); /* Ignore packet loops (and multicast echo) */ - if (compare_ether_addr(eth_hdr(skb)->h_source, - vxlan->dev->dev_addr) == 0) + if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr)) goto drop; /* Re-examine inner Ethernet packet */ @@ -1320,7 +1319,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb) if (n) { bool diff; - diff = compare_ether_addr(eth_hdr(skb)->h_dest, n->ha) != 0; + diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha); if (diff) { memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest, dev->addr_len); diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c index 4684dd9..e935f61 100644 --- a/drivers/net/wireless/ath/carl9170/rx.c +++ b/drivers/net/wireless/ath/carl9170/rx.c @@ -602,8 +602,8 @@ static void carl9170_ba_check(struct ar9170 *ar, void *data, unsigned int len) if (bar->start_seq_num == entry_bar->start_seq_num && TID_CHECK(bar->control, entry_bar->control) && - compare_ether_addr(bar->ra, entry_bar->ta) == 0 && - compare_ether_addr(bar->ta, entry_bar->ra) == 0) { + ether_addr_equal(bar->ra, entry_bar->ta) && + ether_addr_equal(bar->ta, entry_bar->ra)) { struct ieee80211_tx_info *tx_info; tx_info = IEEE80211_SKB_CB(entry_skb); diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c index b16521e..712eea9 100644 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c @@ -566,10 +566,10 @@ static void rt2x00lib_rxdone_check_ba(struct rt2x00_dev *rt2x00dev, #undef TID_CHECK - if (compare_ether_addr(ba->ra, entry->ta)) + if (!ether_addr_equal(ba->ra, entry->ta)) continue; - if (compare_ether_addr(ba->ta, entry->ra)) + if (!ether_addr_equal(ba->ta, entry->ra)) continue; /* Mark BAR since we received the according BA */ diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c index 7651f5a..8bb4a9a 100644 --- a/drivers/net/wireless/rtlwifi/base.c +++ b/drivers/net/wireless/rtlwifi/base.c @@ -1304,7 +1304,7 @@ void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb) return; /* and only beacons from the associated BSSID, please */ - if (compare_ether_addr(hdr->addr3, rtlpriv->mac80211.bssid)) + if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid)) return; rtlpriv->link_info.bcn_rx_inperiod++; diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c index f646b75..0d81f76 100644 --- a/drivers/net/wireless/rtlwifi/ps.c +++ b/drivers/net/wireless/rtlwifi/ps.c @@ -923,7 +923,7 @@ void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len) return; /* and only beacons from the associated BSSID, please */ - if (compare_ether_addr(hdr->addr3, rtlpriv->mac80211.bssid)) + if (!ether_addr_equal(hdr->addr3, 
rtlpriv->mac80211.bssid)) return; /* check if this really is a beacon */ diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c index a8871d6..68685a8 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c @@ -305,13 +305,14 @@ static void _rtl88ee_translate_rx_signal_stuff(struct ieee80211_hw *hw, psaddr = ieee80211_get_SA(hdr); memcpy(pstatus->psaddr, psaddr, ETH_ALEN); - addr = (!compare_ether_addr(mac->bssid, (ufc & IEEE80211_FCTL_TODS) ? - hdr->addr1 : (ufc & IEEE80211_FCTL_FROMDS) ? - hdr->addr2 : hdr->addr3)); + addr = ether_addr_equal(mac->bssid, + (ufc & IEEE80211_FCTL_TODS) ? hdr->addr1 : + (ufc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 : + hdr->addr3); match_bssid = ((IEEE80211_FTYPE_CTL != type) && (!pstatus->hwerror) && (!pstatus->crc) && (!pstatus->icv)) && addr; - addr = (!compare_ether_addr(praddr, rtlefuse->dev_addr)); + addr = ether_addr_equal(praddr, rtlefuse->dev_addr); packet_toself = match_bssid && addr; if (ieee80211_is_beacon(fc)) diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c index c72758d..bcd82a1 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c @@ -255,16 +255,16 @@ static void _rtl8723ae_translate_rx_signal_stuff(struct ieee80211_hw *hw, type = WLAN_FC_GET_TYPE(fc); praddr = hdr->addr1; - packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) && - (!compare_ether_addr(mac->bssid, - (le16_to_cpu(fc) & IEEE80211_FCTL_TODS) ? - hdr->addr1 : (le16_to_cpu(fc) & - IEEE80211_FCTL_FROMDS) ? - hdr->addr2 : hdr->addr3)) && (!pstatus->hwerror) && - (!pstatus->crc) && (!pstatus->icv)); - - packet_toself = packet_matchbssid && - (!compare_ether_addr(praddr, rtlefuse->dev_addr)); + packet_matchbssid = + ((IEEE80211_FTYPE_CTL != type) && + ether_addr_equal(mac->bssid, + (le16_to_cpu(fc) & IEEE80211_FCTL_TODS) ? hdr->addr1 : + (le16_to_cpu(fc) & IEEE80211_FCTL_FROMDS) ? hdr->addr2 : + hdr->addr3) && + (!pstatus->hwerror) && (!pstatus->crc) && (!pstatus->icv)); + + packet_toself = (packet_matchbssid && + ether_addr_equal(praddr, rtlefuse->dev_addr)); if (ieee80211_is_beacon(fc)) packet_beacon = true; -- cgit v0.10.2 From 951fd874c3b014c4abf38a8e588d4687b98fedb4 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 1 Sep 2013 13:11:55 -0700 Subject: llc: Use normal etherdevice.h tests Convert the llc_ static inlines to the equivalents from etherdevice.h and remove the llc_ static inline functions. llc_mac_null -> is_zero_ether_addr llc_mac_multicast -> is_multicast_ether_addr llc_mac_match -> ether_addr_equal Signed-off-by: Joe Perches Signed-off-by: David S. Miller diff --git a/include/net/llc_if.h b/include/net/llc_if.h index b595a00..f0cb909 100644 --- a/include/net/llc_if.h +++ b/include/net/llc_if.h @@ -62,36 +62,6 @@ #define LLC_STATUS_CONFLICT 7 /* disconnect conn */ #define LLC_STATUS_RESET_DONE 8 /* */ -/** - * llc_mac_null - determines if a address is a null mac address - * @mac: Mac address to test if null. - * - * Determines if a given address is a null mac address. Returns 0 if the - * address is not a null mac, 1 if the address is a null mac. - */ -static inline int llc_mac_null(const u8 *mac) -{ - return is_zero_ether_addr(mac); -} - -static inline int llc_mac_multicast(const u8 *mac) -{ - return is_multicast_ether_addr(mac); -} -/** - * llc_mac_match - determines if two mac addresses are the same - * @mac1: First mac address to compare. 
- * @mac2: Second mac address to compare. - * - * Determines if two given mac address are the same. Returns 0 if there - * is not a complete match up to len, 1 if a complete match up to len is - * found. - */ -static inline int llc_mac_match(const u8 *mac1, const u8 *mac2) -{ - return !compare_ether_addr(mac1, mac2); -} - extern int llc_establish_connection(struct sock *sk, u8 *lmac, u8 *dmac, u8 dsap); extern int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb); diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 48aaa89..6cba486 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -321,12 +321,12 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) if (llc->dev) { if (!addr->sllc_arphrd) addr->sllc_arphrd = llc->dev->type; - if (llc_mac_null(addr->sllc_mac)) + if (is_zero_ether_addr(addr->sllc_mac)) memcpy(addr->sllc_mac, llc->dev->dev_addr, IFHWADDRLEN); if (addr->sllc_arphrd != llc->dev->type || - !llc_mac_match(addr->sllc_mac, - llc->dev->dev_addr)) { + !ether_addr_equal(addr->sllc_mac, + llc->dev->dev_addr)) { rc = -EINVAL; llc->dev = NULL; } diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c index 0d0d416..cd87241 100644 --- a/net/llc/llc_conn.c +++ b/net/llc/llc_conn.c @@ -478,8 +478,8 @@ static inline bool llc_estab_match(const struct llc_sap *sap, return llc->laddr.lsap == laddr->lsap && llc->daddr.lsap == daddr->lsap && - llc_mac_match(llc->laddr.mac, laddr->mac) && - llc_mac_match(llc->daddr.mac, daddr->mac); + ether_addr_equal(llc->laddr.mac, laddr->mac) && + ether_addr_equal(llc->daddr.mac, daddr->mac); } /** @@ -550,7 +550,7 @@ static inline bool llc_listener_match(const struct llc_sap *sap, return sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN && llc->laddr.lsap == laddr->lsap && - llc_mac_match(llc->laddr.mac, laddr->mac); + ether_addr_equal(llc->laddr.mac, laddr->mac); } static struct sock *__llc_lookup_listener(struct llc_sap *sap, diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c index 78be45c..e585069 100644 --- a/net/llc/llc_sap.c +++ b/net/llc/llc_sap.c @@ -302,7 +302,7 @@ static inline bool llc_dgram_match(const struct llc_sap *sap, return sk->sk_type == SOCK_DGRAM && llc->laddr.lsap == laddr->lsap && - llc_mac_match(llc->laddr.mac, laddr->mac); + ether_addr_equal(llc->laddr.mac, laddr->mac); } /** @@ -425,7 +425,7 @@ void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb) llc_pdu_decode_da(skb, laddr.mac); llc_pdu_decode_dsap(skb, &laddr.lsap); - if (llc_mac_multicast(laddr.mac)) { + if (is_multicast_ether_addr(laddr.mac)) { llc_sap_mcast(sap, &laddr, skb); kfree_skb(skb); } else { -- cgit v0.10.2 From c3923b7a3dae842017d1206f43e23cd9ba1148b9 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 1 Sep 2013 15:45:08 -0700 Subject: batman: Remove reference to compare_ether_addr This function is being removed, rename the reference. Signed-off-by: Joe Perches Acked-by: Antonio Quartulli Signed-off-by: David S. 
Miller diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index 5d00f23..2467552 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -254,7 +254,7 @@ static inline void batadv_dbg(int type __always_unused, /* returns 1 if they are the same ethernet addr * - * note: can't use compare_ether_addr() as it requires aligned memory + * note: can't use ether_addr_equal() as it requires aligned memory */ static inline int batadv_compare_eth(const void *data1, const void *data2) { -- cgit v0.10.2 From 1372a298ea4f2772655b2e69caa634f132f1d019 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 1 Sep 2013 15:48:27 -0700 Subject: wireless: scan: Remove comment to compare_ether_addr This function is being removed, so remove the reference to it. Signed-off-by: Joe Perches Signed-off-by: David S. Miller diff --git a/net/wireless/scan.c b/net/wireless/scan.c index ad1e406..eeb7148 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -465,10 +465,6 @@ static int cmp_bss(struct cfg80211_bss *a, } } - /* - * we can't use compare_ether_addr here since we need a < > operator. - * The binary return value of compare_ether_addr isn't enough - */ r = memcmp(a->bssid, b->bssid, sizeof(a->bssid)); if (r) return r; -- cgit v0.10.2 From 061ba049abc604e5690198bfa2c47c71c2ba725c Mon Sep 17 00:00:00 2001 From: Chen Gang Date: Mon, 2 Sep 2013 10:20:02 +0800 Subject: drivers: net: ethernet: 8390: Kconfig: add H8300H_AKI3068NET and H8300H_H8MAX dependancy for NE_H8300 Currently only H8300H_AKI3068NET and H8300H_H8MAX define default I/O base and IRQ values for the NE_H8300 driver. Hence builds for other H8300H platforms will fail as per below. Since H8300H does not support multi platform builds, we simply limit building the driver to those two platforms specifically. The release error: drivers/net/ethernet/8390/ne-h8300.c: In function 'init_dev': drivers/net/ethernet/8390/ne-h8300.c:117:23: error: 'h8300_ne_base' undeclared (first use in this function) drivers/net/ethernet/8390/ne-h8300.c:117:23: note: each undeclared identifier is reported only once for each function it appears in drivers/net/ethernet/8390/ne-h8300.c:117:23: error: bit-field '' width not an integer constant drivers/net/ethernet/8390/ne-h8300.c:119:20: error: 'h8300_ne_irq' undeclared (first use in this function) drivers/net/ethernet/8390/ne-h8300.c: In function 'init_module': drivers/net/ethernet/8390/ne-h8300.c:647:21: error: 'h8300_ne_base' undeclared (first use in this function) drivers/net/ethernet/8390/ne-h8300.c:648:15: error: 'h8300_ne_irq' undeclared (first use in this function) drivers/net/ethernet/8390/ne-h8300.c:661:4: warning: format '%x' expects argument of type 'unsigned int', but argument 2 has type 'long unsigned int' [-Wformat] Signed-off-by: Chen Gang Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig index a5f91e1..becef25 100644 --- a/drivers/net/ethernet/8390/Kconfig +++ b/drivers/net/ethernet/8390/Kconfig @@ -148,7 +148,7 @@ config PCMCIA_PCNET config NE_H8300 tristate "NE2000 compatible support for H8/300" - depends on H8300 + depends on H8300H_AKI3068NET || H8300H_H8MAX ---help--- Say Y here if you want to use the NE2000 compatible controller on the Renesas H8/300 processor. 
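For context only, a hedged sketch of what the quoted build errors mean in code terms: the driver's probe path consumes two board-level constants that only the AKI3068NET and H8MAX board support provides, so any other H8/300H configuration leaves them undeclared and the driver cannot compile. The identifiers below come from the error log above; the address, IRQ value and helper name are placeholders, not the real board parameters:

    #include <linux/netdevice.h>

    /* Hypothetical stand-ins for the per-board constants named in the
     * error log; on real hardware they come from the AKI3068NET/H8MAX
     * board support code, and the values below are made up. */
    #define h8300_ne_base   0x200200UL      /* placeholder NE2000 I/O window */
    #define h8300_ne_irq    5               /* placeholder interrupt line */

    /* Roughly what the driver's init_dev() relies on; without board
     * definitions like the ones above these symbols do not exist and the
     * build breaks exactly as shown in the commit message. */
    static void ne_h8300_fill_resources_sketch(struct net_device *dev)
    {
            dev->base_addr = h8300_ne_base;
            dev->irq       = h8300_ne_irq;
    }

Restricting the Kconfig dependency to those two boards simply keeps the driver out of configurations where these constants would never be defined.
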
-- cgit v0.10.2 From bc35383256563580dec30c547b90a0ce8a269bb0 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Mon, 2 Sep 2013 17:06:52 +0900 Subject: net: emac: use platform_{get,set}_drvdata() Use the wrapper functions for getting and setting the driver data using platform_device instead of using dev_{get,set}_drvdata() with &pdev->dev, so we can directly pass a struct platform_device. This is a purely cosmetic change. Signed-off-by: Jingoo Han Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 2d3b064..6b5c722 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -2312,7 +2312,7 @@ static int emac_check_deps(struct emac_instance *dev, if (deps[i].ofdev == NULL) continue; if (deps[i].drvdata == NULL) - deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev); + deps[i].drvdata = platform_get_drvdata(deps[i].ofdev); if (deps[i].drvdata != NULL) there++; } @@ -2799,9 +2799,9 @@ static int emac_probe(struct platform_device *ofdev) /* display more info about what's missing ? */ goto err_reg_unmap; } - dev->mal = dev_get_drvdata(&dev->mal_dev->dev); + dev->mal = platform_get_drvdata(dev->mal_dev); if (dev->mdio_dev != NULL) - dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev); + dev->mdio_instance = platform_get_drvdata(dev->mdio_dev); /* Register with MAL */ dev->commac.ops = &emac_commac_ops; @@ -2892,7 +2892,7 @@ static int emac_probe(struct platform_device *ofdev) * fully initialized */ wmb(); - dev_set_drvdata(&ofdev->dev, dev); + platform_set_drvdata(ofdev, dev); /* There's a new kid in town ! Let's tell everybody */ wake_up_all(&emac_probe_wait); @@ -2951,7 +2951,7 @@ static int emac_probe(struct platform_device *ofdev) static int emac_remove(struct platform_device *ofdev) { - struct emac_instance *dev = dev_get_drvdata(&ofdev->dev); + struct emac_instance *dev = platform_get_drvdata(ofdev); DBG(dev, "remove" NL); -- cgit v0.10.2 From e04e37a88dcd674df131582b84ab2dc488397d53 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Mon, 2 Sep 2013 17:08:44 +0900 Subject: net: sunhme: use platform_{get,set}_drvdata() Use the wrapper functions for getting and setting the driver data using platform_device instead of using dev_{get,set}_drvdata() with &pdev->dev, so we can directly pass a struct platform_device. This is a purely cosmetic change. Signed-off-by: Jingoo Han Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index c67e683..227c499 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -2798,7 +2798,7 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe) goto err_out_free_coherent; } - dev_set_drvdata(&op->dev, hp); + platform_set_drvdata(op, hp); if (qfe_slot != -1) printk(KERN_INFO "%s: Quattro HME slot %d (SBUS) 10/100baseT Ethernet ", -- cgit v0.10.2 From 2c0c4fbe55ec6f798ce7289f10ff190ccadeb258 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Mon, 2 Sep 2013 17:10:09 +0900 Subject: net: mdio-octeon: use platform_{get,set}_drvdata() Use the wrapper functions for getting and setting the driver data using platform_device instead of using dev_{get,set}_drvdata() with &pdev->dev, so we can directly pass a struct platform_device. This is a purely cosmetic change. Signed-off-by: Jingoo Han Signed-off-by: David S. 
Miller diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c index 7f18f80..6aee02e 100644 --- a/drivers/net/phy/mdio-octeon.c +++ b/drivers/net/phy/mdio-octeon.c @@ -244,7 +244,7 @@ static int octeon_mdiobus_remove(struct platform_device *pdev) struct octeon_mdiobus *bus; union cvmx_smix_en smi_en; - bus = dev_get_drvdata(&pdev->dev); + bus = platform_get_drvdata(pdev); mdiobus_unregister(bus->mii_bus); mdiobus_free(bus->mii_bus); -- cgit v0.10.2 From 1895499230b94c9c043969b312fb7898f815ace0 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Mon, 2 Sep 2013 17:11:53 +0900 Subject: net: tulip: use pci_{get,set}_drvdata() Use the wrapper functions for getting and setting the driver data using pci_dev instead of using dev_{get,set}_drvdata() with &pdev->dev, so we can directly pass a struct pci_dev. This is a purely cosmetic change. Signed-off-by: Jingoo Han Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index 4c83003..2db6c57 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -2319,7 +2319,7 @@ static void de4x5_pci_remove(struct pci_dev *pdev) struct net_device *dev; u_long iobase; - dev = dev_get_drvdata(&pdev->dev); + dev = pci_get_drvdata(pdev); iobase = dev->base_addr; unregister_netdev (dev); -- cgit v0.10.2 From 521a87cdaece333cb2b89dd6393613119af5a431 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Mon, 2 Sep 2013 17:12:41 +0900 Subject: net: sunhme: use pci_{get,set}_drvdata() Use the wrapper functions for getting and setting the driver data using pci_dev instead of using dev_{get,set}_drvdata() with &pdev->dev, so we can directly pass a struct pci_dev. This is a purely cosmetic change. Signed-off-by: Jingoo Han Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 227c499..e37b587 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -3111,7 +3111,7 @@ static int happy_meal_pci_probe(struct pci_dev *pdev, goto err_out_iounmap; } - dev_set_drvdata(&pdev->dev, hp); + pci_set_drvdata(pdev, hp); if (!qfe_slot) { struct pci_dev *qpdev = qp->quattro_dev; @@ -3159,7 +3159,7 @@ err_out: static void happy_meal_pci_remove(struct pci_dev *pdev) { - struct happy_meal *hp = dev_get_drvdata(&pdev->dev); + struct happy_meal *hp = pci_get_drvdata(pdev); struct net_device *net_dev = hp->dev; unregister_netdev(net_dev); @@ -3171,7 +3171,7 @@ static void happy_meal_pci_remove(struct pci_dev *pdev) free_netdev(net_dev); - dev_set_drvdata(&pdev->dev, NULL); + pci_set_drvdata(pdev, NULL); } static DEFINE_PCI_DEVICE_TABLE(happymeal_pci_ids) = { -- cgit v0.10.2 From 094afe7d556428a2ce2df0f6a4b333f7ba4d74d5 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Mon, 2 Sep 2013 16:40:56 +0800 Subject: vhost_net: make vhost_zerocopy_signal_used() return void None of its caller use its return value, so let it return void. Signed-off-by: Jason Wang Signed-off-by: David S. Miller diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 969a859..280ee66 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -276,8 +276,8 @@ static void copy_iovec_hdr(const struct iovec *from, struct iovec *to, * of used idx. Once lower device DMA done contiguously, we will signal KVM * guest used idx. 
*/ -static int vhost_zerocopy_signal_used(struct vhost_net *net, - struct vhost_virtqueue *vq) +static void vhost_zerocopy_signal_used(struct vhost_net *net, + struct vhost_virtqueue *vq) { struct vhost_net_virtqueue *nvq = container_of(vq, struct vhost_net_virtqueue, vq); @@ -297,7 +297,6 @@ static int vhost_zerocopy_signal_used(struct vhost_net *net, } if (j) nvq->done_idx = i; - return j; } static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success) -- cgit v0.10.2 From c92112aed3f0b14fdd2dbd9f192cce1af22c0e1c Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Mon, 2 Sep 2013 16:40:57 +0800 Subject: vhost_net: use vhost_add_used_and_signal_n() in vhost_zerocopy_signal_used() We tend to batch the used adding and signaling in vhost_zerocopy_callback(), which may result in more than 100 used buffers being updated in vhost_zerocopy_signal_used() in some cases. So switch to vhost_add_used_and_signal_n() to avoid multiple calls to vhost_add_used_and_signal(), which means far fewer used-index updates and memory barriers. A 2% performance improvement was seen on the netperf TCP_RR test. Signed-off-by: Jason Wang Signed-off-by: David S. Miller diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 280ee66..8a6dd0d 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -281,7 +281,7 @@ static void vhost_zerocopy_signal_used(struct vhost_net *net, { struct vhost_net_virtqueue *nvq = container_of(vq, struct vhost_net_virtqueue, vq); - int i; + int i, add; int j = 0; for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) { @@ -289,14 +289,17 @@ static void vhost_zerocopy_signal_used(struct vhost_net *net, vhost_net_tx_err(net); if (VHOST_DMA_IS_DONE(vq->heads[i].len)) { vq->heads[i].len = VHOST_DMA_CLEAR_LEN; - vhost_add_used_and_signal(vq->dev, vq, - vq->heads[i].id, 0); ++j; } else break; } - if (j) - nvq->done_idx = i; + while (j) { + add = min(UIO_MAXIOV - nvq->done_idx, j); + vhost_add_used_and_signal_n(vq->dev, vq, + &vq->heads[nvq->done_idx], add); + nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV; + j -= add; + } } static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success) -- cgit v0.10.2 From c49e4e573be86acd36c747511ea5dc76be122206 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Mon, 2 Sep 2013 16:40:58 +0800 Subject: vhost: switch to use vhost_add_used_n() Let vhost_add_used() use vhost_add_used_n() to reduce the code duplication. To avoid the overhead brought by __copy_to_user(), we use __put_user() when only one used element needs to be added. Signed-off-by: Jason Wang Signed-off-by: David S. Miller diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 448efe0..9a9502a 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -1332,48 +1332,9 @@ EXPORT_SYMBOL_GPL(vhost_discard_vq_desc); * want to notify the guest, using eventfd. */ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len) { - struct vring_used_elem __user *used; + struct vring_used_elem heads = { head, len }; - /* The virtqueue contains a ring of used buffers. Get a pointer to the - * next entry in that used ring. */ - used = &vq->used->ring[vq->last_used_idx % vq->num]; - if (__put_user(head, &used->id)) { - vq_err(vq, "Failed to write used id"); - return -EFAULT; - } - if (__put_user(len, &used->len)) { - vq_err(vq, "Failed to write used len"); - return -EFAULT; - } - /* Make sure buffer is written before we update index.
*/ - smp_wmb(); - if (__put_user(vq->last_used_idx + 1, &vq->used->idx)) { - vq_err(vq, "Failed to increment used idx"); - return -EFAULT; - } - if (unlikely(vq->log_used)) { - /* Make sure data is seen before log. */ - smp_wmb(); - /* Log used ring entry write. */ - log_write(vq->log_base, - vq->log_addr + - ((void __user *)used - (void __user *)vq->used), - sizeof *used); - /* Log used index update. */ - log_write(vq->log_base, - vq->log_addr + offsetof(struct vring_used, idx), - sizeof vq->used->idx); - if (vq->log_ctx) - eventfd_signal(vq->log_ctx, 1); - } - vq->last_used_idx++; - /* If the driver never bothers to signal in a very long while, - * used index might wrap around. If that happens, invalidate - * signalled_used index we stored. TODO: make sure driver - * signals at least once in 2^16 and remove this. */ - if (unlikely(vq->last_used_idx == vq->signalled_used)) - vq->signalled_used_valid = false; - return 0; + return vhost_add_used_n(vq, &heads, 1); } EXPORT_SYMBOL_GPL(vhost_add_used); @@ -1387,7 +1348,16 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq, start = vq->last_used_idx % vq->num; used = vq->used->ring + start; - if (__copy_to_user(used, heads, count * sizeof *used)) { + if (count == 1) { + if (__put_user(heads[0].id, &used->id)) { + vq_err(vq, "Failed to write used id"); + return -EFAULT; + } + if (__put_user(heads[0].len, &used->len)) { + vq_err(vq, "Failed to write used len"); + return -EFAULT; + } + } else if (__copy_to_user(used, heads, count * sizeof *used)) { vq_err(vq, "Failed to write used"); return -EFAULT; } -- cgit v0.10.2 From ce21a02913dc79205485637b6e0927a4c800c4a4 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Mon, 2 Sep 2013 16:40:59 +0800 Subject: vhost_net: determine whether or not to use zerocopy at one time Currently, even if the packet length is smaller than VHOST_GOODCOPY_LEN, if upend_idx != done_idx we still set zcopy_used to true and rollback this choice later. This could be avoided by determining zerocopy once by checking all conditions at one time before. Signed-off-by: Jason Wang Signed-off-by: David S. 
Miller diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 8a6dd0d..3f89dea 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -404,43 +404,36 @@ static void handle_tx(struct vhost_net *net) iov_length(nvq->hdr, s), hdr_size); break; } - zcopy_used = zcopy && (len >= VHOST_GOODCOPY_LEN || - nvq->upend_idx != nvq->done_idx); + + zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN + && (nvq->upend_idx + 1) % UIO_MAXIOV != + nvq->done_idx + && vhost_net_tx_select_zcopy(net); /* use msg_control to pass vhost zerocopy ubuf info to skb */ if (zcopy_used) { + struct ubuf_info *ubuf; + ubuf = nvq->ubuf_info + nvq->upend_idx; + vq->heads[nvq->upend_idx].id = head; - if (!vhost_net_tx_select_zcopy(net) || - len < VHOST_GOODCOPY_LEN) { - /* copy don't need to wait for DMA done */ - vq->heads[nvq->upend_idx].len = - VHOST_DMA_DONE_LEN; - msg.msg_control = NULL; - msg.msg_controllen = 0; - ubufs = NULL; - } else { - struct ubuf_info *ubuf; - ubuf = nvq->ubuf_info + nvq->upend_idx; - - vq->heads[nvq->upend_idx].len = - VHOST_DMA_IN_PROGRESS; - ubuf->callback = vhost_zerocopy_callback; - ubuf->ctx = nvq->ubufs; - ubuf->desc = nvq->upend_idx; - msg.msg_control = ubuf; - msg.msg_controllen = sizeof(ubuf); - ubufs = nvq->ubufs; - kref_get(&ubufs->kref); - } + vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS; + ubuf->callback = vhost_zerocopy_callback; + ubuf->ctx = nvq->ubufs; + ubuf->desc = nvq->upend_idx; + msg.msg_control = ubuf; + msg.msg_controllen = sizeof(ubuf); + ubufs = nvq->ubufs; + kref_get(&ubufs->kref); nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV; - } else + } else { msg.msg_control = NULL; + ubufs = NULL; + } /* TODO: Check specific error and bomb out unless ENOBUFS? */ err = sock->ops->sendmsg(NULL, sock, &msg, len); if (unlikely(err < 0)) { if (zcopy_used) { - if (ubufs) - vhost_net_ubuf_put(ubufs); + vhost_net_ubuf_put(ubufs); nvq->upend_idx = ((unsigned)nvq->upend_idx - 1) % UIO_MAXIOV; } -- cgit v0.10.2 From 19c73b3e08d16ee923f3962df4abf6205127896a Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Mon, 2 Sep 2013 16:41:00 +0800 Subject: vhost_net: poll vhost queue after marking DMA is done We used to poll vhost queue before making DMA is done, this is racy if vhost thread were waked up before marking DMA is done which can result the signal to be missed. Fix this by always polling the vhost thread before DMA is done. Signed-off-by: Jason Wang Signed-off-by: David S. Miller diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 3f89dea..8e9dc55 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -308,6 +308,11 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success) struct vhost_virtqueue *vq = ubufs->vq; int cnt = atomic_read(&ubufs->kref.refcount); + /* set len to mark this desc buffers done DMA */ + vq->heads[ubuf->desc].len = success ? + VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN; + vhost_net_ubuf_put(ubufs); + /* * Trigger polling thread if guest stopped submitting new buffers: * in this case, the refcount after decrement will eventually reach 1 @@ -318,10 +323,6 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success) */ if (cnt <= 2 || !(cnt % 16)) vhost_poll_queue(&vq->poll); - /* set len to mark this desc buffers done DMA */ - vq->heads[ubuf->desc].len = success ? 
- VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN; - vhost_net_ubuf_put(ubufs); } /* Expects to be always run from workqueue - which acts as -- cgit v0.10.2 From f7c6be404d8fa52c54ff931390aab01e5c7654d6 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Mon, 2 Sep 2013 16:41:01 +0800 Subject: vhost_net: correctly limit the max pending buffers As Michael pointed out, we used to limit the max pending DMAs to get better cache utilization. But it was not done correctly, since the check was only done when there were no new buffers submitted from the guest. A guest can easily exceed the limit by continuously sending packets. So this patch moves the check into the main loop. Tests show about a 5%-10% improvement in per-CPU throughput for guest TX. Signed-off-by: Jason Wang Signed-off-by: David S. Miller diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 8e9dc55..831eb4f 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -363,6 +363,13 @@ static void handle_tx(struct vhost_net *net) if (zcopy) vhost_zerocopy_signal_used(net, vq); + /* If more outstanding DMAs, queue the work. + * Handle upend_idx wrap around + */ + if (unlikely((nvq->upend_idx + vq->num - VHOST_MAX_PEND) + % UIO_MAXIOV == nvq->done_idx)) + break; + head = vhost_get_vq_desc(&net->dev, vq, vq->iov, ARRAY_SIZE(vq->iov), &out, &in, @@ -372,17 +379,6 @@ static void handle_tx(struct vhost_net *net) break; /* Nothing new? Wait for eventfd to tell us they refilled. */ if (head == vq->num) { - int num_pends; - - /* If more outstanding DMAs, queue the work. - * Handle upend_idx wrap around - */ - num_pends = likely(nvq->upend_idx >= nvq->done_idx) ? - (nvq->upend_idx - nvq->done_idx) : - (nvq->upend_idx + UIO_MAXIOV - - nvq->done_idx); - if (unlikely(num_pends > VHOST_MAX_PEND)) - break; if (unlikely(vhost_enable_notify(&net->dev, vq))) { vhost_disable_notify(&net->dev, vq); continue; -- cgit v0.10.2 From 6c8d23f78646b20bdbdad476d015b6594ac8ff1c Mon Sep 17 00:00:00 2001 From: "nikolay@redhat.com" Date: Mon, 2 Sep 2013 13:51:38 +0200 Subject: bonding: simplify and fix peer notification This patch removes a use of bond->lock for mutual exclusion, which will later allow an easier migration of the users of this functionality to RCU. We use RTNL as the synchronizing mechanism since it is always held when send_peer_notif is set and when it is decremented from the notifier function. We can also drop some locking and fix the leak of the send_peer_notif counter. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S.
Miller diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index c50679f..05e638b 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -828,7 +828,6 @@ static bool bond_should_notify_peers(struct bonding *bond) test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state)) return false; - bond->send_peer_notif--; return true; } @@ -2259,12 +2258,8 @@ re_arm: read_unlock(&bond->lock); if (should_notify_peers) { - if (!rtnl_trylock()) { - read_lock(&bond->lock); - bond->send_peer_notif++; - read_unlock(&bond->lock); + if (!rtnl_trylock()) return; - } call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev); rtnl_unlock(); } @@ -2876,12 +2871,8 @@ re_arm: read_unlock(&bond->lock); if (should_notify_peers) { - if (!rtnl_trylock()) { - read_lock(&bond->lock); - bond->send_peer_notif++; - read_unlock(&bond->lock); + if (!rtnl_trylock()) return; - } call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev); rtnl_unlock(); } @@ -2916,6 +2907,10 @@ static int bond_master_netdev_event(unsigned long event, case NETDEV_REGISTER: bond_create_proc_entry(event_bond); break; + case NETDEV_NOTIFY_PEERS: + if (event_bond->send_peer_notif) + event_bond->send_peer_notif--; + break; default: break; } @@ -3213,11 +3208,8 @@ static int bond_close(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); - write_lock_bh(&bond->lock); - bond->send_peer_notif = 0; - write_unlock_bh(&bond->lock); - bond_work_cancel_all(bond); + bond->send_peer_notif = 0; if (bond_is_lb(bond)) { /* Must be called only after all * slaves have been released -- cgit v0.10.2 From ee8487c0e1aed52b534f9bf31d3934af4c50bf33 Mon Sep 17 00:00:00 2001 From: "nikolay@redhat.com" Date: Mon, 2 Sep 2013 13:51:39 +0200 Subject: bonding: trivial: remove outdated comment and braces We don't have to release all slaves when closing the bond dev, so remove the outdated comment and the braces around the left single statement. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 05e638b..486e041 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3210,12 +3210,8 @@ static int bond_close(struct net_device *bond_dev) bond_work_cancel_all(bond); bond->send_peer_notif = 0; - if (bond_is_lb(bond)) { - /* Must be called only after all - * slaves have been released - */ + if (bond_is_lb(bond)) bond_alb_deinitialize(bond); - } bond->recv_probe = NULL; return 0; -- cgit v0.10.2 From c509316b5b33664b08b2a40d09534e0bd3c6b648 Mon Sep 17 00:00:00 2001 From: "nikolay@redhat.com" Date: Mon, 2 Sep 2013 13:51:40 +0200 Subject: bonding: simplify bond_3ad_update_lacp_rate and use RTNL for sync We can drop the use of bond->lock for mutual exclusion in bond_3ad_update_lacp_rate and use RTNL in the sysfs store function instead. This way we'll prevent races with mode change and interface up/down as well as simplify update_lacp_rate by removing the check for port->slave because it'll always be initialized (done while enslaving with RTNL). This change will also help in the future removal of reader bond->lock from bond_enslave. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 9010265..0d8f427 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -2514,17 +2514,13 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond, */ void bond_3ad_update_lacp_rate(struct bonding *bond) { - struct slave *slave; struct port *port = NULL; + struct slave *slave; int lacp_fast; - write_lock_bh(&bond->lock); lacp_fast = bond->params.lacp_fast; - bond_for_each_slave(bond, slave) { port = &(SLAVE_AD_INFO(slave).port); - if (port->slave == NULL) - continue; __get_state_machine_lock(port); if (lacp_fast) port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT; @@ -2532,6 +2528,4 @@ void bond_3ad_update_lacp_rate(struct bonding *bond) port->actor_oper_port_state &= ~AD_STATE_LACP_TIMEOUT; __release_state_machine_lock(port); } - - write_unlock_bh(&bond->lock); } diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 0f539de..ce46776 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -852,8 +852,11 @@ static ssize_t bonding_store_lacp(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { - int new_value, ret = count; struct bonding *bond = to_bond(d); + int new_value, ret = count; + + if (!rtnl_trylock()) + return restart_syscall(); if (bond->dev->flags & IFF_UP) { pr_err("%s: Unable to update LACP rate because interface is up.\n", @@ -883,6 +886,8 @@ static ssize_t bonding_store_lacp(struct device *d, ret = -EINVAL; } out: + rtnl_unlock(); + return ret; } static DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR, -- cgit v0.10.2 From 9b7b165ac1adf5169f0ee03d107423ce7f5805d9 Mon Sep 17 00:00:00 2001 From: "nikolay@redhat.com" Date: Mon, 2 Sep 2013 13:51:41 +0200 Subject: bonding: drop read_lock in bond_fix_features We're protected by RTNL so nothing can happen and we can safely drop the read bond->lock. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 486e041..c5ebdc5 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1084,18 +1084,16 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev) /*---------------------------------- IOCTL ----------------------------------*/ static netdev_features_t bond_fix_features(struct net_device *dev, - netdev_features_t features) + netdev_features_t features) { - struct slave *slave; struct bonding *bond = netdev_priv(dev); netdev_features_t mask; - - read_lock(&bond->lock); + struct slave *slave; if (list_empty(&bond->slave_list)) { /* Disable adding VLANs to empty bond. But why? --mq */ features |= NETIF_F_VLAN_CHALLENGED; - goto out; + return features; } mask = features; @@ -1109,8 +1107,6 @@ static netdev_features_t bond_fix_features(struct net_device *dev, } features = netdev_add_tso_features(features, mask); -out: - read_unlock(&bond->lock); return features; } -- cgit v0.10.2 From c48268611a3df84a9250d2fc34ad671cdae43440 Mon Sep 17 00:00:00 2001 From: "nikolay@redhat.com" Date: Mon, 2 Sep 2013 13:51:42 +0200 Subject: bonding: drop read_lock in bond_compute_features bond_compute_features is always called with RTNL held, so we can safely drop the read bond->lock. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index c5ebdc5..39e5b1c 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1116,15 +1116,13 @@ static netdev_features_t bond_fix_features(struct net_device *dev, static void bond_compute_features(struct bonding *bond) { - struct slave *slave; - struct net_device *bond_dev = bond->dev; + unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE; netdev_features_t vlan_features = BOND_VLAN_FEATURES; unsigned short max_hard_header_len = ETH_HLEN; unsigned int gso_max_size = GSO_MAX_SIZE; + struct net_device *bond_dev = bond->dev; u16 gso_max_segs = GSO_MAX_SEGS; - unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE; - - read_lock(&bond->lock); + struct slave *slave; if (list_empty(&bond->slave_list)) goto done; @@ -1150,8 +1148,6 @@ done: flags = bond_dev->priv_flags & ~IFF_XMIT_DST_RELEASE; bond_dev->priv_flags = flags | dst_release_flag; - read_unlock(&bond->lock); - netdev_change_features(bond_dev); } -- cgit v0.10.2 From 8b7ed2d91d6afb0b55ba75f94b66e51f70783a46 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Mon, 2 Sep 2013 15:34:54 +0200 Subject: iptunnels: remove net arg from iptunnel_xmit() This argument is not used, let's remove it. Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 8f6d6c1..e25c97d 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1528,8 +1528,7 @@ int vxlan_xmit_skb(struct net *net, struct vxlan_sock *vs, if (err) return err; - return iptunnel_xmit(net, rt, skb, src, dst, - IPPROTO_UDP, tos, ttl, df); + return iptunnel_xmit(rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df); } EXPORT_SYMBOL_GPL(vxlan_xmit_skb); diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 0ce316b..94fe8fd 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -146,8 +146,7 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph, } int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto); -int iptunnel_xmit(struct net *net, struct rtable *rt, - struct sk_buff *skb, +int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb, __be32 src, __be32 dst, __u8 proto, __u8 tos, __u8 ttl, __be16 df); diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 830de3f..0a6cf0e 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -654,8 +654,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, } } - err = iptunnel_xmit(tunnel->net, rt, skb, - fl4.saddr, fl4.daddr, protocol, + err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, protocol, ip_tunnel_ecn_encap(tos, inner_iph, skb), ttl, df); iptunnel_xmit_stats(err, &dev->stats, dev->tstats); diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 850525b..e820458 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -46,8 +46,7 @@ #include #include -int iptunnel_xmit(struct net *net, struct rtable *rt, - struct sk_buff *skb, +int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb, __be32 src, __be32 dst, __u8 proto, __u8 tos, __u8 ttl, __be16 df) { diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index f18f842..1d1458a 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -888,8 +888,8 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, ttl = iph6->hop_limit; tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6)); - err = iptunnel_xmit(dev_net(dev), rt, skb, fl4.saddr, fl4.daddr, - IPPROTO_IPV6, tos, 
ttl, df); + err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos, + ttl, df); iptunnel_xmit_stats(err, &dev->stats, dev->tstats); return NETDEV_TX_OK; diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c index 21d5073..9b3713e 100644 --- a/net/openvswitch/vport-gre.c +++ b/net/openvswitch/vport-gre.c @@ -176,7 +176,7 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb) skb->local_df = 1; - return iptunnel_xmit(net, rt, skb, fl.saddr, + return iptunnel_xmit(rt, skb, fl.saddr, OVS_CB(skb)->tun_key->ipv4_dst, IPPROTO_GRE, OVS_CB(skb)->tun_key->ipv4_tos, OVS_CB(skb)->tun_key->ipv4_ttl, df); -- cgit v0.10.2 From 117961878cc1386923cfddcdd9016b777827c8dd Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Mon, 2 Sep 2013 15:34:55 +0200 Subject: vxlan: remove net arg from vxlan[6]_xmit_skb() This argument is not used, let's remove it. Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index e25c97d..a334bfb 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1380,7 +1380,7 @@ static int handle_offloads(struct sk_buff *skb) } #if IS_ENABLED(CONFIG_IPV6) -static int vxlan6_xmit_skb(struct net *net, struct vxlan_sock *vs, +static int vxlan6_xmit_skb(struct vxlan_sock *vs, struct dst_entry *dst, struct sk_buff *skb, struct net_device *dev, struct in6_addr *saddr, struct in6_addr *daddr, __u8 prio, __u8 ttl, @@ -1475,7 +1475,7 @@ static int vxlan6_xmit_skb(struct net *net, struct vxlan_sock *vs, } #endif -int vxlan_xmit_skb(struct net *net, struct vxlan_sock *vs, +int vxlan_xmit_skb(struct vxlan_sock *vs, struct rtable *rt, struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port, __be32 vni) @@ -1651,7 +1651,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, tos = ip_tunnel_ecn_encap(tos, old_iph, skb); ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); - err = vxlan_xmit_skb(dev_net(dev), vxlan->vn_sock, rt, skb, + err = vxlan_xmit_skb(vxlan->vn_sock, rt, skb, fl4.saddr, dst->sin.sin_addr.s_addr, tos, ttl, df, src_port, dst_port, htonl(vni << 8)); @@ -1703,7 +1703,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, ttl = ttl ? 
: ip6_dst_hoplimit(ndst); - err = vxlan6_xmit_skb(dev_net(dev), vxlan->vn_sock, ndst, skb, + err = vxlan6_xmit_skb(vxlan->vn_sock, ndst, skb, dev, &fl6.saddr, &fl6.daddr, 0, ttl, src_port, dst_port, htonl(vni << 8)); #endif diff --git a/include/net/vxlan.h b/include/net/vxlan.h index d2b88ca..e09c40b 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -29,7 +29,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, void vxlan_sock_release(struct vxlan_sock *vs); -int vxlan_xmit_skb(struct net *net, struct vxlan_sock *vs, +int vxlan_xmit_skb(struct vxlan_sock *vs, struct rtable *rt, struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port, __be32 vni); diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c index a006024..a481c03 100644 --- a/net/openvswitch/vport-vxlan.c +++ b/net/openvswitch/vport-vxlan.c @@ -176,7 +176,7 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb) inet_get_local_port_range(&port_min, &port_max); src_port = vxlan_src_port(port_min, port_max, skb); - err = vxlan_xmit_skb(net, vxlan_port->vs, rt, skb, + err = vxlan_xmit_skb(vxlan_port->vs, rt, skb, fl.saddr, OVS_CB(skb)->tun_key->ipv4_dst, OVS_CB(skb)->tun_key->ipv4_tos, OVS_CB(skb)->tun_key->ipv4_ttl, df, -- cgit v0.10.2 From 8b27f27797cac5ed9b2f3e63dac89a7ae70e70a7 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Mon, 2 Sep 2013 15:34:56 +0200 Subject: skb: allow skb_scrub_packet() to be used by tunnels This function was only used when a packet was sent to another netns. Now, it can also be used after tunnel encapsulation or decapsulation. Only skb_orphan() should not be done when a packet is not crossing netns. Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 6f1330a..2ddb48d 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2392,7 +2392,7 @@ extern void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len); extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); -extern void skb_scrub_packet(struct sk_buff *skb); +extern void skb_scrub_packet(struct sk_buff *skb, bool xnet); extern struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); diff --git a/net/core/dev.c b/net/core/dev.c index 6fbb0c9..07684e8 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1697,7 +1697,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) * call skb_scrub_packet() after it to clear pkt_type _after_ calling * eth_type_trans(). */ - skb_scrub_packet(skb); + skb_scrub_packet(skb, true); return netif_rx(skb); } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 2c3d0f5..d81cff1 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3500,17 +3500,22 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, EXPORT_SYMBOL(skb_try_coalesce); /** - * skb_scrub_packet - scrub an skb before sending it to another netns + * skb_scrub_packet - scrub an skb * * @skb: buffer to clean - * - * skb_scrub_packet can be used to clean an skb before injecting it in - * another namespace. We have to clear all information in the skb that - * could impact namespace isolation. + * @xnet: packet is crossing netns + * + * skb_scrub_packet can be used after encapsulating or decapsulting a packet + * into/from a tunnel. Some information have to be cleared during these + * operations. 
+ * skb_scrub_packet can also be used to clean a skb before injecting it in + * another namespace (@xnet == true). We have to clear all information in the + * skb that could impact namespace isolation. */ -void skb_scrub_packet(struct sk_buff *skb) +void skb_scrub_packet(struct sk_buff *skb, bool xnet) { - skb_orphan(skb); + if (xnet) + skb_orphan(skb); skb->tstamp.tv64 = 0; skb->pkt_type = PACKET_HOST; skb->skb_iif = 0; diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 0a6cf0e..b0e74e1 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -462,7 +462,7 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, } if (!net_eq(tunnel->net, dev_net(tunnel->dev))) - skb_scrub_packet(skb); + skb_scrub_packet(skb, true); gro_cells_receive(&tunnel->gro_cells, skb); return 0; @@ -615,7 +615,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, } if (!net_eq(tunnel->net, dev_net(dev))) - skb_scrub_packet(skb); + skb_scrub_packet(skb, true); if (tunnel->err_count > 0) { if (time_before(jiffies, diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index d6e00a3..72372ac 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -830,7 +830,7 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, tstats->rx_bytes += skb->len; if (!net_eq(t->net, dev_net(t->dev))) - skb_scrub_packet(skb); + skb_scrub_packet(skb, true); netif_rx(skb); @@ -1002,7 +1002,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, } if (!net_eq(t->net, dev_net(dev))) - skb_scrub_packet(skb); + skb_scrub_packet(skb, true); /* * Okay, now see if we can stuff it in the buffer as-is. diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 1d1458a..b2e44f4 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -622,7 +622,7 @@ static int ipip6_rcv(struct sk_buff *skb) tstats->rx_bytes += skb->len; if (!net_eq(tunnel->net, dev_net(tunnel->dev))) - skb_scrub_packet(skb); + skb_scrub_packet(skb, true); netif_rx(skb); return 0; @@ -861,7 +861,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, } if (!net_eq(tunnel->net, dev_net(dev))) - skb_scrub_packet(skb); + skb_scrub_packet(skb, true); /* * Okay, now see if we can stuff it in the buffer as-is. -- cgit v0.10.2 From 963a88b31ddbbe99f38502239b1a46601773d217 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Mon, 2 Sep 2013 15:34:57 +0200 Subject: tunnels: harmonize cleanup done on skb on xmit path The goal of this patch is to harmonize cleanup done on a skbuff on xmit path. Before this patch, behaviors were different depending of the tunnel type. Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index a334bfb..ebda3a1 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1397,6 +1397,8 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs, skb->encapsulation = 1; } + skb_scrub_packet(skb, false); + min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len + VXLAN_HLEN + sizeof(struct ipv6hdr) + (vlan_tx_tag_present(skb) ? 
VLAN_HLEN : 0); @@ -1432,7 +1434,6 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs, memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED); - skb_dst_drop(skb); skb_dst_set(skb, dst); if (!skb_is_gso(skb) && !(dst->dev->features & NETIF_F_IPV6_CSUM)) { @@ -1528,7 +1529,8 @@ int vxlan_xmit_skb(struct vxlan_sock *vs, if (err) return err; - return iptunnel_xmit(rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df); + return iptunnel_xmit(rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df, + false); } EXPORT_SYMBOL_GPL(vxlan_xmit_skb); diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index 2265b0b..6d1549c 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h @@ -75,7 +75,6 @@ static inline void ip6tunnel_xmit(struct sk_buff *skb, struct net_device *dev) struct net_device_stats *stats = &dev->stats; int pkt_len, err; - nf_reset(skb); pkt_len = skb->len; err = ip6_local_out(skb); diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 94fe8fd..a0a4a10 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -148,7 +148,7 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph, int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto); int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb, __be32 src, __be32 dst, __u8 proto, - __u8 tos, __u8 ttl, __be16 df); + __u8 tos, __u8 ttl, __be16 df, bool xnet); static inline void iptunnel_xmit_stats(int err, struct net_device_stats *err_stats, diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index b0e74e1..88d7d7d 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -614,9 +614,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, goto tx_error; } - if (!net_eq(tunnel->net, dev_net(dev))) - skb_scrub_packet(skb, true); - if (tunnel->err_count > 0) { if (time_before(jiffies, tunnel->err_time + IPTUNNEL_ERR_TIMEO)) { @@ -655,7 +652,8 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, } err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, protocol, - ip_tunnel_ecn_encap(tos, inner_iph, skb), ttl, df); + ip_tunnel_ecn_encap(tos, inner_iph, skb), ttl, df, + !net_eq(tunnel->net, dev_net(dev))); iptunnel_xmit_stats(err, &dev->stats, dev->tstats); return; diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index e820458..d6c856b 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -48,16 +48,15 @@ int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb, __be32 src, __be32 dst, __u8 proto, - __u8 tos, __u8 ttl, __be16 df) + __u8 tos, __u8 ttl, __be16 df, bool xnet) { int pkt_len = skb->len; struct iphdr *iph; int err; - nf_reset(skb); - secpath_reset(skb); + skb_scrub_packet(skb, xnet); + skb->rxhash = 0; - skb_dst_drop(skb); skb_dst_set(skb, &rt->dst); memset(IPCB(skb), 0, sizeof(*IPCB(skb))); diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index f2d0a42..f179ff1 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -694,6 +694,8 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb, tunnel->err_count = 0; } + skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev))); + max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len; if (skb_headroom(skb) < max_headroom || skb_shared(skb) || @@ -710,8 +712,6 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb, skb = new_skb; } - skb_dst_drop(skb); - if (fl6->flowi6_mark) { skb_dst_set(skb, dst); ndst = NULL; diff --git 
a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 72372ac..ecbcdbd 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1001,8 +1001,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, goto tx_err_dst_release; } - if (!net_eq(t->net, dev_net(dev))) - skb_scrub_packet(skb, true); + skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev))); /* * Okay, now see if we can stuff it in the buffer as-is. @@ -1021,7 +1020,6 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, consume_skb(skb); skb = new_skb; } - skb_dst_drop(skb); if (fl6->flowi6_mark) { skb_dst_set(skb, dst); ndst = NULL; diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index b2e44f4..82b425b 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -860,9 +860,6 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, tunnel->err_count = 0; } - if (!net_eq(tunnel->net, dev_net(dev))) - skb_scrub_packet(skb, true); - /* * Okay, now see if we can stuff it in the buffer as-is. */ @@ -889,7 +886,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6)); err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos, - ttl, df); + ttl, df, !net_eq(tunnel->net, dev_net(dev))); iptunnel_xmit_stats(err, &dev->stats, dev->tstats); return NETDEV_TX_OK; diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c index 9b3713e..c99dea5 100644 --- a/net/openvswitch/vport-gre.c +++ b/net/openvswitch/vport-gre.c @@ -179,7 +179,7 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb) return iptunnel_xmit(rt, skb, fl.saddr, OVS_CB(skb)->tun_key->ipv4_dst, IPPROTO_GRE, OVS_CB(skb)->tun_key->ipv4_tos, - OVS_CB(skb)->tun_key->ipv4_ttl, df); + OVS_CB(skb)->tun_key->ipv4_ttl, df, false); err_free_rt: ip_rt_put(rt); error: -- cgit v0.10.2 From ea23192e8e577dfc51e0f4fc5ca113af334edff9 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Mon, 2 Sep 2013 15:34:58 +0200 Subject: tunnels: harmonize cleanup done on skb on rx path The goal of this patch is to harmonize cleanup done on a skbuff on rx path. Before this patch, behaviors were different depending of the tunnel type. Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller diff --git a/include/net/dst.h b/include/net/dst.h index 1f8fd10..3bc4865 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -311,11 +311,13 @@ static inline void skb_dst_force(struct sk_buff *skb) * __skb_tunnel_rx - prepare skb for rx reinsert * @skb: buffer * @dev: tunnel device + * @net: netns for packet i/o * * After decapsulation, packet is going to re-enter (netif_rx()) our stack, * so make some cleanups. (no accounting done) */ -static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev) +static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev, + struct net *net) { skb->dev = dev; @@ -327,8 +329,7 @@ static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev) if (!skb->l4_rxhash) skb->rxhash = 0; skb_set_queue_mapping(skb, 0); - skb_dst_drop(skb); - nf_reset(skb); + skb_scrub_packet(skb, !net_eq(net, dev_net(dev))); } /** @@ -340,12 +341,13 @@ static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev) * so make some cleanups, and perform accounting. * Note: this accounting is not SMP safe. 
*/ -static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev) +static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev, + struct net *net) { /* TODO : stats should be SMP safe */ dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len; - __skb_tunnel_rx(skb, dev); + __skb_tunnel_rx(skb, dev, net); } /* Children define the path of the packet through the diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 88d7d7d..ac9fabe 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -461,8 +461,7 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, skb->dev = tunnel->dev; } - if (!net_eq(tunnel->net, dev_net(tunnel->dev))) - skb_scrub_packet(skb, true); + skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev))); gro_cells_receive(&tunnel->gro_cells, skb); return 0; diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index bacc0bc..9ae54b0 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -2067,9 +2067,8 @@ static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb, skb_reset_network_header(skb); skb->protocol = htons(ETH_P_IP); skb->ip_summed = CHECKSUM_NONE; - skb->pkt_type = PACKET_HOST; - skb_tunnel_rx(skb, reg_dev); + skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev)); netif_rx(skb); diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index f179ff1..db992a3 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -509,8 +509,6 @@ static int ip6gre_rcv(struct sk_buff *skb) goto drop; } - secpath_reset(skb); - skb->protocol = gre_proto; /* WCCP version 1 and 2 protocol decoding. * - Change protocol to IP @@ -525,7 +523,6 @@ static int ip6gre_rcv(struct sk_buff *skb) skb->mac_header = skb->network_header; __pskb_pull(skb, offset); skb_postpull_rcsum(skb, skb_transport_header(skb), offset); - skb->pkt_type = PACKET_HOST; if (((flags&GRE_CSUM) && csum) || (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) { @@ -557,7 +554,7 @@ static int ip6gre_rcv(struct sk_buff *skb) skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); } - __skb_tunnel_rx(skb, tunnel->dev); + __skb_tunnel_rx(skb, tunnel->dev, tunnel->net); skb_reset_network_header(skb); diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index ecbcdbd..55999d9 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -802,14 +802,12 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, rcu_read_unlock(); goto discard; } - secpath_reset(skb); skb->mac_header = skb->network_header; skb_reset_network_header(skb); skb->protocol = htons(protocol); - skb->pkt_type = PACKET_HOST; memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); - __skb_tunnel_rx(skb, t->dev); + __skb_tunnel_rx(skb, t->dev, t->net); err = dscp_ecn_decapsulate(t, ipv6h, skb); if (unlikely(err)) { @@ -829,9 +827,6 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, tstats->rx_packets++; tstats->rx_bytes += skb->len; - if (!net_eq(t->net, dev_net(t->dev))) - skb_scrub_packet(skb, true); - netif_rx(skb); rcu_read_unlock(); diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index a60a84e..f365310 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -672,9 +672,8 @@ static int pim6_rcv(struct sk_buff *skb) skb_reset_network_header(skb); skb->protocol = htons(ETH_P_IPV6); skb->ip_summed = CHECKSUM_NONE; - skb->pkt_type = PACKET_HOST; - skb_tunnel_rx(skb, reg_dev); + skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev)); netif_rx(skb); diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 82b425b..19abcc9 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -581,12 +581,10 @@ static 
int ipip6_rcv(struct sk_buff *skb) tunnel->parms.iph.protocol != 0) goto out; - secpath_reset(skb); skb->mac_header = skb->network_header; skb_reset_network_header(skb); IPCB(skb)->flags = 0; skb->protocol = htons(ETH_P_IPV6); - skb->pkt_type = PACKET_HOST; if (tunnel->dev->priv_flags & IFF_ISATAP) { if (!isatap_chksrc(skb, iph, tunnel)) { @@ -603,7 +601,7 @@ static int ipip6_rcv(struct sk_buff *skb) } } - __skb_tunnel_rx(skb, tunnel->dev); + __skb_tunnel_rx(skb, tunnel->dev, tunnel->net); err = IP_ECN_decapsulate(iph, skb); if (unlikely(err)) { @@ -621,8 +619,6 @@ static int ipip6_rcv(struct sk_buff *skb) tstats->rx_packets++; tstats->rx_bytes += skb->len; - if (!net_eq(tunnel->net, dev_net(tunnel->dev))) - skb_scrub_packet(skb, true); netif_rx(skb); return 0; -- cgit v0.10.2 From 82476b316084e6826a9dd339d1dad892a598af9a Mon Sep 17 00:00:00 2001 From: Veaceslav Falico Date: Mon, 2 Sep 2013 16:26:51 +0200 Subject: net: correctly interlink lower/upper devices Currently we're linking upper devices to lower ones, which results in upside-down relationship: upper devices seeing lower devices via its upper lists. Fix this by correctly linking lower devices to the upper ones. CC: "David S. Miller" CC: Eric Dumazet CC: Jiri Pirko CC: Alexander Duyck CC: Cong Wang Signed-off-by: Veaceslav Falico Signed-off-by: David S. Miller diff --git a/net/core/dev.c b/net/core/dev.c index 07684e8..5c713f2 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4679,8 +4679,8 @@ static int __netdev_upper_dev_link(struct net_device *dev, * versa, and don't forget the devices itself. All of these * links are non-neighbours. */ - list_for_each_entry(i, &upper_dev->upper_dev_list, list) { - list_for_each_entry(j, &dev->lower_dev_list, list) { + list_for_each_entry(i, &dev->lower_dev_list, list) { + list_for_each_entry(j, &upper_dev->upper_dev_list, list) { ret = __netdev_adjacent_dev_link(i->dev, j->dev); if (ret) goto rollback_mesh; -- cgit v0.10.2 From 104a43edb264321a4d41850e98153b4fa8a9ef42 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 2 Sep 2013 11:42:28 -0700 Subject: cnic: Use CHIP_NUM macros from bnx2x.h This eliminates duplication and ensures that all bnx2x chips will be supported. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 4f8a535..3dadc81 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -1184,6 +1184,7 @@ error: static int cnic_alloc_bnx2x_context(struct cnic_dev *dev) { struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); int ctx_blk_size = cp->ethdev->ctx_blk_size; int total_mem, blks, i; @@ -1201,7 +1202,7 @@ static int cnic_alloc_bnx2x_context(struct cnic_dev *dev) cp->ctx_blks = blks; cp->ctx_blk_size = ctx_blk_size; - if (!BNX2X_CHIP_IS_57710(cp->chip_id)) + if (!CHIP_IS_E1(bp)) cp->ctx_align = 0; else cp->ctx_align = ctx_blk_size; @@ -1231,6 +1232,7 @@ static int cnic_alloc_bnx2x_context(struct cnic_dev *dev) static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) { struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); struct cnic_eth_dev *ethdev = cp->ethdev; u32 start_cid = ethdev->starting_cid; int i, j, n, ret, pages; @@ -1240,7 +1242,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) cp->iscsi_start_cid = start_cid; cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ; - if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { + if (BNX2X_CHIP_IS_E2_PLUS(bp)) { cp->max_cid_space += dev->max_fcoe_conn; cp->fcoe_init_cid = ethdev->fcoe_init_cid; if (!cp->fcoe_init_cid) @@ -1288,7 +1290,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) if (ret) goto error; - if (CNIC_SUPPORTS_FCOE(cp)) { + if (CNIC_SUPPORTS_FCOE(bp)) { ret = cnic_alloc_kcq(dev, &cp->kcq2, true); if (ret) goto error; @@ -1679,6 +1681,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[], u32 num) { struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); struct iscsi_kwqe_conn_offload1 *req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0]; struct iscsi_kwqe_conn_offload2 *req2 = @@ -1745,7 +1748,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[], XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T; ictx->xstorm_st_context.common.ethernet.reserved_vlan_type = ETH_P_8021Q; - if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) && + if (BNX2X_CHIP_IS_E2_PLUS(bp) && cp->port_mode == CHIP_2_PORT_MODE) { port = 0; @@ -2716,7 +2719,7 @@ static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev, static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev, struct kwqe *wqes[], u32 num_wqes) { - struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); int i, work, ret; u32 opcode; struct kwqe *kwqe; @@ -2724,7 +2727,7 @@ static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev, if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) return -EAGAIN; /* bnx2 is down */ - if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) + if (!BNX2X_CHIP_IS_E2_PLUS(bp)) return -EINVAL; for (i = 0; i < num_wqes; ) { @@ -4902,6 +4905,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev, struct client_init_ramrod_data *data) { struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); struct cnic_uio_dev *udev = cp->udev; union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring; dma_addr_t buf_map, ring_map = udev->l2_ring_map; @@ -4930,7 +4934,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev, start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS; start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); - if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) + if (BNX2X_CHIP_IS_E2_PLUS(bp)) pbd_e2->parsing_data = 
(UNICAST_ADDRESS << ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT); else @@ -4967,6 +4971,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev, struct client_init_ramrod_data *data) { struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); struct cnic_uio_dev *udev = cp->udev; struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring + BNX2_PAGE_SIZE); @@ -4975,7 +4980,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev, struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; int i; u32 cli = cp->ethdev->iscsi_l2_client_id; - int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli); + int cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli); u32 val; dma_addr_t ring_map = udev->l2_ring_map; @@ -5040,7 +5045,7 @@ static void cnic_init_bnx2x_kcq(struct cnic_dev *dev) CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0); cp->kcq1.sw_prod_idx = 0; - if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { + if (BNX2X_CHIP_IS_E2_PLUS(bp)) { struct host_hc_status_block_e2 *sb = cp->status_blk.gen; cp->kcq1.hw_prod_idx_ptr = @@ -5056,7 +5061,7 @@ static void cnic_init_bnx2x_kcq(struct cnic_dev *dev) &sb->sb.running_index[SM_RX_ID]; } - if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { + if (BNX2X_CHIP_IS_E2_PLUS(bp)) { struct host_hc_status_block_e2 *sb = cp->status_blk.gen; cp->kcq2.io_addr = BAR_USTRORM_INTMEM + @@ -5091,7 +5096,7 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev) if (ret) return -ENOMEM; - if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { + if (BNX2X_CHIP_IS_E2_PLUS(bp)) { ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn, cp->fcoe_start_cid, 0); @@ -5173,10 +5178,10 @@ static void cnic_init_rings(struct cnic_dev *dev) rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT; barrier(); - cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli); + cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli); off = BAR_USTRORM_INTMEM + - (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? + (BNX2X_CHIP_IS_E2_PLUS(bp) ? 
USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) : USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli)); @@ -5365,7 +5370,7 @@ static void cnic_stop_bnx2x_hw(struct cnic_dev *dev) cnic_free_irq(dev); - if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { + if (BNX2X_CHIP_IS_E2_PLUS(bp)) { idx_off = offsetof(struct hc_status_block_e2, index_values) + (hc_index * sizeof(u16)); @@ -5556,7 +5561,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev) if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)) cdev->max_iscsi_conn = ethdev->max_iscsi_conn; - if (CNIC_SUPPORTS_FCOE(cp)) { + if (CNIC_SUPPORTS_FCOE(bp)) { cdev->max_fcoe_conn = ethdev->max_fcoe_conn; cdev->max_fcoe_exchanges = ethdev->max_fcoe_exchanges; } @@ -5576,7 +5581,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev) cp->stop_cm = cnic_cm_stop_bnx2x_hw; cp->enable_int = cnic_enable_bnx2x_int; cp->disable_int_sync = cnic_disable_bnx2x_int_sync; - if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { + if (BNX2X_CHIP_IS_E2_PLUS(bp)) { cp->ack_int = cnic_ack_bnx2x_e2_msix; cp->arm_int = cnic_arm_bnx2x_e2_msix; } else { diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h index e7a2474..519f587 100644 --- a/drivers/net/ethernet/broadcom/cnic.h +++ b/drivers/net/ethernet/broadcom/cnic.h @@ -364,47 +364,7 @@ struct bnx2x_bd_chain_next { #define BNX2X_FCOE_L5_CID_BASE MAX_ISCSI_TBL_SZ -#define BNX2X_CHIP_NUM_57710 0x164e -#define BNX2X_CHIP_NUM_57711 0x164f -#define BNX2X_CHIP_NUM_57711E 0x1650 -#define BNX2X_CHIP_NUM_57712 0x1662 -#define BNX2X_CHIP_NUM_57712E 0x1663 -#define BNX2X_CHIP_NUM_57713 0x1651 -#define BNX2X_CHIP_NUM_57713E 0x1652 -#define BNX2X_CHIP_NUM_57800 0x168a -#define BNX2X_CHIP_NUM_57810 0x168e -#define BNX2X_CHIP_NUM_57840 0x168d - -#define BNX2X_CHIP_NUM(x) (x >> 16) -#define BNX2X_CHIP_IS_57710(x) \ - (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57710) -#define BNX2X_CHIP_IS_57711(x) \ - (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711) -#define BNX2X_CHIP_IS_57711E(x) \ - (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711E) -#define BNX2X_CHIP_IS_E1H(x) \ - (BNX2X_CHIP_IS_57711(x) || BNX2X_CHIP_IS_57711E(x)) -#define BNX2X_CHIP_IS_57712(x) \ - (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57712) -#define BNX2X_CHIP_IS_57712E(x) \ - (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57712E) -#define BNX2X_CHIP_IS_57713(x) \ - (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713) -#define BNX2X_CHIP_IS_57713E(x) \ - (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713E) -#define BNX2X_CHIP_IS_57800(x) \ - (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57800) -#define BNX2X_CHIP_IS_57810(x) \ - (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57810) -#define BNX2X_CHIP_IS_57840(x) \ - (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57840) -#define BNX2X_CHIP_IS_E2(x) \ - (BNX2X_CHIP_IS_57712(x) || BNX2X_CHIP_IS_57712E(x) || \ - BNX2X_CHIP_IS_57713(x) || BNX2X_CHIP_IS_57713E(x)) -#define BNX2X_CHIP_IS_E3(x) \ - (BNX2X_CHIP_IS_57800(x) || BNX2X_CHIP_IS_57810(x) || \ - BNX2X_CHIP_IS_57840(x)) -#define BNX2X_CHIP_IS_E2_PLUS(x) (BNX2X_CHIP_IS_E2(x) || BNX2X_CHIP_IS_E3(x)) +#define BNX2X_CHIP_IS_E2_PLUS(bp) (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) #define BNX2X_RX_DESC_CNT (BNX2_PAGE_SIZE / \ sizeof(struct eth_rx_bd)) @@ -441,8 +401,6 @@ struct bnx2x_bd_chain_next { #define CNIC_PORT(cp) ((cp)->pfid & 1) #define CNIC_FUNC(cp) ((cp)->func) -#define CNIC_PATH(cp) (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? 
\ - 0 : (CNIC_FUNC(cp) & 1)) #define CNIC_E1HVN(cp) ((cp)->pfid >> 1) #define BNX2X_HW_CID(cp, x) ((CNIC_PORT(cp) << 23) | \ @@ -450,20 +408,19 @@ struct bnx2x_bd_chain_next { #define BNX2X_SW_CID(x) (x & 0x1ffff) -#define BNX2X_CL_QZONE_ID(cp, cli) \ - (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? cli : \ - cli + (CNIC_PORT(cp) * ETH_MAX_RX_CLIENTS_E1H)) +#define BNX2X_CL_QZONE_ID(bp, cli) \ + (BNX2X_CHIP_IS_E2_PLUS(bp) ? cli : \ + cli + (BP_PORT(bp) * ETH_MAX_RX_CLIENTS_E1H)) #ifndef MAX_STAT_COUNTER_ID #define MAX_STAT_COUNTER_ID \ - (BNX2X_CHIP_IS_E1H((cp)->chip_id) ? MAX_STAT_COUNTER_ID_E1H : \ - ((BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id)) ? MAX_STAT_COUNTER_ID_E2 :\ + (CHIP_IS_E1H(bp) ? MAX_STAT_COUNTER_ID_E1H : \ + ((BNX2X_CHIP_IS_E2_PLUS(bp)) ? MAX_STAT_COUNTER_ID_E2 : \ MAX_STAT_COUNTER_ID_E1)) #endif -#define CNIC_SUPPORTS_FCOE(cp) \ - (BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id) && \ - !((cp)->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE)) +#define CNIC_SUPPORTS_FCOE(cp) \ + (BNX2X_CHIP_IS_E2_PLUS(bp) && !NO_FCOE(bp)) #define CNIC_RAMROD_TMO (HZ / 4) -- cgit v0.10.2 From 5e65789f694f2977fb7169c500b4997772f5278b Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 2 Sep 2013 11:42:29 -0700 Subject: cnic: Redefine BNX2X_HW_CID using existing bnx2x macros to avoid duplication of the same logic. Signed-off-by: Michael Chan Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 3dadc81..750b9a6 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -1384,6 +1384,7 @@ static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid, u32 type, union l5cm_specific_data *l5_data) { struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); struct l5cm_spe kwqe; struct kwqe_16 *kwq[1]; u16 type_16; @@ -1391,7 +1392,7 @@ static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid, kwqe.hdr.conn_and_cmd_data = cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) | - BNX2X_HW_CID(cp, cid))); + BNX2X_HW_CID(bp, cid))); type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE; type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) & @@ -1690,7 +1691,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[], struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id]; struct cnic_iscsi *iscsi = ctx->proto.iscsi; u32 cid = ctx->cid; - u32 hw_cid = BNX2X_HW_CID(cp, cid); + u32 hw_cid = BNX2X_HW_CID(bp, cid); struct iscsi_context *ictx; struct regpair context_addr; int i, j, n = 2, n_max; @@ -1870,6 +1871,7 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[], struct iscsi_kwqe_conn_offload1 *req1; struct iscsi_kwqe_conn_offload2 *req2; struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); struct cnic_context *ctx; struct iscsi_kcqe kcqe; struct kcqe *cqes[1]; @@ -1923,7 +1925,7 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[], } kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS; - kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid); + kcqe.iscsi_conn_context_id = BNX2X_HW_CID(bp, cp->ctx_tbl[l5_cid].cid); done: cqes[0] = (struct kcqe *) &kcqe; @@ -1959,6 +1961,7 @@ static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe) static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid) { struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); struct cnic_context *ctx = 
&cp->ctx_tbl[l5_cid]; union l5cm_specific_data l5_data; int ret; @@ -1967,7 +1970,7 @@ static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid) init_waitqueue_head(&ctx->waitq); ctx->wait_cond = 0; memset(&l5_data, 0, sizeof(l5_data)); - hw_cid = BNX2X_HW_CID(cp, ctx->cid); + hw_cid = BNX2X_HW_CID(bp, ctx->cid); ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL, hw_cid, NONE_CONNECTION_TYPE, &l5_data); @@ -2252,11 +2255,12 @@ static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe) struct fcoe_stat_ramrod_params *fcoe_stat; union l5cm_specific_data l5_data; struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); int ret; u32 cid; req = (struct fcoe_kwqe_stat *) kwqe; - cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid); + cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid); fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data); if (!fcoe_stat) @@ -2275,6 +2279,7 @@ static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[], { int ret; struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); u32 cid; struct fcoe_init_ramrod_params *fcoe_init; struct fcoe_kwqe_init1 *req1; @@ -2319,7 +2324,7 @@ static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[], fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS; cp->kcq2.sw_prod_idx = 0; - cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid); + cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid); ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid, FCOE_CONNECTION_TYPE, &l5_data); *work = 3; @@ -2332,6 +2337,7 @@ static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[], int ret = 0; u32 cid = -1, l5_cid; struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); struct fcoe_kwqe_conn_offload1 *req1; struct fcoe_kwqe_conn_offload2 *req2; struct fcoe_kwqe_conn_offload3 *req3; @@ -2374,7 +2380,7 @@ static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[], fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr); if (fctx) { - u32 hw_cid = BNX2X_HW_CID(cp, cid); + u32 hw_cid = BNX2X_HW_CID(bp, cid); u32 val; val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG, @@ -2398,7 +2404,7 @@ static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[], memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3)); memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4)); - cid = BNX2X_HW_CID(cp, cid); + cid = BNX2X_HW_CID(bp, cid); ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid, FCOE_CONNECTION_TYPE, &l5_data); if (!ret) @@ -2556,13 +2562,14 @@ static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe) struct fcoe_kwqe_destroy *req; union l5cm_specific_data l5_data; struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); int ret; u32 cid; cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ); req = (struct fcoe_kwqe_destroy *) kwqe; - cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid); + cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid); memset(&l5_data, 0, sizeof(l5_data)); ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid, diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h index 519f587..0408ae2 100644 --- a/drivers/net/ethernet/broadcom/cnic.h +++ b/drivers/net/ethernet/broadcom/cnic.h @@ -401,10 +401,9 @@ struct bnx2x_bd_chain_next { #define CNIC_PORT(cp) ((cp)->pfid & 1) #define CNIC_FUNC(cp) ((cp)->func) -#define CNIC_E1HVN(cp) ((cp)->pfid >> 1) -#define 
BNX2X_HW_CID(cp, x) ((CNIC_PORT(cp) << 23) | \ - (CNIC_E1HVN(cp) << 17) | (x)) +#define BNX2X_HW_CID(bp, x) ((BP_PORT(bp) << 23) | \ + (BP_VN(bp) << 17) | (x)) #define BNX2X_SW_CID(x) (x & 0x1ffff) -- cgit v0.10.2 From 5bf945a874d13e4e44c61afaeb7714d07fa7baca Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 2 Sep 2013 11:42:30 -0700 Subject: cnic: Eliminate CNIC_PORT macro and port_mode in local struct. Use BP_PORT and chip_port_mode directly from bnx2x.h to avoid duplication. Signed-off-by: Michael Chan Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 750b9a6..435a167 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -1695,7 +1695,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[], struct iscsi_context *ictx; struct regpair context_addr; int i, j, n = 2, n_max; - u8 port = CNIC_PORT(cp); + u8 port = BP_PORT(bp); ctx->ctx_flags = 0; if (!req2->num_additional_wqes) @@ -1750,7 +1750,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[], ictx->xstorm_st_context.common.ethernet.reserved_vlan_type = ETH_P_8021Q; if (BNX2X_CHIP_IS_E2_PLUS(bp) && - cp->port_mode == CHIP_2_PORT_MODE) { + bp->common.chip_port_mode == CHIP_2_PORT_MODE) { port = 0; } @@ -3050,8 +3050,8 @@ static irqreturn_t cnic_irq(int irq, void *dev_instance) static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm, u16 index, u8 op, u8 update) { - struct cnic_local *cp = dev->cnic_priv; - u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 + + struct bnx2x *bp = netdev_priv(dev->netdev); + u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp) * 32 + COMMAND_REG_INT_ACK); struct igu_ack_register igu_ack; @@ -4231,7 +4231,7 @@ static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev) struct cnic_local *cp = dev->cnic_priv; struct bnx2x *bp = netdev_priv(dev->netdev); u32 pfid = cp->pfid; - u32 port = CNIC_PORT(cp); + u32 port = BP_PORT(bp); cnic_init_bnx2x_mac(dev); cnic_bnx2x_set_tcp_options(dev, 0, 1); @@ -5090,7 +5090,6 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev) u32 pfid; dev->stats_addr = ethdev->addr_drv_info_to_mcp; - cp->port_mode = bp->common.chip_port_mode; cp->pfid = bp->pfid; cp->func = bp->pf_num; @@ -5190,7 +5189,7 @@ static void cnic_init_rings(struct cnic_dev *dev) off = BAR_USTRORM_INTMEM + (BNX2X_CHIP_IS_E2_PLUS(bp) ? USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) : - USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli)); + USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), cli)); for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++) CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]); diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h index 0408ae2..10531f6 100644 --- a/drivers/net/ethernet/broadcom/cnic.h +++ b/drivers/net/ethernet/broadcom/cnic.h @@ -304,7 +304,6 @@ struct cnic_local { u32 chip_id; int func; u32 pfid; - u8 port_mode; u32 shmem_base; @@ -399,7 +398,6 @@ struct bnx2x_bd_chain_next { #define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H #endif -#define CNIC_PORT(cp) ((cp)->pfid & 1) #define CNIC_FUNC(cp) ((cp)->func) #define BNX2X_HW_CID(bp, x) ((BP_PORT(bp) << 23) | \ -- cgit v0.10.2 From a5b3c4ae27f0f6a84a00dae44adee82aca346d3a Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 2 Sep 2013 11:42:31 -0700 Subject: cnic: Eliminate local copy of pfid. Use bp->pfid from bnx2x instead to avoid duplication. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 435a167..8142480 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -1395,7 +1395,7 @@ static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid, BNX2X_HW_CID(bp, cid))); type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE; - type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) & + type_16 |= (bp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) & SPE_HDR_FUNCTION_ID; kwqe.hdr.type = cpu_to_le16(type_16); @@ -1433,7 +1433,6 @@ static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type, static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps, int en_tcp_dack) { - struct cnic_local *cp = dev->cnic_priv; struct bnx2x *bp = netdev_priv(dev->netdev); u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN; u16 tstorm_flags = 0; @@ -1446,10 +1445,10 @@ static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps, tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN; CNIC_WR8(dev, BAR_XSTRORM_INTMEM + - XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags); + XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), xstorm_flags); CNIC_WR16(dev, BAR_TSTRORM_INTMEM + - TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags); + TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), tstorm_flags); } static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) @@ -1458,7 +1457,7 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) struct bnx2x *bp = netdev_priv(dev->netdev); struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe; int hq_bds, pages; - u32 pfid = cp->pfid; + u32 pfid = bp->pfid; cp->num_iscsi_tasks = req1->num_tasks_per_conn; cp->num_ccells = req1->num_ccells_per_conn; @@ -1541,9 +1540,8 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe) { struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe; - struct cnic_local *cp = dev->cnic_priv; struct bnx2x *bp = netdev_priv(dev->netdev); - u32 pfid = cp->pfid; + u32 pfid = bp->pfid; struct iscsi_kcqe kcqe; struct kcqe *cqes[1]; @@ -2078,9 +2076,8 @@ static void cnic_init_storm_conn_bufs(struct cnic_dev *dev, static void cnic_init_bnx2x_mac(struct cnic_dev *dev) { - struct cnic_local *cp = dev->cnic_priv; struct bnx2x *bp = netdev_priv(dev->netdev); - u32 pfid = cp->pfid; + u32 pfid = bp->pfid; u8 *mac = dev->mac_addr; CNIC_WR8(dev, BAR_XSTRORM_INTMEM + @@ -2186,7 +2183,7 @@ static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[], cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf); CNIC_WR16(dev, BAR_XSTRORM_INTMEM + - XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id); + XSTORM_ISCSI_LOCAL_VLAN_OFFSET(bp->pfid), csk->vlan_id); ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT, kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data); @@ -4228,9 +4225,8 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev) static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev) { - struct cnic_local *cp = dev->cnic_priv; struct bnx2x *bp = netdev_priv(dev->netdev); - u32 pfid = cp->pfid; + u32 pfid = bp->pfid; u32 port = BP_PORT(bp); cnic_init_bnx2x_mac(dev); @@ -4996,7 +4992,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev, data->general.activate_flg = 1; data->general.sp_client_id = cli; data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14); - data->general.func_id = 
cp->pfid; + data->general.func_id = bp->pfid; for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) { dma_addr_t buf_map; @@ -5046,7 +5042,7 @@ static void cnic_init_bnx2x_kcq(struct cnic_dev *dev) { struct cnic_local *cp = dev->cnic_priv; struct bnx2x *bp = netdev_priv(dev->netdev); - u32 pfid = cp->pfid; + u32 pfid = bp->pfid; cp->kcq1.io_addr = BAR_CSTRORM_INTMEM + CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0); @@ -5090,11 +5086,10 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev) u32 pfid; dev->stats_addr = ethdev->addr_drv_info_to_mcp; - cp->pfid = bp->pfid; cp->func = bp->pf_num; func = CNIC_FUNC(cp); - pfid = cp->pfid; + pfid = bp->pfid; ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ, cp->iscsi_start_cid, 0); @@ -5393,7 +5388,7 @@ static void cnic_stop_bnx2x_hw(struct cnic_dev *dev) *cp->kcq1.hw_prod_idx_ptr = 0; CNIC_WR(dev, BAR_CSTRORM_INTMEM + - CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0); + CSTORM_ISCSI_EQ_CONS_OFFSET(bp->pfid, 0), 0); CNIC_WR16(dev, cp->kcq1.io_addr, 0); cnic_free_resc(dev); } diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h index 10531f6..0121a5d 100644 --- a/drivers/net/ethernet/broadcom/cnic.h +++ b/drivers/net/ethernet/broadcom/cnic.h @@ -303,7 +303,6 @@ struct cnic_local { u32 chip_id; int func; - u32 pfid; u32 shmem_base; -- cgit v0.10.2 From 069281d184f477055ab2414653811f569f12e553 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 2 Sep 2013 11:42:32 -0700 Subject: cnic: Update version to 2.5.18. Signed-off-by: Michael Chan Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h index 95aff76..0658b43 100644 --- a/drivers/net/ethernet/broadcom/cnic_if.h +++ b/drivers/net/ethernet/broadcom/cnic_if.h @@ -14,8 +14,8 @@ #include "bnx2x/bnx2x_mfw_req.h" -#define CNIC_MODULE_VERSION "2.5.17" -#define CNIC_MODULE_RELDATE "July 28, 2013" +#define CNIC_MODULE_VERSION "2.5.18" +#define CNIC_MODULE_RELDATE "Sept 01, 2013" #define CNIC_ULP_RDMA 0 #define CNIC_ULP_ISCSI 1 -- cgit v0.10.2 From c67c71b4e8cf9cffc66b0a12f93b1c6b9cc1ea4f Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Tue, 3 Sep 2013 08:54:04 +0900 Subject: net: netx-eth: remove unnecessary casting Casting from 'void *' is unnecessary, because casting from 'void *' to any pointer type is automatic. Reported-by: Sergei Shtylyov Signed-off-by: Jingoo Han Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c index 235fd51..e6f0a43 100644 --- a/drivers/net/ethernet/netx-eth.c +++ b/drivers/net/ethernet/netx-eth.c @@ -390,7 +390,7 @@ static int netx_eth_drv_probe(struct platform_device *pdev) priv = netdev_priv(ndev); - pdata = (struct netxeth_platform_data *)dev_get_platdata(&pdev->dev); + pdata = dev_get_platdata(&pdev->dev); priv->xc = request_xc(pdata->xcno, &pdev->dev); if (!priv->xc) { dev_err(&pdev->dev, "unable to request xc engine\n"); -- cgit v0.10.2 From 0b536be7b987de14dab63ea565fc1e271a7f3a5f Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Tue, 3 Sep 2013 09:55:32 +1000 Subject: ibmveth: Fix little endian issues The hypervisor is big endian, so little endian kernel builds need to byteswap. Signed-off-by: Anton Blanchard Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 70fd559..5d41aee 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -106,7 +106,7 @@ struct ibmveth_stat ibmveth_stats[] = { /* simple methods of getting data from the current rxq entry */ static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter) { - return adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off; + return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off); } static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter) @@ -132,7 +132,7 @@ static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter) static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter) { - return adapter->rx_queue.queue_addr[adapter->rx_queue.index].length; + return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length); } static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter) diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h index 43a794f..84066ba 100644 --- a/drivers/net/ethernet/ibm/ibmveth.h +++ b/drivers/net/ethernet/ibm/ibmveth.h @@ -164,14 +164,26 @@ struct ibmveth_adapter { u64 tx_send_failed; }; +/* + * We pass struct ibmveth_buf_desc_fields to the hypervisor in registers, + * so we don't need to byteswap the two elements. However since we use + * a union (ibmveth_buf_desc) to convert from the struct to a u64 we + * do end up with endian specific ordering of the elements and that + * needs correcting. + */ struct ibmveth_buf_desc_fields { +#ifdef __BIG_ENDIAN + u32 flags_len; + u32 address; +#else + u32 address; u32 flags_len; +#endif #define IBMVETH_BUF_VALID 0x80000000 #define IBMVETH_BUF_TOGGLE 0x40000000 #define IBMVETH_BUF_NO_CSUM 0x02000000 #define IBMVETH_BUF_CSUM_GOOD 0x01000000 #define IBMVETH_BUF_LEN_MASK 0x00FFFFFF - u32 address; }; union ibmveth_buf_desc { @@ -180,7 +192,7 @@ union ibmveth_buf_desc { }; struct ibmveth_rx_q_entry { - u32 flags_off; + __be32 flags_off; #define IBMVETH_RXQ_TOGGLE 0x80000000 #define IBMVETH_RXQ_TOGGLE_SHIFT 31 #define IBMVETH_RXQ_VALID 0x40000000 @@ -188,7 +200,8 @@ struct ibmveth_rx_q_entry { #define IBMVETH_RXQ_CSUM_GOOD 0x01000000 #define IBMVETH_RXQ_OFF_MASK 0x0000FFFF - u32 length; + __be32 length; + /* correlator is only used by the OS, no need to byte swap */ u64 correlator; }; -- cgit v0.10.2 From cab6ce9ebe89303bbf5eff442776188070a22771 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 3 Sep 2013 12:02:32 +0300 Subject: caif: add a sanity check to the tty name "tty->name" and "name" are a 64 character buffers. My static checker complains because we add the "cf" on the front so it look like we are copying a 66 character string into a 64 character buffer. Also if the name is larger than IFNAMSIZ (16) it triggers a BUG_ON() inside the call to alloc_netdev(). This is all under CAP_SYS_ADMIN so it's not a security fix, it just adds a little robustness. Signed-off-by: Dan Carpenter Signed-off-by: David S. 
Miller diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c index 34dea95..88a6a58 100644 --- a/drivers/net/caif/caif_serial.c +++ b/drivers/net/caif/caif_serial.c @@ -347,7 +347,9 @@ static int ldisc_open(struct tty_struct *tty) /* release devices to avoid name collision */ ser_release(NULL); - sprintf(name, "cf%s", tty->name); + result = snprintf(name, sizeof(name), "cf%s", tty->name); + if (result >= IFNAMSIZ) + return -EINVAL; dev = alloc_netdev(sizeof(*ser), name, caifdev_setup); if (!dev) return -ENOMEM; -- cgit v0.10.2 From 80aa4e10963cc7d9b5dd5b2568ce28d74af20bf9 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 3 Sep 2013 12:03:40 +0300 Subject: x25: add a sanity check parsing X.25 facilities This was found with a manual audit and I don't have a reproducer. We limit ->calling_len and ->called_len when we get them from copy_from_user() in x25_ioctl() so when they come from skb->data then we should cap them there as well. Signed-off-by: Dan Carpenter Signed-off-by: David S. Miller diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c index 66c63873..b825325 100644 --- a/net/x25/x25_facilities.c +++ b/net/x25/x25_facilities.c @@ -156,6 +156,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, case X25_FAC_CALLING_AE: if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1) return -1; + if (p[2] > X25_MAX_AE_LEN) + return -1; dte_facs->calling_len = p[2]; memcpy(dte_facs->calling_ae, &p[3], p[1] - 1); *vc_fac_mask |= X25_MASK_CALLING_AE; @@ -163,6 +165,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, case X25_FAC_CALLED_AE: if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1) return -1; + if (p[2] > X25_MAX_AE_LEN) + return -1; dte_facs->called_len = p[2]; memcpy(dte_facs->called_ae, &p[3], p[1] - 1); *vc_fac_mask |= X25_MASK_CALLED_AE; -- cgit v0.10.2 From 0996b7dfc3858a497f2de24c233a560e30fe4a14 Mon Sep 17 00:00:00 2001 From: Sucheta Chakraborty Date: Tue, 3 Sep 2013 05:07:37 -0400 Subject: qlcnic: Fix sparse warning. This patch fixes warning "warning: symbol 'qlcnic_set_dcb_ops' was not declared. Should it be static?" Signed-off-by: Sucheta Chakraborty Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c index 2e10e79..d62d5ce 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c @@ -248,7 +248,7 @@ static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_adapter *adapter) adapter->netdev->dcbnl_ops = &qlcnic_dcbnl_ops; } -void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter) +static void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter) { if (qlcnic_82xx_check(adapter)) adapter->dcb->ops = &qlcnic_82xx_dcb_ops; -- cgit v0.10.2 From 3cc4a6784dfe26cd1aca55ef28b07eadbc09b960 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 3 Sep 2013 12:13:47 +0300 Subject: qlcnic: remove a stray semicolon Just remove a small semicolon. Signed-off-by: Dan Carpenter Acked-by: Himanshu Madhani Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 332aa71..4d7ad00 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -1709,7 +1709,7 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val) qlcnic_dev_request_reset(adapter, val->flag); adapter->flags &= ~QLCNIC_FW_RESET_OWNER; break; -; + case QLCNIC_SET_QUIESCENT: case QLCNIC_RESET_QUIESCENT: state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); -- cgit v0.10.2 From cc8c6c1b21c9b1e0f1e89428c07c7c52435cba0c Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 3 Sep 2013 18:24:02 +0200 Subject: net: tcp_probe: adapt tbuf size for recent changes With recent changes in tcp_probe module (e.g. f925d0a62d ("net: tcp_probe: add IPv6 support")) we also need to take into account that tbuf needs to be updated as format string will be further expanded. tbuf sits on the stack in tcpprobe_read() function that is invoked when user space reads procfs file /proc/net/tcpprobe, hence not fast path as in jtcp_rcv_established(). Having a size similarly as in sctp_probe module of 256 bytes is fully sufficient for that, we need theoretical maximum of 252 bytes otherwise we could get truncated. Signed-off-by: Daniel Borkmann Signed-off-by: David S. Miller diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c index 622a437..1f6aa54 100644 --- a/net/ipv4/tcp_probe.c +++ b/net/ipv4/tcp_probe.c @@ -218,7 +218,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf, return -EINVAL; while (cnt < len) { - char tbuf[164]; + char tbuf[256]; int width; /* Wait for data in buffer */ -- cgit v0.10.2 From c995ae2259ee36caf48bbfacf40111998dacd4af Mon Sep 17 00:00:00 2001 From: Vijay Subramanian Date: Tue, 3 Sep 2013 12:23:22 -0700 Subject: tcp: Change return value of tcp_rcv_established() tcp_rcv_established() returns only one value namely 0. We change the return value to void (as suggested by David Miller). After commit 0c24604b (tcp: implement RFC 5961 4.2), we no longer send RSTs in response to SYNs. We can remove the check and processing on the return value of tcp_rcv_established(). We also fix jtcp_rcv_established() in tcp_probe.c to match that of tcp_rcv_established(). Signed-off-by: Vijay Subramanian Signed-off-by: David S. Miller diff --git a/include/net/tcp.h b/include/net/tcp.h index 6a6a88d..b1aa324 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -371,8 +371,8 @@ extern void tcp_delack_timer_handler(struct sock *sk); extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg); extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th, unsigned int len); -extern int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, - const struct tcphdr *th, unsigned int len); +extern void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, + const struct tcphdr *th, unsigned int len); extern void tcp_rcv_space_adjust(struct sock *sk); extern void tcp_cleanup_rbuf(struct sock *sk, int copied); extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 1a84fff..93d7e9d 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -5049,8 +5049,8 @@ discard: * the rest is checked inline. Fast processing is turned on in * tcp_data_queue when everything is OK. 
*/ -int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, - const struct tcphdr *th, unsigned int len) +void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, + const struct tcphdr *th, unsigned int len) { struct tcp_sock *tp = tcp_sk(sk); @@ -5127,7 +5127,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, tcp_ack(sk, skb, 0); __kfree_skb(skb); tcp_data_snd_check(sk); - return 0; + return; } else { /* Header too small */ TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); goto discard; @@ -5220,7 +5220,7 @@ no_ack: if (eaten) kfree_skb_partial(skb, fragstolen); sk->sk_data_ready(sk, 0); - return 0; + return; } } @@ -5236,7 +5236,7 @@ slow_path: */ if (!tcp_validate_incoming(sk, skb, th, 1)) - return 0; + return; step5: if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0) @@ -5252,7 +5252,7 @@ step5: tcp_data_snd_check(sk); tcp_ack_snd_check(sk); - return 0; + return; csum_error: TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS); @@ -5260,7 +5260,6 @@ csum_error: discard: __kfree_skb(skb); - return 0; } EXPORT_SYMBOL(tcp_rcv_established); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 09d45d7..b14266b 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1799,10 +1799,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) sk->sk_rx_dst = NULL; } } - if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) { - rsk = sk; - goto reset; - } + tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len); return 0; } diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c index 1f6aa54..611beab 100644 --- a/net/ipv4/tcp_probe.c +++ b/net/ipv4/tcp_probe.c @@ -122,8 +122,8 @@ static inline int tcp_probe_avail(void) * Hook inserted to be called before each receive packet. * Note: arguments must match tcp_rcv_established()! */ -static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb, - const struct tcphdr *th, unsigned int len) +static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb, + const struct tcphdr *th, unsigned int len) { const struct tcp_sock *tp = tcp_sk(sk); const struct inet_sock *inet = inet_sk(sk); @@ -172,7 +172,6 @@ static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb, } jprobe_return(); - return 0; } static struct jprobe tcp_jprobe = { diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 5bcfadf..9acdced 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1360,8 +1360,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) } } - if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) - goto reset; + tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len); if (opt_skb) goto ipv6_pktoptions; return 0; -- cgit v0.10.2 From 775ada6d9f4c9dc440f5aeca00354eb87f6e0696 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Wed, 28 Aug 2013 15:14:38 +0200 Subject: netfilter: more strict TCP flag matching in SYNPROXY Its seems Patrick missed to incoorporate some of my requested changes during review v2 of SYNPROXY netfilter module. Which were, to avoid SYN+ACK packets to enter the path, meant for the ACK packet from the client (from the 3WHS). Further there were a bug in ip6t_SYNPROXY.c, for matching SYN packets that didn't exclude the ACK flag. Go a step further with SYN packet/flag matching by excluding flags ACK+FIN+RST, in both IPv4 and IPv6 modules. 
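In short, the stricter matching treats a packet as the client's opening SYN only when no other control flag is set, and as the handshake-completing ACK only when SYN, FIN and RST are all clear. A condensed sketch, using the same names as the hunks below:

	if (th->syn && !(th->ack || th->fin || th->rst)) {
		/* initial SYN from the client */
		synproxy_send_client_synack(skb, th, &opts);
	} else if (th->ack && !(th->fin || th->rst || th->syn)) {
		/* bare ACK from the client completing the 3WHS */
		synproxy_recv_client_ack(snet, skb, th, &opts, ntohl(th->seq));
	}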
The intented usage of SYNPROXY is as follows: (gracefully describing usage in commit) iptables -t raw -A PREROUTING -i eth0 -p tcp --dport 80 --syn -j NOTRACK iptables -A INPUT -i eth0 -p tcp --dport 80 -m state UNTRACKED,INVALID \ -j SYNPROXY --sack-perm --timestamp --mss 1480 --wscale 7 --ecn echo 0 > /proc/sys/net/netfilter/nf_conntrack_tcp_loose This does filter SYN flags early, for packets in the UNTRACKED state, but packets in the INVALID state with other TCP flags could still reach the module, thus this stricter flag matching is still needed. Signed-off-by: Jesper Dangaard Brouer Acked-by: Patrick McHardy Signed-off-by: Pablo Neira Ayuso diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c index 94371db..90e489e 100644 --- a/net/ipv4/netfilter/ipt_SYNPROXY.c +++ b/net/ipv4/netfilter/ipt_SYNPROXY.c @@ -269,7 +269,7 @@ synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par) synproxy_parse_options(skb, par->thoff, th, &opts); - if (th->syn && !th->ack) { + if (th->syn && !(th->ack || th->fin || th->rst)) { /* Initial SYN from client */ this_cpu_inc(snet->stats->syn_received); @@ -285,7 +285,7 @@ synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par) XT_SYNPROXY_OPT_ECN); synproxy_send_client_synack(skb, th, &opts); - } else if (th->ack && !(th->fin || th->rst)) + } else if (th->ack && !(th->fin || th->rst || th->syn)) /* ACK from client */ synproxy_recv_client_ack(snet, skb, th, &opts, ntohl(th->seq)); diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c index 4270a9b..a5af0bf 100644 --- a/net/ipv6/netfilter/ip6t_SYNPROXY.c +++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c @@ -284,7 +284,7 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par) synproxy_parse_options(skb, par->thoff, th, &opts); - if (th->syn) { + if (th->syn && !(th->ack || th->fin || th->rst)) { /* Initial SYN from client */ this_cpu_inc(snet->stats->syn_received); @@ -300,7 +300,7 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par) XT_SYNPROXY_OPT_ECN); synproxy_send_client_synack(skb, th, &opts); - } else if (th->ack && !(th->fin || th->rst)) + } else if (th->ack && !(th->fin || th->rst || th->syn)) /* ACK from client */ synproxy_recv_client_ack(snet, skb, th, &opts, ntohl(th->seq)); -- cgit v0.10.2 From f4de4c89d89df5ead42de9fea895f5b8155270da Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Thu, 29 Aug 2013 10:32:09 +0200 Subject: netfilter: synproxy_core: fix warning in __nf_ct_ext_add_length() With CONFIG_NETFILTER_DEBUG we get the following warning during SYNPROXY init: [ 80.558906] WARNING: CPU: 1 PID: 4833 at net/netfilter/nf_conntrack_extend.c:80 __nf_ct_ext_add_length+0x217/0x220 [nf_conntrack]() The reason is that the conntrack template is set to confirmed before adding the extension and it is invalid to add extensions to already confirmed conntracks. Fix by adding the extensions before setting the conntrack to confirmed. 
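In outline, the fix is purely an ordering change: attach the conntrack extensions while the template is still unconfirmed, and only then mark it confirmed (condensed from the hunk below):

	if (!nfct_seqadj_ext_add(ct))
		goto err2;
	if (!nfct_synproxy_ext_add(ct))
		goto err2;
	/* extensions are in place; now the template may be confirmed */
	__set_bit(IPS_TEMPLATE_BIT, &ct->status);
	__set_bit(IPS_CONFIRMED_BIT, &ct->status);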
Reported-by: Jesper Dangaard Brouer Signed-off-by: Patrick McHardy Acked-by: Jesper Dangaard Brouer Signed-off-by: Pablo Neira Ayuso diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c index d23dc79..6fd967c 100644 --- a/net/netfilter/nf_synproxy_core.c +++ b/net/netfilter/nf_synproxy_core.c @@ -356,12 +356,12 @@ static int __net_init synproxy_net_init(struct net *net) goto err1; } - __set_bit(IPS_TEMPLATE_BIT, &ct->status); - __set_bit(IPS_CONFIRMED_BIT, &ct->status); if (!nfct_seqadj_ext_add(ct)) goto err2; if (!nfct_synproxy_ext_add(ct)) goto err2; + __set_bit(IPS_TEMPLATE_BIT, &ct->status); + __set_bit(IPS_CONFIRMED_BIT, &ct->status); snet->tmpl = ct; -- cgit v0.10.2 From 7cc9eb6ef78d0dcb97d543ea19966486e98afa0b Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Thu, 29 Aug 2013 12:18:46 +0200 Subject: netfilter: SYNPROXY: let unrelated packets continue Packets reaching SYNPROXY were dropped by default, as they were most likely invalid (given the recommended state matching). This patch changes the SYNPROXY target to let packets it does not consume continue being processed by the stack. This is more in line with other target modules, as it allows more flexible configurations for handling, logging or matching on packets in INVALID states. 
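Condensed from the hunks below, the target now drops only what it actually consumes and hands everything else back to rule traversal:

	if (th->syn && !(th->ack || th->fin || th->rst)) {
		synproxy_send_client_synack(skb, th, &opts);
		return NF_DROP;		/* consumed: we replied with a SYN+ACK */
	} else if (th->ack && !(th->fin || th->rst || th->syn)) {
		synproxy_recv_client_ack(snet, skb, th, &opts, ntohl(th->seq));
		return NF_DROP;		/* consumed */
	}

	return XT_CONTINUE;		/* unrelated packet: let later rules decide */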
Signed-off-by: Todd Fujinaka Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index df33c4b..78f49cf 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2367,7 +2367,14 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) "Width x1" : "unknown"), netdev->dev_addr); } - ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH); + if ((hw->mac.type >= e1000_i210 || + igb_get_flash_presence_i210(hw))) { + ret_val = igb_read_part_string(hw, part_str, + E1000_PBANUM_LENGTH); + } else { + ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND; + } + if (ret_val) strcpy(part_str, "Unknown"); dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str); -- cgit v0.10.2 From aa9b8cc44409b140c0dadd3776e99220edf3384a Mon Sep 17 00:00:00 2001 From: Akeem G Abodunrin Date: Wed, 28 Aug 2013 02:22:43 +0000 Subject: igb: Implementation of 1-sec delay for i210 devices This patch adds 1 sec delay mechanism to i210 device family, in order to avoid erroneous link issue with the link partner. Signed-off-by: Akeem G Abodunrin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index c1fae7a..6807b09 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -448,6 +448,8 @@ struct igb_adapter { struct i2c_client *i2c_client; u32 rss_indir_tbl_init; u8 rss_indir_tbl[IGB_RETA_SIZE]; + + unsigned long link_check_timeout; }; #define IGB_FLAG_HAS_MSI (1 << 0) @@ -459,6 +461,7 @@ struct igb_adapter { #define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 6) #define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 7) #define IGB_FLAG_WOL_SUPPORTED (1 << 8) +#define IGB_FLAG_NEED_LINK_UPDATE (1 << 9) /* DMA Coalescing defines */ #define IGB_MIN_TXPBSIZE 20408 diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 78f49cf..b209ca5 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1671,6 +1671,8 @@ void igb_down(struct igb_adapter *adapter) igb_irq_disable(adapter); + adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; + for (i = 0; i < adapter->num_q_vectors; i++) { napi_synchronize(&(adapter->q_vector[i]->napi)); napi_disable(&(adapter->q_vector[i]->napi)); @@ -3886,6 +3888,17 @@ bool igb_has_link(struct igb_adapter *adapter) break; } + if (((hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) && + (hw->phy.id == I210_I_PHY_ID)) { + if (!netif_carrier_ok(adapter->netdev)) { + adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; + } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) { + adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + } + } + return link_active; } @@ -3930,6 +3943,14 @@ static void igb_watchdog_task(struct work_struct *work) int i; link = igb_has_link(adapter); + + if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) { + if (time_after(jiffies, (adapter->link_check_timeout + HZ))) + adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; + else + link = false; + } + if (link) { /* Cancel scheduled suspend requests. 
*/ pm_runtime_resume(netdev->dev.parent); @@ -4054,9 +4075,14 @@ static void igb_watchdog_task(struct work_struct *work) igb_ptp_rx_hang(adapter); /* Reset the timer */ - if (!test_bit(__IGB_DOWN, &adapter->state)) - mod_timer(&adapter->watchdog_timer, - round_jiffies(jiffies + 2 * HZ)); + if (!test_bit(__IGB_DOWN, &adapter->state)) { + if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + HZ)); + else + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 2 * HZ)); + } } enum latency_range { -- cgit v0.10.2 From 99af4729c4c5d67dafccb2a1ba29b95cf6f981c2 Mon Sep 17 00:00:00 2001 From: Akeem G Abodunrin Date: Wed, 28 Aug 2013 02:22:58 +0000 Subject: igb: New PHY_ID for i354 device This patch changes PHY_ID for i354 device, now using M88E1543 instead of M88E1545. Signed-off-by: Akeem G Abodunrin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index d398fad..b0b33a1 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -176,7 +176,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) /* Verify phy id and set remaining function pointers */ switch (phy->id) { - case M88E1545_E_PHY_ID: + case M88E1543_E_PHY_ID: case I347AT4_E_PHY_ID: case M88E1112_E_PHY_ID: case M88E1111_I_PHY_ID: @@ -1448,7 +1448,7 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) switch (hw->phy.id) { case I347AT4_E_PHY_ID: case M88E1112_E_PHY_ID: - case M88E1545_E_PHY_ID: + case M88E1543_E_PHY_ID: case I210_I_PHY_ID: ret_val = igb_copper_link_setup_m88_gen2(hw); break; @@ -2477,28 +2477,28 @@ s32 igb_set_eee_i354(struct e1000_hw *hw) u16 phy_data; if ((hw->phy.media_type != e1000_media_type_copper) || - (phy->id != M88E1545_E_PHY_ID)) + (phy->id != M88E1543_E_PHY_ID)) goto out; if (!hw->dev_spec._82575.eee_disable) { /* Switch to PHY page 18. */ - ret_val = phy->ops.write_reg(hw, E1000_M88E1545_PAGE_ADDR, 18); + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18); if (ret_val) goto out; - ret_val = phy->ops.read_reg(hw, E1000_M88E1545_EEE_CTRL_1, + ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1, &phy_data); if (ret_val) goto out; - phy_data |= E1000_M88E1545_EEE_CTRL_1_MS; - ret_val = phy->ops.write_reg(hw, E1000_M88E1545_EEE_CTRL_1, + phy_data |= E1000_M88E1543_EEE_CTRL_1_MS; + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1, phy_data); if (ret_val) goto out; /* Return the PHY to page 0. */ - ret_val = phy->ops.write_reg(hw, E1000_M88E1545_PAGE_ADDR, 0); + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); if (ret_val) goto out; @@ -2549,7 +2549,7 @@ s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status) /* Check if EEE is supported on this device. 
*/ if ((hw->phy.media_type != e1000_media_type_copper) || - (phy->id != M88E1545_E_PHY_ID)) + (phy->id != M88E1543_E_PHY_ID)) goto out; ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354, diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 60559af..978eca3 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -787,7 +787,7 @@ #define I350_I_PHY_ID 0x015403B0 #define M88_VENDOR 0x0141 #define I210_I_PHY_ID 0x01410C00 -#define M88E1545_E_PHY_ID 0x01410EA0 +#define M88E1543_E_PHY_ID 0x01410EA0 /* M88E1000 Specific Registers */ #define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ @@ -909,9 +909,9 @@ #define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */ #define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */ #define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ -#define E1000_M88E1545_PAGE_ADDR 0x16 /* Page Offset Register */ -#define E1000_M88E1545_EEE_CTRL_1 0x0 -#define E1000_M88E1545_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */ +#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */ +#define E1000_M88E1543_EEE_CTRL_1 0x0 +#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */ #define E1000_EEE_ADV_DEV_I354 7 #define E1000_EEE_ADV_ADDR_I354 60 #define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */ diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index 6046194..5adccbc 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -1806,7 +1806,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw) phy->max_cable_length = phy_data / (is_cm ? 100 : 1); phy->cable_length = phy_data / (is_cm ? 100 : 1); break; - case M88E1545_E_PHY_ID: + case M88E1543_E_PHY_ID: case I347AT4_E_PHY_ID: /* Remember the original page select and set it to 7 */ ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, -- cgit v0.10.2 From dfc707558bb635db98fc6ed8d47c737091707af6 Mon Sep 17 00:00:00 2001 From: Akeem G Abodunrin Date: Wed, 28 Aug 2013 02:22:48 +0000 Subject: igb: M88E1543 PHY downshift implementation This patch implements downshift mechanism for M88E1543 PHY, so that downshift is disabled first during link setup process, and later enabled if we are master and downshift link is negotiated. Also cleaned up return code implementation. Signed-off-by: Akeem G Abodunrin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index 5adccbc..e726675 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -731,15 +731,13 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw) s32 ret_val; u16 phy_data; - if (phy->reset_disable) { - ret_val = 0; - goto out; - } + if (phy->reset_disable) + return 0; /* Enable CRS on Tx. This must be set for half-duplex operation. 
*/ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); if (ret_val) - goto out; + return ret_val; /* Options: * MDI/MDI-X = 0 (default) @@ -780,23 +778,36 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw) phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; /* Enable downshift and setting it to X6 */ + if (phy->id == M88E1543_E_PHY_ID) { + phy_data &= ~I347AT4_PSCR_DOWNSHIFT_ENABLE; + ret_val = + phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + return ret_val; + } + } + phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK; phy_data |= I347AT4_PSCR_DOWNSHIFT_6X; phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE; ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); if (ret_val) - goto out; + return ret_val; /* Commit the changes. */ ret_val = igb_phy_sw_reset(hw); if (ret_val) { hw_dbg("Error committing the PHY changes\n"); - goto out; + return ret_val; } -out: - return ret_val; + return 0; } /** -- cgit v0.10.2 From db476e85118e2f5b24f4ccc0bdb42f0c00bde83a Mon Sep 17 00:00:00 2001 From: Akeem G Abodunrin Date: Wed, 28 Aug 2013 02:22:53 +0000 Subject: igb: No PHPM support in i354 devices PHY Power Management does not exist for i354 device. So, there is no need to read and write this register or clear go link Disconnect bit, which could cause a lot of issues. Signed-off-by: Akeem G Abodunrin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index b0b33a1..d796e29 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -1421,11 +1421,18 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); wr32(E1000_CTRL, ctrl); - /* Clear Go Link Disconnect bit */ - if (hw->mac.type >= e1000_82580) { + /* Clear Go Link Disconnect bit on supported devices */ + switch (hw->mac.type) { + case e1000_82580: + case e1000_i350: + case e1000_i210: + case e1000_i211: phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT); phpm_reg &= ~E1000_82580_PM_GO_LINKD; wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg); + break; + default: + break; } ret_val = igb_setup_serdes_link_82575(hw); -- cgit v0.10.2 From f1b4d6214b04caed45f0938a1d769b0d8fe79a3b Mon Sep 17 00:00:00 2001 From: Akeem G Abodunrin Date: Wed, 28 Aug 2013 02:23:04 +0000 Subject: igb: Support to get 2_5G link status for appropriate media type Since i354 2.5Gb devices are not Copper media type but SerDes, so this patch changes the way we detect speed/duplex link info for this device. 
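In short, the 2.5Gb detection is keyed off the STATUS register SKU bits inside the PCS/SerDes path (condensed from the hunk below):

	if (mac->type == e1000_i354) {
		status = rd32(E1000_STATUS);
		if ((status & E1000_STATUS_2P5_SKU) &&
		    !(status & E1000_STATUS_2P5_SKU_OVER)) {
			*speed = SPEED_2500;
			*duplex = FULL_DUPLEX;
		}
	}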
Signed-off-by: Akeem G Abodunrin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index d796e29..68dd7c8 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -1217,7 +1217,7 @@ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed, u16 *duplex) { struct e1000_mac_info *mac = &hw->mac; - u32 pcs; + u32 pcs, status; /* Set up defaults for the return values of this function */ mac->serdes_has_link = false; @@ -1238,20 +1238,31 @@ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed, mac->serdes_has_link = true; /* Detect and store PCS speed */ - if (pcs & E1000_PCS_LSTS_SPEED_1000) { + if (pcs & E1000_PCS_LSTS_SPEED_1000) *speed = SPEED_1000; - } else if (pcs & E1000_PCS_LSTS_SPEED_100) { + else if (pcs & E1000_PCS_LSTS_SPEED_100) *speed = SPEED_100; - } else { + else *speed = SPEED_10; - } /* Detect and store PCS duplex */ - if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) { + if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) *duplex = FULL_DUPLEX; - } else { + else *duplex = HALF_DUPLEX; + + /* Check if it is an I354 2.5Gb backplane connection. */ + if (mac->type == e1000_i354) { + status = rd32(E1000_STATUS); + if ((status & E1000_STATUS_2P5_SKU) && + !(status & E1000_STATUS_2P5_SKU_OVER)) { + *speed = SPEED_2500; + *duplex = FULL_DUPLEX; + hw_dbg("2500 Mbs, "); + hw_dbg("Full Duplex\n"); + } } + } return 0; diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c index bab556a..f0dfd41 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -1171,17 +1171,6 @@ s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, hw_dbg("Half Duplex\n"); } - /* Check if it is an I354 2.5Gb backplane connection. */ - if (hw->mac.type == e1000_i354) { - if ((status & E1000_STATUS_2P5_SKU) && - !(status & E1000_STATUS_2P5_SKU_OVER)) { - *speed = SPEED_2500; - *duplex = FULL_DUPLEX; - hw_dbg("2500 Mbs, "); - hw_dbg("Full Duplex\n"); - } - } - return 0; } -- cgit v0.10.2 From 1205e1fa615805c9efa97303b552cf445965752a Mon Sep 17 00:00:00 2001 From: Phil Oester Date: Sun, 1 Sep 2013 08:32:21 -0700 Subject: netfilter: xt_TCPMSS: correct return value in tcpmss_mangle_packet In commit b396966c4 (netfilter: xt_TCPMSS: Fix missing fragmentation handling), I attempted to add safe fragment handling to xt_TCPMSS. However, Andy Padavan of Project N56U correctly points out that returning XT_CONTINUE in this function does not work. The callers (tcpmss_tg[46]) expect to receive a value of 0 in order to return XT_CONTINUE. 
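Put differently, tcpmss_mangle_packet() signals its callers through a plain int: a negative value is treated as an error leading to NF_DROP, while a non-negative value lets tcpmss_tg4/tcpmss_tg6 finish up and return XT_CONTINUE themselves, so the fragment case must return 0 (sketch of the one-line fix below; the caller behaviour is summarized here from the surrounding code, not shown in this hunk):

	/* This is a fragment, no TCP header is available */
	if (par->fragoff != 0)
		return 0;	/* 0, not XT_CONTINUE: the caller maps 0 to XT_CONTINUE */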
Signed-off-by: Phil Oester Signed-off-by: Pablo Neira Ayuso diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c index 6113cc7..cd24290 100644 --- a/net/netfilter/xt_TCPMSS.c +++ b/net/netfilter/xt_TCPMSS.c @@ -60,7 +60,7 @@ tcpmss_mangle_packet(struct sk_buff *skb, /* This is a fragment, no TCP header is available */ if (par->fragoff != 0) - return XT_CONTINUE; + return 0; if (!skb_make_writable(skb, skb->len)) return -1; -- cgit v0.10.2 From f6878e39c726e13e1d0dfed863dcce81810e4a56 Mon Sep 17 00:00:00 2001 From: Akeem G Abodunrin Date: Wed, 28 Aug 2013 02:23:09 +0000 Subject: igb: Get speed and duplex for 1G non_copper devices This patch changes how we get speed/duplex for non_copper devices; it now uses pcs register to get current speed and duplex instead of using generic status register that we use to detect speed/duplex for copper devices. Signed-off-by: Akeem G Abodunrin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 68dd7c8..79b5835 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -1141,6 +1141,31 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw) } /** + * igb_get_link_up_info_82575 - Get link speed/duplex info + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * This is a wrapper function, if using the serial gigabit media independent + * interface, use PCS to retrieve the link speed and duplex information. + * Otherwise, use the generic function to get the link speed and duplex info. + **/ +static s32 igb_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + + if (hw->phy.media_type != e1000_media_type_copper) + ret_val = igb_get_pcs_speed_and_duplex_82575(hw, speed, + duplex); + else + ret_val = igb_get_speed_and_duplex_copper(hw, speed, + duplex); + + return ret_val; +} + +/** * igb_check_for_link_82575 - Check for link * @hw: pointer to the HW structure * @@ -2723,7 +2748,7 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = { .check_for_link = igb_check_for_link_82575, .rar_set = igb_rar_set, .read_mac_addr = igb_read_mac_addr_82575, - .get_speed_and_duplex = igb_get_speed_and_duplex_copper, + .get_speed_and_duplex = igb_get_link_up_info_82575, #ifdef CONFIG_IGB_HWMON .get_thermal_sensor_data = igb_get_thermal_sensor_data_generic, .init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic, -- cgit v0.10.2 From 41fcfbea0c11a6041430108f35bd23d885ee3bca Mon Sep 17 00:00:00 2001 From: Akeem G Abodunrin Date: Fri, 30 Aug 2013 23:49:36 +0000 Subject: igb: Implementation to report advertised/supported link on i354 devices This patch changes the way we report supported/advertised link for i354 devices, especially for 2.5 GB. Instead of reporting 2.5 GB for all i354 devices erroneously, check first, if it is 2.5 GB capable. 
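In outline, a small wrapper now picks the PCS helper for non-copper media and falls back to the generic copper routine otherwise (condensed from the new igb_get_link_up_info_82575() in the hunk below):

	if (hw->phy.media_type != e1000_media_type_copper)
		ret_val = igb_get_pcs_speed_and_duplex_82575(hw, speed, duplex);
	else
		ret_val = igb_get_speed_and_duplex_copper(hw, speed, duplex);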
Signed-off-by: Akeem G Abodunrin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index ce9b5a9..48cbc83 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -172,10 +172,7 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) SUPPORTED_Autoneg | SUPPORTED_Pause); ecmd->advertising = ADVERTISED_FIBRE; - if (hw->mac.type == e1000_i354) { - ecmd->supported |= SUPPORTED_2500baseX_Full; - ecmd->advertising |= ADVERTISED_2500baseX_Full; - } + if ((eth_flags->e1000_base_lx) || (eth_flags->e1000_base_sx)) { ecmd->supported |= SUPPORTED_1000baseT_Full; ecmd->advertising |= ADVERTISED_1000baseT_Full; @@ -209,16 +206,23 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) status = rd32(E1000_STATUS); if (status & E1000_STATUS_LU) { - if ((hw->mac.type == e1000_i354) && - (status & E1000_STATUS_2P5_SKU) && - !(status & E1000_STATUS_2P5_SKU_OVER)) - ecmd->speed = SPEED_2500; - else if (status & E1000_STATUS_SPEED_1000) + if (hw->mac.type == e1000_i354) { + if ((status & E1000_STATUS_2P5_SKU) && + !(status & E1000_STATUS_2P5_SKU_OVER)) { + ecmd->supported = SUPPORTED_2500baseX_Full; + ecmd->advertising = ADVERTISED_2500baseX_Full; + ecmd->speed = SPEED_2500; + } else { + ecmd->supported = SUPPORTED_1000baseT_Full; + ecmd->advertising = ADVERTISED_1000baseT_Full; + } + } else if (status & E1000_STATUS_SPEED_1000) { ecmd->speed = SPEED_1000; - else if (status & E1000_STATUS_SPEED_100) + } else if (status & E1000_STATUS_SPEED_100) { ecmd->speed = SPEED_100; - else + } else { ecmd->speed = SPEED_10; + } if ((status & E1000_STATUS_FD) || hw->phy.media_type != e1000_media_type_copper) ecmd->duplex = DUPLEX_FULL; -- cgit v0.10.2 From 66f40b8a294c5266c2bfc1f1ed2eb8b2bb00d5e8 Mon Sep 17 00:00:00 2001 From: Akeem G Abodunrin Date: Thu, 22 Aug 2013 14:23:10 +0000 Subject: igb: Update version number This patch updates igb driver version to 5.0.5 Signed-off-by: Akeem G Abodunrin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index b209ca5..8cf44f2 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -62,7 +62,7 @@ #define MAJ 5 #define MIN 0 -#define BUILD 3 +#define BUILD 5 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ __stringify(BUILD) "-k" char igb_driver_name[] = "igb"; -- cgit v0.10.2 From 42a5a5c128ad33fe25867b838a4dade127b57aee Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 4 Sep 2013 18:07:27 +0300 Subject: sfc: check for allocation failure It upsets static analyzers when we don't check for allocation failure. Signed-off-by: Dan Carpenter Signed-off-by: David S. 
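Condensed from the ethtool hunk below: with link up on an i354, 2500baseX is reported as supported/advertised only when the 2.5Gb SKU bits confirm the capability, otherwise 1000baseT is reported:

	if (hw->mac.type == e1000_i354) {
		if ((status & E1000_STATUS_2P5_SKU) &&
		    !(status & E1000_STATUS_2P5_SKU_OVER)) {
			ecmd->supported = SUPPORTED_2500baseX_Full;
			ecmd->advertising = ADVERTISED_2500baseX_Full;
			ecmd->speed = SPEED_2500;
		} else {
			ecmd->supported = SUPPORTED_1000baseT_Full;
			ecmd->advertising = ADVERTISED_1000baseT_Full;
		}
	}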
Miller diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index 8685f99..ff5d322 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -893,6 +893,8 @@ static int falcon_mtd_probe(struct efx_nic *efx) /* Allocate space for maximum number of partitions */ parts = kcalloc(2, sizeof(*parts), GFP_KERNEL); + if (!parts) + return -ENOMEM; n_parts = 0; spi = &nic_data->spi_flash; -- cgit v0.10.2 From cfd280c91253cc28e4919e349fa7a813b63e71e8 Mon Sep 17 00:00:00 2001 From: Carlos O'Donell Date: Thu, 15 Aug 2013 17:28:10 +0800 Subject: net: sync some IP headers with glibc Solution: ========= - Synchronize linux's `include/uapi/linux/in6.h' with glibc's `inet/netinet/in.h'. - Synchronize glibc's `inet/netinet/in.h with linux's `include/uapi/linux/in6.h'. - Allow including the headers in either other. - First header included defines the structures and macros. Details: ======== The kernel promises not to break the UAPI ABI so I don't see why we can't just have the two userspace headers coordinate? If you include the kernel headers first you get those, and if you include the glibc headers first you get those, and the following patch arranges a coordination and synchronization between the two. Let's handle `include/uapi/linux/in6.h' from linux, and `inet/netinet/in.h' from glibc and ensure they compile in any order and preserve the required ABI. These two patches pass the following compile tests: cat >> test1.c <> test2.c < Cc: Thomas Backlund Cc: libc-alpha@sourceware.org Cc: YOSHIFUJI Hideaki Cc: David S. Miller Tested-by: Cong Wang Signed-off-by: Carlos O'Donell Signed-off-by: Cong Wang Signed-off-by: David S. Miller diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index 997f9f2..e7c94ee 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -227,6 +227,7 @@ header-y += kvm_para.h endif header-y += l2tp.h +header-y += libc-compat.h header-y += limits.h header-y += llc.h header-y += loop.h diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h index 9edb441..f9e8e49 100644 --- a/include/uapi/linux/in.h +++ b/include/uapi/linux/in.h @@ -24,30 +24,53 @@ /* Standard well-defined IP protocols. 
*/ enum { IPPROTO_IP = 0, /* Dummy protocol for TCP */ +#define IPPROTO_IP IPPROTO_IP IPPROTO_ICMP = 1, /* Internet Control Message Protocol */ +#define IPPROTO_ICMP IPPROTO_ICMP IPPROTO_IGMP = 2, /* Internet Group Management Protocol */ +#define IPPROTO_IGMP IPPROTO_IGMP IPPROTO_IPIP = 4, /* IPIP tunnels (older KA9Q tunnels use 94) */ +#define IPPROTO_IPIP IPPROTO_IPIP IPPROTO_TCP = 6, /* Transmission Control Protocol */ +#define IPPROTO_TCP IPPROTO_TCP IPPROTO_EGP = 8, /* Exterior Gateway Protocol */ +#define IPPROTO_EGP IPPROTO_EGP IPPROTO_PUP = 12, /* PUP protocol */ +#define IPPROTO_PUP IPPROTO_PUP IPPROTO_UDP = 17, /* User Datagram Protocol */ +#define IPPROTO_UDP IPPROTO_UDP IPPROTO_IDP = 22, /* XNS IDP protocol */ +#define IPPROTO_IDP IPPROTO_IDP + IPPROTO_TP = 29, /* SO Transport Protocol Class 4 */ +#define IPPROTO_TP IPPROTO_TP IPPROTO_DCCP = 33, /* Datagram Congestion Control Protocol */ - IPPROTO_RSVP = 46, /* RSVP protocol */ +#define IPPROTO_DCCP IPPROTO_DCCP + IPPROTO_IPV6 = 41, /* IPv6-in-IPv4 tunnelling */ +#define IPPROTO_IPV6 IPPROTO_IPV6 + IPPROTO_RSVP = 46, /* RSVP Protocol */ +#define IPPROTO_RSVP IPPROTO_RSVP IPPROTO_GRE = 47, /* Cisco GRE tunnels (rfc 1701,1702) */ - - IPPROTO_IPV6 = 41, /* IPv6-in-IPv4 tunnelling */ - - IPPROTO_ESP = 50, /* Encapsulation Security Payload protocol */ - IPPROTO_AH = 51, /* Authentication Header protocol */ - IPPROTO_BEETPH = 94, /* IP option pseudo header for BEET */ - IPPROTO_PIM = 103, /* Protocol Independent Multicast */ - - IPPROTO_COMP = 108, /* Compression Header protocol */ - IPPROTO_SCTP = 132, /* Stream Control Transport Protocol */ +#define IPPROTO_GRE IPPROTO_GRE + IPPROTO_ESP = 50, /* Encapsulation Security Payload protocol */ +#define IPPROTO_ESP IPPROTO_ESP + IPPROTO_AH = 51, /* Authentication Header protocol */ +#define IPPROTO_AH IPPROTO_AH + IPPROTO_MTP = 92, /* Multicast Transport Protocol */ +#define IPPROTO_MTP IPPROTO_MTP + IPPROTO_BEETPH = 94, /* IP option pseudo header for BEET */ +#define IPPROTO_BEETPH IPPROTO_BEETPH + IPPROTO_ENCAP = 98, /* Encapsulation Header */ +#define IPPROTO_ENCAP IPPROTO_ENCAP + IPPROTO_PIM = 103, /* Protocol Independent Multicast */ +#define IPPROTO_PIM IPPROTO_PIM + IPPROTO_COMP = 108, /* Compression Header Protocol */ +#define IPPROTO_COMP IPPROTO_COMP + IPPROTO_SCTP = 132, /* Stream Control Transport Protocol */ +#define IPPROTO_SCTP IPPROTO_SCTP IPPROTO_UDPLITE = 136, /* UDP-Lite (RFC 3828) */ - - IPPROTO_RAW = 255, /* Raw IP packets */ +#define IPPROTO_UDPLITE IPPROTO_UDPLITE + IPPROTO_RAW = 255, /* Raw IP packets */ +#define IPPROTO_RAW IPPROTO_RAW IPPROTO_MAX }; diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h index 53b1d56..440d5c4 100644 --- a/include/uapi/linux/in6.h +++ b/include/uapi/linux/in6.h @@ -22,22 +22,30 @@ #define _UAPI_LINUX_IN6_H #include +#include /* * IPv6 address structure */ +#if __UAPI_DEF_IN6_ADDR struct in6_addr { union { __u8 u6_addr8[16]; +#if __UAPI_DEF_IN6_ADDR_ALT __be16 u6_addr16[8]; __be32 u6_addr32[4]; +#endif } in6_u; #define s6_addr in6_u.u6_addr8 +#if __UAPI_DEF_IN6_ADDR_ALT #define s6_addr16 in6_u.u6_addr16 #define s6_addr32 in6_u.u6_addr32 +#endif }; +#endif /* __UAPI_DEF_IN6_ADDR */ +#if __UAPI_DEF_SOCKADDR_IN6 struct sockaddr_in6 { unsigned short int sin6_family; /* AF_INET6 */ __be16 sin6_port; /* Transport layer port # */ @@ -45,7 +53,9 @@ struct sockaddr_in6 { struct in6_addr sin6_addr; /* IPv6 address */ __u32 sin6_scope_id; /* scope id (new in RFC2553) */ }; +#endif /* __UAPI_DEF_SOCKADDR_IN6 */ +#if 
__UAPI_DEF_IPV6_MREQ struct ipv6_mreq { /* IPv6 multicast address of group */ struct in6_addr ipv6mr_multiaddr; @@ -53,6 +63,7 @@ struct ipv6_mreq { /* local IPv6 address of interface */ int ipv6mr_ifindex; }; +#endif /* __UAPI_DEF_IVP6_MREQ */ #define ipv6mr_acaddr ipv6mr_multiaddr @@ -114,13 +125,24 @@ struct in6_flowlabel_req { /* * IPV6 extension headers */ -#define IPPROTO_HOPOPTS 0 /* IPv6 hop-by-hop options */ -#define IPPROTO_ROUTING 43 /* IPv6 routing header */ -#define IPPROTO_FRAGMENT 44 /* IPv6 fragmentation header */ -#define IPPROTO_ICMPV6 58 /* ICMPv6 */ -#define IPPROTO_NONE 59 /* IPv6 no next header */ -#define IPPROTO_DSTOPTS 60 /* IPv6 destination options */ -#define IPPROTO_MH 135 /* IPv6 mobility header */ +#if __UAPI_DEF_IPPROTO_V6 +enum { + IPPROTO_HOPOPTS = 0, /* IPv6 hop-by-hop options */ +#define IPPROTO_HOPOPTS IPPROTO_HOPOPTS + IPPROTO_ROUTING = 43, /* IPv6 routing header */ +#define IPPROTO_ROUTING IPPROTO_ROUTING + IPPROTO_FRAGMENT = 44, /* IPv6 fragmentation header */ +#define IPPROTO_FRAGMENT IPPROTO_FRAGMENT + IPPROTO_ICMPV6 = 58, /* ICMPv6 */ +#define IPPROTO_ICMPV6 IPPROTO_ICMPV6 + IPPROTO_NONE = 59, /* IPv6 no next header */ +#define IPPROTO_NONE IPPROTO_NONE + IPPROTO_DSTOPTS = 60, /* IPv6 destination options */ +#define IPPROTO_DSTOPTS IPPROTO_DSTOPTS + IPPROTO_MH = 135, /* IPv6 mobility header */ +#define IPPROTO_MH IPPROTO_MH +}; +#endif /* __UAPI_DEF_IPPROTO_V6 */ /* * IPv6 TLV options. diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h new file mode 100644 index 0000000..335e8a7 --- /dev/null +++ b/include/uapi/linux/libc-compat.h @@ -0,0 +1,103 @@ +/* + * Compatibility interface for userspace libc header coordination: + * + * Define compatibility macros that are used to control the inclusion or + * exclusion of UAPI structures and definitions in coordination with another + * userspace C library. + * + * This header is intended to solve the problem of UAPI definitions that + * conflict with userspace definitions. If a UAPI header has such conflicting + * definitions then the solution is as follows: + * + * * Synchronize the UAPI header and the libc headers so either one can be + * used and such that the ABI is preserved. If this is not possible then + * no simple compatibility interface exists (you need to write translating + * wrappers and rename things) and you can't use this interface. + * + * Then follow this process: + * + * (a) Include libc-compat.h in the UAPI header. + * e.g. #include + * This include must be as early as possible. + * + * (b) In libc-compat.h add enough code to detect that the comflicting + * userspace libc header has been included first. + * + * (c) If the userspace libc header has been included first define a set of + * guard macros of the form __UAPI_DEF_FOO and set their values to 1, else + * set their values to 0. + * + * (d) Back in the UAPI header with the conflicting definitions, guard the + * definitions with: + * #if __UAPI_DEF_FOO + * ... + * #endif + * + * This fixes the situation where the linux headers are included *after* the + * libc headers. To fix the problem with the inclusion in the other order the + * userspace libc headers must be fixed like this: + * + * * For all definitions that conflict with kernel definitions wrap those + * defines in the following: + * #if !__UAPI_DEF_FOO + * ... + * #endif + * + * This prevents the redefinition of a construct already defined by the kernel. 
+ */ +#ifndef _UAPI_LIBC_COMPAT_H +#define _UAPI_LIBC_COMPAT_H + +/* We have included glibc headers... */ +#if defined(__GLIBC__) + +/* Coordinate with glibc netinet/in.h header. */ +#if defined(_NETINET_IN_H) + +/* GLIBC headers included first so don't define anything + * that would already be defined. */ +#define __UAPI_DEF_IN6_ADDR 0 +/* The exception is the in6_addr macros which must be defined + * if the glibc code didn't define them. This guard matches + * the guard in glibc/inet/netinet/in.h which defines the + * additional in6_addr macros e.g. s6_addr16, and s6_addr32. */ +#if defined(__USE_MISC) || defined (__USE_GNU) +#define __UAPI_DEF_IN6_ADDR_ALT 0 +#else +#define __UAPI_DEF_IN6_ADDR_ALT 1 +#endif +#define __UAPI_DEF_SOCKADDR_IN6 0 +#define __UAPI_DEF_IPV6_MREQ 0 +#define __UAPI_DEF_IPPROTO_V6 0 + +#else + +/* Linux headers included first, and we must define everything + * we need. The expectation is that glibc will check the + * __UAPI_DEF_* defines and adjust appropriately. */ +#define __UAPI_DEF_IN6_ADDR 1 +/* We unconditionally define the in6_addr macros and glibc must + * coordinate. */ +#define __UAPI_DEF_IN6_ADDR_ALT 1 +#define __UAPI_DEF_SOCKADDR_IN6 1 +#define __UAPI_DEF_IPV6_MREQ 1 +#define __UAPI_DEF_IPPROTO_V6 1 + +#endif /* _NETINET_IN_H */ + + +/* If we did not see any headers from any supported C libraries, + * or we are being included in the kernel, then define everything + * that we need. */ +#else /* !defined(__GLIBC__) */ + +/* Definitions for in6.h */ +#define __UAPI_DEF_IN6_ADDR 1 +#define __UAPI_DEF_IN6_ADDR_ALT 1 +#define __UAPI_DEF_SOCKADDR_IN6 1 +#define __UAPI_DEF_IPV6_MREQ 1 +#define __UAPI_DEF_IPPROTO_V6 1 + +#endif /* __GLIBC__ */ + +#endif /* _UAPI_LIBC_COMPAT_H */ -- cgit v0.10.2 From 0a171933a4b5d6706b782271ac68462c97c1ac50 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Mon, 2 Sep 2013 11:54:21 +0200 Subject: drivers:net: delete premature free_irq Free_irq is not needed if there has been no request_irq. Free_irq is removed from both the probe and remove functions. The correct request_irq and free_irq are found in the open and close functions. A simplified version of the semantic match that finds this problem is as follows: (http://coccinelle.lip6.fr/) // @@ expression e; @@ *e = platform_get_irq(...); ... when != request_irq(e,...) *free_irq(e,...) // Signed-off-by: Julia Lawall Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c index dcfe58f..79645f7 100644 --- a/drivers/net/ethernet/nuvoton/w90p910_ether.c +++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c @@ -1014,7 +1014,7 @@ static int w90p910_ether_probe(struct platform_device *pdev) if (ether->rxirq < 0) { dev_err(&pdev->dev, "failed to get ether rx irq\n"); error = -ENXIO; - goto failed_free_txirq; + goto failed_free_io; } platform_set_drvdata(pdev, dev); @@ -1023,7 +1023,7 @@ static int w90p910_ether_probe(struct platform_device *pdev) if (IS_ERR(ether->clk)) { dev_err(&pdev->dev, "failed to get ether clock\n"); error = PTR_ERR(ether->clk); - goto failed_free_rxirq; + goto failed_free_io; } ether->rmiiclk = clk_get(&pdev->dev, "RMII"); @@ -1049,10 +1049,6 @@ failed_put_rmiiclk: clk_put(ether->rmiiclk); failed_put_clk: clk_put(ether->clk); -failed_free_rxirq: - free_irq(ether->rxirq, pdev); -failed_free_txirq: - free_irq(ether->txirq, pdev); failed_free_io: iounmap(ether->reg); failed_free_mem: @@ -1075,9 +1071,6 @@ static int w90p910_ether_remove(struct platform_device *pdev) iounmap(ether->reg); release_mem_region(ether->res->start, resource_size(ether->res)); - free_irq(ether->txirq, dev); - free_irq(ether->rxirq, dev); - del_timer_sync(ðer->check_timer); free_netdev(dev); -- cgit v0.10.2 From c08751c851b78514f6ec5f77f7cbebaac63d15c0 Mon Sep 17 00:00:00 2001 From: Alexander Sverdlin Date: Mon, 2 Sep 2013 15:58:25 +0200 Subject: net: sctp: Fix data chunk fragmentation for MTU values which are not multiple of 4 net: sctp: Fix data chunk fragmentation for MTU values which are not multiple of 4 Initially the problem was observed with ipsec, but later it became clear that SCTP data chunk fragmentation algorithm has problems with MTU values which are not multiple of 4. Test program was used which just transmits 2000 bytes long packets to other host. tcpdump was used to observe re-fragmentation in IP layer after SCTP already fragmented data chunks. With MTU 1500: 12:54:34.082904 IP (tos 0x2,ECT(0), ttl 64, id 0, offset 0, flags [DF], proto SCTP (132), length 1500) 10.151.38.153.39303 > 10.151.24.91.54321: sctp (1) [DATA] (B) [TSN: 2366088589] [SID: 0] [SSEQ 1] [PPID 0x0] 12:54:34.082933 IP (tos 0x2,ECT(0), ttl 64, id 0, offset 0, flags [DF], proto SCTP (132), length 596) 10.151.38.153.39303 > 10.151.24.91.54321: sctp (1) [DATA] (E) [TSN: 2366088590] [SID: 0] [SSEQ 1] [PPID 0x0] 12:54:34.090576 IP (tos 0x2,ECT(0), ttl 63, id 0, offset 0, flags [DF], proto SCTP (132), length 48) 10.151.24.91.54321 > 10.151.38.153.39303: sctp (1) [SACK] [cum ack 2366088590] [a_rwnd 79920] [#gap acks 0] [#dup tsns 0] With MTU 1499: 13:02:49.955220 IP (tos 0x2,ECT(0), ttl 64, id 48215, offset 0, flags [+], proto SCTP (132), length 1492) 10.151.38.153.39084 > 10.151.24.91.54321: sctp[|sctp] 13:02:49.955249 IP (tos 0x2,ECT(0), ttl 64, id 48215, offset 1472, flags [none], proto SCTP (132), length 28) 10.151.38.153 > 10.151.24.91: ip-proto-132 13:02:49.955262 IP (tos 0x2,ECT(0), ttl 64, id 0, offset 0, flags [DF], proto SCTP (132), length 600) 10.151.38.153.39084 > 10.151.24.91.54321: sctp (1) [DATA] (E) [TSN: 404355346] [SID: 0] [SSEQ 1] [PPID 0x0] 13:02:49.956770 IP (tos 0x2,ECT(0), ttl 63, id 0, offset 0, flags [DF], proto SCTP (132), length 48) 10.151.24.91.54321 > 10.151.38.153.39084: sctp (1) [SACK] [cum ack 404355346] [a_rwnd 79920] [#gap acks 0] [#dup tsns 0] Here problem in data portion limit calculation leads to re-fragmentation in IP, which is sub-optimal. 
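To make the MTU 1499 case concrete, here is an illustrative calculation (not part of the original report) assuming a 20-byte IPv4 header, the 12-byte SCTP common header and the 16-byte DATA chunk header:

	before: max_data = 1499 - 20 - 12 - 16         = 1451
	        chunk    = 16 + 1451 = 1467, padded to 1468
	        datagram = 20 + 12 + 1468               = 1500 > 1499 -> IP re-fragments
	after:  max_data = (1499 - 20 - 12 - 16) & ~3   = 1448
	        chunk    = 16 + 1448 = 1464 (already 4-byte aligned)
	        datagram = 20 + 12 + 1464               = 1496 <= 1499 -> fits

The 1496-byte datagram matches the post-fix trace shown further below.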
The problem is max_data initial value, which doesn't take into account the fact, that data chunk must be padded to 4-bytes boundary. It's enough to correct max_data, because all later adjustments are correctly aligned to 4-bytes boundary. After the fix is applied, everything is fragmented correctly for uneven MTUs: 15:16:27.083881 IP (tos 0x2,ECT(0), ttl 64, id 0, offset 0, flags [DF], proto SCTP (132), length 1496) 10.151.38.153.53417 > 10.151.24.91.54321: sctp (1) [DATA] (B) [TSN: 3077098183] [SID: 0] [SSEQ 1] [PPID 0x0] 15:16:27.083907 IP (tos 0x2,ECT(0), ttl 64, id 0, offset 0, flags [DF], proto SCTP (132), length 600) 10.151.38.153.53417 > 10.151.24.91.54321: sctp (1) [DATA] (E) [TSN: 3077098184] [SID: 0] [SSEQ 1] [PPID 0x0] 15:16:27.085640 IP (tos 0x2,ECT(0), ttl 63, id 0, offset 0, flags [DF], proto SCTP (132), length 48) 10.151.24.91.54321 > 10.151.38.153.53417: sctp (1) [SACK] [cum ack 3077098184] [a_rwnd 79920] [#gap acks 0] [#dup tsns 0] The bug was there for years already, but - is a performance issue, the packets are still transmitted - doesn't show up with default MTU 1500, but possibly with ipsec (MTU 1438) Signed-off-by: Alexander Sverdlin Acked-by: Vlad Yasevich Acked-by: Neil Horman Signed-off-by: David S. Miller diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c index bd0bdd0..7bd5ed4 100644 --- a/net/sctp/chunk.c +++ b/net/sctp/chunk.c @@ -195,9 +195,9 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, /* This is the biggest possible DATA chunk that can fit into * the packet */ - max_data = asoc->pathmtu - + max_data = (asoc->pathmtu - sctp_sk(asoc->base.sk)->pf->af->net_header_len - - sizeof(struct sctphdr) - sizeof(struct sctp_data_chunk); + sizeof(struct sctphdr) - sizeof(struct sctp_data_chunk)) & ~3; max = asoc->frag_point; /* If the the peer requested that we authenticate DATA chunks -- cgit v0.10.2 From a8e9fd0f7462f5cada5189513d12fe6c9cce2105 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Tue, 3 Sep 2013 03:03:10 +0400 Subject: sh_eth: NAPI requires netif_receive_skb() Driver supporting NAPI should use NAPI-specific function for receiving packets, so netif_rx() should be changed to netif_receive_skb(). Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index a753928..1ccd595 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1299,7 +1299,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) skb_reserve(skb, NET_IP_ALIGN); skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, ndev); - netif_rx(skb); + netif_receive_skb(skb); ndev->stats.rx_packets++; ndev->stats.rx_bytes += pkt_len; } -- cgit v0.10.2 From 639739b5e609a5074839bb22fc061b37baa06269 Mon Sep 17 00:00:00 2001 From: Hannes Frederic Sowa Date: Tue, 3 Sep 2013 02:13:31 +0200 Subject: ipv6: fix null pointer dereference in __ip6addrlbl_add Commit b67bfe0d42cac56c512dd5da4b1b347a23f4b70a ("hlist: drop the node parameter from iterators") changed the behavior of hlist_for_each_entry_safe to leave the p argument NULL. Fix this up by tracking the last argument. Reported-by: Michele Baldessari Cc: Hideaki YOSHIFUJI Cc: Sasha Levin Signed-off-by: Hannes Frederic Sowa Tested-by: Michele Baldessari Signed-off-by: David S. 
Miller diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c index f083a58..b30ad37 100644 --- a/net/ipv6/addrlabel.c +++ b/net/ipv6/addrlabel.c @@ -251,38 +251,36 @@ static struct ip6addrlbl_entry *ip6addrlbl_alloc(struct net *net, /* add a label */ static int __ip6addrlbl_add(struct ip6addrlbl_entry *newp, int replace) { + struct hlist_node *n; + struct ip6addrlbl_entry *last = NULL, *p = NULL; int ret = 0; - ADDRLABEL(KERN_DEBUG "%s(newp=%p, replace=%d)\n", - __func__, - newp, replace); + ADDRLABEL(KERN_DEBUG "%s(newp=%p, replace=%d)\n", __func__, newp, + replace); - if (hlist_empty(&ip6addrlbl_table.head)) { - hlist_add_head_rcu(&newp->list, &ip6addrlbl_table.head); - } else { - struct hlist_node *n; - struct ip6addrlbl_entry *p = NULL; - hlist_for_each_entry_safe(p, n, - &ip6addrlbl_table.head, list) { - if (p->prefixlen == newp->prefixlen && - net_eq(ip6addrlbl_net(p), ip6addrlbl_net(newp)) && - p->ifindex == newp->ifindex && - ipv6_addr_equal(&p->prefix, &newp->prefix)) { - if (!replace) { - ret = -EEXIST; - goto out; - } - hlist_replace_rcu(&p->list, &newp->list); - ip6addrlbl_put(p); - goto out; - } else if ((p->prefixlen == newp->prefixlen && !p->ifindex) || - (p->prefixlen < newp->prefixlen)) { - hlist_add_before_rcu(&newp->list, &p->list); + hlist_for_each_entry_safe(p, n, &ip6addrlbl_table.head, list) { + if (p->prefixlen == newp->prefixlen && + net_eq(ip6addrlbl_net(p), ip6addrlbl_net(newp)) && + p->ifindex == newp->ifindex && + ipv6_addr_equal(&p->prefix, &newp->prefix)) { + if (!replace) { + ret = -EEXIST; goto out; } + hlist_replace_rcu(&p->list, &newp->list); + ip6addrlbl_put(p); + goto out; + } else if ((p->prefixlen == newp->prefixlen && !p->ifindex) || + (p->prefixlen < newp->prefixlen)) { + hlist_add_before_rcu(&newp->list, &p->list); + goto out; } - hlist_add_after_rcu(&p->list, &newp->list); + last = p; } + if (last) + hlist_add_after_rcu(&last->list, &newp->list); + else + hlist_add_head_rcu(&newp->list, &ip6addrlbl_table.head); out: if (!ret) ip6addrlbl_table.seq++; -- cgit v0.10.2 From 36e24e2ee29e25480b4be4a9ea467f4739be22fb Mon Sep 17 00:00:00 2001 From: Duan Fugang-B38611 Date: Tue, 3 Sep 2013 10:41:18 +0800 Subject: net: fec: fix the error to get the previous BD entry Bug: error to get the previous BD entry. When the current BD is the first BD, the previous BD entry must be the last BD, not "bdp - 1" in current logic. V4: * Optimize fec_enet_get_nextdesc() for code clean. Replace "ex_new_bd - ring_size" with "ex_base". Replace "new_bd - ring_size" with "base". V3: * Restore the API name because David suggest to use fec_enet_ prefix for all function in fec driver. So, change next_bd() -> fec_enet_get_nextdesc() change pre_bd() -> fec_enet_get_prevdesc() * Reduce the two APIs parameters for easy to call. V2: * Add tx_ring_size and rx_ring_size to struct fec_enet_private. * Replace api fec_enet_get_nextdesc() with next_bd(). Replace api fec_enet_get_prevdesc() with pre_bd(). * Move all ring size check logic to next_bd() and pre_bd(), which simplifies the code redundancy. V1: * Add BD ring size check to get the previous BD entry in correctly. Reviewed-by: Li Frank Signed-off-by: Fugang Duan Acked-by: Frank Li Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index ae23600..0120217 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -296,6 +296,9 @@ struct fec_enet_private { /* The ring entries to be free()ed */ struct bufdesc *dirty_tx; + unsigned short tx_ring_size; + unsigned short rx_ring_size; + struct platform_device *pdev; int opened; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index c610a27..fd90bf5 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -239,22 +239,57 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); static int mii_cnt; -static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, int is_ex) +static inline +struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_private *fep) { - struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp; - if (is_ex) - return (struct bufdesc *)(ex + 1); + struct bufdesc *new_bd = bdp + 1; + struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1; + struct bufdesc_ex *ex_base; + struct bufdesc *base; + int ring_size; + + if (bdp >= fep->tx_bd_base) { + base = fep->tx_bd_base; + ring_size = fep->tx_ring_size; + ex_base = (struct bufdesc_ex *)fep->tx_bd_base; + } else { + base = fep->rx_bd_base; + ring_size = fep->rx_ring_size; + ex_base = (struct bufdesc_ex *)fep->rx_bd_base; + } + + if (fep->bufdesc_ex) + return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ? + ex_base : ex_new_bd); else - return bdp + 1; + return (new_bd >= (base + ring_size)) ? + base : new_bd; } -static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, int is_ex) +static inline +struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_private *fep) { - struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp; - if (is_ex) - return (struct bufdesc *)(ex - 1); + struct bufdesc *new_bd = bdp - 1; + struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1; + struct bufdesc_ex *ex_base; + struct bufdesc *base; + int ring_size; + + if (bdp >= fep->tx_bd_base) { + base = fep->tx_bd_base; + ring_size = fep->tx_ring_size; + ex_base = (struct bufdesc_ex *)fep->tx_bd_base; + } else { + base = fep->rx_bd_base; + ring_size = fep->rx_ring_size; + ex_base = (struct bufdesc_ex *)fep->rx_bd_base; + } + + if (fep->bufdesc_ex) + return (struct bufdesc *)((ex_new_bd < ex_base) ? + (ex_new_bd + ring_size) : ex_new_bd); else - return bdp - 1; + return (new_bd < base) ? (new_bd + ring_size) : new_bd; } static void *swap_buffer(void *bufaddr, int len) @@ -380,7 +415,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) } } - bdp_pre = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); + bdp_pre = fec_enet_get_prevdesc(bdp, fep); if ((id_entry->driver_data & FEC_QUIRK_ERR006358) && !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) { fep->delay_work.trig_tx = true; @@ -389,10 +424,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) } /* If this was the last BD in the ring, start at the beginning again. */ - if (status & BD_ENET_TX_WRAP) - bdp = fep->tx_bd_base; - else - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_nextdesc(bdp, fep); fep->cur_tx = bdp; @@ -417,18 +449,18 @@ static void fec_enet_bd_init(struct net_device *dev) /* Initialize the receive buffer descriptors. 
*/ bdp = fep->rx_bd_base; - for (i = 0; i < RX_RING_SIZE; i++) { + for (i = 0; i < fep->rx_ring_size; i++) { /* Initialize the BD for every fragment in the page. */ if (bdp->cbd_bufaddr) bdp->cbd_sc = BD_ENET_RX_EMPTY; else bdp->cbd_sc = 0; - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_nextdesc(bdp, fep); } /* Set the last buffer to wrap */ - bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_prevdesc(bdp, fep); bdp->cbd_sc |= BD_SC_WRAP; fep->cur_rx = fep->rx_bd_base; @@ -436,7 +468,7 @@ static void fec_enet_bd_init(struct net_device *dev) /* ...and the same for transmit */ bdp = fep->tx_bd_base; fep->cur_tx = bdp; - for (i = 0; i < TX_RING_SIZE; i++) { + for (i = 0; i < fep->tx_ring_size; i++) { /* Initialize the BD for every fragment in the page. */ bdp->cbd_sc = 0; @@ -445,11 +477,11 @@ static void fec_enet_bd_init(struct net_device *dev) fep->tx_skbuff[i] = NULL; } bdp->cbd_bufaddr = 0; - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_nextdesc(bdp, fep); } /* Set the last buffer to wrap */ - bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_prevdesc(bdp, fep); bdp->cbd_sc |= BD_SC_WRAP; fep->dirty_tx = bdp; } @@ -510,10 +542,10 @@ fec_restart(struct net_device *ndev, int duplex) writel(fep->bd_dma, fep->hwp + FEC_R_DES_START); if (fep->bufdesc_ex) writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex) - * RX_RING_SIZE, fep->hwp + FEC_X_DES_START); + * fep->rx_ring_size, fep->hwp + FEC_X_DES_START); else writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) - * RX_RING_SIZE, fep->hwp + FEC_X_DES_START); + * fep->rx_ring_size, fep->hwp + FEC_X_DES_START); for (i = 0; i <= TX_RING_MOD_MASK; i++) { @@ -727,10 +759,7 @@ fec_enet_tx(struct net_device *ndev) bdp = fep->dirty_tx; /* get next bdp of dirty_tx */ - if (bdp->cbd_sc & BD_ENET_TX_WRAP) - bdp = fep->tx_bd_base; - else - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_nextdesc(bdp, fep); while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) { @@ -800,10 +829,7 @@ fec_enet_tx(struct net_device *ndev) fep->dirty_tx = bdp; /* Update pointer to next buffer descriptor to be transmitted */ - if (status & BD_ENET_TX_WRAP) - bdp = fep->tx_bd_base; - else - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_nextdesc(bdp, fep); /* Since we have freed up a buffer, the ring is no longer full */ @@ -993,10 +1019,8 @@ rx_processing_done: } /* Update BD pointer to next entry */ - if (status & BD_ENET_RX_WRAP) - bdp = fep->rx_bd_base; - else - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_nextdesc(bdp, fep); + /* Doing this here will keep the FEC running while we process * incoming frames. On a heavily loaded network, we should be * able to keep up at the expense of system resources. 
@@ -1662,7 +1686,7 @@ static void fec_enet_free_buffers(struct net_device *ndev) struct bufdesc *bdp; bdp = fep->rx_bd_base; - for (i = 0; i < RX_RING_SIZE; i++) { + for (i = 0; i < fep->rx_ring_size; i++) { skb = fep->rx_skbuff[i]; if (bdp->cbd_bufaddr) @@ -1670,11 +1694,11 @@ static void fec_enet_free_buffers(struct net_device *ndev) FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); if (skb) dev_kfree_skb(skb); - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_nextdesc(bdp, fep); } bdp = fep->tx_bd_base; - for (i = 0; i < TX_RING_SIZE; i++) + for (i = 0; i < fep->tx_ring_size; i++) kfree(fep->tx_bounce[i]); } @@ -1686,7 +1710,7 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) struct bufdesc *bdp; bdp = fep->rx_bd_base; - for (i = 0; i < RX_RING_SIZE; i++) { + for (i = 0; i < fep->rx_ring_size; i++) { skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); if (!skb) { fec_enet_free_buffers(ndev); @@ -1703,15 +1727,15 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) ebdp->cbd_esc = BD_ENET_RX_INT; } - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_nextdesc(bdp, fep); } /* Set the last buffer to wrap. */ - bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_prevdesc(bdp, fep); bdp->cbd_sc |= BD_SC_WRAP; bdp = fep->tx_bd_base; - for (i = 0; i < TX_RING_SIZE; i++) { + for (i = 0; i < fep->tx_ring_size; i++) { fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); bdp->cbd_sc = 0; @@ -1722,11 +1746,11 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) ebdp->cbd_esc = BD_ENET_TX_INT; } - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_nextdesc(bdp, fep); } /* Set the last buffer to wrap. */ - bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_prevdesc(bdp, fep); bdp->cbd_sc |= BD_SC_WRAP; return 0; @@ -1966,13 +1990,17 @@ static int fec_enet_init(struct net_device *ndev) /* Get the Ethernet address */ fec_get_mac(ndev); + /* init the tx & rx ring size */ + fep->tx_ring_size = TX_RING_SIZE; + fep->rx_ring_size = RX_RING_SIZE; + /* Set receive and transmit descriptor base. */ fep->rx_bd_base = cbd_base; if (fep->bufdesc_ex) fep->tx_bd_base = (struct bufdesc *) - (((struct bufdesc_ex *)cbd_base) + RX_RING_SIZE); + (((struct bufdesc_ex *)cbd_base) + fep->rx_ring_size); else - fep->tx_bd_base = cbd_base + RX_RING_SIZE; + fep->tx_bd_base = cbd_base + fep->rx_ring_size; /* The FEC Ethernet specific entries in the device structure */ ndev->watchdog_timeo = TX_TIMEOUT; -- cgit v0.10.2 From 0cf915809ce775cba3f08de5df1fb89bdbd95a28 Mon Sep 17 00:00:00 2001 From: Sonic Zhang Date: Tue, 3 Sep 2013 13:55:07 +0800 Subject: driver:stmmac: Adjust time stamp increase for 0.465 ns accurate only when Time stamp binary rollover is set. The synopsys spec says When TSCRLSSR is cleard, the rollover value of sub-second register is 0x7FFFFFFF(0.465 ns per clock). Signed-off-by: Sonic Zhang Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index def7e75..76ad214 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -45,8 +45,8 @@ static void stmmac_config_sub_second_increment(void __iomem *ioaddr) data = (1000000000ULL / 50000000); /* 0.465ns accuracy */ - if (value & PTP_TCR_TSCTRLSSR) - data = (data * 100) / 465; + if (!(value & PTP_TCR_TSCTRLSSR)) + data = (data * 1000) / 465; writel(data, ioaddr + PTP_SSIR); } -- cgit v0.10.2 From 25a6e6b84fba601eff7c28d30da8ad7cfbef0d43 Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Tue, 3 Sep 2013 13:37:01 +0200 Subject: ipv6: Don't depend on per socket memory for neighbour discovery messages Allocating skbs when sending out neighbour discovery messages currently uses sock_alloc_send_skb() based on a per net namespace socket and thus share a socket wmem buffer space. If a netdevice is temporarily unable to transmit due to carrier loss or for other reasons, the queued up ndisc messages will cosnume all of the wmem space and will thus prevent from any more skbs to be allocated even for netdevices that are able to transmit packets. The number of neighbour discovery messages sent is very limited, use of alloc_skb() bypasses the socket wmem buffer size enforcement while the manual call to skb_set_owner_w() maintains the socket reference needed for the IPv6 output path. This patch has orginally been posted by Eric Dumazet in a modified form. Signed-off-by: Thomas Graf Cc: Eric Dumazet Cc: Hannes Frederic Sowa Cc: Stephen Warren Cc: Fabio Estevam Tested-by: Fabio Estevam Tested-by: Stephen Warren Acked-by: Hannes Frederic Sowa Signed-off-by: David S. Miller diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 04d31c2..78141fa 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -372,14 +372,11 @@ static struct sk_buff *ndisc_alloc_skb(struct net_device *dev, int tlen = dev->needed_tailroom; struct sock *sk = dev_net(dev)->ipv6.ndisc_sk; struct sk_buff *skb; - int err; - skb = sock_alloc_send_skb(sk, - hlen + sizeof(struct ipv6hdr) + len + tlen, - 1, &err); + skb = alloc_skb(hlen + sizeof(struct ipv6hdr) + len + tlen, GFP_ATOMIC); if (!skb) { - ND_PRINTK(0, err, "ndisc: %s failed to allocate an skb, err=%d\n", - __func__, err); + ND_PRINTK(0, err, "ndisc: %s failed to allocate an skb\n", + __func__); return NULL; } @@ -389,6 +386,11 @@ static struct sk_buff *ndisc_alloc_skb(struct net_device *dev, skb_reserve(skb, hlen + sizeof(struct ipv6hdr)); skb_reset_transport_header(skb); + /* Manually assign socket ownership as we avoid calling + * sock_alloc_send_pskb() to bypass wmem buffer limits + */ + skb_set_owner_w(skb, sk); + return skb; } -- cgit v0.10.2 From 8390f81482e3bbf756dce3a5121f164140fd0412 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 3 Sep 2013 15:13:43 +0300 Subject: atm: nicstar: re-use native mac_pton() helper There is a nice helper to parse MAC. Let's use it and remove custom implementation. Signed-off-by: Andy Shevchenko Signed-off-by: David S. 
Miller diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c index 6587dc2..409502a 100644 --- a/drivers/atm/nicstar.c +++ b/drivers/atm/nicstar.c @@ -153,7 +153,6 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg); static void which_list(ns_dev * card, struct sk_buff *skb); #endif static void ns_poll(unsigned long arg); -static int ns_parse_mac(char *mac, unsigned char *esi); static void ns_phy_put(struct atm_dev *dev, unsigned char value, unsigned long addr); static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr); @@ -779,7 +778,7 @@ static int ns_init_card(int i, struct pci_dev *pcidev) return error; } - if (ns_parse_mac(mac[i], card->atmdev->esi)) { + if (mac[i] == NULL || mac_pton(mac[i], card->atmdev->esi)) { nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET, card->atmdev->esi, 6); if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) == @@ -2802,29 +2801,6 @@ static void ns_poll(unsigned long arg) PRINTK("nicstar: Leaving ns_poll().\n"); } -static int ns_parse_mac(char *mac, unsigned char *esi) -{ - int i, j; - short byte1, byte0; - - if (mac == NULL || esi == NULL) - return -1; - j = 0; - for (i = 0; i < 6; i++) { - if ((byte1 = hex_to_bin(mac[j++])) < 0) - return -1; - if ((byte0 = hex_to_bin(mac[j++])) < 0) - return -1; - esi[i] = (unsigned char)(byte1 * 16 + byte0); - if (i < 5) { - if (mac[j++] != ':') - return -1; - } - } - return 0; -} - - static void ns_phy_put(struct atm_dev *dev, unsigned char value, unsigned long addr) { -- cgit v0.10.2 From f8de31040d50b7e4c26a5ca4c02b2929dde34a58 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 3 Sep 2013 15:17:56 +0300 Subject: atm: he: print MAC via %pM Signed-off-by: Andy Shevchenko Signed-off-by: David S. Miller diff --git a/drivers/atm/he.c b/drivers/atm/he.c index 507362a..449f629 100644 --- a/drivers/atm/he.c +++ b/drivers/atm/he.c @@ -1088,15 +1088,8 @@ static int he_start(struct atm_dev *dev) for (i = 0; i < 6; ++i) dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i); - hprintk("%s%s, %x:%x:%x:%x:%x:%x\n", - he_dev->prod_id, - he_dev->media & 0x40 ? "SM" : "MM", - dev->esi[0], - dev->esi[1], - dev->esi[2], - dev->esi[3], - dev->esi[4], - dev->esi[5]); + hprintk("%s%s, %pM\n", he_dev->prod_id, + he_dev->media & 0x40 ? "SM" : "MM", dev->esi); he_dev->atm_dev->link_rate = he_is622(he_dev) ? ATM_OC12_PCR : ATM_OC3_PCR; -- cgit v0.10.2 From 430eda6d6d568eded71dfd1be5a16c0c1379e201 Mon Sep 17 00:00:00 2001 From: Pravin B Shelar Date: Tue, 3 Sep 2013 09:44:44 -0700 Subject: vxlan: Optimize vxlan rcv vxlan-udp-recv function lookup vxlan_sock struct on every packet recv by using udp-port number. we can use sk->sk_user_data to store vxlan_sock and avoid lookup. I have open coded rcu-api to store and read vxlan_sock from sk_user_data to avoid sparse warning as sk_user_data is not __rcu pointer. Signed-off-by: Pravin B Shelar Signed-off-by: David S. 
Miller diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index ebda3a1..bd35d2d 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -916,6 +916,8 @@ void vxlan_sock_release(struct vxlan_sock *vs) spin_lock(&vn->sock_lock); hlist_del_rcu(&vs->hlist); + smp_wmb(); + vs->sock->sk->sk_user_data = NULL; spin_unlock(&vn->sock_lock); queue_work(vxlan_wq, &vs->del_work); @@ -1009,7 +1011,8 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) port = inet_sk(sk)->inet_sport; - vs = vxlan_find_sock(sock_net(sk), port); + smp_read_barrier_depends(); + vs = (struct vxlan_sock *)sk->sk_user_data; if (!vs) goto drop; @@ -2236,6 +2239,8 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port, atomic_set(&vs->refcnt, 1); vs->rcv = rcv; vs->data = data; + smp_wmb(); + vs->sock->sk->sk_user_data = vs; spin_lock(&vn->sock_lock); hlist_add_head_rcu(&vs->hlist, vs_head(net, port)); -- cgit v0.10.2 From 52f20e655d9f6f7f937a1cdacf219d9df3ab6166 Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Tue, 3 Sep 2013 14:14:35 -0700 Subject: tcp: better comments for RTO initiallization Commit 1b7fdd2ab585("tcp: do not use cached RTT for RTT estimation") removes important comments on how RTO is initialized and updated. Hopefully this patch puts those information back. Signed-off-by: Yuchung Cheng Acked-by: Neal Cardwell Acked-by: Eric Dumazet Signed-off-by: David S. Miller diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index 273ed73..4a22f3e 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c @@ -481,13 +481,27 @@ void tcp_init_metrics(struct sock *sk) crtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT); rcu_read_unlock(); reset: + /* The initial RTT measurement from the SYN/SYN-ACK is not ideal + * to seed the RTO for later data packets because SYN packets are + * small. Use the per-dst cached values to seed the RTO but keep + * the RTT estimator variables intact (e.g., srtt, mdev, rttvar). + * Later the RTO will be updated immediately upon obtaining the first + * data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only + * influences the first RTO but not later RTT estimation. + * + * But if RTT is not available from the SYN (due to retransmits or + * syn cookies) or the cache, force a conservative 3secs timeout. + * + * A bit of theory. RTT is time passed after "normal" sized packet + * is sent until it is ACKed. In normal circumstances sending small + * packets force peer to delay ACKs and calculation is correct too. + * The algorithm is adaptive and, provided we follow specs, it + * NEVER underestimate RTT. BUT! If peer tries to make some clever + * tricks sort of "quick acks" for time long enough to decrease RTT + * to low value, and then abruptly stops to do it and starts to delay + * ACKs, wait for troubles. + */ if (crtt > tp->srtt) { - /* Initial RTT (tp->srtt) from SYN usually don't measure - * serialization delay on low BW links well so RTO may be - * under-estimated. Stay conservative and seed RTO with - * the RTTs from past data exchanges, using the same seeding - * formula in tcp_rtt_estimator(). 
- */ inet_csk(sk)->icsk_rto = crtt + max(crtt >> 2, tcp_rto_min(sk)); } else if (tp->srtt == 0) { /* RFC6298: 5.7 We've failed to get a valid RTT sample from -- cgit v0.10.2 From 3a1c756590633c0e86df606e5c618c190926a0df Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 3 Sep 2013 19:29:12 +0200 Subject: net: ipv6: tcp: fix potential use after free in tcp_v6_do_rcv In tcp_v6_do_rcv() code, when processing pkt options, we soley work on our skb clone opt_skb that we've created earlier before entering tcp_rcv_established() on our way. However, only in condition ... if (np->rxopt.bits.rxtclass) np->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(skb)); ... we work on skb itself. As we extract every other information out of opt_skb in ipv6_pktoptions path, this seems wrong, since skb can already be released by tcp_rcv_established() earlier on. When we try to access it in ipv6_hdr(), we will dereference freed skb. [ Bug added by commit 4c507d2897bd9b ("net: implement IP_RECVTOS for IP_PKTOPTIONS") ] Signed-off-by: Daniel Borkmann Cc: Eric Dumazet Acked-by: Eric Dumazet Acked-by: Jiri Benc Signed-off-by: David S. Miller diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 6e1649d..eeb4cb0 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1427,7 +1427,7 @@ ipv6_pktoptions: if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit; if (np->rxopt.bits.rxtclass) - np->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(skb)); + np->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(opt_skb)); if (ipv6_opt_accepted(sk, opt_skb)) { skb_set_owner_r(opt_skb, sk); opt_skb = xchg(&np->pktoptions, opt_skb); -- cgit v0.10.2 From 89225d1ce6af3916bf32aecbe9d83f571a098588 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 4 Sep 2013 00:19:37 +0200 Subject: net: ipv6: mld: fix v1/v2 switchback timeout to rfc3810, 9.12. i) RFC3810, 9.2. Query Interval [QI] says: The Query Interval variable denotes the interval between General Queries sent by the Querier. Default value: 125 seconds. [...] ii) RFC3810, 9.3. Query Response Interval [QRI] says: The Maximum Response Delay used to calculate the Maximum Response Code inserted into the periodic General Queries. Default value: 10000 (10 seconds) [...] The number of seconds represented by the [Query Response Interval] must be less than the [Query Interval]. iii) RFC3810, 9.12. Older Version Querier Present Timeout [OVQPT] says: The Older Version Querier Present Timeout is the time-out for transitioning a host back to MLDv2 Host Compatibility Mode. When an MLDv1 query is received, MLDv2 hosts set their Older Version Querier Present Timer to [Older Version Querier Present Timeout]. This value MUST be ([Robustness Variable] times (the [Query Interval] in the last Query received)) plus ([Query Response Interval]). Hence, on *default* the timeout results in: [RV] = 2, [QI] = 125sec, [QRI] = 10sec [OVQPT] = [RV] * [QI] + [QRI] = 260sec Having that said, we currently calculate [OVQPT] (here given as 'switchback' variable) as ... switchback = (idev->mc_qrv + 1) * max_delay RFC3810, 9.12. says "the [Query Interval] in the last Query received". In section "9.14. Configuring timers", it is said: This section is meant to provide advice to network administrators on how to tune these settings to their network. Ambitious router implementations might tune these settings dynamically based upon changing characteristics of the network. [...] iv) RFC38010, 9.14.2. 
Query Interval: The overall level of periodic MLD traffic is inversely proportional to the Query Interval. A longer Query Interval results in a lower overall level of MLD traffic. The value of the Query Interval MUST be equal to or greater than the Maximum Response Delay used to calculate the Maximum Response Code inserted in General Query messages. I assume that was why switchback is calculated as is (3 * max_delay), although this setting seems to be meant for routers only to configure their [QI] interval for non-default intervals. So usage here like this is clearly wrong. Concluding, the current behaviour in IPv6's multicast code is not conform to the RFC as switch back is calculated wrongly. That is, it has a too small value, so MLDv2 hosts switch back again to MLDv2 way too early, i.e. ~30secs instead of ~260secs on default. Hence, introduce necessary helper functions and fix this up properly as it should be. Introduced in 06da92283 ("[IPV6]: Add MLDv2 support."). Credits to Hannes Frederic Sowa who also had a hand in this as well. Also thanks to Hangbin Liu who did initial testing. Signed-off-by: Daniel Borkmann Cc: David Stevens Cc: Hannes Frederic Sowa Acked-by: Hannes Frederic Sowa Signed-off-by: David S. Miller diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index 736b5fb..02ef772 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -171,12 +171,17 @@ struct inet6_dev { struct ifmcaddr6 *mc_list; struct ifmcaddr6 *mc_tomb; spinlock_t mc_lock; - unsigned char mc_qrv; + + unsigned char mc_qrv; /* Query Robustness Variable */ unsigned char mc_gq_running; unsigned char mc_ifc_count; unsigned char mc_dad_count; - unsigned long mc_v1_seen; + + unsigned long mc_v1_seen; /* Max time we stay in MLDv1 mode */ + unsigned long mc_qi; /* Query Interval */ + unsigned long mc_qri; /* Query Response Interval */ unsigned long mc_maxdelay; + struct timer_list mc_gq_timer; /* general query timer */ struct timer_list mc_ifc_timer; /* interface change timer */ struct timer_list mc_dad_timer; /* dad complete mc timer */ diff --git a/include/net/mld.h b/include/net/mld.h index 467143c..2b5421f 100644 --- a/include/net/mld.h +++ b/include/net/mld.h @@ -63,7 +63,7 @@ struct mld2_query { #define mld2q_mrc mld2q_hdr.icmp6_maxdelay #define mld2q_resv1 mld2q_hdr.icmp6_dataun.un_data16[1] -/* Max Response Code */ +/* Max Response Code, TODO: transform this to use the below */ #define MLDV2_MASK(value, nb) ((nb)>=32 ? (value) : ((1<<(nb))-1) & (value)) #define MLDV2_EXP(thresh, nbmant, nbexp, value) \ ((value) < (thresh) ? (value) : \ @@ -72,4 +72,29 @@ struct mld2_query { #define MLDV2_MRC(value) MLDV2_EXP(0x8000, 12, 3, value) +/* RFC3810, 5.1.3. Maximum Response Code: + * + * If Maximum Response Code >= 32768, Maximum Response Code represents a + * floating-point value as follows: + * + * 0 1 2 3 4 5 6 7 8 9 A B C D E F + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * |1| exp | mant | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ +#define MLDV2_MRC_EXP(value) (((value) >> 12) & 0x0007) +#define MLDV2_MRC_MAN(value) ((value) & 0x0fff) + +/* RFC3810, 5.1.9. 
QQIC (Querier's Query Interval Code): + * + * If QQIC >= 128, QQIC represents a floating-point value as follows: + * + * 0 1 2 3 4 5 6 7 + * +-+-+-+-+-+-+-+-+ + * |1| exp | mant | + * +-+-+-+-+-+-+-+-+ + */ +#define MLDV2_QQIC_EXP(value) (((value) >> 4) & 0x07) +#define MLDV2_QQIC_MAN(value) ((value) & 0x0f) + #endif diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 98ead2b..8992ff2 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -108,6 +108,10 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml, struct inet6_dev *idev); #define MLD_QRV_DEFAULT 2 +/* RFC3810, 9.2. Query Interval */ +#define MLD_QI_DEFAULT (125 * HZ) +/* RFC3810, 9.3. Query Response Interval */ +#define MLD_QRI_DEFAULT (10 * HZ) /* RFC3810, 8.1 Query Version Distinctions */ #define MLD_V1_QUERY_LEN 24 @@ -1112,6 +1116,93 @@ static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs, return true; } +static void mld_set_v1_mode(struct inet6_dev *idev) +{ + /* RFC3810, relevant sections: + * - 9.1. Robustness Variable + * - 9.2. Query Interval + * - 9.3. Query Response Interval + * - 9.12. Older Version Querier Present Timeout + */ + unsigned long switchback; + + switchback = (idev->mc_qrv * idev->mc_qi) + idev->mc_qri; + + idev->mc_v1_seen = jiffies + switchback; +} + +static void mld_update_qrv(struct inet6_dev *idev, + const struct mld2_query *mlh2) +{ + /* RFC3810, relevant sections: + * - 5.1.8. QRV (Querier's Robustness Variable) + * - 9.1. Robustness Variable + */ + + /* The value of the Robustness Variable MUST NOT be zero, + * and SHOULD NOT be one. Catch this here if we ever run + * into such a case in future. + */ + WARN_ON(idev->mc_qrv == 0); + + if (mlh2->mld2q_qrv > 0) + idev->mc_qrv = mlh2->mld2q_qrv; + + if (unlikely(idev->mc_qrv < 2)) { + net_warn_ratelimited("IPv6: MLD: clamping QRV from %u to %u!\n", + idev->mc_qrv, MLD_QRV_DEFAULT); + idev->mc_qrv = MLD_QRV_DEFAULT; + } +} + +static void mld_update_qi(struct inet6_dev *idev, + const struct mld2_query *mlh2) +{ + /* RFC3810, relevant sections: + * - 5.1.9. QQIC (Querier's Query Interval Code) + * - 9.2. Query Interval + * - 9.12. Older Version Querier Present Timeout + * (the [Query Interval] in the last Query received) + */ + unsigned long mc_qqi; + + if (mlh2->mld2q_qqic < 128) { + mc_qqi = mlh2->mld2q_qqic; + } else { + unsigned long mc_man, mc_exp; + + mc_exp = MLDV2_QQIC_EXP(mlh2->mld2q_qqic); + mc_man = MLDV2_QQIC_MAN(mlh2->mld2q_qqic); + + mc_qqi = (mc_man | 0x10) << (mc_exp + 3); + } + + idev->mc_qi = mc_qqi * HZ; +} + +static void mld_update_qri(struct inet6_dev *idev, + const struct mld2_query *mlh2) +{ + /* RFC3810, relevant sections: + * - 5.1.3. Maximum Response Code + * - 9.3. 
Query Response Interval + */ + unsigned long mc_qri, mc_mrc = ntohs(mlh2->mld2q_mrc); + + if (mc_mrc < 32768) { + mc_qri = mc_mrc; + } else { + unsigned long mc_man, mc_exp; + + mc_exp = MLDV2_MRC_EXP(mc_mrc); + mc_man = MLDV2_MRC_MAN(mc_mrc); + + mc_qri = (mc_man | 0x1000) << (mc_exp + 3); + } + + idev->mc_qri = msecs_to_jiffies(mc_qri); +} + /* called with rcu_read_lock() */ int igmp6_event_query(struct sk_buff *skb) { @@ -1150,12 +1241,15 @@ int igmp6_event_query(struct sk_buff *skb) return -EINVAL; if (len == MLD_V1_QUERY_LEN) { - int switchback; /* MLDv1 router present */ - max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay)); - switchback = (idev->mc_qrv + 1) * max_delay; - idev->mc_v1_seen = jiffies + switchback; + + mld_set_v1_mode(idev); + + /* cancel MLDv2 report timer */ + idev->mc_gq_running = 0; + if (del_timer(&idev->mc_gq_timer)) + __in6_dev_put(idev); /* cancel the interface change timer */ idev->mc_ifc_count = 0; @@ -1166,6 +1260,10 @@ int igmp6_event_query(struct sk_buff *skb) } else if (len >= MLD_V2_QUERY_LEN_MIN) { int srcs_offset = sizeof(struct mld2_query) - sizeof(struct icmp6hdr); + + /* hosts need to stay in MLDv1 mode, discard MLDv2 queries */ + if (MLD_V1_SEEN(idev)) + return 0; if (!pskb_may_pull(skb, srcs_offset)) return -EINVAL; @@ -1175,8 +1273,10 @@ int igmp6_event_query(struct sk_buff *skb) idev->mc_maxdelay = max_delay; - if (mlh2->mld2q_qrv) - idev->mc_qrv = mlh2->mld2q_qrv; + mld_update_qrv(idev, mlh2); + mld_update_qi(idev, mlh2); + mld_update_qri(idev, mlh2); + if (group_type == IPV6_ADDR_ANY) { /* general query */ if (mlh2->mld2q_nsrcs) return -EINVAL; /* no sources allowed */ @@ -2337,7 +2437,11 @@ void ipv6_mc_init_dev(struct inet6_dev *idev) (unsigned long)idev); setup_timer(&idev->mc_dad_timer, mld_dad_timer_expire, (unsigned long)idev); + idev->mc_qrv = MLD_QRV_DEFAULT; + idev->mc_qi = MLD_QI_DEFAULT; + idev->mc_qri = MLD_QRI_DEFAULT; + idev->mc_maxdelay = unsolicited_report_interval(idev); idev->mc_v1_seen = 0; write_unlock_bh(&idev->lock); -- cgit v0.10.2 From 6c567b78c8a7da26e5e6f5bd458882ad9967233a Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 4 Sep 2013 00:19:38 +0200 Subject: net: ipv6: mld: clean up MLD_V1_SEEN macro Replace the macro with a function to make it more readable. GCC will eventually decide whether to inline this or not (also, that's not fast-path anyway). Signed-off-by: Daniel Borkmann Cc: Hannes Frederic Sowa Acked-by: Hannes Frederic Sowa Signed-off-by: David S. 
Miller diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 8992ff2..f86e26b 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -95,6 +95,7 @@ static void mld_ifc_event(struct inet6_dev *idev); static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc); static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *addr); static void mld_clear_delrec(struct inet6_dev *idev); +static bool mld_in_v1_mode(const struct inet6_dev *idev); static int sf_setstate(struct ifmcaddr6 *pmc); static void sf_markstate(struct ifmcaddr6 *pmc); static void ip6_mc_clear_src(struct ifmcaddr6 *pmc); @@ -117,11 +118,6 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml, #define MLD_V1_QUERY_LEN 24 #define MLD_V2_QUERY_LEN_MIN 28 -#define MLD_V1_SEEN(idev) (dev_net((idev)->dev)->ipv6.devconf_all->force_mld_version == 1 || \ - (idev)->cnf.force_mld_version == 1 || \ - ((idev)->mc_v1_seen && \ - time_before(jiffies, (idev)->mc_v1_seen))) - #define IPV6_MLD_MAX_MSF 64 int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF; @@ -139,7 +135,7 @@ static int unsolicited_report_interval(struct inet6_dev *idev) { int iv; - if (MLD_V1_SEEN(idev)) + if (mld_in_v1_mode(idev)) iv = idev->cnf.mldv1_unsolicited_report_interval; else iv = idev->cnf.mldv2_unsolicited_report_interval; @@ -695,7 +691,7 @@ static void igmp6_group_added(struct ifmcaddr6 *mc) if (!(dev->flags & IFF_UP) || (mc->mca_flags & MAF_NOREPORT)) return; - if (MLD_V1_SEEN(mc->idev)) { + if (mld_in_v1_mode(mc->idev)) { igmp6_join_group(mc); return; } @@ -1116,6 +1112,18 @@ static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs, return true; } +static bool mld_in_v1_mode(const struct inet6_dev *idev) +{ + if (dev_net(idev->dev)->ipv6.devconf_all->force_mld_version == 1) + return true; + if (idev->cnf.force_mld_version == 1) + return true; + if (idev->mc_v1_seen && time_before(jiffies, idev->mc_v1_seen)) + return true; + + return false; +} + static void mld_set_v1_mode(struct inet6_dev *idev) { /* RFC3810, relevant sections: @@ -1262,7 +1270,7 @@ int igmp6_event_query(struct sk_buff *skb) sizeof(struct icmp6hdr); /* hosts need to stay in MLDv1 mode, discard MLDv2 queries */ - if (MLD_V1_SEEN(idev)) + if (mld_in_v1_mode(idev)) return 0; if (!pskb_may_pull(skb, srcs_offset)) return -EINVAL; @@ -1942,7 +1950,7 @@ err_out: static void mld_resend_report(struct inet6_dev *idev) { - if (MLD_V1_SEEN(idev)) { + if (mld_in_v1_mode(idev)) { struct ifmcaddr6 *mcaddr; read_lock_bh(&idev->lock); for (mcaddr = idev->mc_list; mcaddr; mcaddr = mcaddr->next) { @@ -2006,7 +2014,7 @@ static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode, else pmc->mca_sources = psf->sf_next; if (psf->sf_oldin && !(pmc->mca_flags & MAF_NOREPORT) && - !MLD_V1_SEEN(idev)) { + !mld_in_v1_mode(idev)) { psf->sf_crcount = idev->mc_qrv; psf->sf_next = pmc->mca_tomb; pmc->mca_tomb = psf; @@ -2306,7 +2314,7 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml, static void igmp6_leave_group(struct ifmcaddr6 *ma) { - if (MLD_V1_SEEN(ma->idev)) { + if (mld_in_v1_mode(ma->idev)) { if (ma->mca_flags & MAF_LAST_REPORTER) igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REDUCTION); @@ -2340,7 +2348,7 @@ static void mld_ifc_timer_expire(unsigned long data) static void mld_ifc_event(struct inet6_dev *idev) { - if (MLD_V1_SEEN(idev)) + if (mld_in_v1_mode(idev)) return; idev->mc_ifc_count = idev->mc_qrv; mld_ifc_start_timer(idev, 1); @@ -2351,7 +2359,7 @@ static void igmp6_timer_handler(unsigned long data) { struct 
ifmcaddr6 *ma = (struct ifmcaddr6 *) data; - if (MLD_V1_SEEN(ma->idev)) + if (mld_in_v1_mode(ma->idev)) igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT); else mld_send_report(ma->idev, ma); -- cgit v0.10.2 From e3f5b17047dec4acd8957dad053e70d87f18d97e Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 4 Sep 2013 00:19:39 +0200 Subject: net: ipv6: mld: get rid of MLDV2_MRC and simplify calculation Get rid of MLDV2_MRC and use our new macros for mantisse and exponent to calculate Maximum Response Delay out of the Maximum Response Code. Signed-off-by: Daniel Borkmann Cc: Hannes Frederic Sowa Acked-by: Hannes Frederic Sowa Signed-off-by: David S. Miller diff --git a/include/net/mld.h b/include/net/mld.h index 2b5421f..faa1d16 100644 --- a/include/net/mld.h +++ b/include/net/mld.h @@ -63,15 +63,6 @@ struct mld2_query { #define mld2q_mrc mld2q_hdr.icmp6_maxdelay #define mld2q_resv1 mld2q_hdr.icmp6_dataun.un_data16[1] -/* Max Response Code, TODO: transform this to use the below */ -#define MLDV2_MASK(value, nb) ((nb)>=32 ? (value) : ((1<<(nb))-1) & (value)) -#define MLDV2_EXP(thresh, nbmant, nbexp, value) \ - ((value) < (thresh) ? (value) : \ - ((MLDV2_MASK(value, nbmant) | (1<<(nbmant))) << \ - (MLDV2_MASK((value) >> (nbmant), nbexp) + (nbexp)))) - -#define MLDV2_MRC(value) MLDV2_EXP(0x8000, 12, 3, value) - /* RFC3810, 5.1.3. Maximum Response Code: * * If Maximum Response Code >= 32768, Maximum Response Code represents a @@ -97,4 +88,23 @@ struct mld2_query { #define MLDV2_QQIC_EXP(value) (((value) >> 4) & 0x07) #define MLDV2_QQIC_MAN(value) ((value) & 0x0f) +static inline unsigned long mldv2_mrc(const struct mld2_query *mlh2) +{ + /* RFC3810, 5.1.3. Maximum Response Code */ + unsigned long ret, mc_mrc = ntohs(mlh2->mld2q_mrc); + + if (mc_mrc < 32768) { + ret = mc_mrc; + } else { + unsigned long mc_man, mc_exp; + + mc_exp = MLDV2_MRC_EXP(mc_mrc); + mc_man = MLDV2_MRC_MAN(mc_mrc); + + ret = (mc_man | 0x1000) << (mc_exp + 3); + } + + return ret; +} + #endif diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 08e576a..4accd0d 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -1203,7 +1203,8 @@ static int br_ip6_multicast_query(struct net_bridge *br, mld2q = (struct mld2_query *)icmp6_hdr(skb); if (!mld2q->mld2q_nsrcs) group = &mld2q->mld2q_mca; - max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(ntohs(mld2q->mld2q_mrc)) : 1; + + max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL); } br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr), diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index f86e26b..005b22f 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -1195,20 +1195,7 @@ static void mld_update_qri(struct inet6_dev *idev, * - 5.1.3. Maximum Response Code * - 9.3. 
Query Response Interval */ - unsigned long mc_qri, mc_mrc = ntohs(mlh2->mld2q_mrc); - - if (mc_mrc < 32768) { - mc_qri = mc_mrc; - } else { - unsigned long mc_man, mc_exp; - - mc_exp = MLDV2_MRC_EXP(mc_mrc); - mc_man = MLDV2_MRC_MAN(mc_mrc); - - mc_qri = (mc_man | 0x1000) << (mc_exp + 3); - } - - idev->mc_qri = msecs_to_jiffies(mc_qri); + idev->mc_qri = msecs_to_jiffies(mldv2_mrc(mlh2)); } /* called with rcu_read_lock() */ @@ -1277,8 +1264,7 @@ int igmp6_event_query(struct sk_buff *skb) mlh2 = (struct mld2_query *)skb_transport_header(skb); - max_delay = max(msecs_to_jiffies(MLDV2_MRC(ntohs(mlh2->mld2q_mrc))), 1UL); - + max_delay = max(msecs_to_jiffies(mldv2_mrc(mlh2)), 1UL); idev->mc_maxdelay = max_delay; mld_update_qrv(idev, mlh2); -- cgit v0.10.2 From 58c0ecfd8d9871cfa35bcdbf3e7b3ee9ca62ea67 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 4 Sep 2013 00:19:40 +0200 Subject: net: ipv6: mld: implement RFC3810 MLDv2 mode only RFC3810, 10. Security Considerations says under subsection 10.1. Query Message: A forged Version 1 Query message will put MLDv2 listeners on that link in MLDv1 Host Compatibility Mode. This scenario can be avoided by providing MLDv2 hosts with a configuration option to ignore Version 1 messages completely. Hence, implement an MLDv2-only mode that will ignore MLDv1 traffic: echo 2 > /proc/sys/net/ipv6/conf/ethX/force_mld_version or echo 2 > /proc/sys/net/ipv6/conf/all/force_mld_version Note that the 'all' setting is checked before the per-device setting, matching the evaluation order of the old MLD_V1_SEEN() macro, whose if condition would "short-circuit" in the same way. Signed-off-by: Daniel Borkmann Cc: Hannes Frederic Sowa Acked-by: Hannes Frederic Sowa Signed-off-by: David S. Miller diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 005b22f..c916568 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -1112,11 +1112,34 @@ static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs, return true; } +static int mld_force_mld_version(const struct inet6_dev *idev) +{ + /* Normally, both are 0 here. If enforcement to a particular is + * being used, individual device enforcement will have a lower + * precedence over 'all' device (.../conf/all/force_mld_version). 
+ */ + + if (dev_net(idev->dev)->ipv6.devconf_all->force_mld_version != 0) + return dev_net(idev->dev)->ipv6.devconf_all->force_mld_version; + else + return idev->cnf.force_mld_version; +} + +static bool mld_in_v2_mode_only(const struct inet6_dev *idev) +{ + return mld_force_mld_version(idev) == 2; +} + +static bool mld_in_v1_mode_only(const struct inet6_dev *idev) +{ + return mld_force_mld_version(idev) == 1; +} + static bool mld_in_v1_mode(const struct inet6_dev *idev) { - if (dev_net(idev->dev)->ipv6.devconf_all->force_mld_version == 1) - return true; - if (idev->cnf.force_mld_version == 1) + if (mld_in_v2_mode_only(idev)) + return false; + if (mld_in_v1_mode_only(idev)) return true; if (idev->mc_v1_seen && time_before(jiffies, idev->mc_v1_seen)) return true; @@ -1223,7 +1246,6 @@ int igmp6_event_query(struct sk_buff *skb) return -EINVAL; idev = __in6_dev_get(skb->dev); - if (idev == NULL) return 0; @@ -1236,6 +1258,10 @@ int igmp6_event_query(struct sk_buff *skb) return -EINVAL; if (len == MLD_V1_QUERY_LEN) { + /* Ignore v1 queries */ + if (mld_in_v2_mode_only(idev)) + return 0; + /* MLDv1 router present */ max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay)); -- cgit v0.10.2 From cc7f7ab758f66a9110c8e737c3de3e9f5fc209b5 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 4 Sep 2013 00:19:41 +0200 Subject: net: ipv6: mld: similarly to MLDv2 have min max_delay of 1 Similarly as we do in MLDv2 queries, set a forged MLDv1 query with 0 ms mld_maxdelay to minimum timer shot time of 1 jiffies. This is eventually done in igmp6_group_queried() anyway, so we can simplify a check there. Signed-off-by: Daniel Borkmann Cc: Hannes Frederic Sowa Acked-by: Hannes Frederic Sowa Signed-off-by: David S. Miller diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index c916568..beb76b7 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -1040,12 +1040,9 @@ static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime) delay = ma->mca_timer.expires - jiffies; } - if (delay >= resptime) { - if (resptime) - delay = net_random() % resptime; - else - delay = 1; - } + if (delay >= resptime) + delay = net_random() % resptime; + ma->mca_timer.expires = jiffies + delay; if (!mod_timer(&ma->mca_timer, jiffies + delay)) atomic_inc(&ma->mca_refcnt); @@ -1258,12 +1255,15 @@ int igmp6_event_query(struct sk_buff *skb) return -EINVAL; if (len == MLD_V1_QUERY_LEN) { + unsigned long mldv1_md; + /* Ignore v1 queries */ if (mld_in_v2_mode_only(idev)) return 0; /* MLDv1 router present */ - max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay)); + mldv1_md = ntohs(mld->mld_maxdelay); + max_delay = max(msecs_to_jiffies(mldv1_md), 1UL); mld_set_v1_mode(idev); -- cgit v0.10.2 From 2b7c121f82b47ba5efac1ef47355376d1f8d6980 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 4 Sep 2013 00:19:42 +0200 Subject: net: ipv6: mld: refactor query processing into v1/v2 functions Make igmp6_event_query() a bit easier to read by refactoring code parts into mld_process_v1() and mld_process_v2(). Signed-off-by: Daniel Borkmann Cc: Hannes Frederic Sowa Acked-by: Hannes Frederic Sowa Signed-off-by: David S. 
Miller diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index beb76b7..04399cb7 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -1218,6 +1218,55 @@ static void mld_update_qri(struct inet6_dev *idev, idev->mc_qri = msecs_to_jiffies(mldv2_mrc(mlh2)); } +static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld, + unsigned long *max_delay) +{ + unsigned long mldv1_md; + + /* Ignore v1 queries */ + if (mld_in_v2_mode_only(idev)) + return -EINVAL; + + /* MLDv1 router present */ + mldv1_md = ntohs(mld->mld_maxdelay); + *max_delay = max(msecs_to_jiffies(mldv1_md), 1UL); + + mld_set_v1_mode(idev); + + /* cancel MLDv2 report timer */ + idev->mc_gq_running = 0; + if (del_timer(&idev->mc_gq_timer)) + __in6_dev_put(idev); + + /* cancel the interface change timer */ + idev->mc_ifc_count = 0; + if (del_timer(&idev->mc_ifc_timer)) + __in6_dev_put(idev); + + /* clear deleted report items */ + mld_clear_delrec(idev); + + return 0; +} + +static int mld_process_v2(struct inet6_dev *idev, struct mld2_query *mld, + unsigned long *max_delay) +{ + /* hosts need to stay in MLDv1 mode, discard MLDv2 queries */ + if (mld_in_v1_mode(idev)) + return -EINVAL; + + *max_delay = max(msecs_to_jiffies(mldv2_mrc(mld)), 1UL); + + mld_update_qrv(idev, mld); + mld_update_qi(idev, mld); + mld_update_qri(idev, mld); + + idev->mc_maxdelay = *max_delay; + + return 0; +} + /* called with rcu_read_lock() */ int igmp6_event_query(struct sk_buff *skb) { @@ -1229,7 +1278,7 @@ int igmp6_event_query(struct sk_buff *skb) struct mld_msg *mld; int group_type; int mark = 0; - int len; + int len, err; if (!pskb_may_pull(skb, sizeof(struct in6_addr))) return -EINVAL; @@ -1255,47 +1304,21 @@ int igmp6_event_query(struct sk_buff *skb) return -EINVAL; if (len == MLD_V1_QUERY_LEN) { - unsigned long mldv1_md; - - /* Ignore v1 queries */ - if (mld_in_v2_mode_only(idev)) - return 0; - - /* MLDv1 router present */ - mldv1_md = ntohs(mld->mld_maxdelay); - max_delay = max(msecs_to_jiffies(mldv1_md), 1UL); - - mld_set_v1_mode(idev); - - /* cancel MLDv2 report timer */ - idev->mc_gq_running = 0; - if (del_timer(&idev->mc_gq_timer)) - __in6_dev_put(idev); - - /* cancel the interface change timer */ - idev->mc_ifc_count = 0; - if (del_timer(&idev->mc_ifc_timer)) - __in6_dev_put(idev); - /* clear deleted report items */ - mld_clear_delrec(idev); + err = mld_process_v1(idev, mld, &max_delay); + if (err < 0) + return err; } else if (len >= MLD_V2_QUERY_LEN_MIN) { int srcs_offset = sizeof(struct mld2_query) - sizeof(struct icmp6hdr); - /* hosts need to stay in MLDv1 mode, discard MLDv2 queries */ - if (mld_in_v1_mode(idev)) - return 0; if (!pskb_may_pull(skb, srcs_offset)) return -EINVAL; mlh2 = (struct mld2_query *)skb_transport_header(skb); - max_delay = max(msecs_to_jiffies(mldv2_mrc(mlh2)), 1UL); - idev->mc_maxdelay = max_delay; - - mld_update_qrv(idev, mlh2); - mld_update_qi(idev, mlh2); - mld_update_qri(idev, mlh2); + err = mld_process_v2(idev, mlh2, &max_delay); + if (err < 0) + return err; if (group_type == IPV6_ADDR_ANY) { /* general query */ if (mlh2->mld2q_nsrcs) -- cgit v0.10.2 From b4af8def5c083e5424c66051fe400444a01c4644 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 4 Sep 2013 00:19:43 +0200 Subject: net: ipv6: mld: introduce mld_{gq, ifc, dad}_stop_timer functions We already have mld_{gq,ifc,dad}_start_timer() functions, so introduce mld_{gq,ifc,dad}_stop_timer() functions to reduce code size and make it more readable. 
Signed-off-by: Daniel Borkmann Cc: Hannes Frederic Sowa Acked-by: Hannes Frederic Sowa Signed-off-by: David S. Miller diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 04399cb7..096cd67 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -1006,6 +1006,13 @@ static void mld_gq_start_timer(struct inet6_dev *idev) in6_dev_hold(idev); } +static void mld_gq_stop_timer(struct inet6_dev *idev) +{ + idev->mc_gq_running = 0; + if (del_timer(&idev->mc_gq_timer)) + __in6_dev_put(idev); +} + static void mld_ifc_start_timer(struct inet6_dev *idev, unsigned long delay) { unsigned long tv = net_random() % delay; @@ -1014,6 +1021,13 @@ static void mld_ifc_start_timer(struct inet6_dev *idev, unsigned long delay) in6_dev_hold(idev); } +static void mld_ifc_stop_timer(struct inet6_dev *idev) +{ + idev->mc_ifc_count = 0; + if (del_timer(&idev->mc_ifc_timer)) + __in6_dev_put(idev); +} + static void mld_dad_start_timer(struct inet6_dev *idev, unsigned long delay) { unsigned long tv = net_random() % delay; @@ -1022,6 +1036,12 @@ static void mld_dad_start_timer(struct inet6_dev *idev, unsigned long delay) in6_dev_hold(idev); } +static void mld_dad_stop_timer(struct inet6_dev *idev) +{ + if (del_timer(&idev->mc_dad_timer)) + __in6_dev_put(idev); +} + /* * IGMP handling (alias multicast ICMPv6 messages) */ @@ -1234,15 +1254,9 @@ static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld, mld_set_v1_mode(idev); /* cancel MLDv2 report timer */ - idev->mc_gq_running = 0; - if (del_timer(&idev->mc_gq_timer)) - __in6_dev_put(idev); - + mld_gq_stop_timer(idev); /* cancel the interface change timer */ - idev->mc_ifc_count = 0; - if (del_timer(&idev->mc_ifc_timer)) - __in6_dev_put(idev); - + mld_ifc_stop_timer(idev); /* clear deleted report items */ mld_clear_delrec(idev); @@ -2434,14 +2448,9 @@ void ipv6_mc_down(struct inet6_dev *idev) /* Withdraw multicast list */ read_lock_bh(&idev->lock); - idev->mc_ifc_count = 0; - if (del_timer(&idev->mc_ifc_timer)) - __in6_dev_put(idev); - idev->mc_gq_running = 0; - if (del_timer(&idev->mc_gq_timer)) - __in6_dev_put(idev); - if (del_timer(&idev->mc_dad_timer)) - __in6_dev_put(idev); + mld_ifc_stop_timer(idev); + mld_gq_stop_timer(idev); + mld_dad_stop_timer(idev); for (i = idev->mc_list; i; i=i->next) igmp6_group_dropped(i); -- cgit v0.10.2 From f21278108204ab244cd534a0d45c174ecc559267 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 4 Sep 2013 00:19:44 +0200 Subject: net: ipv6: mld: document force_mld_version in ip-sysctl.txt Document force_mld_version parameter in ip-sysctl.txt. Signed-off-by: Daniel Borkmann Cc: Hannes Frederic Sowa Acked-by: Hannes Frederic Sowa Signed-off-by: David S. Miller diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 1cb3aeb..a46d785 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -1358,6 +1358,11 @@ mldv2_unsolicited_report_interval - INTEGER MLDv2 report retransmit will take place. 
Default: 1000 (1 second) +force_mld_version - INTEGER + 0 - (default) No enforcement of a MLD version, MLDv1 fallback allowed + 1 - Enforce to use MLD version 1 + 2 - Enforce to use MLD version 2 + suppress_frag_ndisc - INTEGER Control RFC 6980 (Security Implications of IPv6 Fragmentation with IPv6 Neighbor Discovery) behavior: -- cgit v0.10.2 From d2779e99468fd83ef1493c34f3803fec9aad8345 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Wed, 4 Sep 2013 02:41:27 +0400 Subject: sh_eth: fix napi_{en|dis}able() calls racing against interrupts While implementing NAPI for the driver, I overlooked the race conditions where interrupt handler might have called napi_schedule_prep() before napi_enable() was called or after napi_disable() was called. If RX interrupt happens, this would cause the endless interrupts and messages like: sh-eth eth0: ignoring interrupt, status 0x00040000, mask 0x01ff009f. The interrupt wouldn't even be masked by the kernel eventually since the handler would return IRQ_HANDLED all the time. As a fix, move napi_enable() call before request_irq() call and napi_disable() call after free_irq() call. Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 1ccd595..3426d32 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1857,11 +1857,13 @@ static int sh_eth_open(struct net_device *ndev) pm_runtime_get_sync(&mdp->pdev->dev); + napi_enable(&mdp->napi); + ret = request_irq(ndev->irq, sh_eth_interrupt, mdp->cd->irq_flags, ndev->name, ndev); if (ret) { dev_err(&ndev->dev, "Can not assign IRQ number\n"); - return ret; + goto out_napi_off; } /* Descriptor set */ @@ -1879,12 +1881,12 @@ static int sh_eth_open(struct net_device *ndev) if (ret) goto out_free_irq; - napi_enable(&mdp->napi); - return ret; out_free_irq: free_irq(ndev->irq, ndev); +out_napi_off: + napi_disable(&mdp->napi); pm_runtime_put_sync(&mdp->pdev->dev); return ret; } @@ -1976,8 +1978,6 @@ static int sh_eth_close(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); - napi_disable(&mdp->napi); - netif_stop_queue(ndev); /* Disable interrupts by clearing the interrupt mask. */ @@ -1995,6 +1995,8 @@ static int sh_eth_close(struct net_device *ndev) free_irq(ndev->irq, ndev); + napi_disable(&mdp->napi); + /* Free all the skbuffs in the Rx queue. */ sh_eth_ring_free(ndev); -- cgit v0.10.2 From 8fad9c39f31f9ed7bf3526c43a4537b2fcf1a5d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Linus=20L=C3=BCssing?= Date: Wed, 4 Sep 2013 02:13:38 +0200 Subject: bridge: prevent flooding IPv6 packets that do not have a listener MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently if there is no listener for a certain group then IPv6 packets for that group are flooded on all ports, even though there might be no host and router interested in it on a port. With this commit they are only forwarded to ports with a multicast router. Just like commit bd4265fe36 ("bridge: Only flood unregistered groups to routers") did for IPv4, let's do the same for IPv6 with the same reasoning. Signed-off-by: Linus Lüssing Signed-off-by: David S. 
Miller diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 4accd0d..5388955 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -1491,8 +1491,14 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br, * - MLD has always Router Alert hop-by-hop option * - But we do not support jumbrograms. */ - if (ip6h->version != 6 || - ip6h->nexthdr != IPPROTO_HOPOPTS || + if (ip6h->version != 6) + return 0; + + /* Prevent flooding this packet if there is no listener present */ + if (ipv6_is_transient_multicast(&ip6h->daddr)) + BR_INPUT_SKB_CB(skb)->mrouters_only = 1; + + if (ip6h->nexthdr != IPPROTO_HOPOPTS || ip6h->payload_len == 0) return 0; -- cgit v0.10.2 From 3c3769e63301fd92fcaf51870c371583dd0282ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Linus=20L=C3=BCssing?= Date: Wed, 4 Sep 2013 02:13:39 +0200 Subject: bridge: apply multicast snooping to IPv6 link-local, too MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The multicast snooping code should have matured enough to be safely applicable to IPv6 link-local multicast addresses (excluding the link-local all nodes address, ff02::1), too. Signed-off-by: Linus Lüssing Signed-off-by: David S. Miller diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index e4d5cd4..de818d9 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c @@ -9,6 +9,7 @@ #include #if IS_ENABLED(CONFIG_IPV6) #include +#include #endif #include "br_private.h" @@ -254,7 +255,7 @@ static bool is_valid_mdb_entry(struct br_mdb_entry *entry) return false; #if IS_ENABLED(CONFIG_IPV6) } else if (entry->addr.proto == htons(ETH_P_IPV6)) { - if (!ipv6_is_transient_multicast(&entry->addr.u.ip6)) + if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) return false; #endif } else diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 5388955..2353147 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -29,6 +29,7 @@ #include #include #include +#include #endif #include "br_private.h" @@ -723,7 +724,7 @@ static int br_ip6_multicast_add_group(struct net_bridge *br, { struct br_ip br_group; - if (!ipv6_is_transient_multicast(group)) + if (ipv6_addr_is_ll_all_nodes(group)) return 0; br_group.u.ip6 = *group; @@ -1354,7 +1355,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br, { struct br_ip br_group; - if (!ipv6_is_transient_multicast(group)) + if (ipv6_addr_is_ll_all_nodes(group)) return; br_group.u.ip6 = *group; @@ -1495,7 +1496,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br, return 0; /* Prevent flooding this packet if there is no listener present */ - if (ipv6_is_transient_multicast(&ip6h->daddr)) + if (!ipv6_addr_is_ll_all_nodes(&ip6h->daddr)) BR_INPUT_SKB_CB(skb)->mrouters_only = 1; if (ip6h->nexthdr != IPPROTO_HOPOPTS || diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index d41283c..89b2949 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -476,16 +476,6 @@ extern void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port, #define mlock_dereference(X, br) \ rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock)) -#if IS_ENABLED(CONFIG_IPV6) -#include -static inline int ipv6_is_transient_multicast(const struct in6_addr *addr) -{ - if (ipv6_addr_is_multicast(addr) && IPV6_ADDR_MC_FLAG_TRANSIENT(addr)) - return 1; - return 0; -} -#endif - static inline bool br_multicast_is_router(struct net_bridge *br) { return br->multicast_router == 2 || -- cgit v0.10.2 From 
822473b6c4e207a8af08518afce5dd2f2e13d765 Mon Sep 17 00:00:00 2001 From: "govindarajulu.v" Date: Wed, 4 Sep 2013 11:17:14 +0530 Subject: driver/net: enic: Add multi tx support for enic The following patch adds multi tx support for enic. Signed-off-by: Nishank Trivedi Signed-off-by: Christian Benvenuti Signed-off-by: Govindarajulu Varadarajan Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index be16731..34b637a 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -37,7 +37,7 @@ #define ENIC_BARS_MAX 6 -#define ENIC_WQ_MAX 1 +#define ENIC_WQ_MAX 8 #define ENIC_RQ_MAX 8 #define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX) #define ENIC_INTR_MAX (ENIC_CQ_MAX + 2) diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index bcf15b1..1ab3f18 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -128,10 +128,10 @@ static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, completed_index, enic_wq_free_buf, opaque); - if (netif_queue_stopped(enic->netdev) && + if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) && vnic_wq_desc_avail(&enic->wq[q_number]) >= (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) - netif_wake_queue(enic->netdev); + netif_wake_subqueue(enic->netdev, q_number); spin_unlock(&enic->wq_lock[q_number]); @@ -292,10 +292,15 @@ static irqreturn_t enic_isr_msix_rq(int irq, void *data) static irqreturn_t enic_isr_msix_wq(int irq, void *data) { struct enic *enic = data; - unsigned int cq = enic_cq_wq(enic, 0); - unsigned int intr = enic_msix_wq_intr(enic, 0); + unsigned int cq; + unsigned int intr; unsigned int wq_work_to_do = -1; /* no limit */ unsigned int wq_work_done; + unsigned int wq_irq; + + wq_irq = (u32)irq - enic->msix_entry[enic_msix_wq_intr(enic, 0)].vector; + cq = enic_cq_wq(enic, wq_irq); + intr = enic_msix_wq_intr(enic, wq_irq); wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do, enic_wq_service, NULL); @@ -511,14 +516,18 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct enic *enic = netdev_priv(netdev); - struct vnic_wq *wq = &enic->wq[0]; + struct vnic_wq *wq; unsigned long flags; + unsigned int txq_map; if (skb->len <= 0) { dev_kfree_skb(skb); return NETDEV_TX_OK; } + txq_map = skb_get_queue_mapping(skb) % enic->wq_count; + wq = &enic->wq[txq_map]; + /* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs, * which is very likely. In the off chance it's going to take * more than * ENIC_NON_TSO_MAX_DESC, linearize the skb. @@ -531,23 +540,23 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } - spin_lock_irqsave(&enic->wq_lock[0], flags); + spin_lock_irqsave(&enic->wq_lock[txq_map], flags); if (vnic_wq_desc_avail(wq) < skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) { - netif_stop_queue(netdev); + netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map)); /* This is a hard error, log it */ netdev_err(netdev, "BUG! 
Tx ring full when queue awake!\n"); - spin_unlock_irqrestore(&enic->wq_lock[0], flags); + spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags); return NETDEV_TX_BUSY; } enic_queue_wq_skb(enic, wq, skb); if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) - netif_stop_queue(netdev); + netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map)); - spin_unlock_irqrestore(&enic->wq_lock[0], flags); + spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags); return NETDEV_TX_OK; } @@ -1369,7 +1378,7 @@ static int enic_open(struct net_device *netdev) enic_set_rx_mode(netdev); - netif_wake_queue(netdev); + netif_tx_wake_all_queues(netdev); for (i = 0; i < enic->rq_count; i++) napi_enable(&enic->napi[i]); @@ -2032,7 +2041,8 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * instance data is initialized to zero. */ - netdev = alloc_etherdev(sizeof(struct enic)); + netdev = alloc_etherdev_mqs(sizeof(struct enic), + ENIC_RQ_MAX, ENIC_WQ_MAX); if (!netdev) return -ENOMEM; @@ -2198,6 +2208,8 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_dev_close; } + netif_set_real_num_tx_queues(netdev, enic->wq_count); + /* Setup notification timer, HW reset task, and wq locks */ -- cgit v0.10.2 From bf751ba802fe57f4f3aa5555e1446387912bef9e Mon Sep 17 00:00:00 2001 From: "govindarajulu.v" Date: Wed, 4 Sep 2013 11:17:15 +0530 Subject: driver/net: enic: record q_number and rss_hash for skb The following patch sets the skb->rxhash and skb->q_number. This is used by RPS and RFS. Kernel can make use of hw provided hash instead of calculating the hash. Signed-off-by: Govindarajulu Varadarajan Signed-off-by: Nishank Trivedi Signed-off-by: Christian Benvenuti Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 1ab3f18..93898ba 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -1034,6 +1034,14 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, skb_put(skb, bytes_written); skb->protocol = eth_type_trans(skb, netdev); + skb_record_rx_queue(skb, q_number); + if (netdev->features & NETIF_F_RXHASH) { + skb->rxhash = rss_hash; + if (rss_type & (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX | + NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 | + NIC_CFG_RSS_HASH_TYPE_TCP_IPV4)) + skb->l4_rxhash = true; + } if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) { skb->csum = htons(checksum); @@ -2209,6 +2217,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } netif_set_real_num_tx_queues(netdev, enic->wq_count); + netif_set_real_num_rx_queues(netdev, enic->rq_count); /* Setup notification timer, HW reset task, and wq locks */ @@ -2258,6 +2267,8 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ENIC_SETTING(enic, TSO)) netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN; + if (ENIC_SETTING(enic, RSS)) + netdev->hw_features |= NETIF_F_RXHASH; if (ENIC_SETTING(enic, RXCSUM)) netdev->hw_features |= NETIF_F_RXCSUM; -- cgit v0.10.2 From 624dbf55a359b1d8e335c046d4e57393e7d1916d Mon Sep 17 00:00:00 2001 From: "govindarajulu.v" Date: Wed, 4 Sep 2013 11:17:16 +0530 Subject: driver/net: enic: Try DMA 64 first, then failover to DMA In servers with more than 1.1 TB of RAM, the existing 40/32 bit DMA could cause failure as the DMA-able address could go outside the range addressable using 40/32 bits. 
The following patch first tries 64 bit DMA if possible, failing over to 32 bit. Signed-off-by: Sujith Sankar Signed-off-by: Christian Benvenuti Signed-off-by: Govindarajulu Varadarajan Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 93898ba..7b756cf9 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -2080,11 +2080,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); /* Query PCI controller on system for DMA addressing - * limitation for the device. Try 40-bit first, and + * limitation for the device. Try 64-bit first, and * fail to 32-bit. */ - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40)); + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) { err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { @@ -2098,10 +2098,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_release_regions; } } else { - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)); + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) { dev_err(dev, "Unable to obtain %u-bit DMA " - "for consistent allocations, aborting\n", 40); + "for consistent allocations, aborting\n", 64); goto err_out_release_regions; } using_dac = 1; -- cgit v0.10.2 From 4a50ddfda72881d5d67a2b25f9b8a120b2765125 Mon Sep 17 00:00:00 2001 From: "govindarajulu.v" Date: Wed, 4 Sep 2013 11:17:17 +0530 Subject: driver/net: enic: Exposing symbols for Cisco's low latency driver This patch exposes symbols for the usnic low latency driver that can be used to register and unregister vNics as well as to traverse the resources on vNics. Signed-off-by: Upinder Malhi Signed-off-by: Nishank Trivedi Signed-off-by: Christian Benvenuti Signed-off-by: David S.
Miller diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c index 97455c5..69dd925 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_dev.c +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c @@ -175,6 +175,7 @@ unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev, { return vdev->res[type].count; } +EXPORT_SYMBOL(vnic_dev_get_res_count); void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, unsigned int index) @@ -193,6 +194,7 @@ void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, return (char __iomem *)vdev->res[type].vaddr; } } +EXPORT_SYMBOL(vnic_dev_get_res); static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring, unsigned int desc_count, unsigned int desc_size) @@ -942,6 +944,7 @@ void vnic_dev_unregister(struct vnic_dev *vdev) kfree(vdev); } } +EXPORT_SYMBOL(vnic_dev_unregister); struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar, @@ -969,6 +972,13 @@ err_out: vnic_dev_unregister(vdev); return NULL; } +EXPORT_SYMBOL(vnic_dev_register); + +struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev) +{ + return vdev->pdev; +} +EXPORT_SYMBOL(vnic_dev_get_pdev); int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len) { diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h index f3d9b79..e670029 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_dev.h +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h @@ -127,6 +127,7 @@ int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev, struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar, unsigned int num_bars); +struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev); int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len); int vnic_dev_enable2(struct vnic_dev *vdev, int active); int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status); -- cgit v0.10.2 From 001e1c1d5ec6282726e3d051eeaf776504ee8e45 Mon Sep 17 00:00:00 2001 From: "govindarajulu.v" Date: Wed, 4 Sep 2013 11:17:18 +0530 Subject: driver/net: enic: update enic maintainers and driver Signed-off-by: Govindarajulu Varadarajan Signed-off-by: Sujith Sankar Signed-off-by: Nishank Trivedi Signed-off-by: Christian Benvenuti Signed-off-by: David S. 
Miller diff --git a/MAINTAINERS b/MAINTAINERS index 705bb96..ecb83cd 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2076,7 +2076,8 @@ F: drivers/usb/chipidea/ CISCO VIC ETHERNET NIC DRIVER M: Christian Benvenuti -M: Roopa Prabhu +M: Sujith Sankar +M: Govindarajulu Varadarajan M: Neel Patel M: Nishank Trivedi S: Supported diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index 34b637a..e9f7c65 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -32,7 +32,7 @@ #define DRV_NAME "enic" #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" -#define DRV_VERSION "2.1.1.43" +#define DRV_VERSION "2.1.1.50" #define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc" #define ENIC_BARS_MAX 6 -- cgit v0.10.2 From eef23b53985f8e08256f24d76e576d69fb0d44d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= Date: Wed, 4 Sep 2013 09:42:32 +0200 Subject: net: usbnet: update addr_assign_type if appropriate MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This module generates a common default address on init, using eth_random_addr. Set addr_assign_type to let userspace know the address is random unless it was overridden by the minidriver. Signed-off-by: Bjørn Mork Acked-by: Oliver Neukum Signed-off-by: David S. Miller diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index e4811d7..74a8e3c 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1629,6 +1629,10 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) dev->rx_urb_size = dev->hard_mtu; dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1); + /* let userspace know we have a random address */ + if (ether_addr_equal(net->dev_addr, node_id)) + net->addr_assign_type = NET_ADDR_RANDOM; + if ((dev->driver_info->flags & FLAG_WLAN) != 0) SET_NETDEV_DEVTYPE(net, &wlan_type); if ((dev->driver_info->flags & FLAG_WWAN) != 0) -- cgit v0.10.2 From 53cf527513eed6e7170e9dceacd198f9267171b0 Mon Sep 17 00:00:00 2001 From: Joseph Gasparakis Date: Wed, 4 Sep 2013 02:13:38 -0700 Subject: vxlan: Notify drivers for listening UDP port changes This patch adds two more ndo ops: ndo_add_vxlan_port() and ndo_del_vxlan_port(). Drivers can get notifications through the above functions about changes of the UDP listening port of VXLAN. Also, when physical ports come up, they can now call vxlan_get_rx_port() in order to obtain the port number(s) of any existing VXLAN interfaces, in case those came up before them. This information about the listening UDP port can then be used for VXLAN related offloads. A big thank you to John Fastabend (john.r.fastabend@intel.com) for his input and his suggestions on this patch set. CC: John Fastabend CC: Stephen Hemminger Signed-off-by: Joseph Gasparakis Signed-off-by: Jeff Kirsher Signed-off-by: David S.
Miller diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index bd35d2d..bf64b41 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -558,6 +558,40 @@ static int vxlan_fdb_append(struct vxlan_fdb *f, return 1; } +/* Notify netdevs that UDP port started listening */ +static void vxlan_notify_add_rx_port(struct sock *sk) +{ + struct net_device *dev; + struct net *net = sock_net(sk); + sa_family_t sa_family = sk->sk_family; + u16 port = htons(inet_sk(sk)->inet_sport); + + rcu_read_lock(); + for_each_netdev_rcu(net, dev) { + if (dev->netdev_ops->ndo_add_vxlan_port) + dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family, + port); + } + rcu_read_unlock(); +} + +/* Notify netdevs that UDP port is no more listening */ +static void vxlan_notify_del_rx_port(struct sock *sk) +{ + struct net_device *dev; + struct net *net = sock_net(sk); + sa_family_t sa_family = sk->sk_family; + u16 port = htons(inet_sk(sk)->inet_sport); + + rcu_read_lock(); + for_each_netdev_rcu(net, dev) { + if (dev->netdev_ops->ndo_del_vxlan_port) + dev->netdev_ops->ndo_del_vxlan_port(dev, sa_family, + port); + } + rcu_read_unlock(); +} + /* Add new entry to forwarding table -- assumes lock held */ static int vxlan_fdb_create(struct vxlan_dev *vxlan, const u8 *mac, union vxlan_addr *ip, @@ -909,7 +943,9 @@ static void vxlan_sock_hold(struct vxlan_sock *vs) void vxlan_sock_release(struct vxlan_sock *vs) { - struct vxlan_net *vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id); + struct sock *sk = vs->sock->sk; + struct net *net = sock_net(sk); + struct vxlan_net *vn = net_generic(net, vxlan_net_id); if (!atomic_dec_and_test(&vs->refcnt)) return; @@ -918,6 +954,7 @@ void vxlan_sock_release(struct vxlan_sock *vs) hlist_del_rcu(&vs->hlist); smp_wmb(); vs->sock->sk->sk_user_data = NULL; + vxlan_notify_del_rx_port(sk); spin_unlock(&vn->sock_lock); queue_work(vxlan_wq, &vs->del_work); @@ -1983,6 +2020,34 @@ static struct device_type vxlan_type = { .name = "vxlan", }; +/* Calls the ndo_add_vxlan_port of the caller in order to + * supply the listening VXLAN udp ports. + */ +void vxlan_get_rx_port(struct net_device *dev) +{ + struct vxlan_sock *vs; + struct net *net = dev_net(dev); + struct vxlan_net *vn = net_generic(net, vxlan_net_id); + sa_family_t sa_family; + u16 port; + int i; + + if (!dev || !dev->netdev_ops || !dev->netdev_ops->ndo_add_vxlan_port) + return; + + spin_lock(&vn->sock_lock); + for (i = 0; i < PORT_HASH_SIZE; ++i) { + hlist_for_each_entry_rcu(vs, vs_head(net, i), hlist) { + port = htons(inet_sk(vs->sock->sk)->inet_sport); + sa_family = vs->sock->sk->sk_family; + dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family, + port); + } + } + spin_unlock(&vn->sock_lock); +} +EXPORT_SYMBOL_GPL(vxlan_get_rx_port); + /* Initialize the device structure. */ static void vxlan_setup(struct net_device *dev) { @@ -2244,6 +2309,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port, spin_lock(&vn->sock_lock); hlist_add_head_rcu(&vs->hlist, vs_head(net, port)); + vxlan_notify_add_rx_port(sk); spin_unlock(&vn->sock_lock); /* Mark socket as an encapsulation socket. */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3ad49b8..8ed4ae9 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -948,6 +948,19 @@ struct netdev_phys_port_id { * Called to get ID of physical port of this device. If driver does * not implement this, it is assumed that the hw is not able to have * multiple net devices on single physical port. 
+ * + * void (*ndo_add_vxlan_port)(struct net_device *dev, + * sa_family_t sa_family, __u16 port); + * Called by vxlan to notiy a driver about the UDP port and socket + * address family that vxlan is listnening to. It is called only when + * a new port starts listening. The operation is protected by the + * vxlan_net->sock_lock. + * + * void (*ndo_del_vxlan_port)(struct net_device *dev, + * sa_family_t sa_family, __u16 port); + * Called by vxlan to notify the driver about a UDP port and socket + * address family that vxlan is not listening to anymore. The operation + * is protected by the vxlan_net->sock_lock. */ struct net_device_ops { int (*ndo_init)(struct net_device *dev); @@ -1078,6 +1091,12 @@ struct net_device_ops { bool new_carrier); int (*ndo_get_phys_port_id)(struct net_device *dev, struct netdev_phys_port_id *ppid); + void (*ndo_add_vxlan_port)(struct net_device *dev, + sa_family_t sa_family, + __u16 port); + void (*ndo_del_vxlan_port)(struct net_device *dev, + sa_family_t sa_family, + __u16 port); }; /* diff --git a/include/net/vxlan.h b/include/net/vxlan.h index e09c40b..2d64d3c 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -36,4 +36,5 @@ int vxlan_xmit_skb(struct vxlan_sock *vs, __be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb); +void vxlan_get_rx_port(struct net_device *netdev); #endif -- cgit v0.10.2 From b9871bcfd211d316adee317608dab44c58d6ea2d Mon Sep 17 00:00:00 2001 From: Ariel Elior Date: Wed, 4 Sep 2013 14:09:21 +0300 Subject: bnx2x: VF RSS support - PF side This patch adds support for Receive Side Scaling for queues of Virtual Functions on the PF side. This includes support for the requests for multiple queues from VF drivers, configuration of the HW for multiple queues per VF, and support for rss configuration of said queues. Signed-off-by: Ariel Elior Signed-off-by: Eilon Greenstein Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 3e77a1b..0c33802 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -825,15 +825,13 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) #define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes)) #define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */ -#define BNX2X_DB_SHIFT 7 /* 128 bytes*/ +#define BNX2X_DB_SHIFT 3 /* 8 bytes*/ #if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT) #error "Min DB doorbell stride is 8" #endif -#define DPM_TRIGER_TYPE 0x40 #define DOORBELL(bp, cid, val) \ do { \ - writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \ - DPM_TRIGER_TYPE); \ + writel((u32)(val), bp->doorbells + (bp->db_size * (cid))); \ } while (0) /* TX CSUM helpers */ @@ -1100,13 +1098,27 @@ struct bnx2x_port { extern struct workqueue_struct *bnx2x_wq; #define BNX2X_MAX_NUM_OF_VFS 64 -#define BNX2X_VF_CID_WND 0 +#define BNX2X_VF_CID_WND 4 /* log num of queues per VF. HW config. */ #define BNX2X_CIDS_PER_VF (1 << BNX2X_VF_CID_WND) -#define BNX2X_CLIENTS_PER_VF 1 -#define BNX2X_FIRST_VF_CID 256 + +/* We need to reserve doorbell addresses for all VF and queue combinations */ #define BNX2X_VF_CIDS (BNX2X_MAX_NUM_OF_VFS * BNX2X_CIDS_PER_VF) + +/* The doorbell is configured to have the same number of CIDs for PFs and for + * VFs. For this reason the PF CID zone is as large as the VF zone. 
+ */ +#define BNX2X_FIRST_VF_CID BNX2X_VF_CIDS +#define BNX2X_MAX_NUM_VF_QUEUES 64 #define BNX2X_VF_ID_INVALID 0xFF +/* the number of VF CIDS multiplied by the amount of bytes reserved for each + * cid must not exceed the size of the VF doorbell + */ +#define BNX2X_VF_BAR_SIZE 512 +#if (BNX2X_VF_BAR_SIZE < BNX2X_CIDS_PER_VF * (1 << BNX2X_DB_SHIFT)) +#error "VF doorbell bar size is 512" +#endif + /* * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is * control by the number of fast-path status blocks supported by the @@ -1650,10 +1662,10 @@ struct bnx2x { dma_addr_t fw_stats_data_mapping; int fw_stats_data_sz; - /* For max 196 cids (64*3 + non-eth), 32KB ILT page size and 1KB + /* For max 1024 cids (VF RSS), 32KB ILT page size and 1KB * context size we need 8 ILT entries. */ -#define ILT_MAX_L2_LINES 8 +#define ILT_MAX_L2_LINES 32 struct hw_context context[ILT_MAX_L2_LINES]; struct bnx2x_ilt *ilt; @@ -1869,7 +1881,7 @@ extern int num_queues; #define FUNC_FLG_TPA 0x0008 #define FUNC_FLG_SPQ 0x0010 #define FUNC_FLG_LEADING 0x0020 /* PF only */ - +#define FUNC_FLG_LEADING_STATS 0x0040 struct bnx2x_func_init_params { /* dma */ dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 2e90868..e7400d9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -4784,6 +4784,11 @@ int bnx2x_resume(struct pci_dev *pdev) void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, u32 cid) { + if (!cxt) { + BNX2X_ERR("bad context pointer %p\n", cxt); + return; + } + /* ustorm cxt validation */ cxt->ustorm_ag_context.cdu_usage = CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 17f117c..5729aa7 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -6893,7 +6893,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON); bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON); - REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT); + if (!CHIP_REV_IS_SLOW(bp)) /* enable hw interrupt from doorbell Q */ REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 8e627b8..5ecf267 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -6335,6 +6335,7 @@ #define PCI_ID_VAL2 0x438 #define PCI_ID_VAL3 0x43c +#define GRC_CONFIG_REG_VF_MSIX_CONTROL 0x61C #define GRC_CONFIG_REG_PF_INIT_VF 0x624 #define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xf /* First VF_NUM for PF is encoded in this register. 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 1d46b68..9fbeee5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -4416,6 +4416,16 @@ void bnx2x_init_rss_config_obj(struct bnx2x *bp, rss_obj->config_rss = bnx2x_setup_rss; } +int validate_vlan_mac(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *vlan_mac) +{ + if (!vlan_mac->get_n_elements) { + BNX2X_ERR("vlan mac object was not intialized\n"); + return -EINVAL; + } + return 0; +} + /********************** Queue state object ***********************************/ /** diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 533a3ab..658f4e3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -1407,4 +1407,6 @@ int bnx2x_config_rss(struct bnx2x *bp, void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj, u8 *ind_table); +int validate_vlan_mac(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *vlan_mac); #endif /* BNX2X_SP_VERBS */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index fbc026c..73731eb 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -170,6 +170,11 @@ enum bnx2x_vfop_qteardown_state { BNX2X_VFOP_QTEARDOWN_DONE }; +enum bnx2x_vfop_rss_state { + BNX2X_VFOP_RSS_CONFIG, + BNX2X_VFOP_RSS_DONE +}; + #define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0) void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, @@ -265,11 +270,6 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp, __set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags); __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags); - if (vfq_is_leading(q)) { - __set_bit(BNX2X_Q_FLG_LEADING_RSS, &setup_p->flags); - __set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags); - } - /* Setup-op rx parameters */ if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) { struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params; @@ -398,7 +398,11 @@ static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf) BNX2X_Q_LOGICAL_STATE_STOPPED) { DP(BNX2X_MSG_IOV, "Entered qdtor but queue was already stopped. Aborting gracefully\n"); - goto op_done; + + /* next state */ + vfop->state = BNX2X_VFOP_QDTOR_DONE; + + bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); } /* next state */ @@ -432,8 +436,10 @@ op_err: op_done: case BNX2X_VFOP_QDTOR_DONE: /* invalidate the context */ - qdtor->cxt->ustorm_ag_context.cdu_usage = 0; - qdtor->cxt->xstorm_ag_context.cdu_reserved = 0; + if (qdtor->cxt) { + qdtor->cxt->ustorm_ag_context.cdu_usage = 0; + qdtor->cxt->xstorm_ag_context.cdu_reserved = 0; + } bnx2x_vfop_end(bp, vf, vfop); return; default: @@ -465,7 +471,8 @@ static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp, return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor, cmd->block); } - DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop.\n", vf->abs_vfid); + DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop. 
rc %d\n", + vf->abs_vfid, vfop->rc); return -ENOMEM; } @@ -474,10 +481,18 @@ bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid) { struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); if (vf) { + /* the first igu entry belonging to VFs of this PF */ + if (!BP_VFDB(bp)->first_vf_igu_entry) + BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id; + + /* the first igu entry belonging to this VF */ if (!vf_sb_count(vf)) vf->igu_base_id = igu_sb_id; + ++vf_sb_count(vf); + ++vf->sb_count; } + BP_VFDB(bp)->vf_sbs_pool++; } /* VFOP MAC/VLAN helpers */ @@ -733,6 +748,7 @@ static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp, int qid, bool drv_only) { struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + int rc; if (vfop) { struct bnx2x_vfop_args_filters filters = { @@ -752,6 +768,9 @@ static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp, bnx2x_vfop_mac_prep_ramrod(ramrod, &flags); /* set object */ + rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj)); + if (rc) + return rc; ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); /* set extra args */ @@ -772,6 +791,7 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp, int qid, bool drv_only) { struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + int rc; if (vfop) { struct bnx2x_vfop_args_filters filters = { @@ -794,6 +814,9 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp, bnx2x_vfop_mac_prep_ramrod(ramrod, &flags); /* set object */ + rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj)); + if (rc) + return rc; ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); /* set extra args */ @@ -814,6 +837,7 @@ int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp, int qid, u16 vid, bool add) { struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + int rc; if (vfop) { struct bnx2x_vfop_args_filters filters = { @@ -834,6 +858,9 @@ int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp, ramrod->user_req.u.vlan.vlan = vid; /* set object */ + rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)); + if (rc) + return rc; ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); /* set extra args */ @@ -853,6 +880,7 @@ static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp, int qid, bool drv_only) { struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + int rc; if (vfop) { struct bnx2x_vfop_args_filters filters = { @@ -872,6 +900,9 @@ static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp, bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags); /* set object */ + rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)); + if (rc) + return rc; ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); /* set extra args */ @@ -892,6 +923,7 @@ int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp, int qid, bool drv_only) { struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + int rc; if (vfop) { struct bnx2x_vfop_args_filters filters = { @@ -911,6 +943,9 @@ int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp, bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags); /* set object */ + rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)); + if (rc) + return rc; ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); /* set extra args */ @@ -1021,21 +1056,25 @@ static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf) case BNX2X_VFOP_QFLR_CLR_VLAN: /* vlan-clear-all: driver-only, don't consume credit */ vfop->state = BNX2X_VFOP_QFLR_CLR_MAC; - vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true); + if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj))) + vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, + true); if (vfop->rc) goto op_err; - return; + bnx2x_vfop_finalize(vf, 
vfop->rc, VFOP_CONT); case BNX2X_VFOP_QFLR_CLR_MAC: /* mac-clear-all: driver only consume credit */ vfop->state = BNX2X_VFOP_QFLR_TERMINATE; - vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true); + if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj))) + vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, + true); DP(BNX2X_MSG_IOV, "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d", vf->abs_vfid, vfop->rc); if (vfop->rc) goto op_err; - return; + bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); case BNX2X_VFOP_QFLR_TERMINATE: qstate = &vfop->op_p->qctor.qstate; @@ -1332,10 +1371,13 @@ int bnx2x_vfop_qdown_cmd(struct bnx2x *bp, { struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + /* for non leading queues skip directly to qdown sate */ if (vfop) { vfop->args.qx.qid = qid; - bnx2x_vfop_opset(BNX2X_VFOP_QTEARDOWN_RXMODE, - bnx2x_vfop_qdown, cmd->done); + bnx2x_vfop_opset(qid == LEADING_IDX ? + BNX2X_VFOP_QTEARDOWN_RXMODE : + BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown, + cmd->done); return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown, cmd->block); } @@ -1488,15 +1530,16 @@ int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid) * both known */ static void -bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc) +bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) { + struct vf_pf_resc_request *resc = &vf->alloc_resc; u16 vlan_count = 0; /* will be set only during VF-ACQUIRE */ resc->num_rxqs = 0; resc->num_txqs = 0; - /* no credit calculcis for macs (just yet) */ + /* no credit calculations for macs (just yet) */ resc->num_mac_filters = 1; /* divvy up vlan rules */ @@ -1508,13 +1551,14 @@ bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc) resc->num_mc_filters = 0; /* num_sbs already set */ + resc->num_sbs = vf->sb_count; } /* FLR routines: */ static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) { /* reset the state variables */ - bnx2x_iov_static_resc(bp, &vf->alloc_resc); + bnx2x_iov_static_resc(bp, vf); vf->state = VF_FREE; } @@ -1734,8 +1778,7 @@ void bnx2x_iov_init_dq(struct bnx2x *bp) /* The VF doorbell size 0 - *B, 4 - 128B. We set it here to match * the Pf doorbell size although the 2 are independent. */ - REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, - BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT); + REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3); /* No security checks for now - * configure single rule (out of 16) mask = 0x1, value = 0x0, @@ -1802,7 +1845,7 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp) { int sb_id; u32 val; - u8 fid; + u8 fid, current_pf = 0; /* IGU in normal mode - read CAM */ for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) { @@ -1810,16 +1853,18 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp) if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) continue; fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID); - if (!(fid & IGU_FID_ENCODE_IS_PF)) + if (fid & IGU_FID_ENCODE_IS_PF) + current_pf = fid & IGU_FID_PF_NUM_MASK; + else if (current_pf == BP_ABS_FUNC(bp)) bnx2x_vf_set_igu_info(bp, sb_id, (fid & IGU_FID_VF_NUM_MASK)); - DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n", ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"), ((fid & IGU_FID_ENCODE_IS_PF) ? 
(fid & IGU_FID_PF_NUM_MASK) : (fid & IGU_FID_VF_NUM_MASK)), sb_id, GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)); } + DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool); } static void __bnx2x_iov_free_vfdb(struct bnx2x *bp) @@ -1885,23 +1930,11 @@ static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov) return 0; } -static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp) -{ - int i; - u8 queue_count = 0; - - if (IS_SRIOV(bp)) - for_each_vf(bp, i) - queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs); - - return queue_count; -} - /* must be called after PF bars are mapped */ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, - int num_vfs_param) + int num_vfs_param) { - int err, i, qcount; + int err, i; struct bnx2x_sriov *iov; struct pci_dev *dev = bp->pdev; @@ -1999,12 +2032,13 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, /* re-read the IGU CAM for VFs - index and abs_vfid must be set */ bnx2x_get_vf_igu_cam_info(bp); - /* get the total queue count and allocate the global queue arrays */ - qcount = bnx2x_iov_get_max_queue_count(bp); - /* allocate the queue arrays for all VFs */ - bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue), - GFP_KERNEL); + bp->vfdb->vfqs = kzalloc( + BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue), + GFP_KERNEL); + + DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs); + if (!bp->vfdb->vfqs) { BNX2X_ERR("failed to allocate vf queue array\n"); err = -ENOMEM; @@ -2125,49 +2159,14 @@ static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, q_type); DP(BNX2X_MSG_IOV, - "initialized vf %d's queue object. func id set to %d\n", - vf->abs_vfid, q->sp_obj.func_id); - - /* mac/vlan objects are per queue, but only those - * that belong to the leading queue are initialized - */ - if (vfq_is_leading(q)) { - /* mac */ - bnx2x_init_mac_obj(bp, &q->mac_obj, - cl_id, q->cid, func_id, - bnx2x_vf_sp(bp, vf, mac_rdata), - bnx2x_vf_sp_map(bp, vf, mac_rdata), - BNX2X_FILTER_MAC_PENDING, - &vf->filter_state, - BNX2X_OBJ_TYPE_RX_TX, - &bp->macs_pool); - /* vlan */ - bnx2x_init_vlan_obj(bp, &q->vlan_obj, - cl_id, q->cid, func_id, - bnx2x_vf_sp(bp, vf, vlan_rdata), - bnx2x_vf_sp_map(bp, vf, vlan_rdata), - BNX2X_FILTER_VLAN_PENDING, - &vf->filter_state, - BNX2X_OBJ_TYPE_RX_TX, - &bp->vlans_pool); - - /* mcast */ - bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id, - q->cid, func_id, func_id, - bnx2x_vf_sp(bp, vf, mcast_rdata), - bnx2x_vf_sp_map(bp, vf, mcast_rdata), - BNX2X_FILTER_MCAST_PENDING, - &vf->filter_state, - BNX2X_OBJ_TYPE_RX_TX); - - vf->leading_rss = cl_id; - } + "initialized vf %d's queue object. func id set to %d. 
cid set to 0x%x\n", + vf->abs_vfid, q->sp_obj.func_id, q->cid); } /* called by bnx2x_nic_load */ int bnx2x_iov_nic_init(struct bnx2x *bp) { - int vfid, qcount, i; + int vfid; if (!IS_SRIOV(bp)) { DP(BNX2X_MSG_IOV, "vfdb was not allocated\n"); @@ -2196,7 +2195,7 @@ int bnx2x_iov_nic_init(struct bnx2x *bp) BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt); /* init statically provisioned resources */ - bnx2x_iov_static_resc(bp, &vf->alloc_resc); + bnx2x_iov_static_resc(bp, vf); /* queues are initialized during VF-ACQUIRE */ @@ -2232,13 +2231,12 @@ int bnx2x_iov_nic_init(struct bnx2x *bp) } /* Final VF init */ - qcount = 0; - for_each_vf(bp, i) { - struct bnx2x_virtf *vf = BP_VF(bp, i); + for_each_vf(bp, vfid) { + struct bnx2x_virtf *vf = BP_VF(bp, vfid); /* fill in the BDF and bars */ - vf->bus = bnx2x_vf_bus(bp, i); - vf->devfn = bnx2x_vf_devfn(bp, i); + vf->bus = bnx2x_vf_bus(bp, vfid); + vf->devfn = bnx2x_vf_devfn(bp, vfid); bnx2x_vf_set_bars(bp, vf); DP(BNX2X_MSG_IOV, @@ -2247,10 +2245,6 @@ int bnx2x_iov_nic_init(struct bnx2x *bp) (unsigned)vf->bars[0].bar, vf->bars[0].size, (unsigned)vf->bars[1].bar, vf->bars[1].size, (unsigned)vf->bars[2].bar, vf->bars[2].size); - - /* set local queue arrays */ - vf->vfqs = &bp->vfdb->vfqs[qcount]; - qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs); } return 0; @@ -2556,6 +2550,9 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) for_each_vfq(vf, j) { struct bnx2x_vf_queue *rxq = vfq_get(vf, j); + dma_addr_t q_stats_addr = + vf->fw_stat_map + j * vf->stats_stride; + /* collect stats fro active queues only */ if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) == BNX2X_Q_LOGICAL_STATE_STOPPED) @@ -2563,13 +2560,13 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) /* create stats query entry for this queue */ cur_query_entry->kind = STATS_TYPE_QUEUE; - cur_query_entry->index = vfq_cl_id(vf, rxq); + cur_query_entry->index = vfq_stat_id(vf, rxq); cur_query_entry->funcID = cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid)); cur_query_entry->address.hi = - cpu_to_le32(U64_HI(vf->fw_stat_map)); + cpu_to_le32(U64_HI(q_stats_addr)); cur_query_entry->address.lo = - cpu_to_le32(U64_LO(vf->fw_stat_map)); + cpu_to_le32(U64_LO(q_stats_addr)); DP(BNX2X_MSG_IOV, "added address %x %x for vf %d queue %d client %d\n", cur_query_entry->address.hi, @@ -2578,6 +2575,10 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) cur_query_entry++; cur_data_offset += sizeof(struct per_queue_stats); stats_count++; + + /* all stats are coalesced to the leading queue */ + if (vf->cfg_flags & VF_CFG_STATS_COALESCE) + break; } } bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count; @@ -2596,6 +2597,11 @@ void bnx2x_iov_sp_task(struct bnx2x *bp) for_each_vf(bp, i) { struct bnx2x_virtf *vf = BP_VF(bp, i); + if (!vf) { + BNX2X_ERR("VF was null! 
skipping...\n"); + continue; + } + if (!list_empty(&vf->op_list_head) && atomic_read(&vf->op_in_progress)) { DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i); @@ -2743,7 +2749,7 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q = vfq_get(vf, i); if (!q) { - DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i); + BNX2X_ERR("q number %d was not allocated\n", i); return -EINVAL; } @@ -2947,6 +2953,43 @@ op_done: bnx2x_vfop_end(bp, vf, vfop); } +static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); + enum bnx2x_vfop_rss_state state; + + if (!vfop) { + BNX2X_ERR("vfop was null\n"); + return; + } + + state = vfop->state; + bnx2x_vfop_reset_wq(vf); + + if (vfop->rc < 0) + goto op_err; + + DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); + + switch (state) { + case BNX2X_VFOP_RSS_CONFIG: + /* next state */ + vfop->state = BNX2X_VFOP_RSS_DONE; + bnx2x_config_rss(bp, &vfop->op_p->rss); + bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); +op_err: + BNX2X_ERR("RSS error: rc %d\n", vfop->rc); +op_done: + case BNX2X_VFOP_RSS_DONE: + bnx2x_vfop_end(bp, vf, vfop); + return; + default: + bnx2x_vfop_default(state); + } +op_pending: + return; +} + int bnx2x_vfop_release_cmd(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vfop_cmd *cmd) @@ -2961,6 +3004,21 @@ int bnx2x_vfop_release_cmd(struct bnx2x *bp, return -ENOMEM; } +int bnx2x_vfop_rss_cmd(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vfop_cmd *cmd) +{ + struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + + if (vfop) { + bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss, + cmd->done); + return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss, + cmd->block); + } + return -ENOMEM; +} + /* VF release ~ VF close + VF release-resources * Release is the ultimate SW shutdown and is called whenever an * irrecoverable error is encountered. @@ -2972,6 +3030,8 @@ void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block) .block = block, }; int rc; + + DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid); bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); rc = bnx2x_vfop_release_cmd(bp, vf, &cmd); @@ -3000,6 +3060,12 @@ static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf, void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, enum channel_tlvs tlv) { + /* we don't lock the channel for unsupported tlvs */ + if (!bnx2x_tlv_supported(tlv)) { + BNX2X_ERR("attempting to lock with unsupported tlv. 
Aborting\n"); + return; + } + /* lock the channel */ mutex_lock(&vf->op_mutex); @@ -3014,19 +3080,32 @@ void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, enum channel_tlvs expected_tlv) { + enum channel_tlvs current_tlv; + + if (!vf) { + BNX2X_ERR("VF was %p\n", vf); + return; + } + + current_tlv = vf->op_current; + + /* we don't unlock the channel for unsupported tlvs */ + if (!bnx2x_tlv_supported(expected_tlv)) + return; + WARN(expected_tlv != vf->op_current, "lock mismatch: expected %d found %d", expected_tlv, vf->op_current); + /* record the locking op */ + vf->op_current = CHANNEL_TLV_NONE; + /* lock the channel */ mutex_unlock(&vf->op_mutex); /* log the unlock */ DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n", vf->abs_vfid, vf->op_current); - - /* record the locking op */ - vf->op_current = CHANNEL_TLV_NONE; } int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) @@ -3057,11 +3136,77 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) return bnx2x_enable_sriov(bp); } } +#define IGU_ENTRY_SIZE 4 int bnx2x_enable_sriov(struct bnx2x *bp) { int rc = 0, req_vfs = bp->requested_nr_virtfn; + int vf_idx, sb_idx, vfq_idx, qcount, first_vf; + u32 igu_entry, address; + u16 num_vf_queues; + + if (req_vfs == 0) + return 0; + + first_vf = bp->vfdb->sriov.first_vf_in_pf; + + /* statically distribute vf sb pool between VFs */ + num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES, + BP_VFDB(bp)->vf_sbs_pool / req_vfs); + + /* zero previous values learned from igu cam */ + for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) { + struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); + + vf->sb_count = 0; + vf_sb_count(BP_VF(bp, vf_idx)) = 0; + } + bp->vfdb->vf_sbs_pool = 0; + + /* prepare IGU cam */ + sb_idx = BP_VFDB(bp)->first_vf_igu_entry; + address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE; + for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { + for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) { + igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT | + vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT | + IGU_REG_MAPPING_MEMORY_VALID; + DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n", + sb_idx, vf_idx); + REG_WR(bp, address, igu_entry); + sb_idx++; + address += IGU_ENTRY_SIZE; + } + } + + /* Reinitialize vf database according to igu cam */ + bnx2x_get_vf_igu_cam_info(bp); + + DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n", + BP_VFDB(bp)->vf_sbs_pool, num_vf_queues); + + qcount = 0; + for_each_vf(bp, vf_idx) { + struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); + /* set local queue arrays */ + vf->vfqs = &bp->vfdb->vfqs[qcount]; + qcount += vf_sb_count(vf); + } + + /* prepare msix vectors in VF configuration space */ + for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { + bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); + REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, + num_vf_queues); + } + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); + + /* enable sriov. This will probe all the VFs, and consequentially cause + * the "acquire" messages to appear on the VF PF channel. 
+ */ + DP(BNX2X_MSG_IOV, "about to call enable sriov\n"); + pci_disable_sriov(bp->pdev); rc = pci_enable_sriov(bp->pdev, req_vfs); if (rc) { BNX2X_ERR("pci_enable_sriov failed with %d\n", rc); @@ -3089,9 +3234,8 @@ void bnx2x_disable_sriov(struct bnx2x *bp) pci_disable_sriov(bp->pdev); } -static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, - struct bnx2x_virtf **vf, - struct pf_vf_bulletin_content **bulletin) +int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, struct bnx2x_virtf **vf, + struct pf_vf_bulletin_content **bulletin) { if (bp->state != BNX2X_STATE_OPEN) { BNX2X_ERR("vf ndo called though PF is down\n"); @@ -3114,7 +3258,13 @@ static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, *bulletin = BP_VF_BULLETIN(bp, vfidx); if (!*vf) { - BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n", + BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n", + vfidx); + return -EINVAL; + } + + if (!(*vf)->vfqs) { + BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n", vfidx); return -EINVAL; } @@ -3142,8 +3292,8 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx, rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); if (rc) return rc; - mac_obj = &bnx2x_vfq(vf, 0, mac_obj); - vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj); + mac_obj = &bnx2x_leading_vfq(vf, mac_obj); + vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); if (!mac_obj || !vlan_obj) { BNX2X_ERR("VF partially initialized\n"); return -EINVAL; @@ -3155,10 +3305,13 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx, ivi->spoofchk = 1; /*always enabled */ if (vf->state == VF_ENABLED) { /* mac and vlan are in vlan_mac objects */ - mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac, - 0, ETH_ALEN); - vlan_obj->get_n_elements(bp, vlan_obj, 1, (u8 *)&ivi->vlan, - 0, VLAN_HLEN); + if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj))) + mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac, + 0, ETH_ALEN); + if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj))) + vlan_obj->get_n_elements(bp, vlan_obj, 1, + (u8 *)&ivi->vlan, 0, + VLAN_HLEN); } else { /* mac */ if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID)) @@ -3226,14 +3379,18 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) return rc; } - /* is vf initialized and queue set up? */ q_logical_state = - bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj)); + bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)); if (vf->state == VF_ENABLED && q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { /* configure the mac in device on this vf's queue */ unsigned long ramrod_flags = 0; - struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj); + struct bnx2x_vlan_mac_obj *mac_obj = + &bnx2x_leading_vfq(vf, mac_obj); + + rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)); + if (rc) + return rc; /* must lock vfpf channel to protect against vf flows */ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); @@ -3293,18 +3450,21 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) /* is vf initialized and queue set up? 
*/ q_logical_state = - bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj)); + bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)); if (vf->state == VF_ENABLED && q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { /* configure the vlan in device on this vf's queue */ unsigned long ramrod_flags = 0; unsigned long vlan_mac_flags = 0; struct bnx2x_vlan_mac_obj *vlan_obj = - &bnx2x_vfq(vf, 0, vlan_obj); + &bnx2x_leading_vfq(vf, vlan_obj); struct bnx2x_vlan_mac_ramrod_params ramrod_param; struct bnx2x_queue_state_params q_params = {NULL}; struct bnx2x_queue_update_params *update_params; + rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)); + if (rc) + return rc; memset(&ramrod_param, 0, sizeof(ramrod_param)); /* must lock vfpf channel to protect against vf flows */ @@ -3324,7 +3484,7 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) */ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); q_params.cmd = BNX2X_Q_CMD_UPDATE; - q_params.q_obj = &bnx2x_vfq(vf, 0, sp_obj); + q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj); update_params = &q_params.params.update; __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, &update_params->update_flags); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index d143a7c..8e9847f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -81,6 +81,7 @@ struct bnx2x_vf_queue { u32 cid; u16 index; u16 sb_idx; + bool is_leading; }; /* struct bnx2x_vfop_qctor_params - prepare queue construction parameters: @@ -194,6 +195,7 @@ struct bnx2x_virtf { #define VF_CFG_INT_SIMD 0x0008 #define VF_CACHE_LINE 0x0010 #define VF_CFG_VLAN 0x0020 +#define VF_CFG_STATS_COALESCE 0x0040 u8 state; #define VF_FREE 0 /* VF ready to be acquired holds no resc */ @@ -213,6 +215,7 @@ struct bnx2x_virtf { /* dma */ dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */ + u16 stats_stride; dma_addr_t spq_map; dma_addr_t bulletin_map; @@ -239,7 +242,10 @@ struct bnx2x_virtf { u8 igu_base_id; /* base igu status block id */ struct bnx2x_vf_queue *vfqs; -#define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var) +#define LEADING_IDX 0 +#define bnx2x_vfq_is_leading(vfq) ((vfq)->index == LEADING_IDX) +#define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var) +#define bnx2x_leading_vfq(vf, var) ((vf)->vfqs[LEADING_IDX].var) u8 index; /* index in the vf array */ u8 abs_vfid; @@ -358,6 +364,10 @@ struct bnx2x_vf_sp { struct client_init_ramrod_data init_data; struct client_update_ramrod_data update_data; } q_data; + + union { + struct eth_rss_update_ramrod_data e2; + } rss_rdata; }; struct hw_dma { @@ -403,6 +413,10 @@ struct bnx2x_vfdb { #define FLRD_VFS_DWORDS (BNX2X_MAX_NUM_OF_VFS / 32) u32 flrd_vfs[FLRD_VFS_DWORDS]; + + /* the number of msix vectors belonging to this PF designated for VFs */ + u16 vf_sbs_pool; + u16 first_vf_igu_entry; }; /* queue access */ @@ -411,11 +425,6 @@ static inline struct bnx2x_vf_queue *vfq_get(struct bnx2x_virtf *vf, u8 index) return &(vf->vfqs[index]); } -static inline bool vfq_is_leading(struct bnx2x_vf_queue *vfq) -{ - return (vfq->index == 0); -} - /* FW ids */ static inline u8 vf_igu_sb(struct bnx2x_virtf *vf, u16 sb_idx) { @@ -434,7 +443,10 @@ static u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q) static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q) { - return vfq_cl_id(vf, q); + if (vf->cfg_flags & VF_CFG_STATS_COALESCE) + return vf->leading_rss; + else + return 
vfq_cl_id(vf, q); } static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q) @@ -691,6 +703,10 @@ int bnx2x_vfop_release_cmd(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vfop_cmd *cmd); +int bnx2x_vfop_rss_cmd(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vfop_cmd *cmd); + /* VF release ~ VF close + VF release-resources * * Release is the ultimate SW shutdown and is called whenever an @@ -758,7 +774,7 @@ int bnx2x_enable_sriov(struct bnx2x *bp); void bnx2x_disable_sriov(struct bnx2x *bp); static inline int bnx2x_vf_headroom(struct bnx2x *bp) { - return bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF; + return bp->vfdb->sriov.nr_virtfn * BNX2X_CIDS_PER_VF; } void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp); int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 2088063..a7e88a4 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -257,17 +257,23 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) /* humble our request */ req->resc_request.num_txqs = - bp->acquire_resp.resc.num_txqs; + min(req->resc_request.num_txqs, + bp->acquire_resp.resc.num_txqs); req->resc_request.num_rxqs = - bp->acquire_resp.resc.num_rxqs; + min(req->resc_request.num_rxqs, + bp->acquire_resp.resc.num_rxqs); req->resc_request.num_sbs = - bp->acquire_resp.resc.num_sbs; + min(req->resc_request.num_sbs, + bp->acquire_resp.resc.num_sbs); req->resc_request.num_mac_filters = - bp->acquire_resp.resc.num_mac_filters; + min(req->resc_request.num_mac_filters, + bp->acquire_resp.resc.num_mac_filters); req->resc_request.num_vlan_filters = - bp->acquire_resp.resc.num_vlan_filters; + min(req->resc_request.num_vlan_filters, + bp->acquire_resp.resc.num_vlan_filters); req->resc_request.num_mc_filters = - bp->acquire_resp.resc.num_mc_filters; + min(req->resc_request.num_mc_filters, + bp->acquire_resp.resc.num_mc_filters); /* Clear response buffer */ memset(&bp->vf2pf_mbox->resp, 0, @@ -293,7 +299,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) bp->common.flash_size = 0; bp->flags |= NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG; - bp->igu_sb_cnt = 1; + bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs; bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id; strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver, sizeof(bp->fw_ver)); @@ -452,6 +458,53 @@ free_irq: bnx2x_free_irq(bp); } +static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_queue *q) +{ + u8 cl_id = vfq_cl_id(vf, q); + u8 func_id = FW_VF_HANDLE(vf->abs_vfid); + + /* mac */ + bnx2x_init_mac_obj(bp, &q->mac_obj, + cl_id, q->cid, func_id, + bnx2x_vf_sp(bp, vf, mac_rdata), + bnx2x_vf_sp_map(bp, vf, mac_rdata), + BNX2X_FILTER_MAC_PENDING, + &vf->filter_state, + BNX2X_OBJ_TYPE_RX_TX, + &bp->macs_pool); + /* vlan */ + bnx2x_init_vlan_obj(bp, &q->vlan_obj, + cl_id, q->cid, func_id, + bnx2x_vf_sp(bp, vf, vlan_rdata), + bnx2x_vf_sp_map(bp, vf, vlan_rdata), + BNX2X_FILTER_VLAN_PENDING, + &vf->filter_state, + BNX2X_OBJ_TYPE_RX_TX, + &bp->vlans_pool); + + /* mcast */ + bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id, + q->cid, func_id, func_id, + bnx2x_vf_sp(bp, vf, mcast_rdata), + bnx2x_vf_sp_map(bp, vf, mcast_rdata), + BNX2X_FILTER_MCAST_PENDING, + &vf->filter_state, + BNX2X_OBJ_TYPE_RX_TX); + + /* rss */ + bnx2x_init_rss_config_obj(bp, 
&vf->rss_conf_obj, cl_id, q->cid, + func_id, func_id, + bnx2x_vf_sp(bp, vf, rss_rdata), + bnx2x_vf_sp_map(bp, vf, rss_rdata), + BNX2X_FILTER_RSS_CONF_PENDING, + &vf->filter_state, + BNX2X_OBJ_TYPE_RX_TX); + + vf->leading_rss = cl_id; + q->is_leading = true; +} + /* ask the pf to open a queue for the vf */ int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx) { @@ -948,7 +1001,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, /* fill in pfdev info */ resp->pfdev_info.chip_num = bp->common.chip_id; - resp->pfdev_info.db_size = (1 << BNX2X_DB_SHIFT); + resp->pfdev_info.db_size = bp->db_size; resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2; resp->pfdev_info.pf_cap = (PFVF_CAP_RSS | /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA); @@ -1054,8 +1107,13 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, /* record ghost addresses from vf message */ vf->spq_map = init->spq_addr; vf->fw_stat_map = init->stats_addr; + vf->stats_stride = init->stats_stride; vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr); + /* set VF multiqueue statistics collection mode */ + if (init->flags & VFPF_INIT_FLG_STATS_COALESCE) + vf->cfg_flags |= VF_CFG_STATS_COALESCE; + /* response */ bnx2x_vf_mbx_resp(bp, vf); } @@ -1080,6 +1138,8 @@ static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags, __set_bit(BNX2X_Q_FLG_HC, sp_q_flags); if (mbx_q_flags & VFPF_QUEUE_FLG_DHC) __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags); + if (mbx_q_flags & VFPF_QUEUE_FLG_LEADING_RSS) + __set_bit(BNX2X_Q_FLG_LEADING_RSS, sp_q_flags); /* outer vlan removal is set according to PF's multi function mode */ if (IS_MF_SD(bp)) @@ -1113,6 +1173,9 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_queue_init_params *init_p; struct bnx2x_queue_setup_params *setup_p; + if (bnx2x_vfq_is_leading(q)) + bnx2x_leading_vfq_init(bp, vf, q); + /* re-init the VF operation context */ memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor)); setup_p = &vf->op_params.qctor.prep_qsetup; @@ -1552,6 +1615,68 @@ static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_mbx_resp(bp, vf); } +static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + struct bnx2x_vfop_cmd cmd = { + .done = bnx2x_vf_mbx_resp, + .block = false, + }; + struct bnx2x_config_rss_params *vf_op_params = &vf->op_params.rss; + struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss; + + if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE || + rss_tlv->rss_key_size != T_ETH_RSS_KEY) { + BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n", + vf->index); + vf->op_rc = -EINVAL; + goto mbx_resp; + } + + /* set vfop params according to rss tlv */ + memcpy(vf_op_params->ind_table, rss_tlv->ind_table, + T_ETH_INDIRECTION_TABLE_SIZE); + memcpy(vf_op_params->rss_key, rss_tlv->rss_key, + sizeof(rss_tlv->rss_key)); + vf_op_params->rss_obj = &vf->rss_conf_obj; + vf_op_params->rss_result_mask = rss_tlv->rss_result_mask; + + /* flags handled individually for backward/forward compatability */ + if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED) + __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR) + __set_bit(BNX2X_RSS_MODE_REGULAR, &vf_op_params->rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH) + __set_bit(BNX2X_RSS_SET_SRCH, &vf_op_params->rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_IPV4) + __set_bit(BNX2X_RSS_IPV4, 
&vf_op_params->rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) + __set_bit(BNX2X_RSS_IPV4_TCP, &vf_op_params->rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) + __set_bit(BNX2X_RSS_IPV4_UDP, &vf_op_params->rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_IPV6) + __set_bit(BNX2X_RSS_IPV6, &vf_op_params->rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) + __set_bit(BNX2X_RSS_IPV6_TCP, &vf_op_params->rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP) + __set_bit(BNX2X_RSS_IPV6_UDP, &vf_op_params->rss_flags); + + if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) && + rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) || + (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) && + rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) { + BNX2X_ERR("about to hit a FW assert. aborting...\n"); + vf->op_rc = -EINVAL; + goto mbx_resp; + } + + vf->op_rc = bnx2x_vfop_rss_cmd(bp, vf, &cmd); + +mbx_resp: + if (vf->op_rc) + bnx2x_vf_mbx_resp(bp, vf); +} + /* dispatch request */ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vf_mbx *mbx) @@ -1588,6 +1713,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, case CHANNEL_TLV_RELEASE: bnx2x_vf_mbx_release_vf(bp, vf, mbx); break; + case CHANNEL_TLV_UPDATE_RSS: + bnx2x_vf_mbx_update_rss(bp, vf, mbx); + break; } } else { @@ -1607,7 +1735,7 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, /* test whether we can respond to the VF (do we have an address * for it?) */ - if (vf->state == VF_ACQUIRED) { + if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) { /* mbx_resp uses the op_rc of the VF */ vf->op_rc = PFVF_STATUS_NOT_SUPPORTED; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h index f3ad174..1179fe0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h @@ -51,6 +51,7 @@ struct hw_sb_info { #define VFPF_QUEUE_FLG_COS 0x0080 #define VFPF_QUEUE_FLG_HC 0x0100 #define VFPF_QUEUE_FLG_DHC 0x0200 +#define VFPF_QUEUE_FLG_LEADING_RSS 0x0400 #define VFPF_QUEUE_DROP_IP_CS_ERR (1 << 0) #define VFPF_QUEUE_DROP_TCP_CS_ERR (1 << 1) @@ -131,6 +132,27 @@ struct vfpf_q_op_tlv { u8 padding[3]; }; +/* receive side scaling tlv */ +struct vfpf_rss_tlv { + struct vfpf_first_tlv first_tlv; + u32 rss_flags; +#define VFPF_RSS_MODE_DISABLED (1 << 0) +#define VFPF_RSS_MODE_REGULAR (1 << 1) +#define VFPF_RSS_SET_SRCH (1 << 2) +#define VFPF_RSS_IPV4 (1 << 3) +#define VFPF_RSS_IPV4_TCP (1 << 4) +#define VFPF_RSS_IPV4_UDP (1 << 5) +#define VFPF_RSS_IPV6 (1 << 6) +#define VFPF_RSS_IPV6_TCP (1 << 7) +#define VFPF_RSS_IPV6_UDP (1 << 8) + u8 rss_result_mask; + u8 ind_table_size; + u8 rss_key_size; + u8 padding; + u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE]; + u32 rss_key[T_ETH_RSS_KEY]; /* hash values */ +}; + /* acquire response tlv - carries the allocated resources */ struct pfvf_acquire_resp_tlv { struct pfvf_tlv hdr; @@ -166,12 +188,20 @@ struct pfvf_acquire_resp_tlv { } resc; }; +#define VFPF_INIT_FLG_STATS_COALESCE (1 << 0) /* when set the VFs queues + * stats will be coalesced on + * the leading RSS queue + */ + /* Init VF */ struct vfpf_init_tlv { struct vfpf_first_tlv first_tlv; aligned_u64 sb_addr[PFVF_MAX_SBS_PER_VF]; /* vf_sb based */ aligned_u64 spq_addr; aligned_u64 stats_addr; + u16 stats_stride; + u32 flags; + u32 padding[2]; }; /* Setup Queue */ @@ -293,13 +323,14 @@ union vfpf_tlvs { struct vfpf_q_op_tlv q_op; struct vfpf_setup_q_tlv setup_q; struct 
vfpf_set_q_filters_tlv set_q_filters; - struct vfpf_release_tlv release; - struct channel_list_end_tlv list_end; + struct vfpf_release_tlv release; + struct vfpf_rss_tlv update_rss; + struct channel_list_end_tlv list_end; struct tlv_buffer_size tlv_buf_size; }; union pfvf_tlvs { - struct pfvf_general_resp_tlv general_resp; + struct pfvf_general_resp_tlv general_resp; struct pfvf_acquire_resp_tlv acquire_resp; struct channel_list_end_tlv list_end; struct tlv_buffer_size tlv_buf_size; @@ -355,14 +386,18 @@ enum channel_tlvs { CHANNEL_TLV_INIT, CHANNEL_TLV_SETUP_Q, CHANNEL_TLV_SET_Q_FILTERS, + CHANNEL_TLV_ACTIVATE_Q, + CHANNEL_TLV_DEACTIVATE_Q, CHANNEL_TLV_TEARDOWN_Q, CHANNEL_TLV_CLOSE, CHANNEL_TLV_RELEASE, + CHANNEL_TLV_UPDATE_RSS_DEPRECATED, CHANNEL_TLV_PF_RELEASE_VF, CHANNEL_TLV_LIST_END, CHANNEL_TLV_FLR, CHANNEL_TLV_PF_SET_MAC, CHANNEL_TLV_PF_SET_VLAN, + CHANNEL_TLV_UPDATE_RSS, CHANNEL_TLV_MAX }; -- cgit v0.10.2 From 60cad4e67bd6ff400e7ea61fe762b3042b12ae9d Mon Sep 17 00:00:00 2001 From: Ariel Elior Date: Wed, 4 Sep 2013 14:09:22 +0300 Subject: bnx2x: VF RSS support - VF side In this patch capabilities are added to the Vf driver to request multiple queues over the VF PF channel, and the logic for requesting rss configuration for said queues. Signed-off-by: Ariel Elior Signed-off-by: Eilong Greenstein Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index e7400d9..8d726f6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -1942,7 +1942,7 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp) } } -static int bnx2x_init_rss_pf(struct bnx2x *bp) +static int bnx2x_init_rss(struct bnx2x *bp) { int i; u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); @@ -1966,8 +1966,8 @@ static int bnx2x_init_rss_pf(struct bnx2x *bp) return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp)); } -int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, - bool config_hash) +int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, + bool config_hash, bool enable) { struct bnx2x_config_rss_params params = {NULL}; @@ -1982,17 +1982,21 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags); - __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags); - - /* RSS configuration */ - __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags); - __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags); - __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags); - __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags); - if (rss_obj->udp_rss_v4) - __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags); - if (rss_obj->udp_rss_v6) - __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags); + if (enable) { + __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags); + + /* RSS configuration */ + __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags); + __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags); + __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags); + __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags); + if (rss_obj->udp_rss_v4) + __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags); + if (rss_obj->udp_rss_v6) + __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags); + } else { + __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags); + } /* Hash bits */ params.rss_result_mask = MULTI_MASK; @@ -2001,11 +2005,14 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, if (config_hash) { /* RSS keys */ - prandom_bytes(params.rss_key, sizeof(params.rss_key)); + 
prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4); __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags); } - return bnx2x_config_rss(bp, ¶ms); + if (IS_PF(bp)) + return bnx2x_config_rss(bp, ¶ms); + else + return bnx2x_vfpf_config_rss(bp, ¶ms); } static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) @@ -2645,38 +2652,32 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) /* initialize FW coalescing state machines in RAM */ bnx2x_update_coalesce(bp); + } - /* setup the leading queue */ - rc = bnx2x_setup_leading(bp); - if (rc) { - BNX2X_ERR("Setup leading failed!\n"); - LOAD_ERROR_EXIT(bp, load_error3); - } - - /* set up the rest of the queues */ - for_each_nondefault_eth_queue(bp, i) { - rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); - if (rc) { - BNX2X_ERR("Queue setup failed\n"); - LOAD_ERROR_EXIT(bp, load_error3); - } - } + /* setup the leading queue */ + rc = bnx2x_setup_leading(bp); + if (rc) { + BNX2X_ERR("Setup leading failed!\n"); + LOAD_ERROR_EXIT(bp, load_error3); + } - /* setup rss */ - rc = bnx2x_init_rss_pf(bp); + /* set up the rest of the queues */ + for_each_nondefault_eth_queue(bp, i) { + if (IS_PF(bp)) + rc = bnx2x_setup_queue(bp, &bp->fp[i], false); + else /* VF */ + rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false); if (rc) { - BNX2X_ERR("PF RSS init failed\n"); + BNX2X_ERR("Queue %d setup failed\n", i); LOAD_ERROR_EXIT(bp, load_error3); } + } - } else { /* vf */ - for_each_eth_queue(bp, i) { - rc = bnx2x_vfpf_setup_q(bp, i); - if (rc) { - BNX2X_ERR("Queue setup failed\n"); - LOAD_ERROR_EXIT(bp, load_error3); - } - } + /* setup rss */ + rc = bnx2x_init_rss(bp); + if (rc) { + BNX2X_ERR("PF RSS init failed\n"); + LOAD_ERROR_EXIT(bp, load_error3); } /* Now when Clients are configured we are ready to work */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index affb764..da8fcaa 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -105,9 +105,10 @@ void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link); * @rss_obj: RSS object to use * @ind_table: indirection table to configure * @config_hash: re-configure RSS hash keys configuration + * @enable: enabled or disabled configuration */ -int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, - bool config_hash); +int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, + bool config_hash, bool enable); /** * bnx2x__init_func_obj - init function object @@ -980,7 +981,7 @@ static inline int func_by_vn(struct bnx2x *bp, int vn) static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash) { - return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, config_hash); + return bnx2x_rss(bp, &bp->rss_conf_obj, config_hash, true); } /** diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index c5f2251..2612e3c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -3281,14 +3281,14 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) DP(BNX2X_MSG_ETHTOOL, "rss re-configured, UDP 4-tupple %s\n", udp_rss_requested ? 
"enabled" : "disabled"); - return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0); + return bnx2x_rss(bp, &bp->rss_conf_obj, false, true); } else if ((info->flow_type == UDP_V6_FLOW) && (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) { bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested; DP(BNX2X_MSG_ETHTOOL, "rss re-configured, UDP 4-tupple %s\n", udp_rss_requested ? "enabled" : "disabled"); - return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0); + return bnx2x_rss(bp, &bp->rss_conf_obj, false, true); } return 0; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 5729aa7..c69990d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -8060,7 +8060,10 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set) int bnx2x_setup_leading(struct bnx2x *bp) { - return bnx2x_setup_queue(bp, &bp->fp[0], 1); + if (IS_PF(bp)) + return bnx2x_setup_queue(bp, &bp->fp[0], true); + else /* VF */ + return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true); } /** @@ -8074,8 +8077,10 @@ int bnx2x_set_int_mode(struct bnx2x *bp) { int rc = 0; - if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) + if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) { + BNX2X_ERR("VF not loaded since interrupt mode not msix\n"); return -EINVAL; + } switch (int_mode) { case BNX2X_INT_MODE_MSIX: @@ -11658,9 +11663,11 @@ static int bnx2x_init_bp(struct bnx2x *bp) * second status block for the L2 queue, and a third status block for * CNIC if supported. */ - if (CNIC_SUPPORT(bp)) + if (IS_VF(bp)) + bp->min_msix_vec_cnt = 1; + else if (CNIC_SUPPORT(bp)) bp->min_msix_vec_cnt = 3; - else + else /* PF w/o cnic */ bp->min_msix_vec_cnt = 2; BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt); @@ -12571,8 +12578,7 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp) * @dev: pci device * */ -static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, - int cnic_cnt, bool is_vf) +static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt) { int index; u16 control = 0; @@ -12598,7 +12604,7 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, index = control & PCI_MSIX_FLAGS_QSIZE; - return is_vf ? index + 1 : index; + return index; } static int set_max_cos_est(int chip_id) @@ -12678,10 +12684,13 @@ static int bnx2x_init_one(struct pci_dev *pdev, is_vf = set_is_vf(ent->driver_data); cnic_cnt = is_vf ? 0 : 1; - max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt, is_vf); + max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt); + + /* add another SB for VF as it has no default SB */ + max_non_def_sbs += is_vf ? 1 : 0; /* Maximum number of RSS queues: one IGU SB goes to CNIC */ - rss_count = is_vf ? 
1 : max_non_def_sbs - cnic_cnt; + rss_count = max_non_def_sbs - cnic_cnt; if (rss_count < 1) return -EINVAL; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index 8e9847f..2a8c1dc 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -746,9 +746,12 @@ int bnx2x_vfpf_release(struct bnx2x *bp); int bnx2x_vfpf_release(struct bnx2x *bp); int bnx2x_vfpf_init(struct bnx2x *bp); void bnx2x_vfpf_close_vf(struct bnx2x *bp); -int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx); +int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, + bool is_leading); int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx); int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set); +int bnx2x_vfpf_config_rss(struct bnx2x *bp, + struct bnx2x_config_rss_params *params); int bnx2x_vfpf_set_mcast(struct net_device *dev); int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp); @@ -809,7 +812,7 @@ static inline int bnx2x_vfpf_acquire(struct bnx2x *bp, static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; } static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; } static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {} -static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx) {return 0; } +static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; } static inline int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) {return 0; } static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set) {return 0; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index a7e88a4..6cfb887 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -379,6 +379,8 @@ int bnx2x_vfpf_init(struct bnx2x *bp) req->stats_addr = bp->fw_stats_data_mapping + offsetof(struct bnx2x_fw_stats_data, queue_stats); + req->stats_stride = sizeof(struct per_queue_stats); + /* add list termination tlv */ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); @@ -506,11 +508,12 @@ static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, } /* ask the pf to open a queue for the vf */ -int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx) +int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, + bool is_leading) { struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q; struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; - struct bnx2x_fastpath *fp = &bp->fp[fp_idx]; + u8 fp_idx = fp->index; u16 tpa_agg_size = 0, flags = 0; int rc; @@ -526,6 +529,9 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx) tpa_agg_size = TPA_AGG_SIZE; } + if (is_leading) + flags |= VFPF_QUEUE_FLG_LEADING_RSS; + /* calculate queue flags */ flags |= VFPF_QUEUE_FLG_STATS; flags |= VFPF_QUEUE_FLG_CACHE_ALIGN; @@ -699,6 +705,71 @@ out: return 0; } +/* request pf to config rss table for vf queues*/ +int bnx2x_vfpf_config_rss(struct bnx2x *bp, + struct bnx2x_config_rss_params *params) +{ + struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; + struct vfpf_rss_tlv *req = &bp->vf2pf_mbox->req.update_rss; + int rc = 0; + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_RSS, + sizeof(*req)); + + /* add list termination tlv */ 
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + memcpy(req->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); + memcpy(req->rss_key, params->rss_key, sizeof(params->rss_key)); + req->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE; + req->rss_key_size = T_ETH_RSS_KEY; + req->rss_result_mask = params->rss_result_mask; + + /* flags handled individually for backward/forward compatability */ + if (params->rss_flags & (1 << BNX2X_RSS_MODE_DISABLED)) + req->rss_flags |= VFPF_RSS_MODE_DISABLED; + if (params->rss_flags & (1 << BNX2X_RSS_MODE_REGULAR)) + req->rss_flags |= VFPF_RSS_MODE_REGULAR; + if (params->rss_flags & (1 << BNX2X_RSS_SET_SRCH)) + req->rss_flags |= VFPF_RSS_SET_SRCH; + if (params->rss_flags & (1 << BNX2X_RSS_IPV4)) + req->rss_flags |= VFPF_RSS_IPV4; + if (params->rss_flags & (1 << BNX2X_RSS_IPV4_TCP)) + req->rss_flags |= VFPF_RSS_IPV4_TCP; + if (params->rss_flags & (1 << BNX2X_RSS_IPV4_UDP)) + req->rss_flags |= VFPF_RSS_IPV4_UDP; + if (params->rss_flags & (1 << BNX2X_RSS_IPV6)) + req->rss_flags |= VFPF_RSS_IPV6; + if (params->rss_flags & (1 << BNX2X_RSS_IPV6_TCP)) + req->rss_flags |= VFPF_RSS_IPV6_TCP; + if (params->rss_flags & (1 << BNX2X_RSS_IPV6_UDP)) + req->rss_flags |= VFPF_RSS_IPV6_UDP; + + DP(BNX2X_MSG_IOV, "rss flags %x\n", req->rss_flags); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + + /* send message to pf */ + rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); + if (rc) { + BNX2X_ERR("failed to send message to pf. rc was %d\n", rc); + goto out; + } + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + BNX2X_ERR("failed to send rss message to PF over Vf PF channel %d\n", + resp->hdr.status); + rc = -EINVAL; + } +out: + bnx2x_vfpf_finalize(bp, &req->first_tlv); + + return 0; +} + int bnx2x_vfpf_set_mcast(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); -- cgit v0.10.2 From b55b76b22144ab97cefcb3862bab61f088adf411 Mon Sep 17 00:00:00 2001 From: Duan Jiong Date: Wed, 4 Sep 2013 19:44:21 +0800 Subject: ipv6:introduce function to find route for redirect RFC 4861 says that the IP source address of the Redirect is the same as the current first-hop router for the specified ICMP Destination Address, so the gateway should be taken into consideration when we find the route for redirect. There was once a check in commit a6279458c534d01ccc39498aba61c93083ee0372 ("NDISC: Search over all possible rules on receipt of redirect.") and the check went away in commit b94f1c0904da9b8bf031667afc48080ba7c3e8c9 ("ipv6: Use icmpv6_notify() to propagate redirect, instead of rt6_redirect()"). The bug is only "exploitable" on layer-2 because the source address of the redirect is checked to be a valid link-local address but it makes spoofing a lot easier in the same L2 domain nonetheless. Thanks very much for Hannes's help. Signed-off-by: Duan Jiong Acked-by: Hannes Frederic Sowa Signed-off-by: David S. 
Miller diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index bb02e17..73784c3 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c @@ -628,7 +628,7 @@ static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, return; if (type == NDISC_REDIRECT) - ip6_redirect(skb, net, 0, 0); + ip6_redirect(skb, net, skb->dev->ifindex, 0); else ip6_update_pmtu(skb, net, info, 0, 0); xfrm_state_put(x); diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index aeac0dc..d3618a7 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -447,7 +447,7 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, return; if (type == NDISC_REDIRECT) - ip6_redirect(skb, net, 0, 0); + ip6_redirect(skb, net, skb->dev->ifindex, 0); else ip6_update_pmtu(skb, net, info, 0, 0); xfrm_state_put(x); diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 7cfc8d2..73681c2 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -92,7 +92,7 @@ static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, if (type == ICMPV6_PKT_TOOBIG) ip6_update_pmtu(skb, net, info, 0, 0); else if (type == NDISC_REDIRECT) - ip6_redirect(skb, net, 0, 0); + ip6_redirect(skb, net, skb->dev->ifindex, 0); if (!(type & ICMPV6_INFOMSG_MASK)) if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST) diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c index 7af5aee..5636a91 100644 --- a/net/ipv6/ipcomp6.c +++ b/net/ipv6/ipcomp6.c @@ -76,7 +76,7 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, return; if (type == NDISC_REDIRECT) - ip6_redirect(skb, net, 0, 0); + ip6_redirect(skb, net, skb->dev->ifindex, 0); else ip6_update_pmtu(skb, net, info, 0, 0); xfrm_state_put(x); diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 2221065..c4bc7a3 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -1367,7 +1367,8 @@ static void ndisc_redirect_rcv(struct sk_buff *skb) return; if (!ndopts.nd_opts_rh) { - ip6_redirect_no_header(skb, dev_net(skb->dev), 0, 0); + ip6_redirect_no_header(skb, dev_net(skb->dev), + skb->dev->ifindex, 0); return; } diff --git a/net/ipv6/route.c b/net/ipv6/route.c index b770085..c979dd9 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1156,6 +1156,77 @@ void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu) } EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu); +/* Handle redirects */ +struct ip6rd_flowi { + struct flowi6 fl6; + struct in6_addr gateway; +}; + +static struct rt6_info *__ip6_route_redirect(struct net *net, + struct fib6_table *table, + struct flowi6 *fl6, + int flags) +{ + struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6; + struct rt6_info *rt; + struct fib6_node *fn; + + /* Get the "current" route for this destination and + * check if the redirect has come from approriate router. + * + * RFC 4861 specifies that redirects should only be + * accepted if they come from the nexthop to the target. + * Due to the way the routes are chosen, this notion + * is a bit fuzzy and one might need to check all possible + * routes. 
+ */ + + read_lock_bh(&table->tb6_lock); + fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); +restart: + for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) { + if (rt6_check_expired(rt)) + continue; + if (rt->dst.error) + break; + if (!(rt->rt6i_flags & RTF_GATEWAY)) + continue; + if (fl6->flowi6_oif != rt->dst.dev->ifindex) + continue; + if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway)) + continue; + break; + } + + if (!rt) + rt = net->ipv6.ip6_null_entry; + else if (rt->dst.error) { + rt = net->ipv6.ip6_null_entry; + goto out; + } + BACKTRACK(net, &fl6->saddr); +out: + dst_hold(&rt->dst); + + read_unlock_bh(&table->tb6_lock); + + return rt; +}; + +static struct dst_entry *ip6_route_redirect(struct net *net, + const struct flowi6 *fl6, + const struct in6_addr *gateway) +{ + int flags = RT6_LOOKUP_F_HAS_SADDR; + struct ip6rd_flowi rdfl; + + rdfl.fl6 = *fl6; + rdfl.gateway = *gateway; + + return fib6_rule_lookup(net, &rdfl.fl6, + flags, __ip6_route_redirect); +} + void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark) { const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data; @@ -1170,9 +1241,8 @@ void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark) fl6.saddr = iph->saddr; fl6.flowlabel = ip6_flowinfo(iph); - dst = ip6_route_output(net, NULL, &fl6); - if (!dst->error) - rt6_do_redirect(dst, NULL, skb); + dst = ip6_route_redirect(net, &fl6, &ipv6_hdr(skb)->saddr); + rt6_do_redirect(dst, NULL, skb); dst_release(dst); } EXPORT_SYMBOL_GPL(ip6_redirect); @@ -1192,9 +1262,8 @@ void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif, fl6.daddr = msg->dest; fl6.saddr = iph->daddr; - dst = ip6_route_output(net, NULL, &fl6); - if (!dst->error) - rt6_do_redirect(dst, NULL, skb); + dst = ip6_route_redirect(net, &fl6, &iph->saddr); + rt6_do_redirect(dst, NULL, skb); dst_release(dst); } -- cgit v0.10.2 From df95fc44c09f4c33e1d453674487a78f2779ba23 Mon Sep 17 00:00:00 2001 From: Michal Schmidt Date: Wed, 4 Sep 2013 15:03:05 +0200 Subject: qlcnic: use standard NAPI weights Since commit 82dc3c63 ("net: introduce NAPI_POLL_WEIGHT") netif_napi_add() produces an error message if a NAPI poll weight greater than 64 is requested. qlcnic requests the weight as large as 256 for some of its rings, and smaller values for other rings. For instance in qlcnic_82xx_napi_add() I think the intention was to give the tx+rx ring a bigger weight than to rx-only rings, but it's actually doing the opposite. So I'm assuming the weights do not really matter much. Just use the standard NAPI weights for all rings. Signed-off-by: Michal Schmidt Acked-by: Himanshu Madhani Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 4615960..88349b8 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -946,7 +946,6 @@ struct qlcnic_ipaddr { #define QLCNIC_PCI_REG_MSIX_TBL 0x44 #define QLCNIC_MSIX_TBL_PGSIZE 4096 -#define QLCNIC_NETDEV_WEIGHT 128 #define QLCNIC_ADAPTER_UP_MAGIC 777 #define __QLCNIC_FW_ATTACHED 0 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 8d06f88..b7b245b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -1458,7 +1458,7 @@ void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring) int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev) { - int ring, max_sds_rings; + int ring; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; struct qlcnic_host_tx_ring *tx_ring; @@ -1466,25 +1466,22 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter, if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings)) return -ENOMEM; - max_sds_rings = adapter->max_sds_rings; - for (ring = 0; ring < adapter->max_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test && (adapter->max_drv_tx_rings > 1)) { netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll, - QLCNIC_NETDEV_WEIGHT * 2); + NAPI_POLL_WEIGHT); } else { if (ring == (adapter->max_sds_rings - 1)) netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll, - QLCNIC_NETDEV_WEIGHT / - max_sds_rings); + NAPI_POLL_WEIGHT); else netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll, - QLCNIC_NETDEV_WEIGHT * 2); + NAPI_POLL_WEIGHT); } } @@ -1497,7 +1494,7 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter, for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; netif_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll, - QLCNIC_NETDEV_WEIGHT); + NAPI_POLL_WEIGHT); } } @@ -1963,7 +1960,7 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter) int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev) { - int ring, max_sds_rings, temp; + int ring; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_host_tx_ring *tx_ring; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; @@ -1971,25 +1968,22 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter, if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings)) return -ENOMEM; - max_sds_rings = adapter->max_sds_rings; for (ring = 0; ring < adapter->max_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; if (adapter->flags & QLCNIC_MSIX_ENABLED) { - if (!(adapter->flags & QLCNIC_TX_INTR_SHARED)) { + if (!(adapter->flags & QLCNIC_TX_INTR_SHARED)) netif_napi_add(netdev, &sds_ring->napi, qlcnic_83xx_rx_poll, - QLCNIC_NETDEV_WEIGHT * 2); - } else { - temp = QLCNIC_NETDEV_WEIGHT / max_sds_rings; + NAPI_POLL_WEIGHT); + else netif_napi_add(netdev, &sds_ring->napi, qlcnic_83xx_msix_sriov_vf_poll, - temp); - } + NAPI_POLL_WEIGHT); } else { netif_napi_add(netdev, &sds_ring->napi, qlcnic_83xx_poll, - QLCNIC_NETDEV_WEIGHT / max_sds_rings); + NAPI_POLL_WEIGHT); } } @@ -2004,7 +1998,7 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter, tx_ring = &adapter->tx_ring[ring]; netif_napi_add(netdev, &tx_ring->napi, qlcnic_83xx_msix_tx_poll, - QLCNIC_NETDEV_WEIGHT); + 
NAPI_POLL_WEIGHT); } } -- cgit v0.10.2 From 4bfb0513ff203a700f5d17b97b772e8c171549bc Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Thu, 5 Sep 2013 17:53:59 +0800 Subject: tuntap: purge socket error queue on detach Commit eda297729171fe16bf34fe5b0419dfb69060f623 (tun: Support software transmit time stamping) will queue skbs into error queue when tx stamping is enabled. But it forgets to purge the error queue during detach. This patch fixes this. Cc: Richard Cochran Acked-by: Richard Cochran Signed-off-by: Jason Wang Signed-off-by: David S. Miller diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 60a1e93..2dddb1b 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -409,6 +409,12 @@ static struct tun_struct *tun_enable_queue(struct tun_file *tfile) return tun; } +static void tun_queue_purge(struct tun_file *tfile) +{ + skb_queue_purge(&tfile->sk.sk_receive_queue); + skb_queue_purge(&tfile->sk.sk_error_queue); +} + static void __tun_detach(struct tun_file *tfile, bool clean) { struct tun_file *ntfile; @@ -435,7 +441,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean) synchronize_net(); tun_flow_delete_by_queue(tun, tun->numqueues + 1); /* Drop read queue */ - skb_queue_purge(&tfile->sk.sk_receive_queue); + tun_queue_purge(tfile); tun_set_real_num_queues(tun); } else if (tfile->detached && clean) { tun = tun_enable_queue(tfile); @@ -487,12 +493,12 @@ static void tun_detach_all(struct net_device *dev) for (i = 0; i < n; i++) { tfile = rtnl_dereference(tun->tfiles[i]); /* Drop read queue */ - skb_queue_purge(&tfile->sk.sk_receive_queue); + tun_queue_purge(tfile); sock_put(&tfile->sk); } list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) { tun_enable_queue(tfile); - skb_queue_purge(&tfile->sk.sk_receive_queue); + tun_queue_purge(tfile); sock_put(&tfile->sk); } BUG_ON(tun->numdisabled != 0); -- cgit v0.10.2 From 7bf6630523a4fddcc3e37bc37dadbe0cf2362354 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Thu, 5 Sep 2013 17:54:00 +0800 Subject: tuntap: orphan frags before trying to set tx timestamp sock_tx_timestamp() will clear all zerocopy flags of skb which may lead the frags never to be orphaned. This will break guest to guest traffic when zerocopy is enabled. Fix this by orphaning the frags before trying to set tx time stamp. The issue was introduced by commit eda297729171fe16bf34fe5b0419dfb69060f623 (tun: Support software transmit time stamping). Cc: Richard Cochran Cc: Sergei Shtylyov Acked-by: Richard Cochran Signed-off-by: Jason Wang Signed-off-by: David S. Miller diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 2dddb1b..a639de8 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -749,15 +749,17 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) >= dev->tx_queue_len / tun->numqueues) goto drop; + if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) + goto drop; + if (skb->sk) { sock_tx_timestamp(skb->sk, &skb_shinfo(skb)->tx_flags); sw_tx_timestamp(skb); } /* Orphan the skb - required as we might hang on to it - * for indefinite time. */ - if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) - goto drop; + * for indefinite time. + */ skb_orphan(skb); nf_reset(skb); -- cgit v0.10.2 From 27082ee1b92f4d41e78b85fe40f6ab39673fba00 Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Wed, 4 Sep 2013 17:17:15 +0530 Subject: ethernet/arc/arc_emac: Fix huge delays in large file copies Copying large files to an NFS mounted host was taking an absurdly long time. Turns out that TX BD reclaim had a subtle bug. 
Loop starts off from @txbd_dirty cursor and stops when it hits a BD still in use by controller. However when it stops it needs to keep the cursor at that very BD to resume scanning in next iteration. However it was erroneously incrementing the cursor, causing the next scan(s) to fail too, unless the BD chain was completely drained out. [ARCLinux]$ ls -l -sh /disk/log.txt 17976 -rw-r--r-- 1 root root 17.5M Sep /disk/log.txt ========== Before ===================== [ARCLinux]$ time cp /disk/log.txt /mnt/. real 31m 7.95s user 0m 0.00s sys 0m 0.10s ========== After ===================== [ARCLinux]$ time cp /disk/log.txt /mnt/. real 0m 24.33s user 0m 0.00s sys 0m 0.19s Signed-off-by: Vineet Gupta Cc: Alexey Brodkin (commit_signer:3/4=75%) Cc: "David S. Miller" (commit_signer:3/4=75%) Cc: netdev@vger.kernel.org Cc: linux-kernel@vger.kernel.org Cc: arc-linux-dev@synopsys.com Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index 55d79cb..9e16014 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -149,8 +149,6 @@ static void arc_emac_tx_clean(struct net_device *ndev) struct sk_buff *skb = tx_buff->skb; unsigned int info = le32_to_cpu(txbd->info); - *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM; - if ((info & FOR_EMAC) || !txbd->data) break; @@ -180,6 +178,8 @@ static void arc_emac_tx_clean(struct net_device *ndev) txbd->data = 0; txbd->info = 0; + *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM; + if (netif_queue_stopped(ndev)) netif_wake_queue(ndev); } -- cgit v0.10.2 From dfafb73f355940989507c56b9282773132e86a9f Mon Sep 17 00:00:00 2001 From: Jon Mason Date: Wed, 4 Sep 2013 11:26:01 -0700 Subject: icplus: Use netif_running to determine device state Remove the __LINK_STATE_START check to verify the device is running, in favor of netif_running(). netif_running() performs the same check of __LINK_STATE_START, so the code should behave the same. Signed-off-by: Jon Mason Cc: Francois Romieu Cc: Sorbica Shieh Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c index 1fde90b..bdf5023 100644 --- a/drivers/net/ethernet/icplus/ipg.c +++ b/drivers/net/ethernet/icplus/ipg.c @@ -1004,7 +1004,7 @@ static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev) /* Check to see if the NIC has been initialized via nic_open, * before trying to read statistic registers. */ - if (!test_bit(__LINK_STATE_START, &dev->state)) + if (!netif_running(dev)) return &sp->stats; sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK); -- cgit v0.10.2 From 714086029116b6b0a34e67ba1dd2f0d1cf26770c Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Wed, 4 Sep 2013 16:21:18 +0200 Subject: net: mvneta: properly disable HW PHY polling and ensure adjust_link() works MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This commit fixes a long-standing bug that has been reported by many users: on some Armada 370 platforms, only the network interface that has been used in U-Boot to tftp the kernel works properly in Linux. The other network interfaces can see a 'link up', but are unable to transmit data. The reports were generally made on the Armada 370-based Mirabox, but have also been given on the Armada 370-RD board. 
The network MAC in the Armada 370/XP (supported by the mvneta driver in Linux) has a functionality that allows it to continuously poll the PHY and directly update the MAC configuration accordingly (speed, duplex, etc.). The very first versions of the driver submitted for review were using this hardware mechanism, but due to this, the driver was not integrated with the kernel phylib. Following reviews, the driver was changed to use the phylib, and therefore to software-based polling. In software-based polling, Linux regularly talks to the PHY over the MDIO bus, and sees if the link status has changed. If it's the case then the adjust_link() callback of the driver is called to update the MAC configuration accordingly. However, it turns out that the adjust_link() callback was not configuring the hardware in a completely correct way: while it was setting the speed and duplex bits correctly, it wasn't telling the hardware to actually take into account those bits rather than what the hardware-based PHY polling mechanism has concluded. So, in fact the adjust_link() callback was basically a no-op. However, the network happened to be working on the network interfaces used by U-Boot for tftp on Armada 370 platforms because the hardware PHY polling was enabled by the bootloader, and left enabled by Linux. However, the second network interface not used for tftp (or both network interfaces if the kernel is loaded from USB, NAND or SD card) didn't have the hardware PHY polling enabled. This patch fixes this situation by: (1) Making sure that the hardware PHY polling is disabled by clearing the MVNETA_PHY_POLLING_ENABLE bit in the MVNETA_UNIT_CONTROL register in the driver ->probe() function. (2) Making sure that the duplex and speed selections made by the adjust_link() callback are taken into account by clearing the MVNETA_GMAC_AN_SPEED_EN and MVNETA_GMAC_AN_DUPLEX_EN bits in the MVNETA_GMAC_AUTONEG_CONFIG register. This patch has been tested on Armada 370 Mirabox, and now both network interfaces are usable after boot. [ Problem introduced by commit c5aff18 ("net: mvneta: driver for Marvell Armada 370/XP network unit") ] Signed-off-by: Thomas Petazzoni Cc: Willy Tarreau Cc: Jochen De Smet Cc: Peter Sanford Cc: Ethan Tuttle Cc: Chény Yves-Gael Cc: Ryan Press Cc: Simon Guinot Cc: vdonnefort@lacie.com Cc: stable@vger.kernel.org Acked-by: Jason Cooper Tested-by: Vincent Donnefort Tested-by: Yves-Gael Cheny Tested-by: Gregory CLEMENT Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index b017818..90ab292 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -138,7 +138,9 @@ #define MVNETA_GMAC_FORCE_LINK_PASS BIT(1) #define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5) #define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6) +#define MVNETA_GMAC_AN_SPEED_EN BIT(7) #define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12) +#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13) #define MVNETA_MIB_COUNTERS_BASE 0x3080 #define MVNETA_MIB_LATE_COLLISION 0x7c #define MVNETA_DA_FILT_SPEC_MCAST 0x3400 @@ -915,6 +917,13 @@ static void mvneta_defaults_set(struct mvneta_port *pp) /* Assign port SDMA configuration */ mvreg_write(pp, MVNETA_SDMA_CONFIG, val); + /* Disable PHY polling in hardware, since we're using the + * kernel phylib to do this. 
+ */ + val = mvreg_read(pp, MVNETA_UNIT_CONTROL); + val &= ~MVNETA_PHY_POLLING_ENABLE; + mvreg_write(pp, MVNETA_UNIT_CONTROL, val); + mvneta_set_ucast_table(pp, -1); mvneta_set_special_mcast_table(pp, -1); mvneta_set_other_mcast_table(pp, -1); @@ -2307,7 +2316,9 @@ static void mvneta_adjust_link(struct net_device *ndev) val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED | MVNETA_GMAC_CONFIG_GMII_SPEED | - MVNETA_GMAC_CONFIG_FULL_DUPLEX); + MVNETA_GMAC_CONFIG_FULL_DUPLEX | + MVNETA_GMAC_AN_SPEED_EN | + MVNETA_GMAC_AN_DUPLEX_EN); if (phydev->duplex) val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; -- cgit v0.10.2 From 15f594563d85825ec242df6d37e64e7dfe823df8 Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Wed, 4 Sep 2013 16:26:52 +0200 Subject: net: mvneta: implement ->ndo_do_ioctl() to support PHY ioctls This commit implements the ->ndo_do_ioctl() operation so that the PHY-related ioctl() calls can work from userspace, which allows applications like mii-tool or mii-diag to do their job. Signed-off-by: Thomas Petazzoni Tested-by: Gregory CLEMENT Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 90ab292..389a854 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2451,6 +2451,21 @@ static int mvneta_stop(struct net_device *dev) return 0; } +static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct mvneta_port *pp = netdev_priv(dev); + int ret; + + if (!pp->phy_dev) + return -ENOTSUPP; + + ret = phy_mii_ioctl(pp->phy_dev, ifr, cmd); + if (!ret) + mvneta_adjust_link(dev); + + return ret; +} + /* Ethtool methods */ /* Get settings (phy address, speed) for ethtools */ @@ -2569,6 +2584,7 @@ static const struct net_device_ops mvneta_netdev_ops = { .ndo_change_mtu = mvneta_change_mtu, .ndo_tx_timeout = mvneta_tx_timeout, .ndo_get_stats64 = mvneta_get_stats64, + .ndo_do_ioctl = mvneta_ioctl, }; const struct ethtool_ops mvneta_eth_tool_ops = { -- cgit v0.10.2 From f011baf95e9d3e06e63132c9f8f7307def2e6753 Mon Sep 17 00:00:00 2001 From: Pravin B Shelar Date: Wed, 4 Sep 2013 14:12:21 -0700 Subject: vxlan: Fix kernel panic on device delete. On vxlan device creation, if socket creation fails, the vxlan device is not added to the hash table. Therefore we need to check whether the device is in the hash table before we delete it from the hlist. The following patch avoids the crash. net-next already has this fix. ---8<--- BUG: unable to handle kernel NULL pointer dereference at (null) IP: [] vxlan_dellink+0x77/0xf0 [vxlan] PGD 42b2d9067 PUD 42e04c067 PMD 0 Oops: 0002 [#1] SMP Modules linked in: vxlan(-) Hardware name: Dell Inc. 
PowerEdge R620/0KCKR5, BIOS 1.4.8 10/25/2012 task: ffff88042ecf8760 ti: ffff88042f106000 task.ti: ffff88042f106000 RIP: 0010:[] [] vxlan_dellink+0x77/0xf0 [vxlan] RSP: 0018:ffff88042f107e28 EFLAGS: 00010246 RAX: 0000000000000000 RBX: ffff88082af08000 RCX: ffff88083fd80000 RDX: 0000000000000000 RSI: ffff88042f107e58 RDI: ffff88042e12f810 RBP: ffff88042f107e48 R08: ffffffff8166eca0 R09: 0000000000000000 R10: 0000000000000001 R11: 0000000000000000 R12: ffff88082af087c0 R13: ffff88042e12f000 R14: ffff88042f107e58 R15: ffff88042f107e58 FS: 00007f4ed2de7700(0000) GS:ffff88043fc80000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000000000000 CR3: 000000042e076000 CR4: 00000000000407e0 Stack: ffff88082af08000 ffffffff81654848 ffffffffa05fb4e0 ffffffff81654780 ffff88042f107e98 ffffffff813b9c7a ffff88042f107e58 ffff88042f107e58 ffff88042f107e88 ffffffffa05fb4e0 ffffffffa05fb780 ffff88042f107f18 Call Trace: [] __rtnl_link_unregister+0xca/0xd0 [] rtnl_link_unregister+0x19/0x30 [] vxlan_cleanup_module+0x10/0x2f [vxlan] [] SyS_delete_module+0x1cf/0x2c0 [] ? do_page_fault+0x9/0x10 [] system_call_fastpath+0x16/0x1b Code: 4d 85 ed 0f 84 95 00 00 00 4c 8d a7 c0 07 00 00 49 8d bd 10 08 00 00 e8 28 e8 e6 e0 48 8b 83 c0 07 00 00 49 8b 54 24 08 48 85 c0 <48> 89 02 74 04 48 89 50 08 49 b8 00 02 20 00 00 00 ad de 4d 89 RIP [] vxlan_dellink+0x77/0xf0 [vxlan] RSP CR2: 0000000000000000 Signed-off-by: Pravin B Shelar Signed-off-by: David S. Miller diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 767f7af..2c15f6c 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1794,7 +1794,8 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head) struct vxlan_dev *vxlan = netdev_priv(dev); spin_lock(&vn->sock_lock); - hlist_del_rcu(&vxlan->hlist); + if (!hlist_unhashed(&vxlan->hlist)) + hlist_del_rcu(&vxlan->hlist); spin_unlock(&vn->sock_lock); list_del(&vxlan->next); -- cgit v0.10.2 From c0a77ec74f295013d7ba3204dd3ed25fccf83cb4 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Wed, 4 Sep 2013 23:46:58 -0400 Subject: bnx2x: Add missing braces in bnx2x:bnx2x_link_initialize The indentation here implies that the intent was for this to be a multiline if. Introduced a few years ago in commit ec146a6f019923819f5ca381980248b6d154ca1a ("bnx2x: Modify XGXS functions") Signed-off-by: Dave Jones Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 9d64b98..6645684 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -6501,12 +6501,13 @@ static int bnx2x_link_initialize(struct link_params *params, struct bnx2x_phy *phy = ¶ms->phy[INT_PHY]; if (vars->line_speed == SPEED_AUTO_NEG && (CHIP_IS_E1x(bp) || - CHIP_IS_E2(bp))) + CHIP_IS_E2(bp))) { bnx2x_set_parallel_detection(phy, params); if (params->phy[INT_PHY].config_init) params->phy[INT_PHY].config_init(phy, params, vars); + } } /* Init external phy*/ -- cgit v0.10.2 From 0c1db731bfcf3a9fd6c58132134f8b0f423552f0 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Thu, 5 Sep 2013 00:11:19 -0400 Subject: caif: Add missing braces to multiline if in cfctrl_linkup_request The indentation here implies this was meant to be a multi-line if. Introduced several years back in commit c85c2951d4da1236e32f1858db418221e624aba5 ("caif: Handle dev_queue_xmit errors.") Signed-off-by: Dave Jones Signed-off-by: David S. 
Miller diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c index 2bd4b58..0f45522 100644 --- a/net/caif/cfctrl.c +++ b/net/caif/cfctrl.c @@ -293,9 +293,10 @@ int cfctrl_linkup_request(struct cflayer *layer, count = cfctrl_cancel_req(&cfctrl->serv.layer, user_layer); - if (count != 1) + if (count != 1) { pr_err("Could not remove request (%d)", count); return -ENODEV; + } } return 0; } -- cgit v0.10.2 From e2e5c4c07caf810d7849658dca42f598b3938e21 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Thu, 5 Sep 2013 13:43:34 -0400 Subject: tcp: Add missing braces to do_tcp_setsockopt Signed-off-by: Dave Jones Acked-by: Neal Cardwell Signed-off-by: David S. Miller diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index b2f6c74..95544e4 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2454,10 +2454,11 @@ static int do_tcp_setsockopt(struct sock *sk, int level, case TCP_THIN_DUPACK: if (val < 0 || val > 1) err = -EINVAL; - else + else { tp->thin_dupack = val; if (tp->thin_dupack) tcp_disable_early_retrans(tp); + } break; case TCP_REPAIR: -- cgit v0.10.2 From 1a5bbfc3d6b700178b75743a2ba1fd2e58a8f36f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 5 Sep 2013 14:38:03 -0400 Subject: netfilter: Fix build errors with xt_socket.c As reported by Randy Dunlap: ==================== when CONFIG_IPV6=m and CONFIG_NETFILTER_XT_MATCH_SOCKET=y: net/built-in.o: In function `socket_mt6_v1_v2': xt_socket.c:(.text+0x51b55): undefined reference to `udp6_lib_lookup' net/built-in.o: In function `socket_mt_init': xt_socket.c:(.init.text+0x1ef8): undefined reference to `nf_defrag_ipv6_enable' ==================== Like several other modules under net/netfilter/, we have to have a dependency "IPV6 disabled or set compatibly with this module" clause. Reported-by: Randy Dunlap Signed-off-by: David S. Miller diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 62a171a..6e839b6 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -1175,6 +1175,7 @@ config NETFILTER_XT_MATCH_SOCKET depends on NETFILTER_XTABLES depends on NETFILTER_ADVANCED depends on !NF_CONNTRACK || NF_CONNTRACK + depends on (IPV6 || IPV6=n) select NF_DEFRAG_IPV4 select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES help -- cgit v0.10.2 From 0d40f75bdab241868c0eb6f97aef9f8b3a66f7b3 Mon Sep 17 00:00:00 2001 From: Jesse Gross Date: Thu, 5 Sep 2013 12:17:05 -0700 Subject: openvswitch: Fix alignment of struct sw_flow_key. sw_flow_key alignment was declared as " __aligned(__alignof__(long))". However, this breaks on the m68k architecture where long is 32 bit in size but 16 bit aligned by default. This patch instead aligns the structure to the size of a long, ensuring that we can always do comparisons in full long-sized chunks. It also adds an additional build check to catch any reduction in alignment. CC: Andy Zhou Reported-by: Fengguang Wu Reported-by: Geert Uytterhoeven Signed-off-by: Jesse Gross Signed-off-by: David S. Miller diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index ad1aeeb..fb36f85 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -1981,6 +1981,7 @@ nla_put_failure: * Returns zero if successful or a negative error code. 
*/ int ovs_flow_init(void) { + BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long)); BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long)); flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0, diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h index b65f885..212fbf7 100644 --- a/net/openvswitch/flow.h +++ b/net/openvswitch/flow.h @@ -125,7 +125,7 @@ struct sw_flow_key { } nd; } ipv6; }; -} __aligned(__alignof__(long)); +} __aligned(BITS_PER_LONG/8); /* Ensure that we can do comparisons as longs. */ struct sw_flow { struct rcu_head rcu; -- cgit v0.10.2
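
To make the alignment reasoning in the last patch concrete, here is a small standalone userspace sketch (not taken from the kernel tree; struct demo_key, cmp_longs and the assertions are made-up illustrations) of the pattern the openvswitch fix relies on: a key structure whose size and alignment are both multiples of a long, so it can safely be compared one long at a time, mirroring the __aligned(BITS_PER_LONG/8) annotation and the BUILD_BUG_ON() checks added above.

/* Illustrative only: demo_key is a stand-in for sw_flow_key, and
 * cmp_longs mimics comparing flow keys in long-sized chunks. */
#include <stddef.h>
#include <stdint.h>

#define BITS_PER_LONG (8 * sizeof(long))

struct demo_key {
	uint32_t a;
	uint16_t b;
	uint8_t pad[2];
} __attribute__((aligned(BITS_PER_LONG / 8)));

/* Compile-time equivalents of the BUILD_BUG_ON() checks: both the size
 * and the alignment of the key must be multiples of a long, otherwise
 * the long-wise comparison below could read past the object or fault
 * on strict-alignment configurations. */
_Static_assert(sizeof(struct demo_key) % sizeof(long) == 0,
	       "key size is not a multiple of sizeof(long)");
_Static_assert(__alignof__(struct demo_key) % __alignof__(long) == 0,
	       "key alignment is below the alignment of long");

/* Compare two keys one long at a time; valid only because of the
 * size and alignment guarantees asserted above. */
static int cmp_longs(const struct demo_key *x, const struct demo_key *y)
{
	const long *lx = (const long *)x;
	const long *ly = (const long *)y;
	size_t i, n = sizeof(*x) / sizeof(long);

	for (i = 0; i < n; i++)
		if (lx[i] != ly[i])
			return 1;	/* keys differ */
	return 0;			/* keys match */
}

int main(void)
{
	/* Designated initializers zero the padding, so the long-wise
	 * comparison of the two identical keys returns 0. */
	struct demo_key k1 = { .a = 1, .b = 2 };
	struct demo_key k2 = { .a = 1, .b = 2 };

	return cmp_longs(&k1, &k2);
}

The m68k problem described above is exactly what the second assertion would catch: there, __alignof__(long) can be 2 even though sizeof(long) is 4, so aligning to __alignof__(long) alone does not guarantee that long-sized loads over the key stay within the allocation.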