From 765aeed9b058048806b1a0baf21f5d214c894703 Mon Sep 17 00:00:00 2001
From: Ioana Radulescu
Date: Fri, 3 May 2013 15:54:19 +0000
Subject: dpaa_eth: Refactor FQ initialization code

The initialization code for QMan frame queues was becoming unmanageable,
so reorganize it a bit.

At probe time, all FQs are put in a linked list and afterwards are
configured and initialized based on their type.

Change-Id: I69625117fc50a78146599003f6c2089d58c2364f
Signed-off-by: Ioana Radulescu
Signed-off-by: Madalin Bucur
Change-Id: I3d1b28e2c6ad189094115f51cc766b8f0499b88b
Reviewed-on: http://git.am.freescale.net:8181/3071
Reviewed-by: Hamciuc Bogdan-BHAMCIU1
Reviewed-by: Sovaiala Cristian-Constantin-B39531
Reviewed-by: Fleming Andrew-AFLEMING
Tested-by: Fleming Andrew-AFLEMING

diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth-common.h b/drivers/net/ethernet/freescale/dpa/dpaa_eth-common.h
index b3b0fd1..738053c 100644
--- a/drivers/net/ethernet/freescale/dpa/dpaa_eth-common.h
+++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth-common.h
@@ -48,7 +48,8 @@ enum dpa_fq_type {
 	FQ_TYPE_RX_ERROR,	/* Rx Error FQs */
 	FQ_TYPE_RX_PCD,		/* User-defined PCDs */
 	FQ_TYPE_TX,		/* "Real" Tx FQs */
-	FQ_TYPE_TX_CONFIRM,	/* Tx Confirmation FQs (actually Rx FQs) */
+	FQ_TYPE_TX_CONFIRM,	/* Tx default Conf FQ (actually an Rx FQ) */
+	FQ_TYPE_TX_CONF_MQ,	/* Tx conf FQs (one for each Tx FQ) */
 	FQ_TYPE_TX_ERROR,	/* Tx Error FQs (these are actually Rx FQs) */
 #ifdef CONFIG_FSL_DPAA_TX_RECYCLE
 	FQ_TYPE_TX_RECYCLE,	/* Tx FQs for recycleable frames only */
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth.c
index 2d47c8a..d6eacfe 100644
--- a/drivers/net/ethernet/freescale/dpa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth.c
@@ -585,8 +585,7 @@ static struct qman_fq *_dpa_get_tx_conf_queue(const struct dpa_priv_s *priv,
 	return NULL;
 }
 
-static int __must_check __attribute__((nonnull))
-_dpa_fq_alloc(struct list_head *list, struct dpa_fq *dpa_fq)
+static int dpa_fq_init(struct dpa_fq *dpa_fq)
 {
 	int _errno;
 	const struct dpa_priv_s *priv;
@@ -637,7 +636,8 @@ _dpa_fq_alloc(struct list_head *list, struct dpa_fq *dpa_fq)
 	 * place them in the netdev's CGR, along with the Tx FQs.
 	 */
 	if (dpa_fq->fq_type == FQ_TYPE_TX ||
-			dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM) {
+			dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
+			dpa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
 		initfq.we_mask |= QM_INITFQ_WE_CGID;
 		initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
 		initfq.fqd.cgid = priv->cgr_data.cgr.cgrid;
@@ -735,7 +735,6 @@ _dpa_fq_alloc(struct list_head *list, struct dpa_fq *dpa_fq)
 	}
 
 	dpa_fq->fqid = qman_fq_fqid(fq);
-	list_add_tail(&dpa_fq->list, list);
 
 	return 0;
 }
@@ -3384,268 +3383,172 @@ static const struct fqid_cell tx_recycle_fqids[] = {
 };
 #endif
 
-static int
-dpa_fq_probe(struct platform_device *_of_dev, struct list_head *list,
-		struct dpa_fq **defq, struct dpa_fq **errq,
-		struct dpa_fq **fqs, struct dpa_fq **txconfq,
-		struct dpa_fq **txrecycle, int ptype)
+static struct dpa_fq *dpa_fq_alloc(struct device *dev,
+				   const struct fqid_cell *fqids,
+				   struct list_head *list,
+				   enum dpa_fq_type fq_type)
 {
-	struct device *dev = &_of_dev->dev;
-	struct device_node *np = dev->of_node;
-	const struct fqid_cell *fqids;
-	int i, j, lenp;
-	int num_fqids;
+	int i;
 	struct dpa_fq *dpa_fq;
-	int err = 0;
 
-	/* per-core tx confirmation queues */
-	if (txconfq) {
-		fqids = tx_confirm_fqids;
-		dpa_fq = devm_kzalloc(dev, sizeof(*dpa_fq) * fqids[0].count,
-					GFP_KERNEL);
-		if (dpa_fq == NULL) {
-			dev_err(dev, "devm_kzalloc() failed\n");
-			return -ENOMEM;
-		}
-		*txconfq = dpa_fq;
-		for (j = 0; j < fqids[0].count; j++)
-			dpa_fq[j].fq_type = FQ_TYPE_TX_CONFIRM;
-
-		for (j = 0; j < fqids[0].count; j++) {
-			dpa_fq[j].fqid = fqids[0].start ?
-				fqids[0].start + j : 0;
-			_dpa_assign_wq(dpa_fq + j);
-			list_add_tail(&dpa_fq[j].list, list);
-		}
+	dpa_fq = devm_kzalloc(dev, sizeof(*dpa_fq) * fqids->count, GFP_KERNEL);
+	if (dpa_fq == NULL)
+		return NULL;
+
+	for (i = 0; i < fqids->count; i++) {
+		dpa_fq[i].fq_type = fq_type;
+		dpa_fq[i].fqid = fqids->start ? fqids->start + i : 0;
+		_dpa_assign_wq(dpa_fq + i);
+		list_add_tail(&dpa_fq[i].list, list);
 	}
 
-#ifdef CONFIG_FSL_DPAA_TX_RECYCLE
-	/* per-core tx queues for recycleable frames (FManv3 only) */
-	if (txrecycle) {
-		fqids = tx_recycle_fqids;
-		dpa_fq = devm_kzalloc(dev, sizeof(*dpa_fq) * fqids[0].count,
-					GFP_KERNEL);
-		if (dpa_fq == NULL) {
-			dev_err(dev, "devm_kzalloc() failed\n");
-			return -ENOMEM;
-		}
+	return dpa_fq;
+}
 
-		*txrecycle = dpa_fq;
-		for (j = 0; j < fqids[0].count; j++)
-			dpa_fq[j].fq_type = FQ_TYPE_TX_RECYCLE;
+/* Probing of FQs for MACful ports */
+static int dpa_fq_probe_mac(struct device *dev, struct list_head *list,
+			    struct fm_port_fqs *port_fqs, bool is_shared,
+			    enum port_type ptype)
+{
+	const struct fqid_cell *fqids;
+	struct dpa_fq *dpa_fq;
+	struct device_node *np = dev->of_node;
+	int num_ranges;
+	int i, lenp;
 
-		for (j = 0; j < fqids[0].count; j++) {
-			dpa_fq[j].fqid = fqids[0].start ?
-				fqids[0].start + j : 0;
-			_dpa_assign_wq(dpa_fq + j);
-			list_add_tail(&dpa_fq[j].list, list);
-		}
-	}
+	if (ptype == TX && !is_shared) {
+		/* Use per-core tx confirmation queues on private ports */
+		if (!dpa_fq_alloc(dev, tx_confirm_fqids, list,
+				  FQ_TYPE_TX_CONF_MQ))
+			goto fq_alloc_failed;
+
+#ifdef CONFIG_FSL_DPAA_TX_RECYCLE
+		/* per-core tx queues for recycleable frames (FManv3 only) */
+		if (!dpa_fq_alloc(dev, tx_recycle_fqids, list,
+				  FQ_TYPE_TX_RECYCLE))
+			goto fq_alloc_failed;
 #endif
+	}
 
 	fqids = of_get_property(np, fsl_qman_frame_queues[ptype], &lenp);
 	if (fqids == NULL) {
+		/* No dts definition, so use the defaults.
+		 */
 		fqids = default_fqids[ptype];
-		num_fqids = 3;
-	} else
-		num_fqids = lenp / sizeof(*fqids);
-
-	for (i = 0; i < num_fqids; i++) {
-		dpa_fq = devm_kzalloc(dev, sizeof(*dpa_fq) * fqids[i].count,
-					GFP_KERNEL);
-		if (dpa_fq == NULL) {
-			dev_err(dev, "devm_kzalloc() failed\n");
-			return -ENOMEM;
-		}
-
-		/* The first queue is the Error queue */
-		if (i == 0 && errq) {
-			*errq = dpa_fq;
-
-			if (fqids[i].count != 1) {
-				dev_err(dev, "Too many error queues!\n");
-				err = -EINVAL;
-				goto invalid_error_queues;
-			}
-
-			dpa_fq[0].fq_type = (ptype == RX ?
-				FQ_TYPE_RX_ERROR : FQ_TYPE_TX_ERROR);
-		}
-
-		/* The second queue is the the Default queue */
-		if (i == 1 && defq) {
-			*defq = dpa_fq;
-
-			if (fqids[i].count != 1) {
-				dev_err(dev, "Too many default queues!\n");
-				err = -EINVAL;
-				goto invalid_default_queues;
-			}
+		num_ranges = 3;
+	} else {
+		num_ranges = lenp / sizeof(*fqids);
+	}
 
-			dpa_fq[0].fq_type = (ptype == RX ?
-				FQ_TYPE_RX_DEFAULT : FQ_TYPE_TX_CONFIRM);
-		}
+	for (i = 0; i < num_ranges; i++) {
+		switch (i) {
+		case 0:
+			/* The first queue is the error queue */
+			if (fqids[i].count != 1)
+				goto invalid_error_queue;
 
-		/*
-		 * All subsequent queues are gathered together.
-		 * The first 8 will be used by the private linux interface
-		 * if these are TX queues
-		 */
-		if (i == 2 || (!errq && i == 0 && fqs)) {
-			*fqs = dpa_fq;
+			dpa_fq = dpa_fq_alloc(dev, &fqids[i], list,
+					      ptype == RX ?
+						FQ_TYPE_RX_ERROR :
+						FQ_TYPE_TX_ERROR);
+			if (dpa_fq == NULL)
+				goto fq_alloc_failed;
 
-			for (j = 0; j < fqids[i].count; j++)
-				dpa_fq[j].fq_type = (ptype == RX ?
-					FQ_TYPE_RX_PCD : FQ_TYPE_TX);
-		}
+			if (ptype == RX)
+				port_fqs->rx_errq = &dpa_fq[0];
+			else
+				port_fqs->tx_errq = &dpa_fq[0];
+			break;
+		case 1:
+			/* the second queue is the default queue */
+			if (fqids[i].count != 1)
+				goto invalid_default_queue;
+
+			dpa_fq = dpa_fq_alloc(dev, &fqids[i], list,
+					      ptype == RX ?
+						FQ_TYPE_RX_DEFAULT :
+						FQ_TYPE_TX_CONFIRM);
+			if (dpa_fq == NULL)
+				goto fq_alloc_failed;
 
-		for (j = 0; j < fqids[i].count; j++) {
-			dpa_fq[j].fqid = fqids[i].start ?
-				fqids[i].start + j : 0;
-			_dpa_assign_wq(dpa_fq + j);
-			list_add_tail(&dpa_fq[j].list, list);
+			if (ptype == RX)
+				port_fqs->rx_defq = &dpa_fq[0];
+			else
+				port_fqs->tx_defq = &dpa_fq[0];
+			break;
+		default:
+			/* all subsequent queues are either RX PCD or Tx */
+			if (!dpa_fq_alloc(dev, &fqids[i], list, ptype == RX ?
+					  FQ_TYPE_RX_PCD : FQ_TYPE_TX))
+				goto fq_alloc_failed;
+			break;
+		}
 	}
 
-invalid_default_queues:
-invalid_error_queues:
-	return err;
-}
+	return 0;
 
-static void dpa_setup_ingress(struct dpa_priv_s *priv, struct dpa_fq *fq,
-		const struct qman_fq *template)
-{
-	fq->fq_base = *template;
-	fq->net_dev = priv->net_dev;
+fq_alloc_failed:
+	dev_err(dev, "dpa_fq_alloc() failed\n");
+	return -ENOMEM;
 
-	fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
-	fq->channel = priv->channel;
+invalid_default_queue:
+invalid_error_queue:
+	dev_err(dev, "Too many default or error queues\n");
+	return -EINVAL;
 }
 
-static void dpa_setup_egress(struct dpa_priv_s *priv,
-		struct list_head *head, struct dpa_fq *fq,
-		struct fm_port *port)
+/* Probing of FQs for MACless ports */
+static int dpa_fq_probe_macless(struct device *dev, struct list_head *list,
+				enum port_type ptype)
 {
-	struct list_head *ptr = &fq->list;
-	struct dpa_fq *iter;
-	int i = 0;
-
-	while (true) {
-		iter = list_entry(ptr, struct dpa_fq, list);
-		if (priv->shared)
-			iter->fq_base = shared_egress_fq;
-		else
-			iter->fq_base = private_egress_fq;
-
-		iter->net_dev = priv->net_dev;
-		if (port) {
-			iter->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
-			iter->channel = fm_get_tx_port_channel(port);
-		} else
-			iter->flags = QMAN_FQ_FLAG_NO_MODIFY;
-
-		if (list_is_last(ptr, head))
-			break;
+	struct device_node *np = dev->of_node;
+	const struct fqid_cell *fqids;
+	int num_ranges;
+	int i, lenp;
 
-		ptr = ptr->next;
+	fqids = of_get_property(np, fsl_qman_frame_queues[ptype], &lenp);
+	if (fqids == NULL) {
+		dev_err(dev, "Need FQ definition in dts for MACless devices\n");
+		return -EINVAL;
 	}
 
-	/* Allocate frame queues to all available CPUs no matter the number of
-	 * queues specified in device tree.
-	 */
-	for (i = 0; i < DPAA_ETH_TX_QUEUES; i++) {
-		iter = list_entry(ptr, struct dpa_fq, list);
-		priv->egress_fqs[i] = &iter->fq_base;
+	num_ranges = lenp / sizeof(*fqids);
 
-		if (list_is_last(ptr, head))
-			ptr = &fq->list;
+	/* All ranges defined in the device tree are used as Rx/Tx queues */
+	for (i = 0; i < num_ranges; i++) {
+		if (!dpa_fq_alloc(dev, &fqids[i], list, ptype == RX ?
+				  FQ_TYPE_RX_PCD : FQ_TYPE_TX)) {
+			dev_err(dev, "_dpa_fq_alloc() failed\n");
+			return -ENOMEM;
+		}
 	}
-}
-
-#ifdef CONFIG_FSL_DPAA_TX_RECYCLE
-static void dpa_setup_recycle_queues(struct dpa_priv_s *priv, struct dpa_fq *fq,
-		struct fm_port *port)
-{
-	int i = 0;
-	struct list_head *ptr = &fq->list;
-
-	for (i = 0; i < DPAA_ETH_TX_QUEUES; i++) {
-		struct dpa_fq *iter = list_entry(ptr, struct dpa_fq, list);
-
-		iter->fq_base = private_egress_fq;
-		iter->net_dev = priv->net_dev;
-
-		priv->recycle_fqs[i] = &iter->fq_base;
-		iter->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
-		iter->channel = fm_get_tx_port_channel(port);
-
-		ptr = ptr->next;
-	}
+	return 0;
 }
-#endif
 
-static void dpa_setup_conf_queues(struct dpa_priv_s *priv, struct dpa_fq *fq)
+static inline void dpa_setup_ingress(const struct dpa_priv_s *priv,
+				     struct dpa_fq *fq,
+				     const struct qman_fq *template)
 {
-	struct list_head *ptr = &fq->list;
-	int i;
-
-	/*
-	 * Configure the queues to be core affine.
-	 * The implicit assumption here is that each cpu has its own Tx queue
-	 */
-	for (i = 0; i < DPAA_ETH_TX_QUEUES; i++) {
-		struct dpa_fq *iter = list_entry(ptr, struct dpa_fq, list);
-
-		dpa_setup_ingress(priv, iter, &tx_private_defq);
-		/* Leave the confirmation queue in the default pool channel */
-		priv->conf_fqs[i] = &iter->fq_base;
+	fq->fq_base = *template;
+	fq->net_dev = priv->net_dev;
 
-		ptr = ptr->next;
-	}
+	fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
+	fq->channel = priv->channel;
 }
 
-static void dpa_setup_ingress_queues(struct dpa_priv_s *priv,
-		struct list_head *head, struct dpa_fq *fq)
+static inline void dpa_setup_egress(const struct dpa_priv_s *priv,
+				    struct dpa_fq *fq,
+				    struct fm_port *port,
+				    const struct qman_fq *template)
 {
-	struct list_head *ptr = &fq->list;
-	u32 fqid;
-	int portals[NR_CPUS];
-	int i, cpu, num_portals = 0;
-	const cpumask_t *affine_cpus = qman_affine_cpus();
-
-	for_each_cpu(cpu, affine_cpus)
-		portals[num_portals++] = qman_affine_channel(cpu);
-	if (num_portals == 0) {
-		dev_err(fq->net_dev->dev.parent,
-			"No Qman software (affine) channels found");
-		return;
-	}
-
-	i = 0;
-	fqid = 0;
-	if (priv->mac_dev)
-		fqid = (priv->mac_dev->res->start & 0x1fffff) >> 6;
-
-	while (true) {
-		struct dpa_fq *iter = list_entry(ptr, struct dpa_fq, list);
-
-		if (priv->shared)
-			dpa_setup_ingress(priv, iter, &rx_shared_fq);
-		else
-			dpa_setup_ingress(priv, iter, &rx_private_defq);
-
-		if (!iter->fqid)
-			iter->fqid = fqid++;
-
-		/* Assign the queues to a channel in a round-robin fashion */
-		iter->channel = portals[i];
-		i = (i + 1) % num_portals;
-
-		if (list_is_last(ptr, head))
-			break;
+	fq->fq_base = *template;
+	fq->net_dev = priv->net_dev;
 
-		ptr = ptr->next;
+	if (port) {
+		fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
+		fq->channel = fm_get_tx_port_channel(port);
+	} else {
+		fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
 	}
 }
@@ -3684,58 +3587,106 @@ dpaa_eth_init_rx_port(struct fm_port *port, struct dpa_bp *bp, size_t count,
 			buf_layout, frag_enabled);
 }
 
-static void dpa_rx_fq_init(struct dpa_priv_s *priv, struct list_head *head,
-		struct dpa_fq *defq, struct dpa_fq *errq,
-		struct dpa_fq *fqs)
-{
-	if (fqs)
-		dpa_setup_ingress_queues(priv, head, fqs);
-
-	/* Only real devices need default/error queues set up */
-	if (!priv->mac_dev)
-		return;
-
-	if (defq->fqid == 0 && netif_msg_probe(priv))
-		pr_info("Using dynamic RX QM frame queues\n");
-
-	if (priv->shared) {
-		dpa_setup_ingress(priv, defq, &rx_shared_fq);
-		dpa_setup_ingress(priv, errq, &rx_shared_fq);
-	} else {
-		dpa_setup_ingress(priv, defq, &rx_private_defq);
-		dpa_setup_ingress(priv, errq, &rx_private_errq);
-	}
-}
-
-static void dpa_tx_fq_init(struct dpa_priv_s *priv, struct list_head *head,
-		struct dpa_fq *defq, struct dpa_fq *errq,
-		struct dpa_fq *fqs, struct dpa_fq *confqs,
-		struct dpa_fq *recyclefqs, struct fm_port *port)
+static void dpa_fq_setup(struct dpa_priv_s *priv)
 {
-	if (fqs)
-		dpa_setup_egress(priv, head, fqs, port);
-
-	/* Only real devices need default/error queues set up */
-	if (!priv->mac_dev)
-		return;
-
-	if (defq->fqid == 0 && netif_msg_probe(priv))
-		pr_info("Using dynamic TX QM frame queues\n");
+	struct dpa_fq *fq;
+	int portals[NR_CPUS];
+	int cpu, portal_cnt = 0, num_portals = 0;
+	uint32_t pcd_fqid;
+	const cpumask_t *affine_cpus = qman_affine_cpus();
+	int egress_cnt = 0, conf_cnt = 0;
+#ifdef CONFIG_FSL_DPAA_TX_RECYCLE
+	int recycle_cnt = 0;
+#endif
+	struct fm_port *tx_port;
 
-	/* The shared driver doesn't use tx confirmation */
-	if (priv->shared) {
-		dpa_setup_ingress(priv, defq,
-				&tx_shared_defq);
-		dpa_setup_ingress(priv, errq, &tx_shared_errq);
-	} else {
-		dpa_setup_ingress(priv, defq, &tx_private_defq);
-		dpa_setup_ingress(priv, errq, &tx_private_errq);
-		if (confqs)
-			dpa_setup_conf_queues(priv, confqs);
+	/* Prepare for PCD FQs init */
+	for_each_cpu(cpu, affine_cpus)
+		portals[num_portals++] = qman_affine_channel(cpu);
+	if (num_portals == 0)
+		dev_err(priv->net_dev->dev.parent,
+			"No Qman software (affine) channels found");
+
+	pcd_fqid = (priv->mac_dev) ?
+		DPAA_ETH_PCD_FQ_BASE(priv->mac_dev->res->start) : 0;
+
+	/* Get the Tx Fman port (needed for egress fqs) */
+	tx_port = (priv->mac_dev) ? priv->mac_dev->port_dev[TX] : NULL;
+
+	/* Initialize each FQ in the list */
+	list_for_each_entry(fq, &priv->dpa_fq_list, list) {
+		switch (fq->fq_type) {
+		case FQ_TYPE_RX_DEFAULT:
+			BUG_ON(!priv->mac_dev);
+			dpa_setup_ingress(priv, fq, (priv->shared ?
+				&rx_shared_fq : &rx_private_defq));
+			break;
+		case FQ_TYPE_RX_ERROR:
+			BUG_ON(!priv->mac_dev);
+			dpa_setup_ingress(priv, fq, (priv->shared ?
+				&rx_shared_fq : &rx_private_errq));
+			break;
+		case FQ_TYPE_RX_PCD:
+			/* For MACless we can't have dynamic Rx queues */
+			BUG_ON(!priv->mac_dev && !fq->fqid);
+			dpa_setup_ingress(priv, fq, (priv->shared ?
+				&rx_shared_fq : &rx_private_defq));
+			if (!fq->fqid)
+				fq->fqid = pcd_fqid++;
+			fq->channel = portals[portal_cnt];
+			portal_cnt = (portal_cnt + 1) % num_portals;
+			break;
+		case FQ_TYPE_TX:
+			dpa_setup_egress(priv, fq, tx_port, (priv->shared ?
+				&shared_egress_fq : &private_egress_fq));
+			/* If we have more Tx queues than the number of cores,
+			 * just ignore the extra ones.
+			 */
+			if (egress_cnt < DPAA_ETH_TX_QUEUES)
+				priv->egress_fqs[egress_cnt++] = &fq->fq_base;
+			break;
+		case FQ_TYPE_TX_CONFIRM:
+			BUG_ON(!priv->mac_dev);
+			dpa_setup_ingress(priv, fq, (priv->shared ?
+				&tx_shared_defq : &tx_private_defq));
+			break;
+		case FQ_TYPE_TX_CONF_MQ:
+			BUG_ON(!priv->mac_dev);
+			BUG_ON(priv->shared);
+			dpa_setup_ingress(priv, fq, &tx_private_defq);
+			priv->conf_fqs[conf_cnt++] = &fq->fq_base;
+			break;
+		case FQ_TYPE_TX_ERROR:
+			BUG_ON(!priv->mac_dev);
+			dpa_setup_ingress(priv, fq, (priv->shared ?
+				&tx_shared_errq : &tx_private_errq));
+			break;
 #ifdef CONFIG_FSL_DPAA_TX_RECYCLE
-		if (recyclefqs)
-			dpa_setup_recycle_queues(priv, recyclefqs, port);
+		case FQ_TYPE_TX_RECYCLE:
+			BUG_ON(!priv->mac_dev);
+			BUG_ON(priv->shared);
+			dpa_setup_egress(priv, fq, tx_port, &private_egress_fq);
+			priv->recycle_fqs[recycle_cnt++] = &fq->fq_base;
+			break;
 #endif
+		default:
+			dev_warn(priv->net_dev->dev.parent,
+				 "Unknown FQ type detected!\n");
+			break;
+		}
+	}
+	/* The number of Tx queues may be smaller than the number of cores, if
+	 * the Tx queue range is specified in the device tree instead of being
+	 * dynamically allocated.
+	 * Make sure all CPUs receive a corresponding Tx queue.
+	 */
+	while (egress_cnt < DPAA_ETH_TX_QUEUES) {
+		list_for_each_entry(fq, &priv->dpa_fq_list, list) {
+			priv->egress_fqs[egress_cnt++] = &fq->fq_base;
+			if (egress_cnt == DPAA_ETH_TX_QUEUES)
+				break;
+		}
 	}
 }
@@ -3935,18 +3886,13 @@ dpaa_eth_probe(struct platform_device *_of_dev)
 	struct dpa_fq *dpa_fq, *tmp;
 	struct list_head rxfqlist;
 	struct list_head txfqlist;
+	struct list_head proxy_fq_list;
+	struct list_head *fq_list;
 	size_t count;
 	struct net_device *net_dev = NULL;
 	struct dpa_priv_s *priv = NULL;
 	struct dpa_percpu_priv_s *percpu_priv;
-	struct dpa_fq *rxdefault = NULL;
-	struct dpa_fq *txdefault = NULL;
-	struct dpa_fq *rxerror = NULL;
-	struct dpa_fq *txerror = NULL;
-	struct dpa_fq *rxextra = NULL;
-	struct dpa_fq *txfqs = NULL;
-	struct dpa_fq *txconf = NULL;
-	struct dpa_fq *txrecycle = NULL;
+	struct fm_port_fqs port_fqs;
 	struct fm_port *rxport = NULL;
 	struct fm_port *txport = NULL;
 	struct dpa_buffer_layout_s *buf_layout = NULL;
@@ -4036,29 +3982,26 @@ dpaa_eth_probe(struct platform_device *_of_dev)
 	INIT_LIST_HEAD(&rxfqlist);
 	INIT_LIST_HEAD(&txfqlist);
 
+	if (net_dev) {
+		INIT_LIST_HEAD(&priv->dpa_fq_list);
+		fq_list = &priv->dpa_fq_list;
+	} else {
+		INIT_LIST_HEAD(&proxy_fq_list);
+		fq_list = &proxy_fq_list;
+	}
+
+	memset(&port_fqs, 0, sizeof(port_fqs));
 	if (rxport)
-		err = dpa_fq_probe(_of_dev, &rxfqlist, &rxdefault, &rxerror,
-				&rxextra, NULL, NULL, RX);
+		err = dpa_fq_probe_mac(dev, fq_list, &port_fqs, is_shared, RX);
 	else
-		err = dpa_fq_probe(_of_dev, &rxfqlist, NULL, NULL,
-				&rxextra, NULL, NULL, RX);
-
+		err = dpa_fq_probe_macless(dev, fq_list, RX);
 	if (err < 0)
 		goto rx_fq_probe_failed;
 
 	if (txport)
-#ifdef CONFIG_FSL_DPAA_TX_RECYCLE
-		err = dpa_fq_probe(_of_dev, &txfqlist, &txdefault, &txerror,
-				&txfqs, (is_shared ? NULL : &txconf),
-				(is_shared ? NULL : &txrecycle), TX);
-#else
-		err = dpa_fq_probe(_of_dev, &txfqlist, &txdefault, &txerror,
-				&txfqs, (is_shared ?
-				NULL : &txconf), NULL, TX);
-#endif
+		err = dpa_fq_probe_mac(dev, fq_list, &port_fqs, is_shared, TX);
 	else
-		err = dpa_fq_probe(_of_dev, &txfqlist, NULL, NULL, &txfqs,
-				NULL, NULL, TX);
-
+		err = dpa_fq_probe_macless(dev, fq_list, TX);
 	if (err < 0)
 		goto tx_fq_probe_failed;
 
@@ -4101,9 +4044,7 @@ dpaa_eth_probe(struct platform_device *_of_dev)
 		goto add_channel_failed;
 	}
 
-	dpa_rx_fq_init(priv, &rxfqlist, rxdefault, rxerror, rxextra);
-	dpa_tx_fq_init(priv, &txfqlist, txdefault, txerror, txfqs,
-			txconf, txrecycle, txport);
+	dpa_fq_setup(priv);
 
 	/*
 	 * Create a congestion group for this netdev, with
@@ -4121,16 +4062,8 @@ dpaa_eth_probe(struct platform_device *_of_dev)
 	}
 
 	/* Add the FQs to the interface, and make them active */
-	INIT_LIST_HEAD(&priv->dpa_fq_list);
-
-	list_for_each_entry_safe(dpa_fq, tmp, &rxfqlist, list) {
-		err = _dpa_fq_alloc(&priv->dpa_fq_list, dpa_fq);
-		if (err < 0)
-			goto fq_alloc_failed;
-	}
-
-	list_for_each_entry_safe(dpa_fq, tmp, &txfqlist, list) {
-		err = _dpa_fq_alloc(&priv->dpa_fq_list, dpa_fq);
+	list_for_each_entry_safe(dpa_fq, tmp, fq_list, list) {
+		err = dpa_fq_init(dpa_fq);
 		if (err < 0)
 			goto fq_alloc_failed;
 	}
@@ -4148,10 +4081,10 @@ dpaa_eth_probe(struct platform_device *_of_dev)
 	if (mac_dev) {
 		struct fm_port_pcd_param rx_port_pcd_param;
 
-		dpaa_eth_init_tx_port(txport, txerror, txdefault,
-				&buf_layout[TX]);
-		dpaa_eth_init_rx_port(rxport, dpa_bp, count, rxerror,
-				rxdefault, &buf_layout[RX]);
+		dpaa_eth_init_tx_port(txport, port_fqs.tx_errq,
+				port_fqs.tx_defq, &buf_layout[TX]);
+		dpaa_eth_init_rx_port(rxport, dpa_bp, count, port_fqs.rx_errq,
+				port_fqs.rx_defq, &buf_layout[RX]);
 
 		rx_port_pcd_param.cba = dpa_alloc_pcd_fqids;
 		rx_port_pcd_param.cbf = dpa_free_pcd_fqids;
@@ -4164,11 +4097,13 @@ dpaa_eth_probe(struct platform_device *_of_dev)
 	 * memory freed
 	 */
 	if (!net_dev) {
-		devm_kfree(&_of_dev->dev, dpa_bp);
-		devm_kfree(&_of_dev->dev, rxdefault);
-		devm_kfree(&_of_dev->dev, rxerror);
-		devm_kfree(&_of_dev->dev, txdefault);
-		devm_kfree(&_of_dev->dev, txerror);
+		devm_kfree(dev, dpa_bp);
+
+		/* Free FQ structures */
+		devm_kfree(dev, port_fqs.rx_defq);
+		devm_kfree(dev, port_fqs.rx_errq);
+		devm_kfree(dev, port_fqs.tx_defq);
+		devm_kfree(dev, port_fqs.tx_errq);
 
 		if (mac_dev)
 			for_each_port_device(i, mac_dev->port_dev)
@@ -4259,6 +4194,11 @@ static int __cold dpa_remove(struct platform_device *of_dev)
 
 	dev = &of_dev->dev;
 	net_dev = dev_get_drvdata(dev);
+
+	/* Nothing to do here for proxy ports */
+	if (!net_dev)
+		return 0;
+
 	priv = netdev_priv(net_dev);
 
 	dpaa_eth_sysfs_remove(dev);
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpa/dpaa_eth.h
index 939bcb1..9be51ce 100644
--- a/drivers/net/ethernet/freescale/dpa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth.h
@@ -101,6 +101,9 @@
 #define FMAN_PCD_TESTS_MAX_NUM_RANGES	20
 #endif
 
+#define DPAA_ETH_PCD_FQ_BASE(device_addr) \
+	(((device_addr) & 0x1fffff) >> 6)
+
 /* return codes for the dpaa-eth hooks */
 enum dpaa_eth_hook_result {
 	/* fd/skb was retained by the hook.
@@ -394,6 +397,13 @@ struct dpa_priv_s {
 	u8 macless_idx;
 };
 
+struct fm_port_fqs {
+	struct dpa_fq *tx_defq;
+	struct dpa_fq *tx_errq;
+	struct dpa_fq *rx_defq;
+	struct dpa_fq *rx_errq;
+};
+
 extern const struct ethtool_ops dpa_ethtool_ops;
 
 void __attribute__((nonnull))
@@ -590,6 +600,7 @@ static inline void _dpa_assign_wq(struct dpa_fq *fq)
 {
 	switch (fq->fq_type) {
 	case FQ_TYPE_TX_CONFIRM:
+	case FQ_TYPE_TX_CONF_MQ:
 		fq->wq = 1;
 		break;
 	case FQ_TYPE_RX_DEFAULT:
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_sysfs.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_sysfs.c
index 7b7c103..63bb153 100644
--- a/drivers/net/ethernet/freescale/dpa/dpaa_eth_sysfs.c
+++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_sysfs.c
@@ -95,7 +95,10 @@ static ssize_t dpaa_eth_show_fqids(struct device *dev,
 		str = "Rx PCD";
 		break;
 	case FQ_TYPE_TX_CONFIRM:
-		str = "Tx confirmation";
+		str = "Tx default confirmation";
+		break;
+	case FQ_TYPE_TX_CONF_MQ:
+		str = "Tx confirmation (mq)";
 		break;
 	case FQ_TYPE_TX_ERROR:
 		str = "Tx error";
--
cgit v0.10.2
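
The following self-contained sketch is an illustration only, not code from the driver: it uses made-up names and plain C to show the pattern the commit message describes, in which frame queues are allocated and chained onto a single list at probe time, and a later pass walks that list and configures each queue according to its type. This is roughly the division of labor between dpa_fq_alloc(), dpa_fq_setup() and dpa_fq_init() in the patch; error unwinding and freeing are omitted for brevity.

/* Illustrative sketch only (not from the patch): list-based FQ setup. */
#include <stdio.h>
#include <stdlib.h>

enum fq_type { FQ_RX_DEFAULT, FQ_RX_ERROR, FQ_TX, FQ_TX_CONF };

struct fq {
	enum fq_type type;
	unsigned int fqid;	/* 0 means "let the setup pass assign one" */
	struct fq *next;	/* simple singly linked list, appended at tail */
};

/* "Probe" step: allocate a range of FQ descriptors and append them to the list. */
static struct fq *fq_alloc(struct fq **head, enum fq_type type,
			   unsigned int start, int count)
{
	struct fq *fqs = calloc(count, sizeof(*fqs));
	struct fq **tail = head;
	int i;

	if (!fqs)
		return NULL;
	while (*tail)			/* find the current tail of the list */
		tail = &(*tail)->next;
	for (i = 0; i < count; i++) {
		fqs[i].type = type;
		fqs[i].fqid = start ? start + i : 0;
		*tail = &fqs[i];
		tail = &fqs[i].next;
	}
	return fqs;
}

/* "Setup" step: walk the list once and configure each FQ based on its type. */
static void fq_setup(struct fq *head)
{
	static const char *names[] = { "Rx default", "Rx error", "Tx", "Tx conf" };
	unsigned int dyn_fqid = 0x2000;	/* pretend base for dynamically assigned ids */
	struct fq *fq;

	for (fq = head; fq; fq = fq->next) {
		if (!fq->fqid)
			fq->fqid = dyn_fqid++;
		printf("configured %-10s FQ, fqid 0x%x\n", names[fq->type], fq->fqid);
	}
}

int main(void)
{
	struct fq *list = NULL;

	fq_alloc(&list, FQ_RX_ERROR, 0x100, 1);
	fq_alloc(&list, FQ_RX_DEFAULT, 0x101, 1);
	fq_alloc(&list, FQ_TX, 0, 4);		/* fqids assigned in fq_setup() */
	fq_alloc(&list, FQ_TX_CONF, 0, 4);
	fq_setup(list);
	return 0;
}

Keeping the per-type policy in a single pass over one list, as the patch does in dpa_fq_setup(), means a new queue type only needs an enum value and one more case in that switch, rather than another ad-hoc setup helper.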