diff options
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/staging/fsl_qbman/bman_config.c |  8
-rw-r--r-- drivers/staging/fsl_qbman/bman_driver.c |  7
-rw-r--r-- drivers/staging/fsl_qbman/bman_high.c   |  4
-rw-r--r-- drivers/staging/fsl_qbman/fsl_usdpaa.c  |  6
-rw-r--r-- drivers/staging/fsl_qbman/qman_config.c | 18
-rw-r--r-- drivers/staging/fsl_qbman/qman_driver.c |  6
-rw-r--r-- drivers/staging/fsl_qbman/qman_high.c   | 18
7 files changed, 62 insertions(+), 5 deletions(-)
diff --git a/drivers/staging/fsl_qbman/bman_config.c b/drivers/staging/fsl_qbman/bman_config.c index 07f6e2f..136e22d 100644 --- a/drivers/staging/fsl_qbman/bman_config.c +++ b/drivers/staging/fsl_qbman/bman_config.c @@ -294,6 +294,8 @@ static __init int parse_mem_property(struct device_node *node, const char *name, } else if (zero) { /* map as cacheable, non-guarded */ void __iomem *tmpp = ioremap_prot(*addr, *sz, 0); + if (!tmpp) + return -ENOMEM; memset_io(tmpp, 0, *sz); vaddr = (unsigned long)tmpp; flush_dcache_range(vaddr, vaddr + *sz); @@ -506,6 +508,8 @@ u32 bm_pool_free_buffers(u32 bpid) #ifdef CONFIG_SYSFS #define DRV_NAME "fsl-bman" +#define SBEC_MAX_ID 1 +#define SBEC_MIN_ID 0 static ssize_t show_fbpr_fpc(struct device *dev, struct device_attribute *dev_attr, char *buf) @@ -519,7 +523,7 @@ static ssize_t show_pool_count(struct device *dev, u32 data; int i; - if (!sscanf(dev_attr->attr.name, "%d", &i)) + if (!sscanf(dev_attr->attr.name, "%d", &i) || (i >= bman_pool_max)) return -EINVAL; data = bm_in(POOL_CONTENT(i)); return snprintf(buf, PAGE_SIZE, "%d\n", data); @@ -538,6 +542,8 @@ static ssize_t show_sbec(struct device *dev, if (!sscanf(dev_attr->attr.name, "sbec_%d", &i)) return -EINVAL; + if (i < SBEC_MIN_ID || i > SBEC_MAX_ID) + return -EINVAL; return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(SBEC(i))); }; diff --git a/drivers/staging/fsl_qbman/bman_driver.c b/drivers/staging/fsl_qbman/bman_driver.c index 2eb590f..980a7d3 100644 --- a/drivers/staging/fsl_qbman/bman_driver.c +++ b/drivers/staging/fsl_qbman/bman_driver.c @@ -108,6 +108,7 @@ static struct bm_portal_config * __init parse_pcfg(struct device_node *node) struct bm_portal_config *pcfg; const u32 *index; int irq, ret; + resource_size_t len; pcfg = kmalloc(sizeof(*pcfg), GFP_KERNEL); if (!pcfg) { @@ -176,9 +177,13 @@ static struct bm_portal_config * __init parse_pcfg(struct device_node *node) pcfg->public_cfg.index = *index; bman_depletion_fill(&pcfg->public_cfg.mask); + len = 
resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]); + if (len != (unsigned long)len) + goto err; + pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_prot( pcfg->addr_phys[DPA_PORTAL_CE].start, - resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]), + (unsigned long)len, 0); pcfg->addr_virt[DPA_PORTAL_CI] = ioremap_prot( pcfg->addr_phys[DPA_PORTAL_CI].start, diff --git a/drivers/staging/fsl_qbman/bman_high.c b/drivers/staging/fsl_qbman/bman_high.c index cc25de4..963e9d6 100644 --- a/drivers/staging/fsl_qbman/bman_high.c +++ b/drivers/staging/fsl_qbman/bman_high.c @@ -820,6 +820,8 @@ static noinline struct bm_rcr_entry *wait_rel_start(struct bman_portal **p, pool = NULL; #endif if (flags & BMAN_RELEASE_FLAG_WAIT_INT) + /* NB: return NULL if signal occurs before completion. Signal + * can occur during return. Caller must check for signal */ wait_event_interruptible(affine_queue, (rcr = __wait_rel_start(p, pool, irqflags, flags))); else @@ -883,6 +885,8 @@ static inline int __bman_release(struct bman_pool *pool, if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) && (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) { if (flags & BMAN_RELEASE_FLAG_WAIT_INT) + /* NB: return success even if signal occurs before + * condition is true. 
pvb_commit guarantees success */ wait_event_interruptible(affine_queue, (p->rcri_owned != pool)); else diff --git a/drivers/staging/fsl_qbman/fsl_usdpaa.c b/drivers/staging/fsl_qbman/fsl_usdpaa.c index 8cfdabe..dfef598 100644 --- a/drivers/staging/fsl_qbman/fsl_usdpaa.c +++ b/drivers/staging/fsl_qbman/fsl_usdpaa.c @@ -1251,9 +1251,13 @@ map_match: static int portal_mmap(struct file *fp, struct resource *res, void **ptr) { unsigned long longret = 0, populate; + resource_size_t len; down_write(&current->mm->mmap_sem); - longret = do_mmap_pgoff(fp, PAGE_SIZE, resource_size(res), + len = resource_size(res); + if (len != (unsigned long)len) + return -EINVAL; + longret = do_mmap_pgoff(fp, PAGE_SIZE, (unsigned long)len, PROT_READ | PROT_WRITE, MAP_SHARED, res->start >> PAGE_SHIFT, &populate); up_write(&current->mm->mmap_sem); diff --git a/drivers/staging/fsl_qbman/qman_config.c b/drivers/staging/fsl_qbman/qman_config.c index 5fc1a2d..6a6225e 100644 --- a/drivers/staging/fsl_qbman/qman_config.c +++ b/drivers/staging/fsl_qbman/qman_config.c @@ -474,6 +474,8 @@ static __init int parse_mem_property(struct device_node *node, const char *name, } else if (zero) { /* map as cacheable, non-guarded */ void __iomem *tmpp = ioremap_prot(*addr, *sz, 0); + if (!tmpp) + return -ENOMEM; memset_io(tmpp, 0, *sz); vaddr = (unsigned long)tmpp; flush_dcache_range(vaddr, vaddr + *sz); @@ -490,6 +492,7 @@ static __init int parse_mem_property(struct device_node *node, const char *name, static int __init fsl_qman_init(struct device_node *node) { struct resource res; + resource_size_t len; u32 __iomem *regs; const char *s; int ret, standby = 0; @@ -512,7 +515,10 @@ static int __init fsl_qman_init(struct device_node *node) BUG_ON(ret); } /* Global configuration */ - regs = ioremap(res.start, res.end - res.start + 1); + len = resource_size(&res); + if (len != (unsigned long)len) + return -EINVAL; + regs = ioremap(res.start, (unsigned long)len); qm = qm_create(regs); qm_node = node; qm_get_version(qm, 
&major, &minor, &cfg); @@ -872,6 +878,8 @@ EXPORT_SYMBOL(qman_ceetm_get_xsfdr); #ifdef CONFIG_SYSFS #define DRV_NAME "fsl-qman" +#define DCP_MAX_ID 3 +#define DCP_MIN_ID 0 static ssize_t show_pfdr_fpc(struct device *dev, struct device_attribute *dev_attr, char *buf) @@ -887,6 +895,8 @@ static ssize_t show_dlm_avg(struct device *dev, if (!sscanf(dev_attr->attr.name, "dcp%d_dlm_avg", &i)) return -EINVAL; + if (i < DCP_MIN_ID || i > DCP_MAX_ID) + return -EINVAL; data = qm_in(DCP_DLM_AVG(i)); return snprintf(buf, PAGE_SIZE, "%d.%08d\n", data>>8, (data & 0x000000ff)*390625); @@ -900,6 +910,8 @@ static ssize_t set_dlm_avg(struct device *dev, if (!sscanf(dev_attr->attr.name, "dcp%d_dlm_avg", &i)) return -EINVAL; + if (i < DCP_MIN_ID || i > DCP_MAX_ID) + return -EINVAL; if (kstrtoul(buf, 0, &val)) { dev_dbg(dev, "invalid input %s\n", buf); return -EINVAL; @@ -966,6 +978,8 @@ static ssize_t show_err_isr(struct device *dev, return snprintf(buf, PAGE_SIZE, "0x%08x\n", qm_in(ERR_ISR)); }; +#define SBEC_MAX_ID 14 +#define SBEC_MIN_ID 0 static ssize_t show_sbec(struct device *dev, struct device_attribute *dev_attr, char *buf) @@ -974,6 +988,8 @@ static ssize_t show_sbec(struct device *dev, if (!sscanf(dev_attr->attr.name, "sbec_%d", &i)) return -EINVAL; + if (i < SBEC_MIN_ID || i > SBEC_MAX_ID) + return -EINVAL; return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(SBEC(i))); }; diff --git a/drivers/staging/fsl_qbman/qman_driver.c b/drivers/staging/fsl_qbman/qman_driver.c index 983f99a..977d224 100644 --- a/drivers/staging/fsl_qbman/qman_driver.c +++ b/drivers/staging/fsl_qbman/qman_driver.c @@ -375,6 +375,7 @@ static struct qm_portal_config * __init parse_pcfg(struct device_node *node) struct qm_portal_config *pcfg; const u32 *index, *channel; int irq, ret; + resource_size_t len; pcfg = kmalloc(sizeof(*pcfg), GFP_KERNEL); if (!pcfg) { @@ -443,9 +444,12 @@ static struct qm_portal_config * __init parse_pcfg(struct device_node *node) qman_liodn_fixup(pcfg->public_cfg.channel); #endif + 
len = resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]); + if (len != (unsigned long)len) + goto err; pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_prot( pcfg->addr_phys[DPA_PORTAL_CE].start, - resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]), + (unsigned long)len, 0); pcfg->addr_virt[DPA_PORTAL_CI] = ioremap_prot( pcfg->addr_phys[DPA_PORTAL_CI].start, diff --git a/drivers/staging/fsl_qbman/qman_high.c b/drivers/staging/fsl_qbman/qman_high.c index c3a341c..aed5b26 100644 --- a/drivers/staging/fsl_qbman/qman_high.c +++ b/drivers/staging/fsl_qbman/qman_high.c @@ -2245,6 +2245,8 @@ static noinline struct qm_eqcr_entry *wait_eq_start(struct qman_portal **p, { struct qm_eqcr_entry *eq; if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) + /* NB: return NULL if signal occurs before completion. Signal + * can occur during return. Caller must check for signal */ wait_event_interruptible(affine_queue, (eq = __wait_eq_start(p, irqflags, fq, fd, flags))); else @@ -2271,6 +2273,8 @@ static noinline struct qm_eqcr_entry *wait_p_eq_start(struct qman_portal *p, { struct qm_eqcr_entry *eq; if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) + /* NB: return NULL if signal occurs before completion. Signal + * can occur during return. Caller must check for signal */ wait_event_interruptible(affine_queue, (eq = __wait_p_eq_start(p, irqflags, fq, fd, flags))); else @@ -2303,6 +2307,8 @@ int qman_p_enqueue(struct qman_portal *p, struct qman_fq *fq, if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) + /* NB: return success even if signal occurs before + * condition is true. 
pvb_commit guarantees success */ wait_event_interruptible(affine_queue, (p->eqci_owned != fq)); else @@ -2337,6 +2343,8 @@ int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags) if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) + /* NB: return success even if signal occurs before + * condition is true. pvb_commit guarantees success */ wait_event_interruptible(affine_queue, (p->eqci_owned != fq)); else @@ -2385,6 +2393,8 @@ int qman_p_enqueue_orp(struct qman_portal *p, struct qman_fq *fq, if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) + /* NB: return success even if signal occurs before + * condition is true. pvb_commit guarantees success */ wait_event_interruptible(affine_queue, (p->eqci_owned != fq)); else @@ -2434,6 +2444,8 @@ int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags, if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) + /* NB: return success even if signal occurs before + * condition is true. pvb_commit guarantees success */ wait_event_interruptible(affine_queue, (p->eqci_owned != fq)); else @@ -2473,6 +2485,8 @@ int qman_p_enqueue_precommit(struct qman_portal *p, struct qman_fq *fq, if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) + /* NB: return success even if signal occurs before + * condition is true. 
pvb_commit guarantees success */ wait_event_interruptible(affine_queue, (p->eqci_owned != fq)); else @@ -2514,6 +2528,8 @@ int qman_enqueue_precommit(struct qman_fq *fq, const struct qm_fd *fd, if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) + /* NB: return success even if signal occurs before + * condition is true. pvb_commit guarantees success */ wait_event_interruptible(affine_queue, (p->eqci_owned != fq)); else @@ -3774,6 +3790,8 @@ int qman_ceetm_channel_claim(struct qm_ceetm_channel **channel, } p = kzalloc(sizeof(*p), GFP_KERNEL); + if (!p) + return -ENOMEM; p->idx = channel_idx; p->dcp_idx = lni->dcp_idx; list_add_tail(&p->node, &lni->channels); |