Diffstat (limited to 'drivers/staging/tidspbridge')
-rw-r--r--  drivers/staging/tidspbridge/core/io_sm.c      | 704
-rw-r--r--  drivers/staging/tidspbridge/core/tiomap3430.c |  27
-rw-r--r--  drivers/staging/tidspbridge/dynload/cload.c   |  69
3 files changed, 404 insertions(+), 396 deletions(-)
diff --git a/drivers/staging/tidspbridge/core/io_sm.c b/drivers/staging/tidspbridge/core/io_sm.c
index c51f651..480a384 100644
--- a/drivers/staging/tidspbridge/core/io_sm.c
+++ b/drivers/staging/tidspbridge/core/io_sm.c
@@ -128,6 +128,16 @@ struct io_mgr {
};
+struct shm_symbol_val {
+ u32 shm_base;
+ u32 shm_lim;
+ u32 msg_base;
+ u32 msg_lim;
+ u32 shm0_end;
+ u32 dyn_ext;
+ u32 ext_end;
+};
+
/* Function Prototypes */
static void io_dispatch_pm(struct io_mgr *pio_mgr);
static void notify_chnl_complete(struct chnl_object *pchnl,
@@ -256,6 +266,75 @@ int bridge_io_destroy(struct io_mgr *hio_mgr)
return status;
}
+struct shm_symbol_val *_get_shm_symbol_values(struct io_mgr *hio_mgr)
+{
+ struct shm_symbol_val *s;
+ struct cod_manager *cod_man;
+ int status;
+
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return ERR_PTR(-ENOMEM);
+
+ status = dev_get_cod_mgr(hio_mgr->dev_obj, &cod_man);
+ if (status)
+ goto free_symbol;
+
+ /* Get start and length of channel part of shared memory */
+ status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_BASE_SYM,
+ &s->shm_base);
+ if (status)
+ goto free_symbol;
+
+ status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_LIMIT_SYM,
+ &s->shm_lim);
+ if (status)
+ goto free_symbol;
+
+ if (s->shm_lim <= s->shm_base) {
+ status = -EINVAL;
+ goto free_symbol;
+ }
+
+ /* Get start and length of message part of shared memory */
+ status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_BASE_SYM,
+ &s->msg_base);
+ if (status)
+ goto free_symbol;
+
+ status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_LIMIT_SYM,
+ &s->msg_lim);
+ if (status)
+ goto free_symbol;
+
+ if (s->msg_lim <= s->msg_base) {
+ status = -EINVAL;
+ goto free_symbol;
+ }
+
+#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
+ status = cod_get_sym_value(cod_man, DSP_TRACESEC_END, &s->shm0_end);
+#else
+ status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM, &s->shm0_end);
+#endif
+ if (status)
+ goto free_symbol;
+
+ status = cod_get_sym_value(cod_man, DYNEXTBASE, &s->dyn_ext);
+ if (status)
+ goto free_symbol;
+
+ status = cod_get_sym_value(cod_man, EXTEND, &s->ext_end);
+ if (status)
+ goto free_symbol;
+
+ return s;
+
+free_symbol:
+ kfree(s);
+ return ERR_PTR(status);
+}
+
/*
* ======== bridge_io_on_loaded ========
* Purpose:
@@ -265,193 +344,112 @@ int bridge_io_destroy(struct io_mgr *hio_mgr)
*/
int bridge_io_on_loaded(struct io_mgr *hio_mgr)
{
+ struct bridge_dev_context *dc = hio_mgr->bridge_context;
+ struct cfg_hostres *cfg_res = dc->resources;
+ struct bridge_ioctl_extproc *eproc;
struct cod_manager *cod_man;
struct chnl_mgr *hchnl_mgr;
struct msg_mgr *hmsg_mgr;
- u32 ul_shm_base;
- u32 ul_shm_base_offset;
- u32 ul_shm_limit;
- u32 ul_shm_length = -1;
- u32 ul_mem_length = -1;
- u32 ul_msg_base;
- u32 ul_msg_limit;
- u32 ul_msg_length = -1;
- u32 ul_ext_end;
- u32 ul_gpp_pa = 0;
- u32 ul_gpp_va = 0;
- u32 ul_dsp_va = 0;
- u32 ul_seg_size = 0;
- u32 ul_pad_size = 0;
+ struct shm_symbol_val *s;
+ int status;
+ u8 num_procs;
+ s32 ndx;
u32 i;
- int status = 0;
- u8 num_procs = 0;
- s32 ndx = 0;
- /* DSP MMU setup table */
- struct bridge_ioctl_extproc ae_proc[BRDIOCTL_NUMOFMMUTLB];
- struct cfg_hostres *host_res;
- struct bridge_dev_context *pbridge_context;
- u32 map_attrs;
- u32 shm0_end;
- u32 ul_dyn_ext_base;
- u32 ul_seg1_size = 0;
- u32 pa_curr = 0;
- u32 va_curr = 0;
- u32 gpp_va_curr = 0;
- u32 num_bytes = 0;
+ u32 mem_sz, msg_sz, pad_sz, shm_sz, shm_base_offs;
+ u32 seg0_sz, seg1_sz;
+ u32 pa, va, da;
+ u32 pa_curr, va_curr, da_curr;
+ u32 bytes;
u32 all_bits = 0;
- u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
+ u32 page_size[] = {
+ HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
};
+ u32 map_attrs = DSP_MAPLITTLEENDIAN | DSP_MAPPHYSICALADDR |
+ DSP_MAPELEMSIZE32 | DSP_MAPDONOTLOCK;
- status = dev_get_bridge_context(hio_mgr->dev_obj, &pbridge_context);
- if (!pbridge_context) {
- status = -EFAULT;
- goto func_end;
- }
-
- host_res = pbridge_context->resources;
- if (!host_res) {
- status = -EFAULT;
- goto func_end;
- }
status = dev_get_cod_mgr(hio_mgr->dev_obj, &cod_man);
- if (!cod_man) {
- status = -EFAULT;
- goto func_end;
- }
+ if (status)
+ return status;
+
hchnl_mgr = hio_mgr->chnl_mgr;
- /* The message manager is destroyed when the board is stopped. */
+
+ /* The message manager is destroyed when the board is stopped */
dev_get_msg_mgr(hio_mgr->dev_obj, &hio_mgr->msg_mgr);
hmsg_mgr = hio_mgr->msg_mgr;
- if (!hchnl_mgr || !hmsg_mgr) {
- status = -EFAULT;
- goto func_end;
- }
+ if (!hchnl_mgr || !hmsg_mgr)
+ return -EFAULT;
+
if (hio_mgr->shared_mem)
hio_mgr->shared_mem = NULL;
- /* Get start and length of channel part of shared memory */
- status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_BASE_SYM,
- &ul_shm_base);
- if (status) {
- status = -EFAULT;
- goto func_end;
- }
- status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_LIMIT_SYM,
- &ul_shm_limit);
- if (status) {
- status = -EFAULT;
- goto func_end;
- }
- if (ul_shm_limit <= ul_shm_base) {
- status = -EINVAL;
- goto func_end;
- }
+ s = _get_shm_symbol_values(hio_mgr);
+ if (IS_ERR(s))
+ return PTR_ERR(s);
+
/* Get total length in bytes */
- ul_shm_length = (ul_shm_limit - ul_shm_base + 1) * hio_mgr->word_size;
+ shm_sz = (s->shm_lim - s->shm_base + 1) * hio_mgr->word_size;
+
/* Calculate size of a PROCCOPY shared memory region */
dev_dbg(bridge, "%s: (proc)proccopy shmmem size: 0x%x bytes\n",
- __func__, (ul_shm_length - sizeof(struct shm)));
+ __func__, shm_sz - sizeof(struct shm));
- /* Get start and length of message part of shared memory */
- status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_BASE_SYM,
- &ul_msg_base);
- if (!status) {
- status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_LIMIT_SYM,
- &ul_msg_limit);
- if (!status) {
- if (ul_msg_limit <= ul_msg_base) {
- status = -EINVAL;
- } else {
- /*
- * Length (bytes) of messaging part of shared
- * memory.
- */
- ul_msg_length =
- (ul_msg_limit - ul_msg_base +
- 1) * hio_mgr->word_size;
- /*
- * Total length (bytes) of shared memory:
- * chnl + msg.
- */
- ul_mem_length = ul_shm_length + ul_msg_length;
- }
- } else {
- status = -EFAULT;
- }
- } else {
- status = -EFAULT;
- }
- if (!status) {
-#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
- status =
- cod_get_sym_value(cod_man, DSP_TRACESEC_END, &shm0_end);
-#else
- status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM,
- &shm0_end);
-#endif
- if (status)
- status = -EFAULT;
- }
- if (!status) {
- status =
- cod_get_sym_value(cod_man, DYNEXTBASE, &ul_dyn_ext_base);
- if (status)
- status = -EFAULT;
- }
- if (!status) {
- status = cod_get_sym_value(cod_man, EXTEND, &ul_ext_end);
- if (status)
- status = -EFAULT;
+ /* Length (bytes) of messaging part of shared memory */
+ msg_sz = (s->msg_lim - s->msg_base + 1) * hio_mgr->word_size;
+
+ /* Total length (bytes) of shared memory: chnl + msg */
+ mem_sz = shm_sz + msg_sz;
+
+ /* Get memory reserved in host resources */
+ (void)mgr_enum_processor_info(0,
+ (struct dsp_processorinfo *)
+ &hio_mgr->ext_proc_info,
+ sizeof(struct mgr_processorextinfo),
+ &num_procs);
+
+ /* IO supports only one DSP for now */
+ if (num_procs != 1) {
+ status = -EINVAL;
+ goto free_symbol;
}
- if (!status) {
- /* Get memory reserved in host resources */
- (void)mgr_enum_processor_info(0, (struct dsp_processorinfo *)
- &hio_mgr->ext_proc_info,
- sizeof(struct
- mgr_processorextinfo),
- &num_procs);
-
- /* The first MMU TLB entry(TLB_0) in DCD is ShmBase. */
- ndx = 0;
- ul_gpp_pa = host_res->mem_phys[1];
- ul_gpp_va = host_res->mem_base[1];
- /* This is the virtual uncached ioremapped address!!! */
- /* Why can't we directly take the DSPVA from the symbols? */
- ul_dsp_va = hio_mgr->ext_proc_info.ty_tlb[0].dsp_virt;
- ul_seg_size = (shm0_end - ul_dsp_va) * hio_mgr->word_size;
- ul_seg1_size =
- (ul_ext_end - ul_dyn_ext_base) * hio_mgr->word_size;
- /* 4K align */
- ul_seg1_size = (ul_seg1_size + 0xFFF) & (~0xFFFUL);
- /* 64K align */
- ul_seg_size = (ul_seg_size + 0xFFFF) & (~0xFFFFUL);
- ul_pad_size = UL_PAGE_ALIGN_SIZE - ((ul_gpp_pa + ul_seg1_size) %
- UL_PAGE_ALIGN_SIZE);
- if (ul_pad_size == UL_PAGE_ALIGN_SIZE)
- ul_pad_size = 0x0;
-
- dev_dbg(bridge, "%s: ul_gpp_pa %x, ul_gpp_va %x, ul_dsp_va %x, "
- "shm0_end %x, ul_dyn_ext_base %x, ul_ext_end %x, "
- "ul_seg_size %x ul_seg1_size %x \n", __func__,
- ul_gpp_pa, ul_gpp_va, ul_dsp_va, shm0_end,
- ul_dyn_ext_base, ul_ext_end, ul_seg_size, ul_seg1_size);
-
- if ((ul_seg_size + ul_seg1_size + ul_pad_size) >
- host_res->mem_length[1]) {
- pr_err("%s: shm Error, reserved 0x%x required 0x%x\n",
- __func__, host_res->mem_length[1],
- ul_seg_size + ul_seg1_size + ul_pad_size);
- status = -ENOMEM;
- }
+
+ /* The first MMU TLB entry (TLB_0) in DCD is ShmBase */
+ pa = cfg_res->mem_phys[1];
+ va = cfg_res->mem_base[1];
+
+ /* This is the virtual uncached ioremapped address!!! */
+ /* Why can't we directly take the DSPVA from the symbols? */
+ da = hio_mgr->ext_proc_info.ty_tlb[0].dsp_virt;
+ seg0_sz = (s->shm0_end - da) * hio_mgr->word_size;
+ seg1_sz = (s->ext_end - s->dyn_ext) * hio_mgr->word_size;
+
+ /* 4K align */
+ seg1_sz = (seg1_sz + 0xFFF) & (~0xFFFUL);
+
+ /* 64K align */
+ seg0_sz = (seg0_sz + 0xFFFF) & (~0xFFFFUL);
+
+ pad_sz = UL_PAGE_ALIGN_SIZE - ((pa + seg1_sz) % UL_PAGE_ALIGN_SIZE);
+ if (pad_sz == UL_PAGE_ALIGN_SIZE)
+ pad_sz = 0x0;
+
+ dev_dbg(bridge, "%s: pa %x, va %x, da %x\n", __func__, pa, va, da);
+ dev_dbg(bridge,
+ "shm0_end %x, dyn_ext %x, ext_end %x, seg0_sz %x seg1_sz %x\n",
+ s->shm0_end, s->dyn_ext, s->ext_end, seg0_sz, seg1_sz);
+
+ if ((seg0_sz + seg1_sz + pad_sz) > cfg_res->mem_length[1]) {
+ pr_err("%s: shm Error, reserved 0x%x required 0x%x\n",
+ __func__, cfg_res->mem_length[1],
+ seg0_sz + seg1_sz + pad_sz);
+ status = -ENOMEM;
+ goto free_symbol;
}
- if (status)
- goto func_end;
- pa_curr = ul_gpp_pa;
- va_curr = ul_dyn_ext_base * hio_mgr->word_size;
- gpp_va_curr = ul_gpp_va;
- num_bytes = ul_seg1_size;
+ pa_curr = pa;
+ va_curr = s->dyn_ext * hio_mgr->word_size;
+ da_curr = va;
+ bytes = seg1_sz;
/*
* Try to fit into TLB entries. If not possible, push them to page
@@ -459,37 +457,30 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
* bigger page boundary, we may end up making several small pages.
* So, push them onto page tables, if that is the case.
*/
- map_attrs = 0x00000000;
- map_attrs = DSP_MAPLITTLEENDIAN;
- map_attrs |= DSP_MAPPHYSICALADDR;
- map_attrs |= DSP_MAPELEMSIZE32;
- map_attrs |= DSP_MAPDONOTLOCK;
-
- while (num_bytes) {
+ while (bytes) {
/*
* To find the max. page size with which both PA & VA are
* aligned.
*/
all_bits = pa_curr | va_curr;
- dev_dbg(bridge, "all_bits %x, pa_curr %x, va_curr %x, "
- "num_bytes %x\n", all_bits, pa_curr, va_curr,
- num_bytes);
+ dev_dbg(bridge,
+ "seg all_bits %x, pa_curr %x, va_curr %x, bytes %x\n",
+ all_bits, pa_curr, va_curr, bytes);
+
for (i = 0; i < 4; i++) {
- if ((num_bytes >= page_size[i]) && ((all_bits &
- (page_size[i] -
- 1)) == 0)) {
- status =
- hio_mgr->intf_fxns->
- brd_mem_map(hio_mgr->bridge_context,
- pa_curr, va_curr,
- page_size[i], map_attrs,
- NULL);
+ if ((bytes >= page_size[i]) &&
+ ((all_bits & (page_size[i] - 1)) == 0)) {
+ status = hio_mgr->intf_fxns->brd_mem_map(dc,
+ pa_curr, va_curr,
+ page_size[i], map_attrs,
+ NULL);
if (status)
- goto func_end;
+ goto free_symbol;
+
pa_curr += page_size[i];
va_curr += page_size[i];
- gpp_va_curr += page_size[i];
- num_bytes -= page_size[i];
+ da_curr += page_size[i];
+ bytes -= page_size[i];
/*
* Don't try smaller sizes. Hopefully we have
* reached an address aligned to a bigger page
@@ -499,71 +490,75 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
}
}
}
- pa_curr += ul_pad_size;
- va_curr += ul_pad_size;
- gpp_va_curr += ul_pad_size;
+ pa_curr += pad_sz;
+ va_curr += pad_sz;
+ da_curr += pad_sz;
+ bytes = seg0_sz;
+ va_curr = da * hio_mgr->word_size;
+
+ eproc = kzalloc(sizeof(*eproc) * BRDIOCTL_NUMOFMMUTLB, GFP_KERNEL);
+ if (!eproc) {
+ status = -ENOMEM;
+ goto free_symbol;
+ }
+
+ ndx = 0;
/* Configure the TLB entries for the next cacheable segment */
- num_bytes = ul_seg_size;
- va_curr = ul_dsp_va * hio_mgr->word_size;
- while (num_bytes) {
+ while (bytes) {
/*
* To find the max. page size with which both PA & VA are
* aligned.
*/
all_bits = pa_curr | va_curr;
- dev_dbg(bridge, "all_bits for Seg1 %x, pa_curr %x, "
- "va_curr %x, num_bytes %x\n", all_bits, pa_curr,
- va_curr, num_bytes);
+ dev_dbg(bridge,
+ "seg1 all_bits %x, pa_curr %x, va_curr %x, bytes %x\n",
+ all_bits, pa_curr, va_curr, bytes);
+
for (i = 0; i < 4; i++) {
- if (!(num_bytes >= page_size[i]) ||
+ if (!(bytes >= page_size[i]) ||
!((all_bits & (page_size[i] - 1)) == 0))
continue;
- if (ndx < MAX_LOCK_TLB_ENTRIES) {
- /*
- * This is the physical address written to
- * DSP MMU.
- */
- ae_proc[ndx].gpp_pa = pa_curr;
- /*
- * This is the virtual uncached ioremapped
- * address!!!
- */
- ae_proc[ndx].gpp_va = gpp_va_curr;
- ae_proc[ndx].dsp_va =
- va_curr / hio_mgr->word_size;
- ae_proc[ndx].size = page_size[i];
- ae_proc[ndx].endianism = HW_LITTLE_ENDIAN;
- ae_proc[ndx].elem_size = HW_ELEM_SIZE16BIT;
- ae_proc[ndx].mixed_mode = HW_MMU_CPUES;
- dev_dbg(bridge, "shm MMU TLB entry PA %x"
- " VA %x DSP_VA %x Size %x\n",
- ae_proc[ndx].gpp_pa,
- ae_proc[ndx].gpp_va,
- ae_proc[ndx].dsp_va *
- hio_mgr->word_size, page_size[i]);
- ndx++;
- } else {
- status =
- hio_mgr->intf_fxns->
- brd_mem_map(hio_mgr->bridge_context,
- pa_curr, va_curr,
- page_size[i], map_attrs,
- NULL);
+
+ if (ndx >= MAX_LOCK_TLB_ENTRIES) {
+ status = hio_mgr->intf_fxns->brd_mem_map(dc,
+ pa_curr, va_curr,
+ page_size[i], map_attrs,
+ NULL);
dev_dbg(bridge,
- "shm MMU PTE entry PA %x"
- " VA %x DSP_VA %x Size %x\n",
- ae_proc[ndx].gpp_pa,
- ae_proc[ndx].gpp_va,
- ae_proc[ndx].dsp_va *
+ "PTE pa %x va %x dsp_va %x sz %x\n",
+ eproc[ndx].gpp_pa,
+ eproc[ndx].gpp_va,
+ eproc[ndx].dsp_va *
hio_mgr->word_size, page_size[i]);
if (status)
- goto func_end;
+ goto free_eproc;
}
+
+ /* This is the physical address written to DSP MMU */
+ eproc[ndx].gpp_pa = pa_curr;
+
+ /*
+ * This is the virtual uncached ioremapped
+ * address!!!
+ */
+ eproc[ndx].gpp_va = da_curr;
+ eproc[ndx].dsp_va = va_curr / hio_mgr->word_size;
+ eproc[ndx].size = page_size[i];
+ eproc[ndx].endianism = HW_LITTLE_ENDIAN;
+ eproc[ndx].elem_size = HW_ELEM_SIZE16BIT;
+ eproc[ndx].mixed_mode = HW_MMU_CPUES;
+ dev_dbg(bridge, "%s: tlb pa %x va %x dsp_va %x sz %x\n",
+ __func__, eproc[ndx].gpp_pa,
+ eproc[ndx].gpp_va,
+ eproc[ndx].dsp_va * hio_mgr->word_size,
+ page_size[i]);
+ ndx++;
+
pa_curr += page_size[i];
va_curr += page_size[i];
- gpp_va_curr += page_size[i];
- num_bytes -= page_size[i];
+ da_curr += page_size[i];
+ bytes -= page_size[i];
/*
* Don't try smaller sizes. Hopefully we have reached
* an address aligned to a bigger page size.
@@ -577,146 +572,127 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
* should not conflict with shm entries on MPU or DSP side.
*/
for (i = 3; i < 7 && ndx < BRDIOCTL_NUMOFMMUTLB; i++) {
- if (hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys == 0)
+ struct mgr_processorextinfo *ep = &hio_mgr->ext_proc_info;
+ u32 word_sz = hio_mgr->word_size;
+
+ if (ep->ty_tlb[i].gpp_phys == 0)
continue;
- if ((hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys >
- ul_gpp_pa - 0x100000
- && hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys <=
- ul_gpp_pa + ul_seg_size)
- || (hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt >
- ul_dsp_va - 0x100000 / hio_mgr->word_size
- && hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt <=
- ul_dsp_va + ul_seg_size / hio_mgr->word_size)) {
+ if ((ep->ty_tlb[i].gpp_phys > pa - 0x100000 &&
+ ep->ty_tlb[i].gpp_phys <= pa + seg0_sz) ||
+ (ep->ty_tlb[i].dsp_virt > da - 0x100000 / word_sz &&
+ ep->ty_tlb[i].dsp_virt <= da + seg0_sz / word_sz)) {
dev_dbg(bridge,
- "CDB MMU entry %d conflicts with "
- "shm.\n\tCDB: GppPa %x, DspVa %x.\n\tSHM: "
- "GppPa %x, DspVa %x, Bytes %x.\n", i,
- hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys,
- hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt,
- ul_gpp_pa, ul_dsp_va, ul_seg_size);
+ "err cdb%d pa %x da %x shm pa %x da %x sz %x\n",
+ i, ep->ty_tlb[i].gpp_phys,
+ ep->ty_tlb[i].dsp_virt, pa, da, seg0_sz);
status = -EPERM;
- } else {
- if (ndx < MAX_LOCK_TLB_ENTRIES) {
- ae_proc[ndx].dsp_va =
- hio_mgr->ext_proc_info.ty_tlb[i].
- dsp_virt;
- ae_proc[ndx].gpp_pa =
- hio_mgr->ext_proc_info.ty_tlb[i].
- gpp_phys;
- ae_proc[ndx].gpp_va = 0;
- /* 1 MB */
- ae_proc[ndx].size = 0x100000;
- dev_dbg(bridge, "shm MMU entry PA %x "
- "DSP_VA 0x%x\n", ae_proc[ndx].gpp_pa,
- ae_proc[ndx].dsp_va);
- ndx++;
- } else {
- status = hio_mgr->intf_fxns->brd_mem_map
- (hio_mgr->bridge_context,
- hio_mgr->ext_proc_info.ty_tlb[i].
- gpp_phys,
- hio_mgr->ext_proc_info.ty_tlb[i].
- dsp_virt, 0x100000, map_attrs,
- NULL);
- }
+ goto free_eproc;
+ }
+
+ if (ndx >= MAX_LOCK_TLB_ENTRIES) {
+ status = hio_mgr->intf_fxns->brd_mem_map(dc,
+ ep->ty_tlb[i].gpp_phys,
+ ep->ty_tlb[i].dsp_virt,
+ 0x100000, map_attrs, NULL);
+ if (status)
+ goto free_eproc;
}
- if (status)
- goto func_end;
- }
- map_attrs = 0x00000000;
- map_attrs = DSP_MAPLITTLEENDIAN;
- map_attrs |= DSP_MAPPHYSICALADDR;
- map_attrs |= DSP_MAPELEMSIZE32;
- map_attrs |= DSP_MAPDONOTLOCK;
+ eproc[ndx].dsp_va = ep->ty_tlb[i].dsp_virt;
+ eproc[ndx].gpp_pa = ep->ty_tlb[i].gpp_phys;
+ eproc[ndx].gpp_va = 0;
+
+ /* 1 MB */
+ eproc[ndx].size = 0x100000;
+ dev_dbg(bridge, "shm MMU entry pa %x da 0x%x\n",
+ eproc[ndx].gpp_pa, eproc[ndx].dsp_va);
+ ndx++;
+ }
/* Map the L4 peripherals */
i = 0;
while (l4_peripheral_table[i].phys_addr) {
- status = hio_mgr->intf_fxns->brd_mem_map
- (hio_mgr->bridge_context, l4_peripheral_table[i].phys_addr,
- l4_peripheral_table[i].dsp_virt_addr, HW_PAGE_SIZE4KB,
- map_attrs, NULL);
+ status = hio_mgr->intf_fxns->brd_mem_map(dc,
+ l4_peripheral_table[i].phys_addr,
+ l4_peripheral_table[i].dsp_virt_addr,
+ HW_PAGE_SIZE4KB, map_attrs, NULL);
if (status)
- goto func_end;
+ goto free_eproc;
i++;
}
for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) {
- ae_proc[i].dsp_va = 0;
- ae_proc[i].gpp_pa = 0;
- ae_proc[i].gpp_va = 0;
- ae_proc[i].size = 0;
+ eproc[i].dsp_va = 0;
+ eproc[i].gpp_pa = 0;
+ eproc[i].gpp_va = 0;
+ eproc[i].size = 0;
}
+
/*
* Set the shm physical address entry (grayed out in CDB file)
* to the virtual uncached ioremapped address of shm reserved
* on MPU.
*/
hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys =
- (ul_gpp_va + ul_seg1_size + ul_pad_size);
+ (va + seg1_sz + pad_sz);
/*
* Need shm Phys addr. IO supports only one DSP for now:
* num_procs = 1.
*/
- if (!hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys || num_procs != 1) {
- status = -EFAULT;
- goto func_end;
- } else {
- if (ae_proc[0].dsp_va > ul_shm_base) {
- status = -EPERM;
- goto func_end;
- }
- /* ul_shm_base may not be at ul_dsp_va address */
- ul_shm_base_offset = (ul_shm_base - ae_proc[0].dsp_va) *
+ if (!hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys) {
+ status = -EFAULT;
+ goto free_eproc;
+ }
+
+ if (eproc[0].dsp_va > s->shm_base) {
+ status = -EPERM;
+ goto free_eproc;
+ }
+
+ /* shm_base may not be at the da address */
+ shm_base_offs = (s->shm_base - eproc[0].dsp_va) *
hio_mgr->word_size;
- /*
- * bridge_dev_ctrl() will set dev context dsp-mmu info. In
- * bridge_brd_start() the MMU will be re-programed with MMU
- * DSPVa-GPPPa pair info while DSP is in a known
- * (reset) state.
- */
+ /*
+ * bridge_dev_ctrl() will set dev context dsp-mmu info. In
+ * bridge_brd_start() the MMU will be re-programmed with MMU
+ * DSPVa-GPPPa pair info while DSP is in a known
+ * (reset) state.
+ */
+ status = hio_mgr->intf_fxns->dev_cntrl(hio_mgr->bridge_context,
+ BRDIOCTL_SETMMUCONFIG, eproc);
+ if (status)
+ goto free_eproc;
- status =
- hio_mgr->intf_fxns->dev_cntrl(hio_mgr->bridge_context,
- BRDIOCTL_SETMMUCONFIG,
- ae_proc);
- if (status)
- goto func_end;
- ul_shm_base = hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys;
- ul_shm_base += ul_shm_base_offset;
- ul_shm_base = (u32) MEM_LINEAR_ADDRESS((void *)ul_shm_base,
- ul_mem_length);
- if (ul_shm_base == 0) {
- status = -EFAULT;
- goto func_end;
- }
- /* Register SM */
- status =
- register_shm_segs(hio_mgr, cod_man, ae_proc[0].gpp_pa);
+ s->shm_base = hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys;
+ s->shm_base += shm_base_offs;
+ s->shm_base = (u32) MEM_LINEAR_ADDRESS((void *)s->shm_base,
+ mem_sz);
+ if (!s->shm_base) {
+ status = -EFAULT;
+ goto free_eproc;
}
- hio_mgr->shared_mem = (struct shm *)ul_shm_base;
+ /* Register SM */
+ status = register_shm_segs(hio_mgr, cod_man, eproc[0].gpp_pa);
+
+ hio_mgr->shared_mem = (struct shm *)s->shm_base;
hio_mgr->input = (u8 *) hio_mgr->shared_mem + sizeof(struct shm);
- hio_mgr->output = hio_mgr->input + (ul_shm_length -
+ hio_mgr->output = hio_mgr->input + (shm_sz -
sizeof(struct shm)) / 2;
hio_mgr->sm_buf_size = hio_mgr->output - hio_mgr->input;
- /* Set up Shared memory addresses for messaging. */
- hio_mgr->msg_input_ctrl = (struct msg_ctrl *)((u8 *) hio_mgr->shared_mem
- + ul_shm_length);
+ /* Set up Shared memory addresses for messaging */
+ hio_mgr->msg_input_ctrl =
+ (struct msg_ctrl *)((u8 *) hio_mgr->shared_mem + shm_sz);
hio_mgr->msg_input =
- (u8 *) hio_mgr->msg_input_ctrl + sizeof(struct msg_ctrl);
+ (u8 *) hio_mgr->msg_input_ctrl + sizeof(struct msg_ctrl);
hio_mgr->msg_output_ctrl =
- (struct msg_ctrl *)((u8 *) hio_mgr->msg_input_ctrl +
- ul_msg_length / 2);
+ (struct msg_ctrl *)((u8 *) hio_mgr->msg_input_ctrl +
+ msg_sz / 2);
hio_mgr->msg_output =
- (u8 *) hio_mgr->msg_output_ctrl + sizeof(struct msg_ctrl);
+ (u8 *) hio_mgr->msg_output_ctrl + sizeof(struct msg_ctrl);
hmsg_mgr->max_msgs =
- ((u8 *) hio_mgr->msg_output_ctrl - hio_mgr->msg_input)
- / sizeof(struct msg_dspmsg);
+ ((u8 *) hio_mgr->msg_output_ctrl - hio_mgr->msg_input) /
+ sizeof(struct msg_dspmsg);
+
dev_dbg(bridge, "IO MGR shm details: shared_mem %p, input %p, "
"output %p, msg_input_ctrl %p, msg_input %p, "
"msg_output_ctrl %p, msg_output %p\n",
@@ -732,47 +708,53 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
/* Get the start address of trace buffer */
status = cod_get_sym_value(cod_man, SYS_PUTCBEG,
&hio_mgr->trace_buffer_begin);
- if (status) {
- status = -EFAULT;
- goto func_end;
- }
+ if (status)
+ goto free_eproc;
+
+ hio_mgr->gpp_read_pointer =
+ hio_mgr->trace_buffer_begin =
+ (va + seg1_sz + pad_sz) +
+ (hio_mgr->trace_buffer_begin - da);
- hio_mgr->gpp_read_pointer = hio_mgr->trace_buffer_begin =
- (ul_gpp_va + ul_seg1_size + ul_pad_size) +
- (hio_mgr->trace_buffer_begin - ul_dsp_va);
/* Get the end address of trace buffer */
status = cod_get_sym_value(cod_man, SYS_PUTCEND,
&hio_mgr->trace_buffer_end);
- if (status) {
- status = -EFAULT;
- goto func_end;
- }
+ if (status)
+ goto free_eproc;
+
hio_mgr->trace_buffer_end =
- (ul_gpp_va + ul_seg1_size + ul_pad_size) +
- (hio_mgr->trace_buffer_end - ul_dsp_va);
+ (va + seg1_sz + pad_sz) +
+ (hio_mgr->trace_buffer_end - da);
+
/* Get the current address of DSP write pointer */
status = cod_get_sym_value(cod_man, BRIDGE_SYS_PUTC_CURRENT,
&hio_mgr->trace_buffer_current);
- if (status) {
- status = -EFAULT;
- goto func_end;
- }
+ if (status)
+ goto free_eproc;
+
hio_mgr->trace_buffer_current =
- (ul_gpp_va + ul_seg1_size + ul_pad_size) +
- (hio_mgr->trace_buffer_current - ul_dsp_va);
+ (va + seg1_sz + pad_sz) +
+ (hio_mgr->trace_buffer_current - da);
+
/* Calculate the size of trace buffer */
kfree(hio_mgr->msg);
hio_mgr->msg = kmalloc(((hio_mgr->trace_buffer_end -
hio_mgr->trace_buffer_begin) *
hio_mgr->word_size) + 2, GFP_KERNEL);
- if (!hio_mgr->msg)
+ if (!hio_mgr->msg) {
status = -ENOMEM;
+ goto free_eproc;
+ }
- hio_mgr->dsp_va = ul_dsp_va;
- hio_mgr->gpp_va = (ul_gpp_va + ul_seg1_size + ul_pad_size);
-
+ hio_mgr->dsp_va = da;
+ hio_mgr->gpp_va = (va + seg1_sz + pad_sz);
#endif
-func_end:
+
+free_eproc:
+ kfree(eproc);
+free_symbol:
+ kfree(s);
+
return status;
}
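
A note on the error-handling pattern above: the new _get_shm_symbol_values() helper returns either a valid allocation or an errno encoded in the pointer, using the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers from <linux/err.h>. A minimal sketch of that convention, assuming the driver-local struct io_mgr and struct shm_symbol_val definitions from io_sm.c; lookup_symbols() is a hypothetical stand-in for the chain of cod_get_sym_value() calls in the patch:

	#include <linux/err.h>
	#include <linux/errno.h>
	#include <linux/slab.h>

	/* Hypothetical stand-in for the cod_get_sym_value() lookups. */
	static int lookup_symbols(struct io_mgr *hio_mgr, struct shm_symbol_val *s)
	{
		return 0;	/* pretend every symbol resolved */
	}

	static struct shm_symbol_val *get_shm_vals(struct io_mgr *hio_mgr)
	{
		struct shm_symbol_val *s;
		int status;

		s = kzalloc(sizeof(*s), GFP_KERNEL);
		if (!s)
			return ERR_PTR(-ENOMEM);	/* errno travels inside the pointer */

		status = lookup_symbols(hio_mgr, s);
		if (status) {
			kfree(s);			/* free before propagating the error */
			return ERR_PTR(status);
		}
		return s;
	}

The caller then tests the result with IS_ERR() and recovers the errno with PTR_ERR(), which is exactly what bridge_io_on_loaded() does right after calling the helper.
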
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index 9cf29fc..f9609ce 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -1547,20 +1547,27 @@ EXIT_LOOP:
static u32 user_va2_pa(struct mm_struct *mm, u32 address)
{
pgd_t *pgd;
+ pud_t *pud;
pmd_t *pmd;
pte_t *ptep, pte;
pgd = pgd_offset(mm, address);
- if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
- pmd = pmd_offset(pgd, address);
- if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
- ptep = pte_offset_map(pmd, address);
- if (ptep) {
- pte = *ptep;
- if (pte_present(pte))
- return pte & PAGE_MASK;
- }
- }
+ if (pgd_none(*pgd) || pgd_bad(*pgd))
+ return 0;
+
+ pud = pud_offset(pgd, address);
+ if (pud_none(*pud) || pud_bad(*pud))
+ return 0;
+
+ pmd = pmd_offset(pud, address);
+ if (pmd_none(*pmd) || pmd_bad(*pmd))
+ return 0;
+
+ ptep = pte_offset_map(pmd, address);
+ if (ptep) {
+ pte = *ptep;
+ if (pte_present(pte))
+ return pte & PAGE_MASK;
}
return 0;
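
For context, user_va2_pa() is a software walk of the process page tables; the change above adds the pud level that the old pgd-to-pmd shortcut skipped. A sketch of the full four-step walk under the page-table API of this kernel generation (pre-p4d); the pte_unmap() pairing is an extra precaution added here and is not part of the patch itself:

	#include <linux/mm.h>
	#include <asm/pgtable.h>

	static u32 walk_va_to_pa(struct mm_struct *mm, u32 address)
	{
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *ptep, pte;
		u32 pa = 0;

		pgd = pgd_offset(mm, address);		/* top level, per-mm */
		if (pgd_none(*pgd) || pgd_bad(*pgd))
			return 0;

		pud = pud_offset(pgd, address);		/* folded into pgd on 2-level ARM */
		if (pud_none(*pud) || pud_bad(*pud))
			return 0;

		pmd = pmd_offset(pud, address);
		if (pmd_none(*pmd) || pmd_bad(*pmd))
			return 0;

		ptep = pte_offset_map(pmd, address);	/* may kmap the PTE page */
		if (ptep) {
			pte = *ptep;
			if (pte_present(pte))
				pa = pte & PAGE_MASK;	/* same masking the driver uses */
			pte_unmap(ptep);		/* balance pte_offset_map() */
		}
		return pa;
	}
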
diff --git a/drivers/staging/tidspbridge/dynload/cload.c b/drivers/staging/tidspbridge/dynload/cload.c
index fe1ef0a..9d54744 100644
--- a/drivers/staging/tidspbridge/dynload/cload.c
+++ b/drivers/staging/tidspbridge/dynload/cload.c
@@ -14,6 +14,8 @@
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
+#include <linux/slab.h>
+
#include "header.h"
#include "module_list.h"
@@ -706,6 +708,7 @@ static void dload_symbols(struct dload_state *dlthis)
struct local_symbol *sp;
struct dynload_symbol *symp;
struct dynload_symbol *newsym;
+ struct doff_syment_t *my_sym_buf;
sym_count = dlthis->dfile_hdr.df_no_syms;
if (sym_count == 0)
@@ -739,13 +742,18 @@ static void dload_symbols(struct dload_state *dlthis)
become defined from the global symbol table */
checks = dlthis->verify.dv_sym_tab_checksum;
symbols_left = sym_count;
+
+ my_sym_buf = kzalloc(sizeof(*my_sym_buf) * MY_SYM_BUF_SIZ, GFP_KERNEL);
+ if (!my_sym_buf)
+ return;
+
do { /* read all symbols */
char *sname;
u32 val;
s32 delta;
struct doff_syment_t *input_sym;
unsigned syms_in_buf;
- struct doff_syment_t my_sym_buf[MY_SYM_BUF_SIZ];
+
input_sym = my_sym_buf;
syms_in_buf = symbols_left > MY_SYM_BUF_SIZ ?
MY_SYM_BUF_SIZ : symbols_left;
@@ -753,7 +761,7 @@ static void dload_symbols(struct dload_state *dlthis)
if (dlthis->strm->read_buffer(dlthis->strm, input_sym, siz) !=
siz) {
DL_ERROR(readstrm, sym_errid);
- return;
+ goto free_sym_buf;
}
if (dlthis->reorder_map)
dload_reorder(input_sym, siz, dlthis->reorder_map);
@@ -856,7 +864,7 @@ static void dload_symbols(struct dload_state *dlthis)
DL_ERROR("Absolute symbol %s is "
"defined multiple times with "
"different values", sname);
- return;
+ goto free_sym_buf;
}
}
loop_itr:
@@ -887,6 +895,9 @@ loop_cont:
if (~checks)
dload_error(dlthis, "Checksum of symbols failed");
+free_sym_buf:
+ kfree(my_sym_buf);
+ return;
} /* dload_symbols */
/*****************************************************************************
@@ -1116,6 +1127,11 @@ static int relocate_packet(struct dload_state *dlthis,
/* VERY dangerous */
static const char imagepak[] = { "image packet" };
+struct img_buffer {
+ struct image_packet_t ipacket;
+ u8 bufr[BYTE_TO_HOST(IMAGE_PACKET_SIZE)];
+};
+
/*************************************************************************
* Procedure dload_data
*
@@ -1131,16 +1147,16 @@ static void dload_data(struct dload_state *dlthis)
u16 curr_sect;
struct doff_scnhdr_t *sptr = dlthis->sect_hdrs;
struct ldr_section_info *lptr = dlthis->ldr_sections;
+ struct img_buffer *ibuf;
u8 *dest;
- struct {
- struct image_packet_t ipacket;
- u8 bufr[BYTE_TO_HOST(IMAGE_PACKET_SIZE)];
- } ibuf;
-
/* Indicates whether CINIT processing has occurred */
bool cinit_processed = false;
+ ibuf = kzalloc(sizeof(*ibuf), GFP_KERNEL);
+ if (!ibuf)
+ return;
+
/* Loop through the sections and load them one at a time.
*/
for (curr_sect = 0; curr_sect < dlthis->dfile_hdr.df_no_scns;
@@ -1168,37 +1184,37 @@ static void dload_data(struct dload_state *dlthis)
/* get the fixed header bits */
if (dlthis->strm->read_buffer(dlthis->strm,
- &ibuf.ipacket,
+ &ibuf->ipacket,
IPH_SIZE) !=
IPH_SIZE) {
DL_ERROR(readstrm, imagepak);
- return;
+ goto free_ibuf;
}
/* reorder the header if need be */
if (dlthis->reorder_map) {
- dload_reorder(&ibuf.ipacket, IPH_SIZE,
+ dload_reorder(&ibuf->ipacket, IPH_SIZE,
dlthis->reorder_map);
}
/* now read the rest of the packet */
ipsize =
BYTE_TO_HOST(DOFF_ALIGN
- (ibuf.ipacket.packet_size));
+ (ibuf->ipacket.packet_size));
if (ipsize > BYTE_TO_HOST(IMAGE_PACKET_SIZE)) {
DL_ERROR("Bad image packet size %d",
ipsize);
- return;
+ goto free_ibuf;
}
- dest = ibuf.bufr;
+ dest = ibuf->bufr;
/* End of determination */
if (dlthis->strm->read_buffer(dlthis->strm,
- ibuf.bufr,
+ ibuf->bufr,
ipsize) !=
ipsize) {
DL_ERROR(readstrm, imagepak);
- return;
+ goto free_ibuf;
}
- ibuf.ipacket.img_data = dest;
+ ibuf->ipacket.img_data = dest;
/* reorder the bytes if need be */
#if !defined(_BIG_ENDIAN) || (TARGET_AU_BITS > 16)
@@ -1225,16 +1241,16 @@ static void dload_data(struct dload_state *dlthis)
#endif
#endif
- checks += dload_checksum(&ibuf.ipacket,
+ checks += dload_checksum(&ibuf->ipacket,
IPH_SIZE);
/* relocate the image bits as needed */
- if (ibuf.ipacket.num_relocs) {
+ if (ibuf->ipacket.num_relocs) {
dlthis->image_offset = image_offset;
if (!relocate_packet(dlthis,
- &ibuf.ipacket,
+ &ibuf->ipacket,
&checks,
&tramp_generated))
- return; /* serious error */
+ goto free_ibuf; /* error */
}
if (~checks)
DL_ERROR(err_checksum, imagepak);
@@ -1249,20 +1265,20 @@ static void dload_data(struct dload_state *dlthis)
if (dload_check_type(sptr,
DLOAD_CINIT)) {
cload_cinit(dlthis,
- &ibuf.ipacket);
+ &ibuf->ipacket);
cinit_processed = true;
} else {
/* FIXME */
if (!dlthis->myio->
writemem(dlthis->
myio,
- ibuf.bufr,
+ ibuf->bufr,
lptr->
load_addr +
image_offset,
lptr,
BYTE_TO_HOST
- (ibuf.
+ (ibuf->
ipacket.
packet_size))) {
DL_ERROR
@@ -1276,7 +1292,7 @@ static void dload_data(struct dload_state *dlthis)
}
}
image_offset +=
- BYTE_TO_TADDR(ibuf.ipacket.packet_size);
+ BYTE_TO_TADDR(ibuf->ipacket.packet_size);
} /* process packets */
/* if this is a BSS section, we may want to fill it */
if (!dload_check_type(sptr, DLOAD_BSS))
@@ -1334,6 +1350,9 @@ loop_cont:
DL_ERROR("Finalization of auto-trampolines (size = " FMT_UI32
") failed", dlthis->tramp.tramp_sect_next_addr);
}
+free_ibuf:
+ kfree(ibuf);
+ return;
} /* dload_data */
/*************************************************************************
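
Both cload.c changes apply the same fix: a large automatic array that used to sit on the kernel stack (my_sym_buf in dload_symbols(), ibuf in dload_data()) moves to the heap, and every early return is rerouted through a single kfree() exit. A generic sketch of that conversion; the chunk type, size, and read_chunk() helper are stand-ins, not driver APIs:

	#include <linux/errno.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	#define CHUNK_ENTRIES 512		/* stand-in for MY_SYM_BUF_SIZ */

	struct chunk { u32 word[8]; };		/* stand-in for struct doff_syment_t */

	static int read_chunk(struct chunk *c)	/* hypothetical stream reader */
	{
		return -EIO;			/* pretend the stream ran dry */
	}

	static void consume_stream(unsigned int chunks_left)
	{
		struct chunk *buf;

		/* was: struct chunk buf[CHUNK_ENTRIES]; inside the loop, on the stack */
		buf = kcalloc(CHUNK_ENTRIES, sizeof(*buf), GFP_KERNEL);
		if (!buf)
			return;

		while (chunks_left--) {
			if (read_chunk(buf))
				goto free_buf;	/* every error path funnels to the kfree */
			/* ... reorder, checksum and register the chunk here ... */
		}

	free_buf:
		kfree(buf);			/* single point of release */
	}

The patch keeps the same shape: a free_sym_buf label in dload_symbols() and a free_ibuf label in dload_data() replace the bare returns that would otherwise leak the buffer.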