/*
* Broadcom specific AMBA
* Core ops
*
* Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/export.h>
#include <linux/bcma/bcma.h>
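/*
 * Check whether a core is enabled: its clock must be running with forced
 * gating (FGC) off and the core must not be held in reset.
 */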
bool bcma_core_is_enabled(struct bcma_device *core)
{
        if ((bcma_aread32(core, BCMA_IOCTL) & (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC))
            != BCMA_IOCTL_CLK)
                return false;
        if (bcma_aread32(core, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET)
                return false;
        return true;
}
EXPORT_SYMBOL_GPL(bcma_core_is_enabled);
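/*
 * Put a core into reset. The requested IOCTL flags are written first and
 * allowed to settle before the reset bit is asserted. Does nothing if the
 * core is already in reset.
 */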
void bcma_core_disable(struct bcma_device *core, u32 flags)
{
        if (bcma_aread32(core, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET)
                return;

        bcma_awrite32(core, BCMA_IOCTL, flags);
        bcma_aread32(core, BCMA_IOCTL);
        udelay(10);

        bcma_awrite32(core, BCMA_RESET_CTL, BCMA_RESET_CTL_RESET);
        bcma_aread32(core, BCMA_RESET_CTL);
        udelay(1);
}
EXPORT_SYMBOL_GPL(bcma_core_disable);
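/*
 * Bring a core out of reset: disable it, start its clock with forced
 * gating (FGC) plus the caller's flags, deassert reset and finally drop
 * the forced gating so only the clock and the caller's flags remain set.
 */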
int bcma_core_enable(struct bcma_device *core, u32 flags)
{
        bcma_core_disable(core, flags);

        bcma_awrite32(core, BCMA_IOCTL, (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC | flags));
        bcma_aread32(core, BCMA_IOCTL);

        bcma_awrite32(core, BCMA_RESET_CTL, 0);
        udelay(1);

        bcma_awrite32(core, BCMA_IOCTL, (BCMA_IOCTL_CLK | flags));
        bcma_aread32(core, BCMA_IOCTL);
        udelay(1);

        return 0;
}
EXPORT_SYMBOL_GPL(bcma_core_enable);
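/*
 * Switch a core between the forced high throughput (HT) clock and dynamic
 * clock selection. In fast mode the HAVEHT bit is polled for up to
 * 1500 * 10 us before a timeout is reported. Only the ChipCommon, PCIe
 * and 802.11 cores are expected to carry this clock control register.
 */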
void bcma_core_set_clockmode(struct bcma_device *core,
                             enum bcma_clkmode clkmode)
{
        u16 i;

        WARN_ON(core->id.id != BCMA_CORE_CHIPCOMMON &&
                core->id.id != BCMA_CORE_PCIE &&
                core->id.id != BCMA_CORE_80211);

        switch (clkmode) {
        case BCMA_CLKMODE_FAST:
                bcma_set32(core, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
                usleep_range(64, 300);
                for (i = 0; i < 1500; i++) {
                        if (bcma_read32(core, BCMA_CLKCTLST) &
                            BCMA_CLKCTLST_HAVEHT) {
                                i = 0;
                                break;
                        }
                        udelay(10);
                }
                if (i)
                        bcma_err(core->bus, "HT force timeout\n");
                break;
        case BCMA_CLKMODE_DYNAMIC:
                bcma_set32(core, BCMA_CLKCTLST, ~BCMA_CLKCTLST_FORCEHT);
                break;
        }
}
EXPORT_SYMBOL_GPL(bcma_core_set_clockmode);
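/*
 * Request (on) or release (off) PLL resources through the clock control/
 * status register. @req carries the external resource request bits and
 * @status the matching status bits; when enabling, the status bits are
 * polled for up to 10000 * 10 us before a timeout is reported.
 */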
void bcma_core_pll_ctl(struct bcma_device *core, u32 req, u32 status, bool on)
{
        u16 i;

        WARN_ON(req & ~BCMA_CLKCTLST_EXTRESREQ);
        WARN_ON(status & ~BCMA_CLKCTLST_EXTRESST);

        if (on) {
                bcma_set32(core, BCMA_CLKCTLST, req);
                for (i = 0; i < 10000; i++) {
                        if ((bcma_read32(core, BCMA_CLKCTLST) & status) ==
                            status) {
                                i = 0;
                                break;
                        }
                        udelay(10);
                }
                if (i)
                        bcma_err(core->bus, "PLL enable timeout\n");
        } else {
                /*
                 * Mask the PLL but don't wait for it to be disabled. The PLL
                 * may be shared between cores and will still be up if another
                 * core is using it.
                 */
                bcma_mask32(core, BCMA_CLKCTLST, ~req);
                bcma_read32(core, BCMA_CLKCTLST);
        }
}
EXPORT_SYMBOL_GPL(bcma_core_pll_ctl);
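/*
 * Return the DMA address translation to use for this core: none on SoC
 * buses; on PCI hosts the 64-bit or 32-bit client-mode translation,
 * depending on whether the core reports 64-bit DMA support
 * (BCMA_IOST_DMA64).
 */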
u32 bcma_core_dma_translation(struct bcma_device *core)
{
        switch (core->bus->hosttype) {
        case BCMA_HOSTTYPE_SOC:
                return 0;
        case BCMA_HOSTTYPE_PCI:
                if (bcma_aread32(core, BCMA_IOST) & BCMA_IOST_DMA64)
                        return BCMA_DMA_TRANSLATION_DMA64_CMT;
                else
                        return BCMA_DMA_TRANSLATION_DMA32_CMT;
        default:
                bcma_err(core->bus, "DMA translation unknown for host %d\n",
                         core->bus->hosttype);
        }
        return BCMA_DMA_TRANSLATION_NONE;
}
EXPORT_SYMBOL(bcma_core_dma_translation);