plan9front / sys/src/9/bcm/mmu.c

changeset 7234: fc141b91ed8a
parent: 85c4b38d9a82
child: 55d93e47a2de
author: cinap_lenrek@felloff.net
date: Mon, 13 May 2019 19:12:41 +0200
permissions: -rw-r--r--
description: bcm, bcm64: preserve memsize across reboots, avoid trashing atags while parsing cmdline

we override the atag memory on reboot, so preserve the
memsize learned from the atags as the *maxmem plan9
variable. the global memsize variable is no longer needed.

avoid trashing the following atag when zero-terminating
the cmdline string.

zero the memory after the plan9.ini variables.
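
The atag-trashing fix is easiest to see in a sketch. Atags are packed back to back in memory, so writing a NUL terminator at the end of ATAG_CMDLINE's string can land on the header of the atag that follows. Below is a minimal illustration of the idea, with hypothetical names; the actual parsing lives in the bcm boot code, not in the mmu.c listed after it.

typedef struct Atag Atag;
struct Atag {
	u32int	size;		/* size of this atag in words, header included */
	u32int	tag;		/* ATAG_* type identifier */
	char	cmdline[1];	/* payload (for ATAG_CMDLINE) */
};

static char cmdline[1024];

static void
parsecmdline(Atag *a)
{
	int n;

	/* payload length in bytes; the atag header is 2 words */
	n = (a->size - 2)*sizeof(u32int);
	if(n >= sizeof cmdline)
		n = sizeof cmdline - 1;
	memmove(cmdline, a->cmdline, n);
	cmdline[n] = 0;		/* terminate the copy, not the atag itself */
}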

#include "u.h"
#include "../port/lib.h"
#include "mem.h"
#include "dat.h"
#include "fns.h"

#include "arm.h"

#define FEXT(d, o, w)	(((d)>>(o)) & ((1<<(w))-1))
#define L1X(va)		FEXT((va), 20, 12)
#define L2X(va)		FEXT((va), 12, 8)
#define L2AP(ap)	l2ap(ap)
#define L1ptedramattrs	soc.l1ptedramattrs
#define L2ptedramattrs	soc.l2ptedramattrs
#define PTEDRAM		(PHYSDRAM|Dom0|L1AP(Krw)|Section|L1ptedramattrs)

enum {
	L1lo		= UZERO/MiB,		/* L1X(UZERO)? */
	L1hi		= (USTKTOP+MiB-1)/MiB,	/* L1X(USTKTOP+MiB-1)? */
	L2size		= 256*sizeof(PTE),
};
/*
 * Set up initial PTEs for cpu0 (called with mmu off)
 */
void
mmuinit(void *a)
{
	PTE *l1, *l2;
	uintptr pa, va;

	l1 = (PTE*)a;
	l2 = (PTE*)PADDR(L2);

	/*
	 * map all of ram at KZERO
	 */
	va = KZERO;
	for(pa = PHYSDRAM; pa < PHYSDRAM+soc.dramsize; pa += MiB){
		l1[L1X(va)] = pa|Dom0|L1AP(Krw)|Section|L1ptedramattrs;
		va += MiB;
	}

	/*
	 * identity map first MB of ram so mmu can be enabled
	 */
	l1[L1X(PHYSDRAM)] = PTEDRAM;

	/*
	 * map i/o registers
	 */
	va = VIRTIO;
	for(pa = soc.physio; pa < soc.physio+IOSIZE; pa += MiB){
		l1[L1X(va)] = pa|Dom0|L1AP(Krw)|Section;
		va += MiB;
	}
	pa = soc.armlocal;
	if(pa)
		l1[L1X(va)] = pa|Dom0|L1AP(Krw)|Section;

	/*
	 * double map exception vectors near top of virtual memory
	 */
	va = HVECTORS;
	l1[L1X(va)] = (uintptr)l2|Dom0|Coarse;
	l2[L2X(va)] = PHYSDRAM|L2AP(Krw)|Small|L2ptedramattrs;
}

/*
 * enable/disable identity map of first MB of ram
 */
void
mmuinit1(int on)
{
	PTE *l1;

	l1 = m->mmul1;
	l1[L1X(PHYSDRAM)] = on? PTEDRAM: Fault;
	cachedwbtlb(&l1[L1X(PHYSDRAM)], sizeof(PTE));
	mmuinvalidateaddr(PHYSDRAM);
	mmuinvalidate();
}

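/*
 * return proc's l2 page table pages to its reuse cache,
 * clearing the matching l1 entries; the l2 tables themselves
 * are zeroed only when clear is set
 */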
static void
mmul2empty(Proc* proc, int clear)
{
	PTE *l1;
	Page **l2, *page;

	l1 = m->mmul1;
	l2 = &proc->mmul2;
	for(page = *l2; page != nil; page = page->next){
		if(clear)
			memset((void*)page->va, 0, L2size);
		l1[page->daddr] = Fault;
		l2 = &page->next;
	}
	coherence();
	*l2 = proc->mmul2cache;
	proc->mmul2cache = proc->mmul2;
	proc->mmul2 = nil;
}

static void
mmul1empty(void)
{
	PTE *l1;

	/* clean out any user mappings still in l1 */
	if(m->mmul1lo > 0){
		if(m->mmul1lo == 1)
			m->mmul1[L1lo] = Fault;
		else
			memset(&m->mmul1[L1lo], 0, m->mmul1lo*sizeof(PTE));
		m->mmul1lo = 0;
	}
	if(m->mmul1hi > 0){
		l1 = &m->mmul1[L1hi - m->mmul1hi];
		if(m->mmul1hi == 1)
			*l1 = Fault;
		else
			memset(l1, 0, m->mmul1hi*sizeof(PTE));
		m->mmul1hi = 0;
	}
}

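/*
 * switch to proc's address space: drop the old user
 * mappings from l1 and plug in proc's l2 tables
 */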
void
mmuswitch(Proc* proc)
{
	int x;
	PTE *l1;
	Page *page;

	if(proc != nil && proc->newtlb){
		mmul2empty(proc, 1);
		proc->newtlb = 0;
	}

	mmul1empty();

	/* move in new map */
	l1 = m->mmul1;
	if(proc != nil)
		for(page = proc->mmul2; page != nil; page = page->next){
			x = page->daddr;
			l1[x] = PPN(page->pa)|Dom0|Coarse;
			if(x >= L1lo + m->mmul1lo && x < L1hi - m->mmul1hi){
				if(x+1 - L1lo < L1hi - x)
					m->mmul1lo = x+1 - L1lo;
				else
					m->mmul1hi = L1hi - x;
			}
		}

	/* make sure map is in memory */
	/* could be smarter about how much? */
	cachedwbtlb(&l1[L1X(UZERO)], (L1hi - L1lo)*sizeof(PTE));

	/* lose any possible stale tlb entries */
	mmuinvalidate();
}

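/*
 * toss the current process's user mappings; they are
 * rebuilt on demand by putmmu
 */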
void
flushmmu(void)
{
	int s;

	s = splhi();
	up->newtlb = 1;
	mmuswitch(up);
	splx(s);
}

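/*
 * called when proc exits: give its page table pages
 * back to the free list
 */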
void
mmurelease(Proc* proc)
{
	Page *page, *next;

	mmul2empty(proc, 0);
	for(page = proc->mmul2cache; page != nil; page = next){
		next = page->next;
		if(--page->ref)
			panic("mmurelease: page->ref %lud", page->ref);
		pagechainhead(page);
	}
	if(proc->mmul2cache != nil)
		pagechaindone();
	proc->mmul2cache = nil;

	mmul1empty();

	/* make sure map is in memory */
	/* could be smarter about how much? */
	cachedwbtlb(&m->mmul1[L1X(UZERO)], (L1hi - L1lo)*sizeof(PTE));

	/* lose any possible stale tlb entries */
	mmuinvalidate();
}

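/*
 * fill in the l2 pte for the user page at va, allocating
 * an l2 page table first if the 1MB slot has none
 */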
void
putmmu(uintptr va, uintptr pa, Page* page)
{
	int x, s;
	Page *pg;
	PTE *l1, *pte;

	/*
	 * disable interrupts to prevent flushmmu (called from hzclock)
	 * from clearing page tables while we are setting them
	 */
	s = splhi();
	x = L1X(va);
	l1 = &m->mmul1[x];
	if(*l1 == Fault){
		/* l2 pages only have 256 entries - wastes 3K per 1M of address space */
		if(up->mmul2cache == nil){
			spllo();
			pg = newpage(1, 0, 0);
			splhi();
			/* if newpage slept, we might be on a different cpu */
			l1 = &m->mmul1[x];
			pg->va = VA(kmap(pg));
		}else{
			pg = up->mmul2cache;
			up->mmul2cache = pg->next;
		}
		pg->daddr = x;
		pg->next = up->mmul2;
		up->mmul2 = pg;

		/* force l2 page to memory (armv6) */
		cachedwbtlb((void *)pg->va, L2size);

		*l1 = PPN(pg->pa)|Dom0|Coarse;
		cachedwbtlb(l1, sizeof *l1);

		if(x >= L1lo + m->mmul1lo && x < L1hi - m->mmul1hi){
			if(x+1 - L1lo < L1hi - x)
				m->mmul1lo = x+1 - L1lo;
			else
				m->mmul1hi = L1hi - x;
		}
	}
	pte = KADDR(PPN(*l1));

	/* protection bits are
	 *	PTERONLY|PTEVALID;
	 *	PTEWRITE|PTEVALID;
	 *	PTEWRITE|PTEUNCACHED|PTEVALID;
	 */
	x = Small;
	if(!(pa & PTEUNCACHED))
		x |= L2ptedramattrs;
	if(pa & PTEWRITE)
		x |= L2AP(Urw);
	else
		x |= L2AP(Uro);
	pte[L2X(va)] = PPN(pa)|x;
	cachedwbtlb(&pte[L2X(va)], sizeof(PTE));

	/* clear out the current entry */
	mmuinvalidateaddr(PPN(va));

	if((page->txtflush & (1<<m->machno)) != 0){
		/* pio() sets PG_TXTFLUSH whenever a text pg has been written */
		cachedwbse((void*)(page->pa|KZERO), BY2PG);
		cacheiinvse((void*)page->va, BY2PG);
		page->txtflush &= ~(1<<m->machno);
	}
	//checkmmu(va, PPN(pa));
	splx(s);
}

void*
mmuuncache(void* v, usize size)
{
	int x;
	PTE *pte;
	uintptr va;

	/*
	 * Simple helper for ucalloc().
	 * Uncache a Section, must already be
	 * valid in the MMU.
	 */
	va = (uintptr)v;
	assert(!(va & (1*MiB-1)) && size == 1*MiB);

	x = L1X(va);
	pte = &m->mmul1[x];
	if((*pte & (Fine|Section|Coarse)) != Section)
		return nil;
	*pte &= ~L1ptedramattrs;
	mmuinvalidateaddr(va);
	cachedwbinvse(pte, 4);

	return v;
}

/*
 * Return the number of bytes that can be accessed via KADDR(pa).
 * If pa is not a valid argument to KADDR, return 0.
 */
uintptr
cankaddr(uintptr pa)
{
	if(pa < PHYSDRAM+soc.dramsize)
		return PHYSDRAM+soc.dramsize - pa;
	return 0;
}

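/*
 * map the physical range [pa, pa+size) at va using 1MB
 * sections; returns the mapped address, or 0 if some of
 * the range is already mapped
 */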
uintptr
mmukmap(uintptr va, uintptr pa, usize size)
{
	int o;
	usize n;
	PTE *pte, *pte0;

	assert((va & (MiB-1)) == 0);
	o = pa & (MiB-1);
	pa -= o;
	size += o;
	pte = pte0 = &m->mmul1[L1X(va)];
	for(n = 0; n < size; n += MiB)
		if(*pte++ != Fault)
			return 0;
	pte = pte0;
	for(n = 0; n < size; n += MiB){
		*pte++ = (pa+n)|Dom0|L1AP(Krw)|Section;
		mmuinvalidateaddr(va+n);
	}
	cachedwbtlb(pte0, (uintptr)pte - (uintptr)pte0);
	return va + o;
}

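/*
 * diagnostic: complain if va is unmapped or not mapped to pa
 */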
void
checkmmu(uintptr va, uintptr pa)
{
	int x;
	PTE *l1, *pte;

	x = L1X(va);
	l1 = &m->mmul1[x];
	if(*l1 == Fault){
		iprint("checkmmu cpu%d va=%lux l1 %p=%ux\n", m->machno, va, l1, *l1);
		return;
	}
	pte = KADDR(PPN(*l1));
	pte += L2X(va);
	if(pa == ~0 || (pa != 0 && PPN(*pte) != pa))
		iprint("checkmmu va=%lux pa=%lux l1 %p=%ux pte %p=%ux\n", va, pa, l1, *l1, pte, *pte);
}

void
kunmap(KMap *k)
{
	USED(k);
	coherence();
}