changelog shortlog tags branches changeset files revisions annotate raw help

Mercurial > hg > plan9front / sys/src/9/bcm64/mmu.c

changeset 7239: c0e23a8829f7
parent: 9fe2319844b6
child: 2e8af1bf191d
author: cinap_lenrek@felloff.net
date: Wed, 15 May 2019 16:19:20 +0200
permissions: -rw-r--r--
description: bcm64: generalize mmu code

make user page table list heads arrays so we can
index into the right level avoiding the special
cases for different PTLEVELS.
1 #include "u.h"
2 #include "../port/lib.h"
3 #include "mem.h"
4 #include "dat.h"
5 #include "fns.h"
6 #include "sysreg.h"
7 
/*
 * Fill in the initial (boot) page table l1.
 *
 * Three ranges are mapped with level 1 blocks of PGLSZ(1) bytes:
 * an identity map of dram at PHYSDRAM, the kernel map of the same
 * physical range at KZERO, and the soc i/o registers at VIRTIO.
 * When PTLEVELS > 2 (and > 3), the higher translation levels get
 * table entries pointing into the next lower level inside the
 * same l1 array.
 *
 * NOTE(review): next-level table entries here hold the plain
 * address &l1[...] (not PADDR of it), presumably because this
 * runs with the mmu still off and l1 physically addressed —
 * compare mmuidmap(), which uses PADDR().  Confirm against the
 * assembly caller.
 */
void
mmu0init(uintptr *l1)
{
	uintptr va, pa, pe;

	/* 0 identity map */
	pe = PHYSDRAM + soc.dramsize;
	if(pe > (uintptr)-KZERO)
		pe = (uintptr)-KZERO;	/* cap to what fits in the KZERO window */

	for(pa = PHYSDRAM; pa < pe; pa += PGLSZ(1))
		l1[PTL1X(pa, 1)] = pa | PTEVALID | PTEBLOCK | PTEWRITE | PTEAF
			| PTEKERNEL | PTESH(SHARE_INNER);
	if(PTLEVELS > 2)
	for(pa = PHYSDRAM; pa < pe; pa += PGLSZ(2))
		l1[PTL1X(pa, 2)] = (uintptr)&l1[L1TABLEX(pa, 1)] | PTEVALID | PTETABLE;
	if(PTLEVELS > 3)
	for(pa = PHYSDRAM; pa < pe; pa += PGLSZ(3))
		l1[PTL1X(pa, 3)] = (uintptr)&l1[L1TABLEX(pa, 2)] | PTEVALID | PTETABLE;

	/* KZERO */
	for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(1), va += PGLSZ(1))
		l1[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | PTEWRITE | PTEAF
			| PTEKERNEL | PTESH(SHARE_INNER);
	if(PTLEVELS > 2)
	for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(2), va += PGLSZ(2))
		l1[PTL1X(va, 2)] = (uintptr)&l1[L1TABLEX(va, 1)] | PTEVALID | PTETABLE;
	if(PTLEVELS > 3)
	for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(3), va += PGLSZ(3))
		l1[PTL1X(va, 3)] = (uintptr)&l1[L1TABLEX(va, 2)] | PTEVALID | PTETABLE;

	/* VIRTIO: device mappings get PTEDEVICE attributes and outer sharing */
	pe = -VIRTIO + soc.physio;
	for(pa = soc.physio, va = VIRTIO; pa < pe; pa += PGLSZ(1), va += PGLSZ(1))
		l1[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | PTEWRITE | PTEAF
			| PTEKERNEL | PTESH(SHARE_OUTER) | PTEDEVICE;
	if(PTLEVELS > 2)
	for(pa = soc.physio, va = VIRTIO; pa < pe; pa += PGLSZ(2), va += PGLSZ(2))
		l1[PTL1X(va, 2)] = (uintptr)&l1[L1TABLEX(va, 1)] | PTEVALID | PTETABLE;
	if(PTLEVELS > 3)
	for(pa = soc.physio, va = VIRTIO; pa < pe; pa += PGLSZ(3), va += PGLSZ(3))
		l1[PTL1X(va, 3)] = (uintptr)&l1[L1TABLEX(va, 2)] | PTEVALID | PTETABLE;
}
51 
/*
 * Undo mmu0init's identity map of dram while keeping the KZERO
 * map intact: an identity entry is cleared only when its table
 * index differs from the index of the corresponding KZERO entry
 * (when the indices coincide the two maps share the slot, so
 * clearing it would take out the kernel map too).
 */
void
mmu0clear(uintptr *l1)
{
	uintptr va, pa, pe;

	pe = PHYSDRAM + soc.dramsize;
	if(pe > (uintptr)-KZERO)
		pe = (uintptr)-KZERO;	/* same cap as in mmu0init */

	for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(1), va += PGLSZ(1)){
		if(PTL1X(pa, 1) != PTL1X(va, 1))
			l1[PTL1X(pa, 1)] = 0;
	}
	if(PTLEVELS > 2)
	for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(2), va += PGLSZ(2)){
		if(PTL1X(pa, 2) != PTL1X(va, 2))
			l1[PTL1X(pa, 2)] = 0;
	}
	if(PTLEVELS > 3)
	for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(3), va += PGLSZ(3)){
		if(PTL1X(pa, 3) != PTL1X(va, 3))
			l1[PTL1X(pa, 3)] = 0;
	}
}
76 
/*
 * Re-install an identity map of dram alongside the KZERO map in
 * the given table and activate it — for code paths that must run
 * with pc equal to its physical address (e.g. reboot / secondary
 * cpu startup).  Entries whose index collides with a KZERO entry
 * are skipped, as in mmu0clear().
 *
 * Unlike mmu0init, next-level table entries use PADDR() here:
 * the mmu is already on, so l1 is a KZERO virtual address.
 */
void
mmuidmap(uintptr *l1)
{
	uintptr va, pa, pe;

	mmuswitch(nil);		/* drop user mappings, run on the kernel map */
	flushtlb();

	pe = PHYSDRAM + soc.dramsize;
	if(pe > (uintptr)-KZERO)
		pe = (uintptr)-KZERO;

	for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(1), va += PGLSZ(1)){
		if(PTL1X(pa, 1) != PTL1X(va, 1))
			l1[PTL1X(pa, 1)] = pa | PTEVALID | PTEBLOCK | PTEWRITE | PTEAF
				| PTEKERNEL | PTESH(SHARE_INNER);
	}
	if(PTLEVELS > 2)
	for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(2), va += PGLSZ(2)){
		if(PTL1X(pa, 2) != PTL1X(va, 2))
			l1[PTL1X(pa, 2)] = PADDR(&l1[L1TABLEX(pa, 1)]) | PTEVALID | PTETABLE;
	}
	if(PTLEVELS > 3)
	for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(3), va += PGLSZ(3)){
		if(PTL1X(pa, 3) != PTL1X(va, 3))
			l1[PTL1X(pa, 3)] = PADDR(&l1[L1TABLEX(pa, 2)]) | PTEVALID | PTETABLE;
	}
	setttbr(PADDR(&l1[L1TABLEX(0, PTLEVELS-1)]));
}
106 
107 void
108 mmu1init(void)
109 {
110  m->mmul1 = mallocalign(L1SIZE+L1TOPSIZE, BY2PG, L1SIZE, 0);
111  if(m->mmul1 == nil)
112  panic("mmu1init: no memory for mmul1");
113  memset(m->mmul1, 0, L1SIZE+L1TOPSIZE);
114  mmuswitch(nil);
115 }
116 
117 uintptr
118 paddr(void *va)
119 {
120  if((uintptr)va >= KZERO)
121  return (uintptr)va-KZERO;
122  panic("paddr: va=%#p pc=%#p", va, getcallerpc(&va));
123  return 0;
124 }
125 
126 uintptr
127 cankaddr(uintptr pa)
128 {
129  if(pa < (uintptr)-KZERO)
130  return -KZERO - pa;
131  return 0;
132 }
133 
134 void*
135 kaddr(uintptr pa)
136 {
137  if(pa < (uintptr)-KZERO)
138  return (void*)(pa + KZERO);
139  panic("kaddr: pa=%#p pc=%#p", pa, getcallerpc(&pa));
140  return nil;
141 }
142 
/*
 * Invalidate temporary kernel mappings: a no-op here, since
 * kmap() uses the permanent KZERO window instead of per-use
 * mappings.
 */
void
kmapinval(void)
{
}
147 
148 KMap*
149 kmap(Page *p)
150 {
151  return kaddr(p->pa);
152 }
153 
/*
 * Release a kmap: nothing to undo, kmap() is pure address
 * arithmetic on the KZERO window.
 */
void
kunmap(KMap*)
{
}
158 
159 uintptr
160 mmukmap(uintptr va, uintptr pa, usize size)
161 {
162  uintptr a, pe, off, attr;
163 
164  if(va == 0)
165  return 0;
166 
167  attr = va & PTEMA(7);
168  va &= -PGLSZ(1);
169  off = pa % PGLSZ(1);
170  a = va + off;
171  pe = (pa + size + (PGLSZ(1)-1)) & -PGLSZ(1);
172  while(pa < pe){
173  ((uintptr*)L1)[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | PTEWRITE | PTEAF
174  | PTEKERNEL | PTESH(SHARE_OUTER) | attr;
175  pa += PGLSZ(1);
176  va += PGLSZ(1);
177  }
178  flushtlb();
179  return a;
180 }
181 
/*
 * Walk the current process's page table down to the entry for
 * va at the given level, allocating missing intermediate table
 * pages from up->mmufree.  Returns a pointer to the pte slot,
 * or nil when a table page is needed but up->mmufree is empty
 * (the caller must refill the free list and retry).  Newly
 * allocated table pages are linked onto up->mmuhead[i+1] (with
 * up->mmutail[i+1] tracking the list end) so mmuswitch() and
 * mmufree() can find them later.
 */
static uintptr*
mmuwalk(uintptr va, int level)
{
	uintptr *table, pte;
	Page *pg;
	int i, x;

	x = PTLX(va, PTLEVELS-1);
	table = &m->mmul1[L1TABLEX(va, PTLEVELS-1)];
	for(i = PTLEVELS-2; i >= level; i--){
		pte = table[x];
		if(pte & PTEVALID) {
			/* upper 16 bits should never be set in a table descriptor */
			if(pte & (0xFFFFULL<<48))
				iprint("strange pte %#p va %#p\n", pte, va);
			pte &= ~(0xFFFFULL<<48 | BY2PG-1);	/* strip attributes, keep next-table address */
			table = KADDR(pte);
		} else {
			pg = up->mmufree;
			if(pg == nil)
				return nil;
			up->mmufree = pg->next;
			pg->va = va & -PGLSZ(i+1);
			if((pg->next = up->mmuhead[i+1]) == nil)
				up->mmutail[i+1] = pg;	/* first page at this level is also the tail */
			up->mmuhead[i+1] = pg;
			memset(KADDR(pg->pa), 0, BY2PG);
			coherence();	/* zeroed table must be visible before it is linked in */
			table[x] = pg->pa | PTEVALID | PTETABLE;
			table = KADDR(pg->pa);
		}
		x = PTLX(va, (uintptr)i);
	}
	return &table[x];
}
216 
/* asid -> proc currently owning it; index 0 is reserved (no asid) */
static Proc *asidlist[256];
218 
/*
 * Assign p a tlb address space id in 1..nelem(asidlist)-1.
 * A negative p->asid records an id previously released by
 * putasid(); its absolute value is tried first so a process
 * tends to get its old id back.  A slot may be taken over from
 * a process that has released its id (asid < 0) and is not
 * currently running (mach == nil).  Returns non-zero when p did
 * not simply reclaim its own slot, meaning the caller must
 * flush tlb entries tagged with the new id.
 */
static int
allocasid(Proc *p)
{
	static Lock lk;
	Proc *x;
	int a;

	lock(&lk);
	a = p->asid;
	if(a < 0)
		a = -a;	/* prefer the id we gave up earlier */
	if(a == 0)
		a = p->pid;	/* no history: start the search at the pid */
	for(;; a++){
		a %= nelem(asidlist);
		if(a == 0)
			continue; // reserved
		x = asidlist[a];
		if(x == p || x == nil || (x->asid < 0 && x->mach == nil))
			break;
	}
	p->asid = a;
	asidlist[a] = p;
	unlock(&lk);

	return x != p;
}
246 
247 static void
248 freeasid(Proc *p)
249 {
250  int a;
251 
252  a = p->asid;
253  if(a < 0)
254  a = -a;
255  if(a > 0 && asidlist[a] == p)
256  asidlist[a] = nil;
257  p->asid = 0;
258 }
259 
/*
 * Release p's asid when p is descheduled so allocasid() may give
 * it to someone else; the id is kept negated in p->asid so the
 * process can try to reclaim it later.
 */
void
putasid(Proc *p)
{
	/*
	 * Prevent the following scenario:
	 *	pX sleeps on cpuA, leaving its page tables in mmul1
	 *	pX wakes up on cpuB, and exits, freeing its page tables
	 *	pY on cpuB allocates a freed page table page and overwrites with data
	 *	cpuA takes an interrupt, and is now running with bad page tables
	 * In theory this shouldn't hurt because only user address space tables
	 * are affected, and mmuswitch will clear mmul1 before a user process is
	 * dispatched.  But empirically it correlates with weird problems, eg
	 * resetting of the core clock at 0x4000001C which confuses local timers.
	 */
	if(conf.nmach > 1)
		mmuswitch(nil);

	if(p->asid > 0)
		p->asid = -p->asid;
}
280 
/*
 * Enter a user pte for va -> pa (pa carries attribute bits from
 * the caller) into the current process's page table.  Runs at
 * splhi so an interrupt cannot switch tables mid-update; when
 * mmuwalk runs out of table pages we drop to spllo, refill
 * up->mmufree with a fresh page and retry.
 */
void
putmmu(uintptr va, uintptr pa, Page *pg)
{
	uintptr *pte, old;
	int s;

	s = splhi();
	while((pte = mmuwalk(va, 0)) == nil){
		spllo();
		assert(up->mmufree == nil);
		up->mmufree = newpage(0, nil, 0);
		splhi();
	}
	old = *pte;
	*pte = 0;	/* clear before the flush so the old translation cannot be refetched */
	if((old & PTEVALID) != 0)
		flushasidvall((uvlong)up->asid<<48 | va>>12);	/* stronger flush when replacing a live entry */
	else
		flushasidva((uvlong)up->asid<<48 | va>>12);
	*pte = pa | PTEPAGE | PTEUSER | PTENG | PTEAF | PTESH(SHARE_INNER);
	if(pg->txtflush & (1UL<<m->machno)){
		/* pio() sets PG_TXTFLUSH whenever a text pg has been written */
		cachedwbinvse((void*)KADDR(pg->pa), BY2PG);	/* write back new text to the point of unification */
		cacheiinvse((void*)va, BY2PG);	/* then invalidate stale icache lines for va */
		pg->txtflush &= ~(1UL<<m->machno);
	}
	splx(s);
}
309 
/*
 * Release p's asid and splice all of p's page table pages onto
 * p->mmufree.  NOTE(review): the loop stops at the first empty
 * level list, assuming the lists fill contiguously from level 1
 * up — this holds because mmuwalk is only ever called down to
 * level 0, which allocates the whole chain of levels together.
 */
static void
mmufree(Proc *p)
{
	int i;

	freeasid(p);

	for(i=1; i<PTLEVELS; i++){
		if(p->mmuhead[i] == nil)
			break;
		p->mmutail[i]->next = p->mmufree;	/* prepend the whole level list in O(1) */
		p->mmufree = p->mmuhead[i];
		p->mmuhead[i] = p->mmutail[i] = nil;
	}
}
325 
/*
 * Make process p's address space current on this cpu: wipe the
 * user part of the top-level kernel table, (re)install p's
 * top-level user table pages, and load ttbr with the table
 * address tagged by p's asid.  mmuswitch(nil) switches to the
 * kernel-only map.  p->newtlb set means p's tables are stale
 * and are freed first (they are rebuilt on demand by putmmu).
 */
void
mmuswitch(Proc *p)
{
	uintptr va;
	Page *t;

	/* clear all user entries from the top level */
	for(va = UZERO; va < USTKTOP; va += PGLSZ(PTLEVELS-1))
		m->mmul1[PTL1X(va, PTLEVELS-1)] = 0;

	if(p == nil){
		setttbr(PADDR(&m->mmul1[L1TABLEX(0, PTLEVELS-1)]));
		return;
	}

	if(p->newtlb){
		mmufree(p);
		p->newtlb = 0;
	}

	/* re-enter p's top-level user table pages */
	for(t = p->mmuhead[PTLEVELS-1]; t != nil; t = t->next){
		va = t->va;
		m->mmul1[PTL1X(va, PTLEVELS-1)] = t->pa | PTEVALID | PTETABLE;
	}

	if(allocasid(p))
		flushasid((uvlong)p->asid<<48);	/* id newly taken: kill stale tlb entries under it */

	setttbr((uvlong)p->asid<<48 | PADDR(&m->mmul1[L1TABLEX(0, PTLEVELS-1)]));
}
355 
/*
 * Final teardown of p's address space (process exit): switch
 * this cpu to the kernel map, collect all of p's table pages
 * onto p->mmufree, then hand them back to the page allocator.
 * Each table page must hold exactly one reference.
 */
void
mmurelease(Proc *p)
{
	Page *t;

	mmuswitch(nil);
	mmufree(p);

	if((t = p->mmufree) != nil){
		do {
			p->mmufree = t->next;
			if(--t->ref != 0)
				panic("mmurelease: bad page ref");
			pagechainhead(t);
		} while((t = p->mmufree) != nil);
		pagechaindone();	/* wake anyone waiting for free pages */
	}
}
374 
375 void
376 flushmmu(void)
377 {
378  int x;
379 
380  x = splhi();
381  up->newtlb = 1;
382  mmuswitch(up);
383  splx(x);
384 }
385 
/*
 * Verify that a va/pa pair matches the page tables (debugging
 * hook called from the portable code): not implemented here.
 */
void
checkmmu(uintptr, uintptr)
{
}
389 }