#include <vm/page.h>

/*
 * State for naming pages created using vbi_large_page_alloc().
 *
 * Every page allocated here hangs off the single anonymous vnode
 * Vboxvp and is identified by a <Vboxvp, offset> pair.  Vboxoff is a
 * monotonically increasing offset cursor, advanced under
 * Vboxoff_mutex, so each allocation receives a unique, pgsz-aligned
 * offset range within Vboxvp.
 */
static kmutex_t Vboxoff_mutex;	/* protects Vboxoff */
static u_offset_t Vboxoff;	/* next unassigned offset in Vboxvp */
static vnode_t Vboxvp;		/* anonymous vnode naming our pages */
| 9 |
|
|---|
| 10 | /*
|
|---|
| 11 | * Allocate one large page, of size "pgsz" bytes. "cansleep" is non-zero
|
|---|
| 12 | * if the allocation can block. Note that even if cansleep is non-zero,
|
|---|
| 13 | * the allocation may fail.
|
|---|
| 14 | *
|
|---|
| 15 | * On success, the physical address corresponding to the page will be
|
|---|
| 16 | * stored in *pphys, and the page_t will be returned SE_SHARED-locked.
|
|---|
| 17 | * On failure, NULL is returned.
|
|---|
| 18 | */
|
|---|
| 19 | page_t *
|
|---|
| 20 | vbi_large_page_alloc(uint64_t *pphys, size_t pgsz, uint_t cansleep)
|
|---|
| 21 | {
|
|---|
| 22 | const pgcnt_t npgs = btop(pgsz);
|
|---|
| 23 | const uint_t pcflags =
|
|---|
| 24 | (cansleep ? PG_WAIT : 0) | PG_NORELOC | PG_EXCL;
|
|---|
| 25 | struct seg kseg;
|
|---|
| 26 | page_t *pplist, *rpp;
|
|---|
| 27 | u_offset_t off;
|
|---|
| 28 | uint_t i;
|
|---|
| 29 |
|
|---|
| 30 | ASSERT3U(pgsz, >, PAGESIZE);
|
|---|
| 31 |
|
|---|
| 32 | if (page_resv(npgs, KM_NOSLEEP) == 0) {
|
|---|
| 33 | return (NULL);
|
|---|
| 34 | }
|
|---|
| 35 |
|
|---|
| 36 | mutex_enter(&Vboxoff_mutex);
|
|---|
| 37 | off = P2ROUNDUP(Vboxoff, pgsz);
|
|---|
| 38 | Vboxoff = off + pgsz;
|
|---|
| 39 | mutex_exit(&Vboxoff_mutex);
|
|---|
| 40 |
|
|---|
| 41 | kseg.s_as = &kas;
|
|---|
| 42 | pplist = page_create_va_large(&Vboxvp, off, pgsz,
|
|---|
| 43 | pcflags, &kseg, 0, NULL);
|
|---|
| 44 | if (pplist == NULL) {
|
|---|
| 45 | page_unresv(npgs);
|
|---|
| 46 | return (NULL);
|
|---|
| 47 | }
|
|---|
| 48 |
|
|---|
| 49 | rpp = pplist;
|
|---|
| 50 | for (i = 0; i < npgs; i++) {
|
|---|
| 51 | page_t *pp;
|
|---|
| 52 |
|
|---|
| 53 | ASSERT(pplist != NULL);
|
|---|
| 54 | pp = pplist;
|
|---|
| 55 | page_sub(&pplist, pp);
|
|---|
| 56 | page_io_unlock(pp);
|
|---|
| 57 | }
|
|---|
| 58 |
|
|---|
| 59 | *pphys = ptob(rpp->p_pagenum);
|
|---|
| 60 | return (rpp);
|
|---|
| 61 | }
|
|---|
| 62 |
|
|---|
| 63 | /*
|
|---|
| 64 | * Free one large page that was allocated by vbi_large_page_alloc().
|
|---|
| 65 | */
|
|---|
| 66 | void
|
|---|
| 67 | vbi_large_page_free(page_t *rpp, size_t pgsz)
|
|---|
| 68 | {
|
|---|
| 69 | const pgcnt_t npgs = btop(pgsz);
|
|---|
| 70 | uint_t i;
|
|---|
| 71 |
|
|---|
| 72 | ASSERT(IS_P2ALIGNED(pp->p_pagenum, npgs));
|
|---|
| 73 | ASSERT3U(pgsz, >, PAGESIZE);
|
|---|
| 74 | ASSERT3U(page_get_pagesize(pp->p_szc), ==, pgsz);
|
|---|
| 75 |
|
|---|
| 76 | for (i = 0; i < npgs; i++) {
|
|---|
| 77 | page_t *const pp = rpp + i;
|
|---|
| 78 | u_offset_t off = pp->p_offset;
|
|---|
| 79 |
|
|---|
| 80 | if (!page_tryupgrade(pp)) {
|
|---|
| 81 | page_t *npp;
|
|---|
| 82 |
|
|---|
| 83 | page_unlock(pp);
|
|---|
| 84 | npp = page_lookup(&Vboxvp, off, SE_EXCL);
|
|---|
| 85 | if (npp != pp) {
|
|---|
| 86 | panic("vbi_large_page_free(): lookup of
|
|---|
| 87 | <%p, %lx> returned %p rather than %p",
|
|---|
| 88 | &Vboxvp, off, npp, pp);
|
|---|
| 89 | }
|
|---|
| 90 | }
|
|---|
| 91 | }
|
|---|
| 92 |
|
|---|
| 93 | page_destroy_pages(rpp);
|
|---|
| 94 | page_unresv(npgs);
|
|---|
| 95 | }
|
|---|