Implement dynamic MTRRs

This commit is contained in:
RichardG867
2020-04-18 00:45:20 -03:00
parent 2cf88a5011
commit 984bfc8ad1
5 changed files with 328 additions and 28 deletions

View File

@@ -2854,7 +2854,7 @@ i686_invalid_rdmsr:
void cpu_WRMSR()
{
uint64_t temp;
uint64_t temp, temp2;
cpu_log("WRMSR %08X %08X%08X\n", ECX, EDX, EAX);
switch (machines[machine].cpu[cpu_manufacturer].cpus[cpu_effective].cpu_type)
@@ -2923,10 +2923,23 @@ void cpu_WRMSR()
break;
case 0x200: case 0x201: case 0x202: case 0x203: case 0x204: case 0x205: case 0x206: case 0x207:
case 0x208: case 0x209: case 0x20A: case 0x20B: case 0x20C: case 0x20D: case 0x20E: case 0x20F:
if (ECX & 1)
mtrr_physmask_msr[(ECX - 0x200) >> 1] = EAX | ((uint64_t)EDX << 32);
else
mtrr_physbase_msr[(ECX - 0x200) >> 1] = EAX | ((uint64_t)EDX << 32);
temp = EAX | ((uint64_t)EDX << 32);
temp2 = (ECX - 0x200) >> 1;
if (ECX & 1) {
cpu_log("MTRR physmask[%d] = %08llx\n", temp2, temp);
if ((mtrr_physmask_msr[temp2] >> 11) & 0x1)
mem_del_mtrr(mtrr_physbase_msr[temp2] & ~(0xFFF), mtrr_physmask_msr[temp2] & ~(0xFFF));
if ((temp >> 11) & 0x1)
mem_add_mtrr(mtrr_physbase_msr[temp2] & ~(0xFFF), temp & ~(0xFFF), mtrr_physbase_msr[temp2] & 0xFF);
mtrr_physmask_msr[temp2] = temp;
} else {
cpu_log("MTRR physbase[%d] = %08llx\n", temp2, temp);
mtrr_physbase_msr[temp2] = temp;
}
break;
case 0x250:
mtrr_fix64k_8000_msr = EAX | ((uint64_t)EDX << 32);
@@ -3222,11 +3235,24 @@ void cpu_WRMSR()
break;
case 0x200: case 0x201: case 0x202: case 0x203: case 0x204: case 0x205: case 0x206: case 0x207:
case 0x208: case 0x209: case 0x20A: case 0x20B: case 0x20C: case 0x20D: case 0x20E: case 0x20F:
if (ECX & 1)
mtrr_physmask_msr[(ECX - 0x200) >> 1] = EAX | ((uint64_t)EDX << 32);
else
mtrr_physbase_msr[(ECX - 0x200) >> 1] = EAX | ((uint64_t)EDX << 32);
break;
temp = EAX | ((uint64_t)EDX << 32);
temp2 = (ECX - 0x200) >> 1;
if (ECX & 1) {
cpu_log("MTRR physmask[%d] = %08llx\n", temp2, temp);
if ((mtrr_physmask_msr[temp2] >> 11) & 0x1)
mem_del_mtrr(mtrr_physbase_msr[temp2] & ~(0xFFF), mtrr_physmask_msr[temp2] & ~(0xFFF));
if ((temp >> 11) & 0x1)
mem_add_mtrr(mtrr_physbase_msr[temp2] & ~(0xFFF), temp & ~(0xFFF), mtrr_physbase_msr[temp2] & 0xFF);
mtrr_physmask_msr[temp2] = temp;
} else {
cpu_log("MTRR physbase[%d] = %08llx\n", temp2, temp);
mtrr_physbase_msr[temp2] = temp;
}
break;
case 0x250:
mtrr_fix64k_8000_msr = EAX | ((uint64_t)EDX << 32);
break;
@@ -3258,6 +3284,11 @@ i686_invalid_wrmsr:
}
}
/* INVD/WBINVD entry point: re-synchronizes the dynamic MTRR shadow areas
   with memory. wb is non-zero for WBINVD (write shadow contents back to
   memory before refilling); zero for INVD (discard and refill only). */
void cpu_INVD(uint8_t wb)
{
mem_invalidate_mtrr(wb);
}
static int cyrix_addr;
static void cpu_write(uint16_t addr, uint8_t val, void *priv)

View File

@@ -515,6 +515,7 @@ extern void cpu_set(void);
extern void cpu_CPUID(void);
extern void cpu_RDMSR(void);
extern void cpu_WRMSR(void);
extern void cpu_INVD(uint8_t wb);
extern int checkio(int port);
extern void codegen_block_end(void);

View File

@@ -743,12 +743,14 @@ static int opCLTS(uint32_t fetchdat)
/* 0F 08 INVD: invalidate caches without writeback (wb = 0), charge the
   instruction's cycle cost, and end the current translated block so the
   MTRR shadow changes take effect before the next block runs. */
static int opINVD(uint32_t fetchdat)
{
cpu_INVD(0);
CLOCK_CYCLES(1000);
CPU_BLOCK_END();
return 0;
}
static int opWBINVD(uint32_t fetchdat)
{
cpu_INVD(1);
CLOCK_CYCLES(10000);
CPU_BLOCK_END();
return 0;

View File

@@ -330,6 +330,10 @@ extern void mem_init(void);
extern void mem_reset(void);
extern void mem_remap_top(int kb);
extern void mem_add_mtrr(uint64_t base, uint64_t mask, uint8_t type);
extern void mem_del_mtrr(uint64_t base, uint64_t mask);
extern void mem_invalidate_mtrr(uint8_t wb);
#ifdef EMU_CPU_H
static __inline uint32_t get_phys(uint32_t addr)

298
src/mem.c
View File

@@ -121,6 +121,8 @@ static mem_mapping_t *read_mapping[MEM_MAPPINGS_NO];
static mem_mapping_t *write_mapping[MEM_MAPPINGS_NO];
static uint8_t *_mem_exec[MEM_MAPPINGS_NO];
static int _mem_state[MEM_MAPPINGS_NO];
static uint8_t *mtrr_areas[MEM_MAPPINGS_NO];
static uint8_t mtrr_area_refcounts[MEM_MAPPINGS_NO];
#if FIXME
#if (MEM_GRANULARITY_BITS >= 12)
@@ -650,6 +652,8 @@ uint8_t
readmembl(uint32_t addr)
{
uint64_t addr64 = (uint64_t) addr;
uint32_t page;
uint8_t *mtrr;
mem_mapping_t *map;
mem_logical_addr = addr;
@@ -663,7 +667,12 @@ readmembl(uint32_t addr)
}
addr = (uint32_t) (addr64 & rammask);
map = read_mapping[addr >> MEM_GRANULARITY_BITS];
page = (addr >> MEM_GRANULARITY_BITS);
mtrr = mtrr_areas[page];
if (mtrr)
return mtrr[addr & MEM_GRANULARITY_MASK];
map = read_mapping[page];
if (map && map->read_b)
return map->read_b(addr, map->p);
@@ -675,6 +684,8 @@ void
writemembl(uint32_t addr, uint8_t val)
{
uint64_t addr64 = (uint64_t) addr;
uint32_t page;
uint8_t *mtrr;
mem_mapping_t *map;
mem_logical_addr = addr;
@@ -692,7 +703,14 @@ writemembl(uint32_t addr, uint8_t val)
}
addr = (uint32_t) (addr64 & rammask);
map = write_mapping[addr >> MEM_GRANULARITY_BITS];
page = (addr >> MEM_GRANULARITY_BITS);
mtrr = mtrr_areas[page];
if (mtrr) {
mtrr[addr & MEM_GRANULARITY_MASK] = val;
return;
}
map = write_mapping[page];
if (map && map->write_b)
map->write_b(addr, val, map->p);
}
@@ -703,6 +721,8 @@ uint16_t
readmemwl(uint32_t addr)
{
uint64_t addr64 = (uint64_t) addr;
uint32_t page;
uint8_t *mtrr;
mem_mapping_t *map;
mem_logical_addr = addr;
@@ -731,7 +751,12 @@ readmemwl(uint32_t addr)
addr = (uint32_t) (addr64 & rammask);
map = read_mapping[addr >> MEM_GRANULARITY_BITS];
page = (addr >> MEM_GRANULARITY_BITS);
mtrr = mtrr_areas[page];
if (mtrr)
return mtrr[addr & MEM_GRANULARITY_MASK] | ((uint16_t) (mtrr[(addr + 1) & MEM_GRANULARITY_MASK]) << 8);
map = read_mapping[page];
if (map && map->read_w)
return map->read_w(addr, map->p);
@@ -747,6 +772,8 @@ void
writememwl(uint32_t addr, uint16_t val)
{
uint64_t addr64 = (uint64_t) addr;
uint32_t page;
uint8_t *mtrr;
mem_mapping_t *map;
mem_logical_addr = addr;
@@ -784,7 +811,15 @@ writememwl(uint32_t addr, uint16_t val)
addr = (uint32_t) (addr64 & rammask);
map = write_mapping[addr >> MEM_GRANULARITY_BITS];
page = (addr >> MEM_GRANULARITY_BITS);
mtrr = mtrr_areas[page];
if (mtrr) {
mtrr[addr & MEM_GRANULARITY_MASK] = val;
mtrr[(addr + 1) & MEM_GRANULARITY_MASK] = val >> 8;
return;
}
map = write_mapping[page];
if (map) {
if (map->write_w)
map->write_w(addr, val, map->p);
@@ -800,6 +835,8 @@ uint32_t
readmemll(uint32_t addr)
{
uint64_t addr64 = (uint64_t) addr;
uint32_t page;
uint8_t *mtrr;
mem_mapping_t *map;
mem_logical_addr = addr;
@@ -829,7 +866,12 @@ readmemll(uint32_t addr)
addr = (uint32_t) (addr64 & rammask);
map = read_mapping[addr >> MEM_GRANULARITY_BITS];
page = (addr >> MEM_GRANULARITY_BITS);
mtrr = mtrr_areas[page];
if (mtrr)
return mtrr[addr & MEM_GRANULARITY_MASK] | ((uint32_t) (mtrr[(addr + 1) & MEM_GRANULARITY_MASK]) << 8) | ((uint32_t) (mtrr[(addr + 2) & MEM_GRANULARITY_MASK]) << 16) | ((uint32_t) (mtrr[(addr + 3) & MEM_GRANULARITY_MASK]) << 24);
map = read_mapping[page];
if (map) {
if (map->read_l)
return map->read_l(addr, map->p);
@@ -850,6 +892,8 @@ void
writememll(uint32_t addr, uint32_t val)
{
uint64_t addr64 = (uint64_t) addr;
uint32_t page;
uint8_t *mtrr;
mem_mapping_t *map;
mem_logical_addr = addr;
@@ -886,7 +930,17 @@ writememll(uint32_t addr, uint32_t val)
addr = (uint32_t) (addr64 & rammask);
map = write_mapping[addr >> MEM_GRANULARITY_BITS];
page = (addr >> MEM_GRANULARITY_BITS);
mtrr = mtrr_areas[page];
if (mtrr) {
mtrr[addr & MEM_GRANULARITY_MASK] = val;
mtrr[(addr + 1) & MEM_GRANULARITY_MASK] = val >> 8;
mtrr[(addr + 2) & MEM_GRANULARITY_MASK] = val >> 16;
mtrr[(addr + 3) & MEM_GRANULARITY_MASK] = val >> 24;
return;
}
map = write_mapping[page];
if (map) {
if (map->write_l)
map->write_l(addr, val, map->p);
@@ -907,6 +961,8 @@ uint64_t
readmemql(uint32_t addr)
{
uint64_t addr64 = (uint64_t) addr;
uint32_t page;
uint8_t *mtrr;
mem_mapping_t *map;
mem_logical_addr = addr;
@@ -935,7 +991,12 @@ readmemql(uint32_t addr)
addr = (uint32_t) (addr64 & rammask);
map = read_mapping[addr >> MEM_GRANULARITY_BITS];
page = (addr >> MEM_GRANULARITY_BITS);
mtrr = mtrr_areas[page];
if (mtrr)
return readmemll(addr) | ((uint64_t)readmemll(addr+4)<<32);
map = read_mapping[page];
if (map && map->read_l)
return map->read_l(addr, map->p) | ((uint64_t)map->read_l(addr + 4, map->p) << 32);
@@ -947,6 +1008,8 @@ void
writememql(uint32_t addr, uint64_t val)
{
uint64_t addr64 = (uint64_t) addr;
uint32_t page;
uint8_t *mtrr;
mem_mapping_t *map;
mem_logical_addr = addr;
@@ -983,7 +1046,21 @@ writememql(uint32_t addr, uint64_t val)
addr = (uint32_t) (addr64 & rammask);
map = write_mapping[addr >> MEM_GRANULARITY_BITS];
page = (addr >> MEM_GRANULARITY_BITS);
mtrr = mtrr_areas[page];
if (mtrr) {
mtrr[addr & MEM_GRANULARITY_MASK] = val;
mtrr[(addr + 1) & MEM_GRANULARITY_MASK] = val >> 8;
mtrr[(addr + 2) & MEM_GRANULARITY_MASK] = val >> 16;
mtrr[(addr + 3) & MEM_GRANULARITY_MASK] = val >> 24;
mtrr[(addr + 4) & MEM_GRANULARITY_MASK] = val >> 32;
mtrr[(addr + 5) & MEM_GRANULARITY_MASK] = val >> 40;
mtrr[(addr + 6) & MEM_GRANULARITY_MASK] = val >> 48;
mtrr[(addr + 7) & MEM_GRANULARITY_MASK] = val >> 56;
return;
}
map = write_mapping[page];
if (map) {
if (map->write_l) {
map->write_l(addr, val, map->p);
@@ -1024,6 +1101,8 @@ uint16_t
readmemwl(uint32_t seg, uint32_t addr)
{
uint64_t addr64 = (uint64_t) addr;
uint32_t page;
uint8_t *mtrr;
mem_mapping_t *map;
uint32_t addr2 = mem_logical_addr = seg + addr;
@@ -1055,7 +1134,12 @@ readmemwl(uint32_t seg, uint32_t addr)
addr2 = (uint32_t) (addr64 & rammask);
map = read_mapping[addr2 >> MEM_GRANULARITY_BITS];
page = (addr2 >> MEM_GRANULARITY_BITS);
mtrr = mtrr_areas[page];
if (mtrr)
return mtrr[addr2 & MEM_GRANULARITY_MASK] | ((uint16_t) (mtrr[(addr2 + 1) & MEM_GRANULARITY_MASK]) << 8);
map = read_mapping[page];
if (map && map->read_w)
return map->read_w(addr2, map->p);
@@ -1077,6 +1161,8 @@ void
writememwl(uint32_t seg, uint32_t addr, uint16_t val)
{
uint64_t addr64 = (uint64_t) addr;
uint32_t page;
uint8_t *mtrr;
mem_mapping_t *map;
uint32_t addr2 = mem_logical_addr = seg + addr;
@@ -1118,7 +1204,15 @@ writememwl(uint32_t seg, uint32_t addr, uint16_t val)
addr2 = (uint32_t) (addr64 & rammask);
map = write_mapping[addr2 >> MEM_GRANULARITY_BITS];
page = (addr2 >> MEM_GRANULARITY_BITS);
mtrr = mtrr_areas[page];
if (mtrr) {
mtrr[addr2 & MEM_GRANULARITY_MASK] = val;
mtrr[(addr2 + 1) & MEM_GRANULARITY_MASK] = val >> 8;
return;
}
map = write_mapping[page];
if (map && map->write_w) {
map->write_w(addr2, val, map->p);
@@ -1137,6 +1231,8 @@ uint32_t
readmemll(uint32_t seg, uint32_t addr)
{
uint64_t addr64 = (uint64_t) addr;
uint32_t page;
uint8_t *mtrr;
mem_mapping_t *map;
uint32_t addr2 = mem_logical_addr = seg + addr;
@@ -1164,7 +1260,12 @@ readmemll(uint32_t seg, uint32_t addr)
addr2 = (uint32_t) (addr64 & rammask);
map = read_mapping[addr2 >> MEM_GRANULARITY_BITS];
page = (addr2 >> MEM_GRANULARITY_BITS);
mtrr = mtrr_areas[page];
if (mtrr)
return mtrr[addr2 & MEM_GRANULARITY_MASK] | ((uint32_t) (mtrr[(addr2 + 1) & MEM_GRANULARITY_MASK]) << 8) | ((uint32_t) (mtrr[(addr2 + 2) & MEM_GRANULARITY_MASK]) << 16) | ((uint32_t) (mtrr[(addr2 + 3) & MEM_GRANULARITY_MASK]) << 24);
map = read_mapping[page];
if (map && map->read_l)
return map->read_l(addr2, map->p);
@@ -1187,6 +1288,8 @@ void
writememll(uint32_t seg, uint32_t addr, uint32_t val)
{
uint64_t addr64 = (uint64_t) addr;
uint32_t page;
uint8_t *mtrr;
mem_mapping_t *map;
uint32_t addr2 = mem_logical_addr = seg + addr;
@@ -1223,7 +1326,17 @@ writememll(uint32_t seg, uint32_t addr, uint32_t val)
addr2 = (uint32_t) (addr64 & rammask);
map = write_mapping[addr2 >> MEM_GRANULARITY_BITS];
page = (addr2 >> MEM_GRANULARITY_BITS);
mtrr = mtrr_areas[page];
if (mtrr) {
mtrr[addr2 & MEM_GRANULARITY_MASK] = val;
mtrr[(addr2 + 1) & MEM_GRANULARITY_MASK] = val >> 8;
mtrr[(addr2 + 2) & MEM_GRANULARITY_MASK] = val >> 16;
mtrr[(addr2 + 3) & MEM_GRANULARITY_MASK] = val >> 24;
return;
}
map = write_mapping[page];
if (map && map->write_l) {
map->write_l(addr2, val, map->p);
@@ -1248,6 +1361,8 @@ uint64_t
readmemql(uint32_t seg, uint32_t addr)
{
uint64_t addr64 = (uint64_t) addr;
uint32_t page;
uint8_t *mtrr;
mem_mapping_t *map;
uint32_t addr2 = mem_logical_addr = seg + addr;
@@ -1274,7 +1389,12 @@ readmemql(uint32_t seg, uint32_t addr)
addr2 = (uint32_t) (addr64 & rammask);
map = read_mapping[addr2 >> MEM_GRANULARITY_BITS];
page = (addr2 >> MEM_GRANULARITY_BITS);
mtrr = mtrr_areas[page];
if (mtrr)
return readmemll(seg,addr) | ((uint64_t)readmemll(seg,addr+4)<<32);
map = read_mapping[page];
if (map && map->read_l)
return map->read_l(addr2, map->p) | ((uint64_t)map->read_l(addr2 + 4, map->p) << 32);
@@ -1286,6 +1406,8 @@ void
writememql(uint32_t seg, uint32_t addr, uint64_t val)
{
uint64_t addr64 = (uint64_t) addr;
uint32_t page;
uint8_t *mtrr;
mem_mapping_t *map;
uint32_t addr2 = mem_logical_addr = seg + addr;
@@ -1322,7 +1444,21 @@ writememql(uint32_t seg, uint32_t addr, uint64_t val)
addr2 = (uint32_t) (addr64 & rammask);
map = write_mapping[addr2 >> MEM_GRANULARITY_BITS];
page = (addr2 >> MEM_GRANULARITY_BITS);
mtrr = mtrr_areas[page];
if (mtrr) {
mtrr[addr2 & MEM_GRANULARITY_MASK] = val;
mtrr[(addr2 + 1) & MEM_GRANULARITY_MASK] = val >> 8;
mtrr[(addr2 + 2) & MEM_GRANULARITY_MASK] = val >> 16;
mtrr[(addr2 + 3) & MEM_GRANULARITY_MASK] = val >> 24;
mtrr[(addr2 + 4) & MEM_GRANULARITY_MASK] = val >> 32;
mtrr[(addr2 + 5) & MEM_GRANULARITY_MASK] = val >> 40;
mtrr[(addr2 + 6) & MEM_GRANULARITY_MASK] = val >> 48;
mtrr[(addr2 + 7) & MEM_GRANULARITY_MASK] = val >> 56;
return;
}
map = write_mapping[page];
if (map && map->write_l) {
map->write_l(addr2, val, map->p);
@@ -2320,20 +2456,29 @@ mem_log("MEM: reset: new pages=%08lx, pages_sz=%i\n", pages, pages_sz);
memset(pages, 0x00, pages_sz*sizeof(page_t));
for (c = 0; c < MEM_MAPPINGS_NO; c++) {
if (mtrr_areas[c]) {
free(mtrr_areas[c]);
mtrr_areas[c] = 0;
}
mtrr_area_refcounts[c] = 0;
}
#ifdef USE_NEW_DYNAREC
if (byte_dirty_mask) {
free(byte_dirty_mask);
byte_dirty_mask = NULL;
}
byte_dirty_mask = malloc((mem_size * 1024) / 8);
memset(byte_dirty_mask, 0, (mem_size * 1024) / 8);
//if (m != 256) fatal("bdm %d\n", ((uint64_t) pages_sz * 4096) / 8);
byte_dirty_mask = malloc(((uint64_t) pages_sz * 4096) / 8);
memset(byte_dirty_mask, 0, ((uint64_t) pages_sz * 4096) / 8);
if (byte_code_present_mask) {
free(byte_code_present_mask);
byte_code_present_mask = NULL;
}
byte_code_present_mask = malloc((mem_size * 1024) / 8);
memset(byte_code_present_mask, 0, (mem_size * 1024) / 8);
byte_code_present_mask = malloc(((uint64_t) pages_sz * 4096) / 8);
memset(byte_code_present_mask, 0, ((uint64_t) pages_sz * 4096) / 8);
#endif
for (c = 0; c < pages_sz; c++) {
@@ -2436,6 +2581,8 @@ mem_init(void)
writelookup2 = malloc((1<<20)*sizeof(uintptr_t));
#endif
memset(mtrr_areas, 0x00, MEM_MAPPINGS_NO*sizeof(uint8_t *));
#if FIXME
memset(ff_array, 0xff, sizeof(ff_array));
#endif
@@ -2533,3 +2680,118 @@ mem_a20_recalc(void)
mem_a20_state = state;
}
/* Activate a variable-range MTRR by shadowing each covered granularity
   page in a locally-allocated buffer (mtrr_areas[]), pre-filled from the
   current memory contents so subsequent accesses hit the shadow.
   base/mask are the page-aligned values from the MTRR physbase/physmask
   MSR pair; type is the memory type from physbase bits 7:0 (passed to the
   log only — the shadow treats all types identically). */
void
mem_add_mtrr(uint64_t base, uint64_t mask, uint8_t type)
{
/* range size follows from the mask: size = (~mask + 1) in the low 32 bits */
uint64_t size = ((~mask) & 0xffffffff) + 1;
uint64_t page_base, page, addr;
uint8_t *mtrr;
mem_log("Adding MTRR base=%08llx mask=%08llx size=%08llx type=%d\n", base, mask, size, type);
/* refuse large ranges: each page costs a MEM_GRANULARITY_SIZE allocation */
if (size > 0x8000) {
mem_log("Ignoring MTRR, size too big\n");
return;
}
/* RAM-backed ranges are left to the normal mapping path */
if (mem_addr_is_ram(base)) {
mem_log("Ignoring MTRR, base is in RAM\n");
return;
}
for (page_base = base; page_base < base + size; page_base += MEM_GRANULARITY_SIZE) {
page = (page_base >> MEM_GRANULARITY_BITS);
if (mtrr_areas[page]) {
/* area already allocated, increase refcount and don't allocate it again */
/* NOTE(review): a freshly allocated page below leaves its refcount at 0,
   so the count tracks *extra* references beyond the first — confirm
   mem_del_mtrr() honors the same convention. */
mtrr_area_refcounts[page]++;
continue;
}
/* allocate area */
mtrr = malloc(MEM_GRANULARITY_SIZE);
if (!mtrr)
fatal("Failed to allocate page for MTRR page %08llx (errno=%d)\n", page_base, errno);
/* populate area with data from RAM */
for (addr = 0; addr < MEM_GRANULARITY_SIZE; addr++) {
mtrr[addr] = readmembl(page_base | addr);
}
/* enable area (mtrr_areas[page] was NULL up to here, so readmembl above
   read through the real mapping, not the shadow) */
mtrr_areas[page] = mtrr;
}
}
/* Deactivate a variable-range MTRR previously registered with
   mem_add_mtrr(). base/mask must match the values passed to the add call.
   mtrr_area_refcounts[] counts *extra* references beyond the first (a page
   added exactly once carries a refcount of 0), so the shadow area is freed
   only when no extra references remain. */
void
mem_del_mtrr(uint64_t base, uint64_t mask)
{
/* same size derivation as mem_add_mtrr(): size = (~mask + 1), low 32 bits */
uint64_t size = ((~mask) & 0xffffffff) + 1;
uint64_t page_base, page;
mem_log("Deleting MTRR base=%08llx mask=%08llx size=%08llx\n", base, mask, size);
/* mirror the add-side filters so add/del stay symmetric */
if (size > 0x8000) {
mem_log("Ignoring MTRR, size too big\n");
return;
}
if (mem_addr_is_ram(base)) {
mem_log("Ignoring MTRR, base is in RAM\n");
return;
}
for (page_base = base; page_base < base + size; page_base += MEM_GRANULARITY_SIZE) {
page = (page_base >> MEM_GRANULARITY_BITS);
if (mtrr_areas[page]) {
if (mtrr_area_refcounts[page] > 0) {
/* extra references remain: drop one and keep the area alive.
   (Bug fix: the previous code decremented and then also freed
   the area when the count reached 0 in the same call, releasing
   two references per delete — a page added twice was freed by a
   single delete, orphaning the still-active MTRR.) */
mtrr_area_refcounts[page]--;
} else {
/* last reference gone: de-allocate area */
free(mtrr_areas[page]);
mtrr_areas[page] = 0;
}
}
}
}
/* INVD/WBINVD support: re-synchronize every active MTRR shadow area with
   the underlying memory. If wb is non-zero (WBINVD), the shadow contents
   are written back to memory first; either way each area is then refilled
   from memory. */
void
mem_invalidate_mtrr(uint8_t wb)
{
uint64_t page, page_base, addr;
uint8_t *mtrr;
mem_log("Invalidating cache (writeback=%d)\n", wb);
for (page = 0; page < MEM_MAPPINGS_NO; page++) {
mtrr = mtrr_areas[page];
if (mtrr) {
page_base = (page << MEM_GRANULARITY_BITS);
/* NOTE(review): mem_add_mtrr() refuses RAM-backed bases, so active
   shadow pages are expected to be non-RAM — this filter would then
   skip every active page. Confirm whether the condition is inverted
   or whether RAM-backed shadows can arise some other way. */
if (!mem_addr_is_ram(page_base))
continue; /* don't invalidate pages not backed by RAM */
/* temporarily set area aside: clearing the slot makes the
   writemembl/readmembl calls below hit the real mapping instead
   of looping back into this shadow */
mtrr_areas[page] = 0;
/* write data back to memory if requested */
if (wb && write_mapping[page]) { /* don't write back to a page which can't be written to */
for (addr = 0; addr < MEM_GRANULARITY_SIZE; addr++) {
writemembl(page_base | addr, mtrr[addr]);
}
}
/* re-populate area with data from memory */
for (addr = 0; addr < MEM_GRANULARITY_SIZE; addr++) {
mtrr[addr] = readmembl(page_base | addr);
}
/* re-enable area */
mtrr_areas[page] = mtrr;
}
}
}