35 |
#include <core/thread.h> |
#include <core/thread.h> |
36 |
#include "apic_pass.h" |
#include "apic_pass.h" |
37 |
#include "asm.h" |
#include "asm.h" |
38 |
|
#include "cpu_mmu.h" |
39 |
#include "mm.h" |
#include "mm.h" |
40 |
#include "vt_internal.h" |
#include "vt_internal.h" |
41 |
#include "constants.h" |
#include "constants.h" |
53 |
/* EPT memory type: write-back (bits 5:3 of EPTP / leaf entries). */
#define EPT_WB 0x6
/* Page-walk length field for EPTP: (levels - 1) shifted into bits 5:3. */
#define EPT_LENGTH ((EPT_MAX_LEVEL - 1) << 3)

/* VM-exit qualification bits for EPT violations (Intel SDM Vol. 3,
 * "Exit Qualification for EPT Violations"). */
#define VMCS_EXIT_QUALIFICATION_READ_BIT 0x1
#define VMCS_EXIT_QUALIFICATION_WRITE_BIT 0x2
#define VMCS_EXIT_QUALIFICATION_INST_BIT 0x4
#define VMCS_EXIT_QUALIFICATION_LINEAR_ADDR_VALID_BIT 0x80

/* Index into the EPT table at 'level' (1 = leaf level) for the
 * guest-physical address 'addr'.  Arguments are fully parenthesized so
 * expression arguments (e.g. ept_offset(base + off, lvl - 1)) expand
 * correctly. */
#define ept_offset(addr, level) \
	((((addr) >> (PAGE_SHIFT + ((level) - 1) * EPT_LEVEL_STRIDE)) \
	  & EPT_LEVEL_MASK))
99 |
if (cpu_is_bsp()) { |
if (cpu_is_bsp()) { |
100 |
printf("Enabling EPT\n"); |
printf("Enabling EPT\n"); |
101 |
} |
} |
102 |
|
|
103 |
current->u.vt.ept_enabled = true; |
current->u.vt.ept_enabled = true; |
104 |
|
|
105 |
if (vm_current_is_vbsp()) { |
spinlock_lock(&vt_data->ept_lock); |
106 |
spinlock_init(&vt_data->ept_lock); |
if (vt_data->ept_l4tbl == 0) { |
107 |
err = alloc_page(&vaddr, &vt_data->ept_l4tbl); |
err = alloc_page(&vaddr, &vt_data->ept_l4tbl); |
108 |
if (err) { |
if (err) { |
109 |
panic("Failed to allocate a ept l4tbl."); |
panic("Failed to allocate a ept l4tbl."); |
110 |
} |
} |
111 |
memset(vaddr, 0, PAGE_SIZE); |
memset(vaddr, 0, PAGE_SIZE); |
112 |
} |
} |
113 |
|
spinlock_unlock(&vt_data->ept_lock); |
114 |
|
|
115 |
ctls = ctls2_or | VMCS_PROC_BASED_VMEXEC_CTL2_EPT_BIT; |
ctls = ctls2_or | VMCS_PROC_BASED_VMEXEC_CTL2_EPT_BIT; |
116 |
asm_vmwrite(VMCS_PROC_BASED_VMEXEC_CTL2, ctls); |
asm_vmwrite(VMCS_PROC_BASED_VMEXEC_CTL2, ctls); |
147 |
|
|
148 |
if ((*pte & EPT_VAILED_MASK) == 0) { |
if ((*pte & EPT_VAILED_MASK) == 0) { |
149 |
ret = alloc_page(&vaddr, &phys); |
ret = alloc_page(&vaddr, &phys); |
150 |
if (ret != 0) { |
if (ret != VMMERR_SUCCESS) { |
151 |
return NULL; |
return NULL; |
152 |
} |
} |
153 |
memset(vaddr, 0, PAGE_SIZE); |
memset(vaddr, 0, PAGE_SIZE); |
164 |
return tbl + ept_offset(gphys, cur_level); |
return tbl + ept_offset(gphys, cur_level); |
165 |
} |
} |
166 |
|
|
167 |
|
static void |
168 |
|
vt_ept_generate_pagefault(vmmerr_t vmmerr) |
169 |
|
{ |
170 |
|
bool wr, us; |
171 |
|
ulong linear_addr; |
172 |
|
ulong qual; |
173 |
|
|
174 |
|
asm_vmread(VMCS_EXIT_QUALIFICATION, &qual); |
175 |
|
if ((qual & VMCS_EXIT_QUALIFICATION_LINEAR_ADDR_VALID_BIT) == 0) { |
176 |
|
panic("EPT violation without linear address. qual 0x%lx", |
177 |
|
qual); |
178 |
|
} |
179 |
|
wr = qual & VMCS_EXIT_QUALIFICATION_WRITE_BIT ? true : false; |
180 |
|
us = seg_user_mode(); |
181 |
|
asm_vmread(VMCS_GUEST_LINEAR_ADDR, &linear_addr); |
182 |
|
printf("vt_ept_generate_pagefault 0x%lx wr %d us %d\n", |
183 |
|
linear_addr, wr, us); |
184 |
|
mmu_generate_pagefault(vmmerr, wr, us, linear_addr); |
185 |
|
} |
186 |
|
|
187 |
void |
void |
188 |
vt_ept_violation(void) |
vt_ept_violation(void) |
189 |
{ |
{ |
200 |
ret = mmio_pagefault(gphys); |
ret = mmio_pagefault(gphys); |
201 |
mmio_unlock(); |
mmio_unlock(); |
202 |
if (ret != VMMERR_NODEV) { |
if (ret != VMMERR_NODEV) { |
203 |
if (ret != VMMERR_SUCCESS && |
if (ret == VMMERR_SUCCESS) { |
204 |
(ret < VMMERR_PAGE_NOT_PRESENT || |
return; |
205 |
ret > VMMERR_PAGE_BAD_RESERVED_BIT)) { |
} |
206 |
|
if (ret < VMMERR_PAGE_NOT_PRESENT || |
207 |
|
ret > VMMERR_PAGE_BAD_RESERVED_BIT) { |
208 |
panic("Failed to emulate accessing to MMIO area. ret 0x%x", |
panic("Failed to emulate accessing to MMIO area. ret 0x%x", |
209 |
ret); |
ret); |
210 |
} |
} |
211 |
|
vt_ept_generate_pagefault(ret); |
212 |
return; |
return; |
213 |
} |
} |
214 |
|
|
215 |
hphys = current->vm->gmm.gp2hp(gphys); |
hphys = current->vm->gmm.gp2hp(gphys); |
216 |
if (hphys == GMM_NO_MAPPING) { |
if (hphys == GMM_NO_MAPPING) { |
217 |
ret = cpu_interpreter(); |
ret = cpu_interpreter(); |
218 |
if (ret != VMMERR_SUCCESS && |
if (ret == VMMERR_SUCCESS) { |
219 |
(ret < VMMERR_PAGE_NOT_PRESENT || |
return; |
220 |
ret > VMMERR_PAGE_BAD_RESERVED_BIT)) { |
} |
221 |
|
if (ret < VMMERR_PAGE_NOT_PRESENT || |
222 |
|
ret > VMMERR_PAGE_BAD_RESERVED_BIT) { |
223 |
panic("Failed to emulate accessing to no mapping area. ret 0x%x", |
panic("Failed to emulate accessing to no mapping area. ret 0x%x", |
224 |
ret); |
ret); |
225 |
} |
} |
226 |
|
vt_ept_generate_pagefault(ret); |
227 |
return; |
return; |
228 |
} |
} |
229 |
|
|