OLD | NEW |
| (Empty) |
1 /* | |
2 * Copyright (c) 2012 The Native Client Authors. All rights reserved. | |
3 * Use of this source code is governed by a BSD-style license that can be | |
4 * found in the LICENSE file. | |
5 */ | |
6 | |
7 /* | |
8 * This file contains common parts of x86-32 and x86-64 internals (inline | |
9 * functions and defines). | |
10 */ | |
11 | |
12 #ifndef NATIVE_CLIENT_SRC_TRUSTED_VALIDATOR_RAGEL_VALIDATOR_INTERNAL_H_ | |
13 #define NATIVE_CLIENT_SRC_TRUSTED_VALIDATOR_RAGEL_VALIDATOR_INTERNAL_H_ | |
14 | |
15 #include "native_client/src/shared/platform/nacl_check.h" | |
16 #include "native_client/src/shared/utils/types.h" | |
17 #include "native_client/src/trusted/validator_ragel/unreviewed/decoding.h" | |
18 #include "native_client/src/trusted/validator_ragel/unreviewed/validator.h" | |
19 | |
/* Maximum set of R-DFA allowable CPUID features. */
/* NOTE(review): defined once in the validator implementation; every
 * CPUFeature_*_Allowed macro below indexes this table. */
extern const NaClCPUFeaturesX86 validator_cpuid_features;
22 | |
/* Macros to support CPUID handling. */
/*
 * Check that feature F is both allowed by validator policy (F##_Allowed)
 * and actually supported by the CPU under validation (F); accumulate any
 * failure into instruction_info_collected.
 * NOTE(review): the expansion is two bare if statements, so using this
 * macro unbraced before an `else` would mis-associate the else — confirm
 * all call sites use it as a standalone statement before restructuring.
 */
#define SET_CPU_FEATURE(F) \
  if (!(F##_Allowed)) { \
    instruction_info_collected |= UNRECOGNIZED_INSTRUCTION; \
  } \
  if (!(F)) { \
    instruction_info_collected |= CPUID_UNSUPPORTED_INSTRUCTION; \
  }
/* Each CPUFeature_* macro tests the corresponding feature bit of the CPU
 * under validation.  Composite macros are parenthesized so they expand
 * safely inside larger expressions. */
#define CPUFeature_3DNOW cpu_features->data[NaClCPUFeature_3DNOW]
/*
 * AMD documentation claims PREFETCH is always available if CPUFeature_LM is
 * present, but Intel documentation does not even mention it!
 * Keep it as a 3DNow! instruction.
 */
#define CPUFeature_3DPRFTCH (CPUFeature_3DNOW || CPUFeature_PRE)
#define CPUFeature_AES cpu_features->data[NaClCPUFeature_AES]
#define CPUFeature_AESAVX (CPUFeature_AES && CPUFeature_AVX)
#define CPUFeature_AVX cpu_features->data[NaClCPUFeature_AVX]
#define CPUFeature_BMI1 cpu_features->data[NaClCPUFeature_BMI1]
#define CPUFeature_CLFLUSH cpu_features->data[NaClCPUFeature_CLFLUSH]
#define CPUFeature_CLMUL cpu_features->data[NaClCPUFeature_CLMUL]
#define CPUFeature_CLMULAVX (CPUFeature_CLMUL && CPUFeature_AVX)
#define CPUFeature_CMOV cpu_features->data[NaClCPUFeature_CMOV]
#define CPUFeature_CMOVx87 (CPUFeature_CMOV && CPUFeature_x87)
#define CPUFeature_CX16 cpu_features->data[NaClCPUFeature_CX16]
#define CPUFeature_CX8 cpu_features->data[NaClCPUFeature_CX8]
#define CPUFeature_E3DNOW cpu_features->data[NaClCPUFeature_E3DNOW]
#define CPUFeature_EMMX cpu_features->data[NaClCPUFeature_EMMX]
#define CPUFeature_EMMXSSE (CPUFeature_EMMX || CPUFeature_SSE)
#define CPUFeature_F16C cpu_features->data[NaClCPUFeature_F16C]
#define CPUFeature_FMA cpu_features->data[NaClCPUFeature_FMA]
#define CPUFeature_FMA4 cpu_features->data[NaClCPUFeature_FMA4]
#define CPUFeature_FXSR cpu_features->data[NaClCPUFeature_FXSR]
#define CPUFeature_LAHF cpu_features->data[NaClCPUFeature_LAHF]
#define CPUFeature_LM cpu_features->data[NaClCPUFeature_LM]
#define CPUFeature_LWP cpu_features->data[NaClCPUFeature_LWP]
/*
 * We allow lzcnt unconditionally.
 * See http://code.google.com/p/nativeclient/issues/detail?id=2869
 */
#define CPUFeature_LZCNT TRUE
#define CPUFeature_MMX cpu_features->data[NaClCPUFeature_MMX]
#define CPUFeature_MON cpu_features->data[NaClCPUFeature_MON]
#define CPUFeature_MOVBE cpu_features->data[NaClCPUFeature_MOVBE]
#define CPUFeature_OSXSAVE cpu_features->data[NaClCPUFeature_OSXSAVE]
#define CPUFeature_POPCNT cpu_features->data[NaClCPUFeature_POPCNT]
#define CPUFeature_PRE cpu_features->data[NaClCPUFeature_PRE]
#define CPUFeature_SSE cpu_features->data[NaClCPUFeature_SSE]
#define CPUFeature_SSE2 cpu_features->data[NaClCPUFeature_SSE2]
#define CPUFeature_SSE3 cpu_features->data[NaClCPUFeature_SSE3]
#define CPUFeature_SSE41 cpu_features->data[NaClCPUFeature_SSE41]
#define CPUFeature_SSE42 cpu_features->data[NaClCPUFeature_SSE42]
#define CPUFeature_SSE4A cpu_features->data[NaClCPUFeature_SSE4A]
#define CPUFeature_SSSE3 cpu_features->data[NaClCPUFeature_SSSE3]
#define CPUFeature_TBM cpu_features->data[NaClCPUFeature_TBM]
#define CPUFeature_TSC cpu_features->data[NaClCPUFeature_TSC]
/*
 * We allow tzcnt unconditionally.
 * See http://code.google.com/p/nativeclient/issues/detail?id=2869
 */
#define CPUFeature_TZCNT TRUE
#define CPUFeature_x87 cpu_features->data[NaClCPUFeature_x87]
#define CPUFeature_XOP cpu_features->data[NaClCPUFeature_XOP]
86 | |
/*
 * CPUFeature_*_Allowed macros test the validator's policy bitmask
 * (validator_cpuid_features) rather than the CPU under validation.
 * Composite macros are parenthesized so they expand safely inside larger
 * expressions such as !(F##_Allowed) in SET_CPU_FEATURE.
 */
#define CPUFeature_3DNOW_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_3DNOW]
/*
 * AMD documentation claims PREFETCH is always available if CPUFeature_LM is
 * present, but Intel documentation does not even mention it!
 * Keep it as a 3DNow! instruction.
 */
#define CPUFeature_3DPRFTCH_Allowed \
    (CPUFeature_3DNOW_Allowed || CPUFeature_PRE_Allowed)
#define CPUFeature_AES_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_AES]
#define CPUFeature_AESAVX_Allowed \
    (CPUFeature_AES_Allowed && CPUFeature_AVX_Allowed)
#define CPUFeature_AVX_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_AVX]
#define CPUFeature_BMI1_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_BMI1]
#define CPUFeature_CLFLUSH_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_CLFLUSH]
#define CPUFeature_CLMUL_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_CLMUL]
#define CPUFeature_CLMULAVX_Allowed \
    (CPUFeature_CLMUL_Allowed && CPUFeature_AVX_Allowed)
#define CPUFeature_CMOV_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_CMOV]
#define CPUFeature_CMOVx87_Allowed \
    (CPUFeature_CMOV_Allowed && CPUFeature_x87_Allowed)
#define CPUFeature_CX16_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_CX16]
#define CPUFeature_CX8_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_CX8]
#define CPUFeature_E3DNOW_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_E3DNOW]
#define CPUFeature_EMMX_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_EMMX]
#define CPUFeature_EMMXSSE_Allowed \
    (CPUFeature_EMMX_Allowed || CPUFeature_SSE_Allowed)
#define CPUFeature_F16C_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_F16C]
#define CPUFeature_FMA_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_FMA]
#define CPUFeature_FMA4_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_FMA4]
#define CPUFeature_FXSR_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_FXSR]
#define CPUFeature_LAHF_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_LAHF]
#define CPUFeature_LM_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_LM]
#define CPUFeature_LWP_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_LWP]
/*
 * We allow lzcnt unconditionally.
 * See http://code.google.com/p/nativeclient/issues/detail?id=2869
 */
#define CPUFeature_LZCNT_Allowed TRUE
#define CPUFeature_MMX_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_MMX]
#define CPUFeature_MON_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_MON]
#define CPUFeature_MOVBE_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_MOVBE]
#define CPUFeature_OSXSAVE_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_OSXSAVE]
#define CPUFeature_POPCNT_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_POPCNT]
#define CPUFeature_PRE_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_PRE]
#define CPUFeature_SSE_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_SSE]
#define CPUFeature_SSE2_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_SSE2]
#define CPUFeature_SSE3_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_SSE3]
#define CPUFeature_SSE41_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_SSE41]
#define CPUFeature_SSE42_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_SSE42]
#define CPUFeature_SSE4A_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_SSE4A]
#define CPUFeature_SSSE3_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_SSSE3]
#define CPUFeature_TBM_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_TBM]
#define CPUFeature_TSC_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_TSC]
/*
 * We allow tzcnt unconditionally.
 * See http://code.google.com/p/nativeclient/issues/detail?id=2869
 */
#define CPUFeature_TZCNT_Allowed TRUE
#define CPUFeature_x87_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_x87]
#define CPUFeature_XOP_Allowed \
    validator_cpuid_features.data[NaClCPUFeature_XOP]
182 | |
/* Remember some information about instruction for further processing. */
/* These expand to reads/writes of locals (rex_prefix, vex_prefix2,
 * vex_prefix3, base, index) that the enclosing ragel action scope declares. */
#define GET_REX_PREFIX() rex_prefix
#define SET_REX_PREFIX(P) rex_prefix = (P)
#define GET_VEX_PREFIX2() vex_prefix2
#define SET_VEX_PREFIX2(P) vex_prefix2 = (P)
#define GET_VEX_PREFIX3() vex_prefix3
#define SET_VEX_PREFIX3(P) vex_prefix3 = (P)
#define SET_MODRM_BASE(N) base = (N)
#define SET_MODRM_INDEX(N) index = (N)
192 | |
/* Bit masks for the four flag bits of a REX prefix byte (0100WRXB). */
enum {
  REX_B = 1,  /* extends the ModRM r/m, SIB base, or opcode reg field */
  REX_X = 2,  /* extends the SIB index field */
  REX_R = 4,  /* extends the ModRM reg field */
  REX_W = 8   /* selects 64-bit operand size */
};
199 | |
/* Sandboxing classification of how an instruction writes an operand. */
enum operand_kind {
  /* Write does not affect sandboxing (e.g. not a general-purpose register). */
  OperandSandboxIrrelevant = 0,
  /*
   * Currently we do not distinguish 8bit and 16bit modifications from
   * OperandSandboxUnrestricted to match the behavior of the old validator.
   *
   * 8bit operands must be distinguished from other types because the REX prefix
   * regulates the choice between %ah and %spl, as well as %ch and %bpl.
   */
  OperandSandbox8bit,
  /* 32-bit write: zero-extends, so the register becomes "restricted". */
  OperandSandboxRestricted,
  /* 64-bit (or 16-bit, see above) write: no zero-extension guarantee. */
  OperandSandboxUnrestricted
};
213 | |
/*
 * operand_states packs one byte per operand N (byte N of the word):
 *   bits 0-4 of the byte: register name (enum register_name),
 *   bits 5-6 of the byte: operand_kind (sandbox classification).
 * CHECK_OPERAND compares the whole byte against name S combined with type T.
 */
#define SET_OPERAND_NAME(N, S) operand_states |= ((S) << ((N) << 3))
#define SET_OPERAND_TYPE(N, T) SET_OPERAND_TYPE_ ## T(N)
#define SET_OPERAND_TYPE_OPERAND_SIZE_8_BIT(N) \
  operand_states |= OperandSandbox8bit << (5 + ((N) << 3))
/* 16-bit writes are treated as Unrestricted (see enum operand_kind). */
#define SET_OPERAND_TYPE_OPERAND_SIZE_16_BIT(N) \
  operand_states |= OperandSandboxUnrestricted << (5 + ((N) << 3))
/* 32-bit writes zero-extend, producing a Restricted register. */
#define SET_OPERAND_TYPE_OPERAND_SIZE_32_BIT(N) \
  operand_states |= OperandSandboxRestricted << (5 + ((N) << 3))
#define SET_OPERAND_TYPE_OPERAND_SIZE_64_BIT(N) \
  operand_states |= OperandSandboxUnrestricted << (5 + ((N) << 3))
#define CHECK_OPERAND(N, S, T) \
  ((operand_states & (0xff << ((N) << 3))) == ((S | (T << 5)) << ((N) << 3)))
226 | |
/* Ignore this information for now. */
/* The decoder core invokes these callbacks; the validator has no use for
 * the data, so they expand to nothing. */
#define SET_DATA16_PREFIX(S)
#define SET_LOCK_PREFIX(S)
#define SET_REPZ_PREFIX(S)
#define SET_REPNZ_PREFIX(S)
#define SET_BRANCH_TAKEN(S)
#define SET_BRANCH_NOT_TAKEN(S)
#define SET_MODRM_SCALE(S)
#define SET_DISP_PTR(P)
#define SET_IMM_PTR(P)
#define SET_IMM2_PTR(P)
238 | |
/*
 * Collect information about any fields (offsets and immediates).
 * Note: we use += below instead of |=. This means two immediate fields will
 * be treated as one. It's not important for safety.
 */
#define SET_DISP_TYPE(T) SET_DISP_TYPE_##T
#define SET_DISP_TYPE_DISPNONE
#define SET_DISP_TYPE_DISP8 (instruction_info_collected += DISPLACEMENT_8BIT)
#define SET_DISP_TYPE_DISP32 (instruction_info_collected += DISPLACEMENT_32BIT)
#define SET_IMM_TYPE(T) SET_IMM_TYPE_##T
/* imm2 field is a flag, not accumulator, like with other immediates */
#define SET_IMM_TYPE_IMM2 (instruction_info_collected |= IMMEDIATE_2BIT)
#define SET_IMM_TYPE_IMM8 (instruction_info_collected += IMMEDIATE_8BIT)
#define SET_IMM_TYPE_IMM16 (instruction_info_collected += IMMEDIATE_16BIT)
#define SET_IMM_TYPE_IMM32 (instruction_info_collected += IMMEDIATE_32BIT)
#define SET_IMM_TYPE_IMM64 (instruction_info_collected += IMMEDIATE_64BIT)
#define SET_IMM2_TYPE(T) SET_IMM2_TYPE_##T
#define SET_IMM2_TYPE_IMM8 \
  (instruction_info_collected += SECOND_IMMEDIATE_8BIT)
#define SET_IMM2_TYPE_IMM16 \
  (instruction_info_collected += SECOND_IMMEDIATE_16BIT)
260 | |
/* Build the token uint<NACL_HOST_WORDSIZE>_t.  The two-level helper macros
 * force NACL_HOST_WORDSIZE to expand to its numeric value before the ##
 * token paste happens. */
#define BITMAP_WORD_NAME BITMAP_WORD_NAME1(NACL_HOST_WORDSIZE)
#define BITMAP_WORD_NAME1(size) BITMAP_WORD_NAME2(size)
#define BITMAP_WORD_NAME2(size) uint##size##_t
/* One machine word of a bitmap; all bitmap helpers below operate on this. */
typedef BITMAP_WORD_NAME bitmap_word;
266 | |
267 static INLINE bitmap_word *BitmapAllocate(size_t indexes) { | |
268 bitmap_word *bitmap; | |
269 size_t byte_count = ((indexes + NACL_HOST_WORDSIZE - 1) / NACL_HOST_WORDSIZE)* | |
270 sizeof *bitmap; | |
271 bitmap = malloc(byte_count); | |
272 if (bitmap != NULL) { | |
273 memset(bitmap, 0, byte_count); | |
274 } | |
275 return bitmap; | |
276 } | |
277 | |
278 static FORCEINLINE int BitmapIsBitSet(bitmap_word *bitmap, size_t index) { | |
279 return (bitmap[index / NACL_HOST_WORDSIZE] & | |
280 (((bitmap_word)1) << (index % NACL_HOST_WORDSIZE))) != 0; | |
281 } | |
282 | |
283 static FORCEINLINE void BitmapSetBit(bitmap_word *bitmap, size_t index) { | |
284 bitmap[index / NACL_HOST_WORDSIZE] |= | |
285 ((bitmap_word)1) << (index % NACL_HOST_WORDSIZE); | |
286 } | |
287 | |
288 static FORCEINLINE void BitmapClearBit(bitmap_word *bitmap, size_t index) { | |
289 bitmap[index / NACL_HOST_WORDSIZE] &= | |
290 ~(((bitmap_word)1) << (index % NACL_HOST_WORDSIZE)); | |
291 } | |
292 | |
/* Return nonzero iff any of the |bits| consecutive bits starting at |index|
 * is set.  All the bits must lie within a single bitmap word (they must not
 * cross a word boundary).
 * NOTE(review): the shift assumes bits < NACL_HOST_WORDSIZE; a full-width
 * shift would be undefined behavior — presumably callers guarantee this. */
static FORCEINLINE int BitmapIsAnyBitSet(bitmap_word *bitmap,
                                         size_t index, size_t bits) {
  return (bitmap[index / NACL_HOST_WORDSIZE] &
       (((((bitmap_word)1) << bits) - 1) << (index % NACL_HOST_WORDSIZE))) != 0;
}
299 | |
/* Set |bits| consecutive bits starting at |index|.  All the bits must lie
 * within a single bitmap word (they must not cross a word boundary).
 * NOTE(review): like BitmapIsAnyBitSet, assumes bits < NACL_HOST_WORDSIZE. */
static FORCEINLINE void BitmapSetBits(bitmap_word *bitmap,
                                      size_t index, size_t bits) {
  bitmap[index / NACL_HOST_WORDSIZE] |=
    ((((bitmap_word)1) << bits) - 1) << (index % NACL_HOST_WORDSIZE);
}
306 | |
/* Mark the destination of a jump instruction and make an early validity check:
 * to jump outside given code region, the target address must be aligned.
 *
 * Returns TRUE iff the jump passes the early validity check.
 */
static FORCEINLINE int MarkJumpTarget(size_t jump_dest,
                                      bitmap_word *jump_dests,
                                      size_t size) {
  /* Bundle-aligned targets are always acceptable — even outside the code
   * region — and are deliberately NOT recorded in jump_dests. */
  if ((jump_dest & kBundleMask) == 0) {
    return TRUE;
  }
  /* An unaligned target beyond the code region can never become valid. */
  if (jump_dest >= size) {
    return FALSE;
  }
  /* Record the target; it is checked against the valid_targets bitmap later
   * (see ProcessInvalidJumpTargets). */
  BitmapSetBit(jump_dests, jump_dest);
  return TRUE;
}
324 | |
325 | |
/*
 * Report every recorded jump destination that is not a valid instruction
 * boundary: for each bit set in jump_dests but clear in valid_targets,
 * invoke user_callback with BAD_JUMP_TARGET at that offset.
 * Returns TRUE iff no bad target was found (and every callback returned
 * TRUE).
 */
static INLINE Bool ProcessInvalidJumpTargets(
    const uint8_t *data,
    size_t size,
    bitmap_word *valid_targets,
    bitmap_word *jump_dests,
    validation_callback_func user_callback,
    void *callback_data) {
  size_t elements = (size + NACL_HOST_WORDSIZE - 1) / NACL_HOST_WORDSIZE;
  size_t i, j;
  Bool result = TRUE;

  for (i = 0; i < elements ; i++) {
    bitmap_word jump_dest_mask = jump_dests[i];
    bitmap_word valid_target_mask = valid_targets[i];
    /* Fast path: rescan a word bit-by-bit only when it contains at least one
     * jump destination that is not a valid target. */
    if ((jump_dest_mask & ~valid_target_mask) != 0) {
      for (j = i * NACL_HOST_WORDSIZE; j < (i + 1) * NACL_HOST_WORDSIZE; j++)
        if (BitmapIsBitSet(jump_dests, j) &&
            !BitmapIsBitSet(valid_targets, j)) {
          /* result is accumulated with &= so one FALSE fails the whole run
           * while still reporting all remaining bad targets. */
          result &= user_callback(data + j,
                                  data + j,
                                  BAD_JUMP_TARGET,
                                  callback_data);
        }
    }
  }

  return result;
}
354 | |
355 | |
/*
 * Process rel8_operand. Note: rip points to the beginning of the next
 * instruction here and x86 encoding guarantees rel8 field is the last one
 * in a current instruction.
 */
static FORCEINLINE void rel8_operand(const uint8_t *rip,
                                     const uint8_t* codeblock_start,
                                     bitmap_word *jump_dests,
                                     size_t jumpdests_size,
                                     uint32_t *instruction_info_collected) {
  /* Reinterpret the last instruction byte as a signed 8-bit displacement. */
  int8_t offset = (uint8_t) (rip[-1]);
  /* Target is relative to the next instruction, i.e. to rip itself. */
  size_t jump_dest = offset + (rip - codeblock_start);

  if (MarkJumpTarget(jump_dest, jump_dests, jumpdests_size))
    *instruction_info_collected |= RELATIVE_8BIT;
  else
    *instruction_info_collected |= RELATIVE_8BIT | DIRECT_JUMP_OUT_OF_RANGE;
}
374 | |
/*
 * Process rel32_operand. Note: rip points to the beginning of the next
 * instruction here and x86 encoding guarantees rel32 field is the last one
 * in a current instruction.
 */
static FORCEINLINE void rel32_operand(const uint8_t *rip,
                                      const uint8_t* codeblock_start,
                                      bitmap_word *jump_dests,
                                      size_t jumpdests_size,
                                      uint32_t *instruction_info_collected) {
  /* Reassemble the little-endian 32-bit displacement from the last four
   * instruction bytes without relying on unaligned loads. */
  int32_t offset = (rip[-4] + 256U * (rip[-3] + 256U * (
        rip[-2] + 256U * ((uint32_t) rip[-1]))));
  /* Target is relative to the next instruction, i.e. to rip itself. */
  size_t jump_dest = offset + (rip - codeblock_start);

  if (MarkJumpTarget(jump_dest, jump_dests, jumpdests_size))
    *instruction_info_collected |= RELATIVE_32BIT;
  else
    *instruction_info_collected |= RELATIVE_32BIT | DIRECT_JUMP_OUT_OF_RANGE;
}
394 | |
395 static INLINE void check_access(ptrdiff_t instruction_start, | |
396 enum register_name base, | |
397 enum register_name index, | |
398 uint8_t restricted_register, | |
399 bitmap_word *valid_targets, | |
400 uint32_t *instruction_info_collected) { | |
401 if ((base == REG_RIP) || (base == REG_R15) || | |
402 (base == REG_RSP) || (base == REG_RBP)) { | |
403 if ((index == NO_REG) || (index == REG_RIZ)) | |
404 { /* do nothing. */ } | |
405 else if (index == restricted_register) | |
406 BitmapClearBit(valid_targets, instruction_start), | |
407 *instruction_info_collected |= RESTRICTED_REGISTER_USED; | |
408 else | |
409 *instruction_info_collected |= UNRESTRICTED_INDEX_REGISTER; | |
410 } else { | |
411 *instruction_info_collected |= FORBIDDEN_BASE_REGISTER; | |
412 } | |
413 } | |
414 | |
415 | |
416 static INLINE void process_0_operands(enum register_name *restricted_register, | |
417 uint32_t *instruction_info_collected) { | |
418 /* Restricted %rsp or %rbp must be processed by appropriate nacl-special | |
419 * instruction, not with regular instruction. */ | |
420 if (*restricted_register == REG_RSP) { | |
421 *instruction_info_collected |= RESTRICTED_RSP_UNPROCESSED; | |
422 } else if (*restricted_register == REG_RBP) { | |
423 *instruction_info_collected |= RESTRICTED_RBP_UNPROCESSED; | |
424 } | |
425 *restricted_register = NO_REG; | |
426 } | |
427 | |
/*
 * Handle an instruction that writes one operand (no zero-extension
 * tracking): flag writes to the sandbox-critical registers %r15, %rbp/%bpl
 * and %rsp/%spl.
 */
static INLINE void process_1_operand(enum register_name *restricted_register,
                                     uint32_t *instruction_info_collected,
                                     uint8_t rex_prefix,
                                     uint32_t operand_states) {
  /* Restricted %rsp or %rbp must be processed by appropriate nacl-special
   * instruction, not with regular instruction. */
  if (*restricted_register == REG_RSP) {
    *instruction_info_collected |= RESTRICTED_RSP_UNPROCESSED;
  } else if (*restricted_register == REG_RBP) {
    *instruction_info_collected |= RESTRICTED_RBP_UNPROCESSED;
  }
  *restricted_register = NO_REG;
  /* Any write to %r15 breaks the sandbox base. */
  if (CHECK_OPERAND(0, REG_R15, OperandSandbox8bit) ||
      CHECK_OPERAND(0, REG_R15, OperandSandboxRestricted) ||
      CHECK_OPERAND(0, REG_R15, OperandSandboxUnrestricted)) {
    *instruction_info_collected |= R15_MODIFIED;
  /* The 8-bit case only names %bpl when a REX prefix is present (without
   * REX the same encoding means %ch). */
  } else if ((CHECK_OPERAND(0, REG_RBP, OperandSandbox8bit) && rex_prefix) ||
             CHECK_OPERAND(0, REG_RBP, OperandSandboxRestricted) ||
             CHECK_OPERAND(0, REG_RBP, OperandSandboxUnrestricted)) {
    *instruction_info_collected |= BPL_MODIFIED;
  /* Likewise %spl vs %ah for the stack pointer. */
  } else if ((CHECK_OPERAND(0, REG_RSP, OperandSandbox8bit) && rex_prefix) ||
             CHECK_OPERAND(0, REG_RSP, OperandSandboxRestricted) ||
             CHECK_OPERAND(0, REG_RSP, OperandSandboxUnrestricted)) {
    *instruction_info_collected |= SPL_MODIFIED;
  }
}
454 | |
/*
 * Like process_1_operand, but for instructions whose 32-bit writes
 * zero-extend: a Restricted (32-bit) write to %rbp/%rsp is legal here and a
 * Restricted write to any other register makes it the new restricted
 * register.
 */
static INLINE void process_1_operand_zero_extends(
    enum register_name *restricted_register,
    uint32_t *instruction_info_collected, uint8_t rex_prefix,
    uint32_t operand_states) {
  /* Restricted %rsp or %rbp must be processed by appropriate nacl-special
   * instruction, not with regular instruction. */
  if (*restricted_register == REG_RSP) {
    *instruction_info_collected |= RESTRICTED_RSP_UNPROCESSED;
  } else if (*restricted_register == REG_RBP) {
    *instruction_info_collected |= RESTRICTED_RBP_UNPROCESSED;
  }
  *restricted_register = NO_REG;
  if (CHECK_OPERAND(0, REG_R15, OperandSandbox8bit) ||
      CHECK_OPERAND(0, REG_R15, OperandSandboxRestricted) ||
      CHECK_OPERAND(0, REG_R15, OperandSandboxUnrestricted)) {
    *instruction_info_collected |= R15_MODIFIED;
  /* Unlike process_1_operand, Restricted (32-bit) writes to %rbp/%rsp are
   * not flagged here; they fall through to the restricted-register logic. */
  } else if ((CHECK_OPERAND(0, REG_RBP, OperandSandbox8bit) && rex_prefix) ||
             CHECK_OPERAND(0, REG_RBP, OperandSandboxUnrestricted)) {
    *instruction_info_collected |= BPL_MODIFIED;
  } else if ((CHECK_OPERAND(0, REG_RSP, OperandSandbox8bit) && rex_prefix) ||
             CHECK_OPERAND(0, REG_RSP, OperandSandboxUnrestricted)) {
    *instruction_info_collected |= SPL_MODIFIED;
  /* Take 2 bits of operand type from operand_states as *restricted_register,
   * make sure operand_states denotes a register (4th bit == 0). */
  } else if ((operand_states & 0x70) == (OperandSandboxRestricted << 5)) {
    *restricted_register = operand_states & 0x0f;
  }
}
483 | |
/*
 * Handle an instruction that writes two operands (no zero-extension
 * tracking): flag writes through either operand to the sandbox-critical
 * registers %r15, %rbp/%bpl and %rsp/%spl.
 */
static INLINE void process_2_operands(enum register_name *restricted_register,
                                      uint32_t *instruction_info_collected,
                                      uint8_t rex_prefix,
                                      uint32_t operand_states) {
  /* Restricted %rsp or %rbp must be processed by appropriate nacl-special
   * instruction, not with regular instruction. */
  if (*restricted_register == REG_RSP) {
    *instruction_info_collected |= RESTRICTED_RSP_UNPROCESSED;
  } else if (*restricted_register == REG_RBP) {
    *instruction_info_collected |= RESTRICTED_RBP_UNPROCESSED;
  }
  *restricted_register = NO_REG;
  if (CHECK_OPERAND(0, REG_R15, OperandSandbox8bit) ||
      CHECK_OPERAND(0, REG_R15, OperandSandboxRestricted) ||
      CHECK_OPERAND(0, REG_R15, OperandSandboxUnrestricted) ||
      CHECK_OPERAND(1, REG_R15, OperandSandbox8bit) ||
      CHECK_OPERAND(1, REG_R15, OperandSandboxRestricted) ||
      CHECK_OPERAND(1, REG_R15, OperandSandboxUnrestricted)) {
    *instruction_info_collected |= R15_MODIFIED;
  /* 8-bit cases require a REX prefix to actually name %bpl/%spl (see
   * enum operand_kind). */
  } else if ((CHECK_OPERAND(0, REG_RBP, OperandSandbox8bit) && rex_prefix) ||
             CHECK_OPERAND(0, REG_RBP, OperandSandboxRestricted) ||
             CHECK_OPERAND(0, REG_RBP, OperandSandboxUnrestricted) ||
             (CHECK_OPERAND(1, REG_RBP, OperandSandbox8bit) && rex_prefix) ||
             CHECK_OPERAND(1, REG_RBP, OperandSandboxRestricted) ||
             CHECK_OPERAND(1, REG_RBP, OperandSandboxUnrestricted)) {
    *instruction_info_collected |= BPL_MODIFIED;
  } else if ((CHECK_OPERAND(0, REG_RSP, OperandSandbox8bit) && rex_prefix) ||
             CHECK_OPERAND(0, REG_RSP, OperandSandboxRestricted) ||
             CHECK_OPERAND(0, REG_RSP, OperandSandboxUnrestricted) ||
             (CHECK_OPERAND(1, REG_RSP, OperandSandbox8bit) && rex_prefix) ||
             CHECK_OPERAND(1, REG_RSP, OperandSandboxRestricted) ||
             CHECK_OPERAND(1, REG_RSP, OperandSandboxUnrestricted)) {
    *instruction_info_collected |= SPL_MODIFIED;
  }
}
519 | |
/*
 * Like process_2_operands, but for instructions whose 32-bit writes
 * zero-extend: a Restricted write to either operand makes that register the
 * new restricted register (operand 0 takes precedence); Restricted writes
 * to %rsp/%rbp via operand 1 are still flagged as unprocessed.
 */
static INLINE void process_2_operands_zero_extends(
    enum register_name *restricted_register,
    uint32_t *instruction_info_collected,
    uint8_t rex_prefix, uint32_t operand_states) {
  /* Restricted %rsp or %rbp must be processed by appropriate nacl-special
   * instruction, not with regular instruction. */
  if (*restricted_register == REG_RSP) {
    *instruction_info_collected |= RESTRICTED_RSP_UNPROCESSED;
  } else if (*restricted_register == REG_RBP) {
    *instruction_info_collected |= RESTRICTED_RBP_UNPROCESSED;
  }
  *restricted_register = NO_REG;
  if (CHECK_OPERAND(0, REG_R15, OperandSandbox8bit) ||
      CHECK_OPERAND(0, REG_R15, OperandSandboxRestricted) ||
      CHECK_OPERAND(0, REG_R15, OperandSandboxUnrestricted) ||
      CHECK_OPERAND(1, REG_R15, OperandSandbox8bit) ||
      CHECK_OPERAND(1, REG_R15, OperandSandboxRestricted) ||
      CHECK_OPERAND(1, REG_R15, OperandSandboxUnrestricted)) {
    *instruction_info_collected |= R15_MODIFIED;
  /* Restricted (32-bit, zero-extending) writes to %rbp/%rsp are not flagged
   * here; they are handled by the restricted-register branches below. */
  } else if ((CHECK_OPERAND(0, REG_RBP, OperandSandbox8bit) && rex_prefix) ||
             CHECK_OPERAND(0, REG_RBP, OperandSandboxUnrestricted) ||
             (CHECK_OPERAND(1, REG_RBP, OperandSandbox8bit) && rex_prefix) ||
             CHECK_OPERAND(1, REG_RBP, OperandSandboxUnrestricted)) {
    *instruction_info_collected |= BPL_MODIFIED;
  } else if ((CHECK_OPERAND(0, REG_RSP, OperandSandbox8bit) && rex_prefix) ||
             CHECK_OPERAND(0, REG_RSP, OperandSandboxUnrestricted) ||
             (CHECK_OPERAND(1, REG_RSP, OperandSandbox8bit) && rex_prefix) ||
             CHECK_OPERAND(1, REG_RSP, OperandSandboxUnrestricted)) {
    *instruction_info_collected |= SPL_MODIFIED;
  /* Take 2 bits of operand type from operand_states as *restricted_register,
   * make sure operand_states denotes a register (4th bit == 0). */
  } else if ((operand_states & 0x70) == (OperandSandboxRestricted << 5)) {
    *restricted_register = operand_states & 0x0f;
    /* Operand 0 won; a simultaneous Restricted write of %rsp/%rbp through
     * operand 1 would otherwise be lost, so flag it as unprocessed. */
    if (CHECK_OPERAND(1, REG_RSP, OperandSandboxRestricted)) {
      *instruction_info_collected |= RESTRICTED_RSP_UNPROCESSED;
    } else if (CHECK_OPERAND(1, REG_RBP, OperandSandboxRestricted)) {
      *instruction_info_collected |= RESTRICTED_RBP_UNPROCESSED;
    }
  /* Take 2 bits of operand type from operand_states as *restricted_register,
   * make sure operand_states denotes a register (12th bit == 0). */
  } else if ((operand_states & 0x7000) == (OperandSandboxRestricted << 13)) {
    *restricted_register = (operand_states & 0x0f00) >> 8;
  }
}
564 | |
565 #endif /* NATIVE_CLIENT_SRC_TRUSTED_VALIDATOR_RAGEL_VALIDATOR_INTERNAL_H_ */ | |
OLD | NEW |