OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2012 The Native Client Authors. All rights reserved. | 2 * Copyright (c) 2012 The Native Client Authors. All rights reserved. |
3 * Use of this source code is governed by a BSD-style license that can be | 3 * Use of this source code is governed by a BSD-style license that can be |
4 * found in the LICENSE file. | 4 * found in the LICENSE file. |
5 */ | 5 */ |
6 | 6 |
7 /* | 7 /* |
8 * This file contains common parts of x86-32 and x86-64 internals (inline | 8 * This file contains common parts of x86-32 and x86-64 internals (inline |
9 * functions and defines). | 9 * functions and defines). |
10 */ | 10 */ |
11 | 11 |
12 #ifndef NATIVE_CLIENT_SRC_TRUSTED_VALIDATOR_RAGEL_VALIDATOR_INTERNAL_H_ | 12 #ifndef NATIVE_CLIENT_SRC_TRUSTED_VALIDATOR_RAGEL_VALIDATOR_INTERNAL_H_ |
13 #define NATIVE_CLIENT_SRC_TRUSTED_VALIDATOR_RAGEL_VALIDATOR_INTERNAL_H_ | 13 #define NATIVE_CLIENT_SRC_TRUSTED_VALIDATOR_RAGEL_VALIDATOR_INTERNAL_H_ |
14 | 14 |
15 #include "native_client/src/shared/platform/nacl_check.h" | 15 #include "native_client/src/shared/platform/nacl_check.h" |
16 #include "native_client/src/shared/utils/types.h" | 16 #include "native_client/src/shared/utils/types.h" |
17 #include "native_client/src/trusted/validator_ragel/unreviewed/decoding.h" | 17 #include "native_client/src/trusted/validator_ragel/decoding.h" |
18 #include "native_client/src/trusted/validator_ragel/validator.h" | 18 #include "native_client/src/trusted/validator_ragel/validator.h" |
19 | 19 |
20 /* Maximum set of R-DFA allowable CPUID features. */ | 20 /* Maximum set of R-DFA allowable CPUID features. */ |
21 extern const NaClCPUFeaturesX86 kValidatorCPUIDFeatures; | 21 extern const NaClCPUFeaturesX86 kValidatorCPUIDFeatures; |
22 | 22 |
23 /* Macroses to suppport CPUID handling. */ | 23 /* Macroses to suppport CPUID handling. */ |
halyavin 2013/03/18 11:58:44: misprint: support.
khim 2013/03/19 14:54:46: Done.
24 #define SET_CPU_FEATURE(F) \ | 24 #define SET_CPU_FEATURE(F) \ |
halyavin 2013/03/18 11:58:44: Comment what is F and F_Allowed (variable? constan
khim 2013/03/19 14:54:46: If it'll be anything but macro then "macroses" in
halyavin 2013/03/19 15:02:30: No. You can create variables with names F and F_Al
khim 2013/03/21 14:38:17: This will be a lie since it'll be macro then, not
25 if (!(F##_Allowed)) { \ | 25 if (!(F##_Allowed)) { \ |
26 instruction_info_collected |= UNRECOGNIZED_INSTRUCTION; \ | 26 instruction_info_collected |= UNRECOGNIZED_INSTRUCTION; \ |
27 } \ | 27 } \ |
28 if (!(F)) { \ | 28 if (!(F)) { \ |
29 instruction_info_collected |= CPUID_UNSUPPORTED_INSTRUCTION; \ | 29 instruction_info_collected |= CPUID_UNSUPPORTED_INSTRUCTION; \ |
30 } | 30 } |
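For reference, a rough sketch of what the macro above produces for one concrete feature; nothing new is introduced here, this is simply SET_CPU_FEATURE(CPUFeature_AVX) with the token pasting spelled out, assuming it is invoked from validator code where instruction_info_collected and cpu_features are in scope:

    /* SET_CPU_FEATURE(CPUFeature_AVX) expands to roughly: */
    if (!(CPUFeature_AVX_Allowed)) {   /* AVX not in the validator's allowed set */
      instruction_info_collected |= UNRECOGNIZED_INSTRUCTION;
    }
    if (!(CPUFeature_AVX)) {           /* AVX not reported by the actual CPU */
      instruction_info_collected |= CPUID_UNSUPPORTED_INSTRUCTION;
    }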
31 #define CPUFeature_3DNOW cpu_features->data[NaClCPUFeatureX86_3DNOW] | 31 #define CPUFeature_3DNOW cpu_features->data[NaClCPUFeatureX86_3DNOW] |
32 /* | 32 /* |
33 * AMD documentation claims it's always available if CPUFeature_LM is present, | 33 * AMD documentation claims it's always available if CPUFeature_LM is present, |
34 * But Intel documentation does not even mention it! | 34 * But Intel documentation does not even mention it! |
35 * Keep it as 3DNow! instruction. | 35 * Keep it as 3DNow! instruction. |
36 */ | 36 */ |
37 #define CPUFeature_3DPRFTCH CPUFeature_3DNOW || CPUFeature_PRE | 37 #define CPUFeature_3DPRFTCH CPUFeature_3DNOW || CPUFeature_PRE |
38 #define CPUFeature_AES cpu_features->data[NaClCPUFeatureX86_AES] | 38 #define CPUFeature_AES cpu_features->data[NaClCPUFeatureX86_AES] |
39 #define CPUFeature_AESAVX CPUFeature_AES && CPUFeature_AVX | 39 #define CPUFeature_AESAVX CPUFeature_AES && CPUFeature_AVX |
Vlad Shcherbina 2013/03/18 15:15:18: Currently these 'composite' features only refer to
khim 2013/03/19 14:54:46: Done.
40 #define CPUFeature_AVX cpu_features->data[NaClCPUFeatureX86_AVX] | 40 #define CPUFeature_AVX cpu_features->data[NaClCPUFeatureX86_AVX] |
41 #define CPUFeature_BMI1 cpu_features->data[NaClCPUFeatureX86_BMI1] | 41 #define CPUFeature_BMI1 cpu_features->data[NaClCPUFeatureX86_BMI1] |
42 #define CPUFeature_CLFLUSH cpu_features->data[NaClCPUFeatureX86_CLFLUSH] | 42 #define CPUFeature_CLFLUSH cpu_features->data[NaClCPUFeatureX86_CLFLUSH] |
43 #define CPUFeature_CLMUL cpu_features->data[NaClCPUFeatureX86_CLMUL] | 43 #define CPUFeature_CLMUL cpu_features->data[NaClCPUFeatureX86_CLMUL] |
44 #define CPUFeature_CLMULAVX CPUFeature_CLMUL && CPUFeature_AVX | 44 #define CPUFeature_CLMULAVX CPUFeature_CLMUL && CPUFeature_AVX |
45 #define CPUFeature_CMOV cpu_features->data[NaClCPUFeatureX86_CMOV] | 45 #define CPUFeature_CMOV cpu_features->data[NaClCPUFeatureX86_CMOV] |
46 #define CPUFeature_CMOVx87 CPUFeature_CMOV && CPUFeature_x87 | 46 #define CPUFeature_CMOVx87 CPUFeature_CMOV && CPUFeature_x87 |
47 #define CPUFeature_CX16 cpu_features->data[NaClCPUFeatureX86_CX16] | 47 #define CPUFeature_CX16 cpu_features->data[NaClCPUFeatureX86_CX16] |
48 #define CPUFeature_CX8 cpu_features->data[NaClCPUFeatureX86_CX8] | 48 #define CPUFeature_CX8 cpu_features->data[NaClCPUFeatureX86_CX8] |
49 #define CPUFeature_E3DNOW cpu_features->data[NaClCPUFeatureX86_E3DNOW] | 49 #define CPUFeature_E3DNOW cpu_features->data[NaClCPUFeatureX86_E3DNOW] |
(...skipping 27 matching lines...)
77 #define CPUFeature_TBM cpu_features->data[NaClCPUFeatureX86_TBM] | 77 #define CPUFeature_TBM cpu_features->data[NaClCPUFeatureX86_TBM] |
78 #define CPUFeature_TSC cpu_features->data[NaClCPUFeatureX86_TSC] | 78 #define CPUFeature_TSC cpu_features->data[NaClCPUFeatureX86_TSC] |
79 /* | 79 /* |
80 * We allow tzcnt unconditionally | 80 * We allow tzcnt unconditionally |
81 * See http://code.google.com/p/nativeclient/issues/detail?id=2869 | 81 * See http://code.google.com/p/nativeclient/issues/detail?id=2869 |
82 */ | 82 */ |
83 #define CPUFeature_TZCNT TRUE | 83 #define CPUFeature_TZCNT TRUE |
84 #define CPUFeature_x87 cpu_features->data[NaClCPUFeatureX86_x87] | 84 #define CPUFeature_x87 cpu_features->data[NaClCPUFeatureX86_x87] |
85 #define CPUFeature_XOP cpu_features->data[NaClCPUFeatureX86_XOP] | 85 #define CPUFeature_XOP cpu_features->data[NaClCPUFeatureX86_XOP] |
86 | 86 |
87 #define CPUFeature_3DNOW_Allowed \ | 87 #define CPUFeature_3DNOW_Allowed \ |
Vlad Shcherbina 2013/03/18 15:15:18: Currently macros CPUFeature_<whatever>_Allowed com
khim 2013/03/19 14:54:46: Done.
89 /* | 89 /* |
90 * AMD documentation claims it's always available if CPUFeature_LM is present, | 90 * AMD documentation claims it's always available if CPUFeature_LM is present, |
91 * But Intel documentation does not even mention it! | 91 * But Intel documentation does not even mention it! |
92 * Keep it as 3DNow! instruction. | 92 * Keep it as 3DNow! instruction. |
93 */ | 93 */ |
94 #define CPUFeature_3DPRFTCH_Allowed \ | 94 #define CPUFeature_3DPRFTCH_Allowed \ |
95 CPUFeature_3DNOW_Allowed || CPUFeature_PRE_Allowed | 95 CPUFeature_3DNOW_Allowed || CPUFeature_PRE_Allowed |
96 #define CPUFeature_AES_Allowed \ | 96 #define CPUFeature_AES_Allowed \ |
97 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_AES] | 97 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_AES] |
(...skipping 77 matching lines...)
175 * See http://code.google.com/p/nativeclient/issues/detail?id=2869 | 175 * See http://code.google.com/p/nativeclient/issues/detail?id=2869 |
176 */ | 176 */ |
177 #define CPUFeature_TZCNT_Allowed TRUE | 177 #define CPUFeature_TZCNT_Allowed TRUE |
178 #define CPUFeature_x87_Allowed \ | 178 #define CPUFeature_x87_Allowed \ |
179 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_x87] | 179 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_x87] |
180 #define CPUFeature_XOP_Allowed \ | 180 #define CPUFeature_XOP_Allowed \ |
181 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_XOP] | 181 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_XOP] |
182 | 182 |
183 /* Remember some information about instruction for further processing. */ | 183 /* Remember some information about instruction for further processing. */ |
184 #define GET_REX_PREFIX() rex_prefix | 184 #define GET_REX_PREFIX() rex_prefix |
185 #define SET_REX_PREFIX(P) rex_prefix = (P) | 185 #define SET_REX_PREFIX(P) rex_prefix = (P) |
halyavin 2013/03/18 11:58:44: P->PREFIX
khim 2013/03/19 14:54:46: Long live cryptic error messages! But as I've sai
186 #define GET_VEX_PREFIX2() vex_prefix2 | 186 #define GET_VEX_PREFIX2() vex_prefix2 |
187 #define SET_VEX_PREFIX2(P) vex_prefix2 = (P) | 187 #define SET_VEX_PREFIX2(P) vex_prefix2 = (P) |
188 #define GET_VEX_PREFIX3() vex_prefix3 | 188 #define GET_VEX_PREFIX3() vex_prefix3 |
189 #define SET_VEX_PREFIX3(P) vex_prefix3 = (P) | 189 #define SET_VEX_PREFIX3(P) vex_prefix3 = (P) |
190 #define SET_MODRM_BASE(N) base = (N) | 190 #define SET_MODRM_BASE(N) base = (N) |
halyavin 2013/03/18 11:58:44: N->REG?
khim 2013/03/19 14:54:46: Done.
191 #define SET_MODRM_INDEX(N) index = (N) | 191 #define SET_MODRM_INDEX(N) index = (N) |
192 | 192 |
193 /* Ignore this information for now. */ | 193 /* Ignore this information for now. */ |
194 #define SET_DATA16_PREFIX(S) | 194 #define SET_DATA16_PREFIX(S) |
halyavin 2013/03/18 11:58:44: What is S and P below?
khim 2013/03/19 14:54:46: Expanded (expect cryptic error messages in the fut
195 #define SET_REPZ_PREFIX(S) | 195 #define SET_REPZ_PREFIX(S) |
196 #define SET_REPNZ_PREFIX(S) | 196 #define SET_REPNZ_PREFIX(S) |
197 #define SET_MODRM_SCALE(S) | 197 #define SET_MODRM_SCALE(S) |
198 #define SET_DISP_PTR(P) | 198 #define SET_DISP_PTR(P) |
199 #define SET_IMM_PTR(P) | 199 #define SET_IMM_PTR(P) |
200 #define SET_IMM2_PTR(P) | 200 #define SET_IMM2_PTR(P) |
201 | 201 |
202 /* | 202 /* |
203 * Collect information about anyfields (offsets and immediates). | 203 * Collect information about anyfields (offsets and immediates). |
204 * Note: we use += below instead of |=. This means two immediate fields will | 204 * Note: we use += below instead of |=. This means two immediate fields will |
205 * be treated as one. It's not important for safety. | 205 * be treated as one. It's not important for safety. |
206 */ | 206 */ |
207 #define SET_DISP_TYPE(T) SET_DISP_TYPE_##T | 207 #define SET_DISP_TYPE(T) SET_DISP_TYPE_##T |
halyavin 2013/03/18 11:58:44: T->TYPE
khim 2013/03/19 14:54:46: Ditto.
208 #define SET_DISP_TYPE_DISPNONE | 208 #define SET_DISP_TYPE_DISPNONE |
209 #define SET_DISP_TYPE_DISP8 (instruction_info_collected += DISPLACEMENT_8BIT) | 209 #define SET_DISP_TYPE_DISP8 (instruction_info_collected += DISPLACEMENT_8BIT) |
210 #define SET_DISP_TYPE_DISP32 (instruction_info_collected += DISPLACEMENT_32BIT) | 210 #define SET_DISP_TYPE_DISP32 (instruction_info_collected += DISPLACEMENT_32BIT) |
211 #define SET_IMM_TYPE(T) SET_IMM_TYPE_##T | 211 #define SET_IMM_TYPE(T) SET_IMM_TYPE_##T |
212 /* imm2 field is a flag, not accumulator, like with other immediates */ | 212 /* imm2 field is a flag, not accumulator, like with other immediates */ |
halyavin 2013/03/18 11:58:44: like other immediates
khim 2013/03/19 14:54:46: Done.
213 #define SET_IMM_TYPE_IMM2 (instruction_info_collected |= IMMEDIATE_2BIT) | 213 #define SET_IMM_TYPE_IMM2 (instruction_info_collected |= IMMEDIATE_2BIT) |
214 #define SET_IMM_TYPE_IMM8 (instruction_info_collected += IMMEDIATE_8BIT) | 214 #define SET_IMM_TYPE_IMM8 (instruction_info_collected += IMMEDIATE_8BIT) |
215 #define SET_IMM_TYPE_IMM16 (instruction_info_collected += IMMEDIATE_16BIT) | 215 #define SET_IMM_TYPE_IMM16 (instruction_info_collected += IMMEDIATE_16BIT) |
216 #define SET_IMM_TYPE_IMM32 (instruction_info_collected += IMMEDIATE_32BIT) | 216 #define SET_IMM_TYPE_IMM32 (instruction_info_collected += IMMEDIATE_32BIT) |
217 #define SET_IMM_TYPE_IMM64 (instruction_info_collected += IMMEDIATE_64BIT) | 217 #define SET_IMM_TYPE_IMM64 (instruction_info_collected += IMMEDIATE_64BIT) |
218 #define SET_IMM2_TYPE(T) SET_IMM2_TYPE_##T | 218 #define SET_IMM2_TYPE(T) SET_IMM2_TYPE_##T |
halyavin 2013/03/18 11:58:44: SET_SECOND_IMM_TYPE, T->TYPE
khim 2013/03/19 14:54:46: Done.
219 #define SET_IMM2_TYPE_IMM8 \ | 219 #define SET_IMM2_TYPE_IMM8 \ |
220 (instruction_info_collected += SECOND_IMMEDIATE_8BIT) | 220 (instruction_info_collected += SECOND_IMMEDIATE_8BIT) |
221 #define SET_IMM2_TYPE_IMM16 \ | 221 #define SET_IMM2_TYPE_IMM16 \ |
222 (instruction_info_collected += SECOND_IMMEDIATE_16BIT) | 222 (instruction_info_collected += SECOND_IMMEDIATE_16BIT) |
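As an illustration of the token-pasting dispatch above (purely expository, using only the macros already defined), an action reporting a 32-bit displacement, an 8-bit immediate, and a 16-bit second immediate would go through these expansions:

    SET_DISP_TYPE(DISP32);  /* -> SET_DISP_TYPE_DISP32
                               -> (instruction_info_collected += DISPLACEMENT_32BIT) */
    SET_IMM_TYPE(IMM8);     /* -> SET_IMM_TYPE_IMM8
                               -> (instruction_info_collected += IMMEDIATE_8BIT) */
    SET_IMM2_TYPE(IMM16);   /* -> (instruction_info_collected += SECOND_IMMEDIATE_16BIT) */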
223 | 223 |
224 /* Mark the destination of a jump instruction and make an early validity check: | 224 /* Mark the destination of a jump instruction and make an early validity check: |
225 * to jump outside given code region, the target address must be aligned. | 225 * to jump outside given code region, the target address must be aligned. |
halyavin 2013/03/18 11:58:44: jump target outside of given code region must be a
khim 2013/03/19 14:54:46: Done.
226 * | 226 * |
227 * Returns TRUE iff the jump passes the early validity check. | 227 * Returns TRUE iff the jump passes the early validity check. |
228 */ | 228 */ |
229 static FORCEINLINE int MarkJumpTarget(size_t jump_dest, | 229 static FORCEINLINE int MarkJumpTarget(size_t jump_dest, |
230 bitmap_word *jump_dests, | 230 bitmap_word *jump_dests, |
231 size_t size) { | 231 size_t size) { |
232 if ((jump_dest & kBundleMask) == 0) { | 232 if ((jump_dest & kBundleMask) == 0) { |
233 return TRUE; | 233 return TRUE; |
234 } | 234 } |
235 if (jump_dest >= size) { | 235 if (jump_dest >= size) { |
(...skipping 22 matching lines...)
258 /* | 258 /* |
259 * Mark the given addresses as invalid jump target addresses (that is: unmark | 259 * Mark the given addresses as invalid jump target addresses (that is: unmark |
260 * them). | 260 * them). |
261 */ | 261 */ |
262 static FORCEINLINE void UnmarkValidJumpTargets(size_t address, | 262 static FORCEINLINE void UnmarkValidJumpTargets(size_t address, |
263 size_t bytes, | 263 size_t bytes, |
264 bitmap_word *valid_targets) { | 264 bitmap_word *valid_targets) { |
265 BitmapClearBits(valid_targets, address, bytes); | 265 BitmapClearBits(valid_targets, address, bytes); |
266 } | 266 } |
267 | 267 |
268 static INLINE Bool ProcessInvalidJumpTargets( | 268 static INLINE Bool ProcessInvalidJumpTargets( |
halyavin 2013/03/18 14:23:12: /* Compare valid_targets and jump_dests and call c
khim 2013/03/19 14:54:46: Done.
269 const uint8_t *data, | 269 const uint8_t *data, |
halyavin 2013/03/18 14:23:12: data -> code
khim 2013/03/19 14:54:46: Done.
270 size_t size, | 270 size_t size, |
271 bitmap_word *valid_targets, | 271 bitmap_word *valid_targets, |
272 bitmap_word *jump_dests, | 272 bitmap_word *jump_dests, |
273 ValidationCallbackFunc user_callback, | 273 ValidationCallbackFunc user_callback, |
274 void *callback_data) { | 274 void *callback_data) { |
275 size_t elements = (size + NACL_HOST_WORDSIZE - 1) / NACL_HOST_WORDSIZE; | 275 size_t elements = (size + NACL_HOST_WORDSIZE - 1) / NACL_HOST_WORDSIZE; |
276 size_t i, j; | 276 size_t i, j; |
277 Bool result = TRUE; | 277 Bool result = TRUE; |
278 | 278 |
279 for (i = 0; i < elements ; i++) { | 279 for (i = 0; i < elements ; i++) { |
halyavin 2013/03/18 14:23:12: extra space
khim 2013/03/19 14:54:46: Done.
280 bitmap_word jump_dest_mask = jump_dests[i]; | 280 bitmap_word jump_dest_mask = jump_dests[i]; |
281 bitmap_word valid_target_mask = valid_targets[i]; | 281 bitmap_word valid_target_mask = valid_targets[i]; |
282 if ((jump_dest_mask & ~valid_target_mask) != 0) { | 282 if ((jump_dest_mask & ~valid_target_mask) != 0) { |
283 for (j = i * NACL_HOST_WORDSIZE; j < (i + 1) * NACL_HOST_WORDSIZE; j++) | 283 for (j = i * NACL_HOST_WORDSIZE; j < (i + 1) * NACL_HOST_WORDSIZE; j++) |
284 if (BitmapIsBitSet(jump_dests, j) && | 284 if (BitmapIsBitSet(jump_dests, j) && |
285 !BitmapIsBitSet(valid_targets, j)) { | 285 !BitmapIsBitSet(valid_targets, j)) { |
286 result &= user_callback(data + j, | 286 result &= user_callback(data + j, |
287 data + j, | 287 data + j, |
288 BAD_JUMP_TARGET, | 288 BAD_JUMP_TARGET, |
289 callback_data); | 289 callback_data); |
290 } | 290 } |
291 } | 291 } |
292 } | 292 } |
293 | 293 |
294 return result; | 294 return result; |
295 } | 295 } |
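A minimal sketch of a user_callback that this scan could invoke, assuming ValidationCallbackFunc has the (begin, end, info, callback_data) shape implied by the call above (the authoritative typedef lives in validator.h); the function name below is hypothetical:

    /* Hypothetical callback: reject every bad jump target reported by
     * ProcessInvalidJumpTargets.  Returning FALSE keeps the overall
     * result FALSE, as in the accumulation above. */
    static Bool RejectBadJumpTargets(const uint8_t *begin, const uint8_t *end,
                                     uint32_t info, void *callback_data) {
      (void) begin; (void) end; (void) callback_data;
      return (info & BAD_JUMP_TARGET) ? FALSE : TRUE;
    }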
296 | 296 |
297 | 297 |
298 /* | 298 /* |
299 * Process rel8_operand. Note: rip points to the beginning of the next | 299 * Process rel8_operand. Note: rip points to the beginning of the next |
300 * instruction here and x86 encoding guarantees rel8 field is the last one | 300 * instruction here and x86 encoding guarantees rel8 field is the last one |
301 * in a current instruction. | 301 * in a current instruction. |
302 */ | 302 */ |
303 static FORCEINLINE void Rel8Operand(const uint8_t *rip, | 303 static FORCEINLINE void Rel8Operand(const uint8_t *rip, |
304 const uint8_t* codeblock_start, | 304 const uint8_t* codeblock_start, |
305 bitmap_word *jump_dests, | 305 bitmap_word *jump_dests, |
306 size_t jumpdests_size, | 306 size_t jumpdests_size, |
307 uint32_t *instruction_info_collected) { | 307 uint32_t *instruction_info_collected) { |
308 int8_t offset = (uint8_t) (rip[-1]); | 308 int8_t offset = (uint8_t) (rip[-1]); |
halyavin 2013/03/18 14:23:12: remove uint8_t
khim 2013/03/19 14:54:46: Done.
309 size_t jump_dest = offset + (rip - codeblock_start); | 309 size_t jump_dest = offset + (rip - codeblock_start); |
310 | 310 |
311 if (MarkJumpTarget(jump_dest, jump_dests, jumpdests_size)) | 311 if (MarkJumpTarget(jump_dest, jump_dests, jumpdests_size)) |
312 *instruction_info_collected |= RELATIVE_8BIT; | 312 *instruction_info_collected |= RELATIVE_8BIT; |
313 else | 313 else |
314 *instruction_info_collected |= RELATIVE_8BIT | DIRECT_JUMP_OUT_OF_RANGE; | 314 *instruction_info_collected |= RELATIVE_8BIT | DIRECT_JUMP_OUT_OF_RANGE; |
315 } | 315 } |
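A quick worked example of the sign extension above (numbers are illustrative only):

    /* If the last byte of the instruction is 0xf0, the int8_t conversion
     * yields offset == -16; for a two-byte "jmp rel8" starting at codeblock
     * offset 64, rip - codeblock_start == 66, so jump_dest == 66 - 16 == 50. */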
316 | 316 |
317 /* | 317 /* |
318 * Process rel32_operand. Note: rip points to the beginning of the next | 318 * Process rel32_operand. Note: rip points to the beginning of the next |
319 * instruction here and x86 encoding guarantees rel32 field is the last one | 319 * instruction here and x86 encoding guarantees rel32 field is the last one |
320 * in a current instruction. | 320 * in a current instruction. |
321 */ | 321 */ |
322 static FORCEINLINE void Rel32Operand(const uint8_t *rip, | 322 static FORCEINLINE void Rel32Operand(const uint8_t *rip, |
323 const uint8_t* codeblock_start, | 323 const uint8_t* codeblock_start, |
324 bitmap_word *jump_dests, | 324 bitmap_word *jump_dests, |
325 size_t jumpdests_size, | 325 size_t jumpdests_size, |
326 uint32_t *instruction_info_collected) { | 326 uint32_t *instruction_info_collected) { |
327 int32_t offset = (rip[-4] + 256U * (rip[-3] + 256U * ( | 327 int32_t offset = (rip[-4] + 256U * (rip[-3] + 256U * ( |
328 rip[-2] + 256U * ((uint32_t) rip[-1])))); | 328 rip[-2] + 256U * ((uint32_t) rip[-1])))); |
halyavin 2013/03/18 14:23:12: remove uint32_t
khim 2013/03/19 14:54:46: Done.
329 size_t jump_dest = offset + (rip - codeblock_start); | 329 size_t jump_dest = offset + (rip - codeblock_start); |
330 | 330 |
331 if (MarkJumpTarget(jump_dest, jump_dests, jumpdests_size)) | 331 if (MarkJumpTarget(jump_dest, jump_dests, jumpdests_size)) |
332 *instruction_info_collected |= RELATIVE_32BIT; | 332 *instruction_info_collected |= RELATIVE_32BIT; |
333 else | 333 else |
334 *instruction_info_collected |= RELATIVE_32BIT | DIRECT_JUMP_OUT_OF_RANGE; | 334 *instruction_info_collected |= RELATIVE_32BIT | DIRECT_JUMP_OUT_OF_RANGE; |
335 } | 335 } |
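And the corresponding worked example for the 32-bit case (again, illustrative numbers only):

    /* For a five-byte "jmp rel32" encoded as e9 10 00 00 00 at codeblock
     * offset 32, rip - codeblock_start == 37; the bytes rip[-4..-1] are
     * 10 00 00 00, which decode little-endian to offset == 0x10 == 16,
     * so jump_dest == 16 + 37 == 53.  If MarkJumpTarget instead returned
     * FALSE (target out of range and not bundle-aligned), the caller
     * would also set DIRECT_JUMP_OUT_OF_RANGE. */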
336 | 336 |
337 #endif /* NATIVE_CLIENT_SRC_TRUSTED_VALIDATOR_RAGEL_VALIDATOR_INTERNAL_H_ */ | 337 #endif /* NATIVE_CLIENT_SRC_TRUSTED_VALIDATOR_RAGEL_VALIDATOR_INTERNAL_H_ */ |