OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The Native Client Authors. All rights reserved. | 2 * Copyright (c) 2012 The Native Client Authors. All rights reserved. |
3 * Use of this source code is governed by a BSD-style license that can be | 3 * Use of this source code is governed by a BSD-style license that can be |
4 * found in the LICENSE file. | 4 * found in the LICENSE file. |
5 */ | 5 */ |
6 | 6 |
7 /* | 7 /* |
8 * This file contains common parts of x86-32 and x86-64 internals (inline | 8 * This file contains common parts of x86-32 and x86-64 internals (inline |
9 * functions and defines). | 9 * functions and defines). |
10 */ | 10 */ |
11 | 11 |
12 #ifndef NATIVE_CLIENT_SRC_TRUSTED_VALIDATOR_RAGEL_VALIDATOR_INTERNAL_H_ | 12 #ifndef NATIVE_CLIENT_SRC_TRUSTED_VALIDATOR_RAGEL_VALIDATOR_INTERNAL_H_ |
13 #define NATIVE_CLIENT_SRC_TRUSTED_VALIDATOR_RAGEL_VALIDATOR_INTERNAL_H_ | 13 #define NATIVE_CLIENT_SRC_TRUSTED_VALIDATOR_RAGEL_VALIDATOR_INTERNAL_H_ |
14 | 14 |
15 #include "native_client/src/shared/platform/nacl_check.h" | 15 #include "native_client/src/shared/platform/nacl_check.h" |
16 #include "native_client/src/shared/utils/types.h" | 16 #include "native_client/src/shared/utils/types.h" |
17 #include "native_client/src/trusted/validator_ragel/unreviewed/decoding.h" | 17 #include "native_client/src/trusted/validator_ragel/decoding.h" |
18 #include "native_client/src/trusted/validator_ragel/validator.h" | 18 #include "native_client/src/trusted/validator_ragel/validator.h" |
19 | 19 |
20 /* Maximum set of R-DFA allowable CPUID features. */ | 20 /* Maximum set of R-DFA allowable CPUID features. */ |
21 extern const NaClCPUFeaturesX86 kValidatorCPUIDFeatures; | 21 extern const NaClCPUFeaturesX86 kValidatorCPUIDFeatures; |
22 | 22 |
23 /* Macroses to suppport CPUID handling. */ | 23 /* Macros to support CPUID handling. */ |
24 #define SET_CPU_FEATURE(F) \ | 24 |
25 if (!(F##_Allowed)) { \ | 25 /* |
| 26 * Main macro: FEATURE parameter here is one of the macros below, e.g. |
| 27 * SET_CPU_FEATURE(CPUFeature_AESAVX). |
| 28 */ |
| 29 #define SET_CPU_FEATURE(FEATURE) \ |
| 30 if (!(FEATURE(kValidatorCPUIDFeatures.data))) { \ |
26 instruction_info_collected |= UNRECOGNIZED_INSTRUCTION; \ | 31 instruction_info_collected |= UNRECOGNIZED_INSTRUCTION; \ |
27 } \ | 32 } \ |
28 if (!(F)) { \ | 33 if (!(FEATURE(cpu_features->data))) { \ |
29 instruction_info_collected |= CPUID_UNSUPPORTED_INSTRUCTION; \ | 34 instruction_info_collected |= CPUID_UNSUPPORTED_INSTRUCTION; \ |
30 } | 35 } |
31 #define CPUFeature_3DNOW cpu_features->data[NaClCPUFeatureX86_3DNOW] | 36 /* |
| 37 * Macros to access individual elements of NaClCPUFeaturesX86 structure, |
| 38 * e.g. CPUFeature_AESAVX(kValidatorCPUIDFeatures.data). |
| 39 */ |
| 40 #define CPUFeature_3DNOW(FEATURES) FEATURES[NaClCPUFeatureX86_3DNOW] |
32 /* | 41 /* |
33 * AMD documentation claims it's always available if CPUFeature_LM is present, | 42 * AMD documentation claims it's always available if CPUFeature_LM is present, |
34 * But Intel documentation does not even mention it! | 43 * But Intel documentation does not even mention it! |
35 * Keep it as 3DNow! instruction. | 44 * Keep it as 3DNow! instruction. |
36 */ | 45 */ |
37 #define CPUFeature_3DPRFTCH CPUFeature_3DNOW || CPUFeature_PRE | 46 #define CPUFeature_3DPRFTCH(FEATURES) \ |
38 #define CPUFeature_AES cpu_features->data[NaClCPUFeatureX86_AES] | 47 (CPUFeature_3DNOW(FEATURES) || CPUFeature_PRE(FEATURES)) |
39 #define CPUFeature_AESAVX CPUFeature_AES && CPUFeature_AVX | 48 #define CPUFeature_AES(FEATURES) FEATURES[NaClCPUFeatureX86_AES] |
40 #define CPUFeature_AVX cpu_features->data[NaClCPUFeatureX86_AVX] | 49 #define CPUFeature_AESAVX(FEATURES) \ |
41 #define CPUFeature_BMI1 cpu_features->data[NaClCPUFeatureX86_BMI1] | 50 (CPUFeature_AES(FEATURES) && CPUFeature_AVX(FEATURES)) |
42 #define CPUFeature_CLFLUSH cpu_features->data[NaClCPUFeatureX86_CLFLUSH] | 51 #define CPUFeature_AVX(FEATURES) FEATURES[NaClCPUFeatureX86_AVX] |
43 #define CPUFeature_CLMUL cpu_features->data[NaClCPUFeatureX86_CLMUL] | 52 #define CPUFeature_BMI1(FEATURES) FEATURES[NaClCPUFeatureX86_BMI1] |
44 #define CPUFeature_CLMULAVX CPUFeature_CLMUL && CPUFeature_AVX | 53 #define CPUFeature_CLFLUSH(FEATURES) FEATURES[NaClCPUFeatureX86_CLFLUSH] |
45 #define CPUFeature_CMOV cpu_features->data[NaClCPUFeatureX86_CMOV] | 54 #define CPUFeature_CLMUL(FEATURES) FEATURES[NaClCPUFeatureX86_CLMUL] |
46 #define CPUFeature_CMOVx87 CPUFeature_CMOV && CPUFeature_x87 | 55 #define CPUFeature_CLMULAVX(FEATURES) \ |
47 #define CPUFeature_CX16 cpu_features->data[NaClCPUFeatureX86_CX16] | 56 (CPUFeature_CLMUL(FEATURES) && CPUFeature_AVX(FEATURES)) |
48 #define CPUFeature_CX8 cpu_features->data[NaClCPUFeatureX86_CX8] | 57 #define CPUFeature_CMOV(FEATURES) FEATURES[NaClCPUFeatureX86_CMOV] |
49 #define CPUFeature_E3DNOW cpu_features->data[NaClCPUFeatureX86_E3DNOW] | 58 #define CPUFeature_CMOVx87(FEATURES) \ |
50 #define CPUFeature_EMMX cpu_features->data[NaClCPUFeatureX86_EMMX] | 59 (CPUFeature_CMOV(FEATURES) && CPUFeature_x87(FEATURES)) |
51 #define CPUFeature_EMMXSSE CPUFeature_EMMX || CPUFeature_SSE | 60 #define CPUFeature_CX16(FEATURES) FEATURES[NaClCPUFeatureX86_CX16] |
52 #define CPUFeature_F16C cpu_features->data[NaClCPUFeatureX86_F16C] | 61 #define CPUFeature_CX8(FEATURES) FEATURES[NaClCPUFeatureX86_CX8] |
53 #define CPUFeature_FMA cpu_features->data[NaClCPUFeatureX86_FMA] | 62 #define CPUFeature_E3DNOW(FEATURES) FEATURES[NaClCPUFeatureX86_E3DNOW] |
54 #define CPUFeature_FMA4 cpu_features->data[NaClCPUFeatureX86_FMA4] | 63 #define CPUFeature_EMMX(FEATURES) FEATURES[NaClCPUFeatureX86_EMMX] |
55 #define CPUFeature_FXSR cpu_features->data[NaClCPUFeatureX86_FXSR] | 64 #define CPUFeature_EMMXSSE(FEATURES) \ |
56 #define CPUFeature_LAHF cpu_features->data[NaClCPUFeatureX86_LAHF] | 65 (CPUFeature_EMMX(FEATURES) || CPUFeature_SSE(FEATURES)) |
57 #define CPUFeature_LM cpu_features->data[NaClCPUFeatureX86_LM] | 66 #define CPUFeature_F16C(FEATURES) FEATURES[NaClCPUFeatureX86_F16C] |
58 #define CPUFeature_LWP cpu_features->data[NaClCPUFeatureX86_LWP] | 67 #define CPUFeature_FMA(FEATURES) FEATURES[NaClCPUFeatureX86_FMA] |
| 68 #define CPUFeature_FMA4(FEATURES) FEATURES[NaClCPUFeatureX86_FMA4] |
| 69 #define CPUFeature_FXSR(FEATURES) FEATURES[NaClCPUFeatureX86_FXSR] |
| 70 #define CPUFeature_LAHF(FEATURES) FEATURES[NaClCPUFeatureX86_LAHF] |
| 71 #define CPUFeature_LM(FEATURES) FEATURES[NaClCPUFeatureX86_LM] |
| 72 #define CPUFeature_LWP(FEATURES) FEATURES[NaClCPUFeatureX86_LWP] |
59 /* | 73 /* |
60 * We allow lzcnt unconditionally | 74 * We allow lzcnt unconditionally |
61 * See http://code.google.com/p/nativeclient/issues/detail?id=2869 | 75 * See http://code.google.com/p/nativeclient/issues/detail?id=2869 |
62 */ | 76 */ |
63 #define CPUFeature_LZCNT TRUE | 77 #define CPUFeature_LZCNT(FEATURES) TRUE |
64 #define CPUFeature_MMX cpu_features->data[NaClCPUFeatureX86_MMX] | 78 #define CPUFeature_MMX(FEATURES) FEATURES[NaClCPUFeatureX86_MMX] |
65 #define CPUFeature_MON cpu_features->data[NaClCPUFeatureX86_MON] | 79 #define CPUFeature_MON(FEATURES) FEATURES[NaClCPUFeatureX86_MON] |
66 #define CPUFeature_MOVBE cpu_features->data[NaClCPUFeatureX86_MOVBE] | 80 #define CPUFeature_MOVBE(FEATURES) FEATURES[NaClCPUFeatureX86_MOVBE] |
67 #define CPUFeature_OSXSAVE cpu_features->data[NaClCPUFeatureX86_OSXSAVE] | 81 #define CPUFeature_OSXSAVE(FEATURES) FEATURES[NaClCPUFeatureX86_OSXSAVE] |
68 #define CPUFeature_POPCNT cpu_features->data[NaClCPUFeatureX86_POPCNT] | 82 #define CPUFeature_POPCNT(FEATURES) FEATURES[NaClCPUFeatureX86_POPCNT] |
69 #define CPUFeature_PRE cpu_features->data[NaClCPUFeatureX86_PRE] | 83 #define CPUFeature_PRE(FEATURES) FEATURES[NaClCPUFeatureX86_PRE] |
70 #define CPUFeature_SSE cpu_features->data[NaClCPUFeatureX86_SSE] | 84 #define CPUFeature_SSE(FEATURES) FEATURES[NaClCPUFeatureX86_SSE] |
71 #define CPUFeature_SSE2 cpu_features->data[NaClCPUFeatureX86_SSE2] | 85 #define CPUFeature_SSE2(FEATURES) FEATURES[NaClCPUFeatureX86_SSE2] |
72 #define CPUFeature_SSE3 cpu_features->data[NaClCPUFeatureX86_SSE3] | 86 #define CPUFeature_SSE3(FEATURES) FEATURES[NaClCPUFeatureX86_SSE3] |
73 #define CPUFeature_SSE41 cpu_features->data[NaClCPUFeatureX86_SSE41] | 87 #define CPUFeature_SSE41(FEATURES) FEATURES[NaClCPUFeatureX86_SSE41] |
74 #define CPUFeature_SSE42 cpu_features->data[NaClCPUFeatureX86_SSE42] | 88 #define CPUFeature_SSE42(FEATURES) FEATURES[NaClCPUFeatureX86_SSE42] |
75 #define CPUFeature_SSE4A cpu_features->data[NaClCPUFeatureX86_SSE4A] | 89 #define CPUFeature_SSE4A(FEATURES) FEATURES[NaClCPUFeatureX86_SSE4A] |
76 #define CPUFeature_SSSE3 cpu_features->data[NaClCPUFeatureX86_SSSE3] | 90 #define CPUFeature_SSSE3(FEATURES) FEATURES[NaClCPUFeatureX86_SSSE3] |
77 #define CPUFeature_TBM cpu_features->data[NaClCPUFeatureX86_TBM] | 91 #define CPUFeature_TBM(FEATURES) FEATURES[NaClCPUFeatureX86_TBM] |
78 #define CPUFeature_TSC cpu_features->data[NaClCPUFeatureX86_TSC] | 92 #define CPUFeature_TSC(FEATURES) FEATURES[NaClCPUFeatureX86_TSC] |
79 /* | 93 /* |
80 * We allow tzcnt unconditionally | 94 * We allow tzcnt unconditionally |
81 * See http://code.google.com/p/nativeclient/issues/detail?id=2869 | 95 * See http://code.google.com/p/nativeclient/issues/detail?id=2869 |
82 */ | 96 */ |
83 #define CPUFeature_TZCNT TRUE | 97 #define CPUFeature_TZCNT(FEATURES) TRUE |
84 #define CPUFeature_x87 cpu_features->data[NaClCPUFeatureX86_x87] | 98 #define CPUFeature_x87(FEATURES) FEATURES[NaClCPUFeatureX86_x87] |
85 #define CPUFeature_XOP cpu_features->data[NaClCPUFeatureX86_XOP] | 99 #define CPUFeature_XOP(FEATURES) FEATURES[NaClCPUFeatureX86_XOP] |
86 | |
87 #define CPUFeature_3DNOW_Allowed \ | |
88 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_3DNOW] | |
89 /* | |
90 * AMD documentation claims it's always available if CPUFeature_LM is present, | |
91 * But Intel documentation does not even mention it! | |
92 * Keep it as 3DNow! instruction. | |
93 */ | |
94 #define CPUFeature_3DPRFTCH_Allowed \ | |
95 CPUFeature_3DNOW_Allowed || CPUFeature_PRE_Allowed | |
96 #define CPUFeature_AES_Allowed \ | |
97 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_AES] | |
98 #define CPUFeature_AESAVX_Allowed \ | |
99 CPUFeature_AES_Allowed && CPUFeature_AVX_Allowed | |
100 #define CPUFeature_AVX_Allowed \ | |
101 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_AVX] | |
102 #define CPUFeature_BMI1_Allowed \ | |
103 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_BMI1] | |
104 #define CPUFeature_CLFLUSH_Allowed \ | |
105 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_CLFLUSH] | |
106 #define CPUFeature_CLMUL_Allowed \ | |
107 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_CLMUL] | |
108 #define CPUFeature_CLMULAVX_Allowed \ | |
109 CPUFeature_CLMUL_Allowed && CPUFeature_AVX_Allowed | |
110 #define CPUFeature_CMOV_Allowed \ | |
111 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_CMOV] | |
112 #define CPUFeature_CMOVx87_Allowed \ | |
113 CPUFeature_CMOV_Allowed && CPUFeature_x87_Allowed | |
114 #define CPUFeature_CX16_Allowed \ | |
115 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_CX16] | |
116 #define CPUFeature_CX8_Allowed \ | |
117 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_CX8] | |
118 #define CPUFeature_E3DNOW_Allowed \ | |
119 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_E3DNOW] | |
120 #define CPUFeature_EMMX_Allowed \ | |
121 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_EMMX] | |
122 #define CPUFeature_EMMXSSE_Allowed \ | |
123 CPUFeature_EMMX_Allowed || CPUFeature_SSE_Allowed | |
124 #define CPUFeature_F16C_Allowed \ | |
125 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_F16C] | |
126 #define CPUFeature_FMA_Allowed \ | |
127 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_FMA] | |
128 #define CPUFeature_FMA4_Allowed \ | |
129 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_FMA4] | |
130 #define CPUFeature_FXSR_Allowed \ | |
131 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_FXSR] | |
132 #define CPUFeature_LAHF_Allowed \ | |
133 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_LAHF] | |
134 #define CPUFeature_LM_Allowed \ | |
135 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_LM] | |
136 #define CPUFeature_LWP_Allowed \ | |
137 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_LWP] | |
138 /* | |
139 * We allow lzcnt unconditionally | |
140 * See http://code.google.com/p/nativeclient/issues/detail?id=2869 | |
141 */ | |
142 #define CPUFeature_LZCNT_Allowed TRUE | |
143 #define CPUFeature_MMX_Allowed \ | |
144 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_MMX] | |
145 #define CPUFeature_MON_Allowed \ | |
146 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_MON] | |
147 #define CPUFeature_MOVBE_Allowed \ | |
148 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_MOVBE] | |
149 #define CPUFeature_OSXSAVE_Allowed \ | |
150 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_OSXSAVE] | |
151 #define CPUFeature_POPCNT_Allowed \ | |
152 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_POPCNT] | |
153 #define CPUFeature_PRE_Allowed \ | |
154 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_PRE] | |
155 #define CPUFeature_SSE_Allowed \ | |
156 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_SSE] | |
157 #define CPUFeature_SSE2_Allowed \ | |
158 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_SSE2] | |
159 #define CPUFeature_SSE3_Allowed \ | |
160 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_SSE3] | |
161 #define CPUFeature_SSE41_Allowed \ | |
162 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_SSE41] | |
163 #define CPUFeature_SSE42_Allowed \ | |
164 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_SSE42] | |
165 #define CPUFeature_SSE4A_Allowed \ | |
166 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_SSE4A] | |
167 #define CPUFeature_SSSE3_Allowed \ | |
168 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_SSSE3] | |
169 #define CPUFeature_TBM_Allowed \ | |
170 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_TBM] | |
171 #define CPUFeature_TSC_Allowed \ | |
172 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_TSC] | |
173 /* | |
174 * We allow tzcnt unconditionally | |
175 * See http://code.google.com/p/nativeclient/issues/detail?id=2869 | |
176 */ | |
177 #define CPUFeature_TZCNT_Allowed TRUE | |
178 #define CPUFeature_x87_Allowed \ | |
179 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_x87] | |
180 #define CPUFeature_XOP_Allowed \ | |
181 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_XOP] | |
182 | 100 |
183 /* Remember some information about instruction for further processing. */ | 101 /* Remember some information about instruction for further processing. */ |
184 #define GET_REX_PREFIX() rex_prefix | 102 #define GET_REX_PREFIX() rex_prefix |
185 #define SET_REX_PREFIX(P) rex_prefix = (P) | 103 #define SET_REX_PREFIX(PREFIX_BYTE) rex_prefix = (PREFIX_BYTE) |
186 #define GET_VEX_PREFIX2() vex_prefix2 | 104 #define GET_VEX_PREFIX2() vex_prefix2 |
187 #define SET_VEX_PREFIX2(P) vex_prefix2 = (P) | 105 #define SET_VEX_PREFIX2(PREFIX_BYTE) vex_prefix2 = (PREFIX_BYTE) |
188 #define GET_VEX_PREFIX3() vex_prefix3 | 106 #define GET_VEX_PREFIX3() vex_prefix3 |
189 #define SET_VEX_PREFIX3(P) vex_prefix3 = (P) | 107 #define SET_VEX_PREFIX3(PREFIX_BYTE) vex_prefix3 = (PREFIX_BYTE) |
190 #define SET_MODRM_BASE(N) base = (N) | 108 #define SET_MODRM_BASE(REG_NUMBER) base = (REG_NUMBER) |
191 #define SET_MODRM_INDEX(N) index = (N) | 109 #define SET_MODRM_INDEX(REG_NUMBER) index = (REG_NUMBER) |
192 | 110 |
193 /* Ignore this information for now. */ | 111 /* Ignore this information for now. */ |
194 #define SET_DATA16_PREFIX(S) | 112 #define SET_DATA16_PREFIX(STATUS) |
195 #define SET_REPZ_PREFIX(S) | 113 #define SET_REPZ_PREFIX(STATUS) |
196 #define SET_REPNZ_PREFIX(S) | 114 #define SET_REPNZ_PREFIX(STATUS) |
197 #define SET_MODRM_SCALE(S) | 115 #define SET_MODRM_SCALE(VALUE) |
198 #define SET_DISP_PTR(P) | 116 #define SET_DISPLACEMENT_POINTER(POINTER) |
199 #define SET_IMM_PTR(P) | 117 #define SET_IMMEDIATE_POINTER(POINTER) |
200 #define SET_IMM2_PTR(P) | 118 #define SET_SECOND_IMMEDIATE_POINTER(POINTER) |
201 | 119 |
202 /* | 120 /* |
203 * Collect information about anyfields (offsets and immediates). | 121 * Collect information about any fields (offsets and immediates). |
204 * Note: we use += below instead of |=. This means two immediate fields will | 122 * Note: we use += below instead of |=. This means two immediate fields will |
205 * be treated as one. It's not important for safety. | 123 * be treated as one. It's not important for safety. |
206 */ | 124 */ |
207 #define SET_DISP_TYPE(T) SET_DISP_TYPE_##T | 125 #define SET_DISPLACEMENT_FORMAT(FORMAT) SET_DISPLACEMENT_FORMAT_##FORMAT |
208 #define SET_DISP_TYPE_DISPNONE | 126 #define SET_DISPLACEMENT_FORMAT_DISPNONE |
209 #define SET_DISP_TYPE_DISP8 (instruction_info_collected += DISPLACEMENT_8BIT) | 127 #define SET_DISPLACEMENT_FORMAT_DISP8 \ |
210 #define SET_DISP_TYPE_DISP32 (instruction_info_collected += DISPLACEMENT_32BIT) | 128 (instruction_info_collected += DISPLACEMENT_8BIT) |
211 #define SET_IMM_TYPE(T) SET_IMM_TYPE_##T | 129 #define SET_DISPLACEMENT_FORMAT_DISP32 \ |
212 /* imm2 field is a flag, not accumulator, like with other immediates */ | 130 (instruction_info_collected += DISPLACEMENT_32BIT) |
213 #define SET_IMM_TYPE_IMM2 (instruction_info_collected |= IMMEDIATE_2BIT) | 131 #define SET_IMMEDIATE_FORMAT(FORMAT) SET_IMMEDIATE_FORMAT_##FORMAT |
214 #define SET_IMM_TYPE_IMM8 (instruction_info_collected += IMMEDIATE_8BIT) | 132 /* imm2 field is a flag, not accumulator, like other immediates */ |
215 #define SET_IMM_TYPE_IMM16 (instruction_info_collected += IMMEDIATE_16BIT) | 133 #define SET_IMMEDIATE_FORMAT_IMM2 \ |
216 #define SET_IMM_TYPE_IMM32 (instruction_info_collected += IMMEDIATE_32BIT) | 134 (instruction_info_collected |= IMMEDIATE_2BIT) |
217 #define SET_IMM_TYPE_IMM64 (instruction_info_collected += IMMEDIATE_64BIT) | 135 #define SET_IMMEDIATE_FORMAT_IMM8 \ |
218 #define SET_IMM2_TYPE(T) SET_IMM2_TYPE_##T | 136 (instruction_info_collected += IMMEDIATE_8BIT) |
219 #define SET_IMM2_TYPE_IMM8 \ | 137 #define SET_IMMEDIATE_FORMAT_IMM16 \ |
| 138 (instruction_info_collected += IMMEDIATE_16BIT) |
| 139 #define SET_IMMEDIATE_FORMAT_IMM32 \ |
| 140 (instruction_info_collected += IMMEDIATE_32BIT) |
| 141 #define SET_IMMEDIATE_FORMAT_IMM64 \ |
| 142 (instruction_info_collected += IMMEDIATE_64BIT) |
| 143 #define SET_SECOND_IMMEDIATE_FORMAT(FORMAT) \ |
| 144 SET_SECOND_IMMEDIATE_FORMAT_##FORMAT |
| 145 #define SET_SECOND_IMMEDIATE_FORMAT_IMM8 \ |
220 (instruction_info_collected += SECOND_IMMEDIATE_8BIT) | 146 (instruction_info_collected += SECOND_IMMEDIATE_8BIT) |
221 #define SET_IMM2_TYPE_IMM16 \ | 147 #define SET_SECOND_IMMEDIATE_FORMAT_IMM16 \ |
222 (instruction_info_collected += SECOND_IMMEDIATE_16BIT) | 148 (instruction_info_collected += SECOND_IMMEDIATE_16BIT) |
223 | 149 |
224 /* Mark the destination of a jump instruction and make an early validity check: | 150 /* |
225 * to jump outside given code region, the target address must be aligned. | 151 * Mark the destination of a jump instruction and make an early validity check: |
| 152 * jump target outside of given code region must be aligned. |
226 * | 153 * |
227 * Returns TRUE iff the jump passes the early validity check. | 154 * Returns TRUE iff the jump passes the early validity check. |
228 */ | 155 */ |
229 static FORCEINLINE int MarkJumpTarget(size_t jump_dest, | 156 static FORCEINLINE int MarkJumpTarget(size_t jump_dest, |
230 bitmap_word *jump_dests, | 157 bitmap_word *jump_dests, |
231 size_t size) { | 158 size_t size) { |
232 if ((jump_dest & kBundleMask) == 0) { | 159 if ((jump_dest & kBundleMask) == 0) { |
233 return TRUE; | 160 return TRUE; |
234 } | 161 } |
235 if (jump_dest >= size) { | 162 if (jump_dest >= size) { |
(...skipping 22 matching lines...) Expand all Loading... |
258 /* | 185 /* |
259 * Mark the given addresses as invalid jump target addresses (that is: unmark | 186 * Mark the given addresses as invalid jump target addresses (that is: unmark |
260 * them). | 187 * them). |
261 */ | 188 */ |
262 static FORCEINLINE void UnmarkValidJumpTargets(size_t address, | 189 static FORCEINLINE void UnmarkValidJumpTargets(size_t address, |
263 size_t bytes, | 190 size_t bytes, |
264 bitmap_word *valid_targets) { | 191 bitmap_word *valid_targets) { |
265 BitmapClearBits(valid_targets, address, bytes); | 192 BitmapClearBits(valid_targets, address, bytes); |
266 } | 193 } |
267 | 194 |
| 195 /* |
| 196 * Compare valid_targets and jump_dests and call callback for any address in |
| 197 * jump_dests which is not present in valid_targets. |
| 198 */ |
268 static INLINE Bool ProcessInvalidJumpTargets( | 199 static INLINE Bool ProcessInvalidJumpTargets( |
269 const uint8_t *data, | 200 const uint8_t codeblock[], |
270 size_t size, | 201 size_t size, |
271 bitmap_word *valid_targets, | 202 bitmap_word *valid_targets, |
272 bitmap_word *jump_dests, | 203 bitmap_word *jump_dests, |
273 ValidationCallbackFunc user_callback, | 204 ValidationCallbackFunc user_callback, |
274 void *callback_data) { | 205 void *callback_data) { |
275 size_t elements = (size + NACL_HOST_WORDSIZE - 1) / NACL_HOST_WORDSIZE; | 206 size_t elements = (size + NACL_HOST_WORDSIZE - 1) / NACL_HOST_WORDSIZE; |
276 size_t i, j; | 207 size_t i, j; |
277 Bool result = TRUE; | 208 Bool result = TRUE; |
278 | 209 |
279 for (i = 0; i < elements ; i++) { | 210 for (i = 0; i < elements; i++) { |
280 bitmap_word jump_dest_mask = jump_dests[i]; | 211 bitmap_word jump_dest_mask = jump_dests[i]; |
281 bitmap_word valid_target_mask = valid_targets[i]; | 212 bitmap_word valid_target_mask = valid_targets[i]; |
282 if ((jump_dest_mask & ~valid_target_mask) != 0) { | 213 if ((jump_dest_mask & ~valid_target_mask) != 0) { |
283 for (j = i * NACL_HOST_WORDSIZE; j < (i + 1) * NACL_HOST_WORDSIZE; j++) | 214 for (j = i * NACL_HOST_WORDSIZE; j < (i + 1) * NACL_HOST_WORDSIZE; j++) |
284 if (BitmapIsBitSet(jump_dests, j) && | 215 if (BitmapIsBitSet(jump_dests, j) && |
285 !BitmapIsBitSet(valid_targets, j)) { | 216 !BitmapIsBitSet(valid_targets, j)) { |
286 result &= user_callback(data + j, | 217 result &= user_callback(codeblock + j, |
287 data + j, | 218 codeblock + j, |
288 BAD_JUMP_TARGET, | 219 BAD_JUMP_TARGET, |
289 callback_data); | 220 callback_data); |
290 } | 221 } |
291 } | 222 } |
292 } | 223 } |
293 | 224 |
294 return result; | 225 return result; |
295 } | 226 } |
296 | 227 |
297 | 228 |
298 /* | 229 /* |
299 * Process rel8_operand. Note: rip points to the beginning of the next | 230 * Process rel8_operand. Note: rip points to the beginning of the next |
300 * instruction here and x86 encoding guarantees rel8 field is the last one | 231 * instruction here and x86 encoding guarantees rel8 field is the last one |
301 * in a current instruction. | 232 * in a current instruction. |
302 */ | 233 */ |
303 static FORCEINLINE void Rel8Operand(const uint8_t *rip, | 234 static FORCEINLINE void Rel8Operand(const uint8_t *rip, |
304 const uint8_t* codeblock_start, | 235 const uint8_t codeblock[], |
305 bitmap_word *jump_dests, | 236 bitmap_word *jump_dests, |
306 size_t jumpdests_size, | 237 size_t jumpdests_size, |
307 uint32_t *instruction_info_collected) { | 238 uint32_t *instruction_info_collected) { |
308 int8_t offset = (uint8_t) (rip[-1]); | 239 int8_t offset = rip[-1]; |
309 size_t jump_dest = offset + (rip - codeblock_start); | 240 size_t jump_dest = offset + (rip - codeblock); |
310 | 241 |
311 if (MarkJumpTarget(jump_dest, jump_dests, jumpdests_size)) | 242 if (MarkJumpTarget(jump_dest, jump_dests, jumpdests_size)) |
312 *instruction_info_collected |= RELATIVE_8BIT; | 243 *instruction_info_collected |= RELATIVE_8BIT; |
313 else | 244 else |
314 *instruction_info_collected |= RELATIVE_8BIT | DIRECT_JUMP_OUT_OF_RANGE; | 245 *instruction_info_collected |= RELATIVE_8BIT | DIRECT_JUMP_OUT_OF_RANGE; |
315 } | 246 } |
316 | 247 |
317 /* | 248 /* |
318 * Process rel32_operand. Note: rip points to the beginning of the next | 249 * Process rel32_operand. Note: rip points to the beginning of the next |
319 * instruction here and x86 encoding guarantees rel32 field is the last one | 250 * instruction here and x86 encoding guarantees rel32 field is the last one |
320 * in a current instruction. | 251 * in a current instruction. |
321 */ | 252 */ |
322 static FORCEINLINE void Rel32Operand(const uint8_t *rip, | 253 static FORCEINLINE void Rel32Operand(const uint8_t *rip, |
323 const uint8_t* codeblock_start, | 254 const uint8_t codeblock[], |
324 bitmap_word *jump_dests, | 255 bitmap_word *jump_dests, |
325 size_t jumpdests_size, | 256 size_t jumpdests_size, |
326 uint32_t *instruction_info_collected) { | 257 uint32_t *instruction_info_collected) { |
327 int32_t offset = (rip[-4] + 256U * (rip[-3] + 256U * ( | 258 int32_t offset = |
328 rip[-2] + 256U * ((uint32_t) rip[-1])))); | 259 rip[-4] + 256U * (rip[-3] + 256U * (rip[-2] + 256U * (rip[-1]))); |
329 size_t jump_dest = offset + (rip - codeblock_start); | 260 size_t jump_dest = offset + (rip - codeblock); |
330 | 261 |
331 if (MarkJumpTarget(jump_dest, jump_dests, jumpdests_size)) | 262 if (MarkJumpTarget(jump_dest, jump_dests, jumpdests_size)) |
332 *instruction_info_collected |= RELATIVE_32BIT; | 263 *instruction_info_collected |= RELATIVE_32BIT; |
333 else | 264 else |
334 *instruction_info_collected |= RELATIVE_32BIT | DIRECT_JUMP_OUT_OF_RANGE; | 265 *instruction_info_collected |= RELATIVE_32BIT | DIRECT_JUMP_OUT_OF_RANGE; |
335 } | 266 } |
336 | 267 |
337 #endif /* NATIVE_CLIENT_SRC_TRUSTED_VALIDATOR_RAGEL_VALIDATOR_INTERNAL_H_ */ | 268 #endif /* NATIVE_CLIENT_SRC_TRUSTED_VALIDATOR_RAGEL_VALIDATOR_INTERNAL_H_ */ |
OLD | NEW |