Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(8)

Side by Side Diff: src/trusted/validator_ragel/bitmap.h

Issue 12226019: Move bitmap manipulation functions in bitmap.h (Closed) Base URL: svn://svn.chromium.org/native_client/trunk/src/native_client/
Patch Set: Created 7 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « no previous file | src/trusted/validator_ragel/gen/validator_x86_32.c » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 /* 1 /*
2 * Copyright (c) 2012 The Native Client Authors. All rights reserved. 2 * Copyright (c) 2012 The Native Client Authors. All rights reserved.
3 * Use of this source code is governed by a BSD-style license that can be 3 * Use of this source code is governed by a BSD-style license that can be
4 * found in the LICENSE file. 4 * found in the LICENSE file.
5 */ 5 */
6 6
7 /* 7 /*
8 * This file contains common parts of x86-32 and x86-64 internals (inline 8 * This file contains bitmap array manipulation functions.
9 * functions and defines).
10 */ 9 */
11 10
12 #ifndef NATIVE_CLIENT_SRC_TRUSTED_VALIDATOR_RAGEL_VALIDATOR_INTERNAL_H_ 11 #ifndef NATIVE_CLIENT_SRC_TRUSTED_VALIDATOR_RAGEL_BITMAP_H_
13 #define NATIVE_CLIENT_SRC_TRUSTED_VALIDATOR_RAGEL_VALIDATOR_INTERNAL_H_ 12 #define NATIVE_CLIENT_SRC_TRUSTED_VALIDATOR_BITMAP_H_
14 13
15 #include "native_client/src/shared/platform/nacl_check.h" 14 #include "native_client/src/include/portability.h"
16 #include "native_client/src/shared/utils/types.h"
17 #include "native_client/src/trusted/validator_ragel/unreviewed/decoding.h"
18 #include "native_client/src/trusted/validator_ragel/validator.h"
19 15
20 /* Maximum set of R-DFA allowable CPUID features. */ 16 #if NACL_WINDOWS
21 extern const NaClCPUFeaturesX86 kValidatorCPUIDFeatures; 17 # define FORCEINLINE __forceinline
22 18 #else
23 /* Macros to support CPUID handling. */ 19 # define FORCEINLINE __inline __attribute__ ((always_inline))
24 #define SET_CPU_FEATURE(F) \ 20 #endif
25 if (!(F##_Allowed)) { \
26 instruction_info_collected |= UNRECOGNIZED_INSTRUCTION; \
27 } \
28 if (!(F)) { \
29 instruction_info_collected |= CPUID_UNSUPPORTED_INSTRUCTION; \
30 }
31 #define CPUFeature_3DNOW cpu_features->data[NaClCPUFeatureX86_3DNOW]
32 /*
33 * AMD documentation claims it's always available if CPUFeature_LM is present,
34 * But Intel documentation does not even mention it!
35 * Keep it as 3DNow! instruction.
36 */
37 #define CPUFeature_3DPRFTCH CPUFeature_3DNOW || CPUFeature_PRE
38 #define CPUFeature_AES cpu_features->data[NaClCPUFeatureX86_AES]
39 #define CPUFeature_AESAVX CPUFeature_AES && CPUFeature_AVX
40 #define CPUFeature_AVX cpu_features->data[NaClCPUFeatureX86_AVX]
41 #define CPUFeature_BMI1 cpu_features->data[NaClCPUFeatureX86_BMI1]
42 #define CPUFeature_CLFLUSH cpu_features->data[NaClCPUFeatureX86_CLFLUSH]
43 #define CPUFeature_CLMUL cpu_features->data[NaClCPUFeatureX86_CLMUL]
44 #define CPUFeature_CLMULAVX CPUFeature_CLMUL && CPUFeature_AVX
45 #define CPUFeature_CMOV cpu_features->data[NaClCPUFeatureX86_CMOV]
46 #define CPUFeature_CMOVx87 CPUFeature_CMOV && CPUFeature_x87
47 #define CPUFeature_CX16 cpu_features->data[NaClCPUFeatureX86_CX16]
48 #define CPUFeature_CX8 cpu_features->data[NaClCPUFeatureX86_CX8]
49 #define CPUFeature_E3DNOW cpu_features->data[NaClCPUFeatureX86_E3DNOW]
50 #define CPUFeature_EMMX cpu_features->data[NaClCPUFeatureX86_EMMX]
51 #define CPUFeature_EMMXSSE CPUFeature_EMMX || CPUFeature_SSE
52 #define CPUFeature_F16C cpu_features->data[NaClCPUFeatureX86_F16C]
53 #define CPUFeature_FMA cpu_features->data[NaClCPUFeatureX86_FMA]
54 #define CPUFeature_FMA4 cpu_features->data[NaClCPUFeatureX86_FMA4]
55 #define CPUFeature_FXSR cpu_features->data[NaClCPUFeatureX86_FXSR]
56 #define CPUFeature_LAHF cpu_features->data[NaClCPUFeatureX86_LAHF]
57 #define CPUFeature_LM cpu_features->data[NaClCPUFeatureX86_LM]
58 #define CPUFeature_LWP cpu_features->data[NaClCPUFeatureX86_LWP]
59 /*
60 * We allow lzcnt unconditionally
61 * See http://code.google.com/p/nativeclient/issues/detail?id=2869
62 */
63 #define CPUFeature_LZCNT TRUE
64 #define CPUFeature_MMX cpu_features->data[NaClCPUFeatureX86_MMX]
65 #define CPUFeature_MON cpu_features->data[NaClCPUFeatureX86_MON]
66 #define CPUFeature_MOVBE cpu_features->data[NaClCPUFeatureX86_MOVBE]
67 #define CPUFeature_OSXSAVE cpu_features->data[NaClCPUFeatureX86_OSXSAVE]
68 #define CPUFeature_POPCNT cpu_features->data[NaClCPUFeatureX86_POPCNT]
69 #define CPUFeature_PRE cpu_features->data[NaClCPUFeatureX86_PRE]
70 #define CPUFeature_SSE cpu_features->data[NaClCPUFeatureX86_SSE]
71 #define CPUFeature_SSE2 cpu_features->data[NaClCPUFeatureX86_SSE2]
72 #define CPUFeature_SSE3 cpu_features->data[NaClCPUFeatureX86_SSE3]
73 #define CPUFeature_SSE41 cpu_features->data[NaClCPUFeatureX86_SSE41]
74 #define CPUFeature_SSE42 cpu_features->data[NaClCPUFeatureX86_SSE42]
75 #define CPUFeature_SSE4A cpu_features->data[NaClCPUFeatureX86_SSE4A]
76 #define CPUFeature_SSSE3 cpu_features->data[NaClCPUFeatureX86_SSSE3]
77 #define CPUFeature_TBM cpu_features->data[NaClCPUFeatureX86_TBM]
78 #define CPUFeature_TSC cpu_features->data[NaClCPUFeatureX86_TSC]
79 /*
80 * We allow tzcnt unconditionally
81 * See http://code.google.com/p/nativeclient/issues/detail?id=2869
82 */
83 #define CPUFeature_TZCNT TRUE
84 #define CPUFeature_x87 cpu_features->data[NaClCPUFeatureX86_x87]
85 #define CPUFeature_XOP cpu_features->data[NaClCPUFeatureX86_XOP]
86
87 #define CPUFeature_3DNOW_Allowed \
88 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_3DNOW]
89 /*
90 * AMD documentation claims it's always available if CPUFeature_LM is present,
91 * But Intel documentation does not even mention it!
92 * Keep it as 3DNow! instruction.
93 */
94 #define CPUFeature_3DPRFTCH_Allowed \
95 CPUFeature_3DNOW_Allowed || CPUFeature_PRE_Allowed
96 #define CPUFeature_AES_Allowed \
97 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_AES]
98 #define CPUFeature_AESAVX_Allowed \
99 CPUFeature_AES_Allowed && CPUFeature_AVX_Allowed
100 #define CPUFeature_AVX_Allowed \
101 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_AVX]
102 #define CPUFeature_BMI1_Allowed \
103 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_BMI1]
104 #define CPUFeature_CLFLUSH_Allowed \
105 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_CLFLUSH]
106 #define CPUFeature_CLMUL_Allowed \
107 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_CLMUL]
108 #define CPUFeature_CLMULAVX_Allowed \
109 CPUFeature_CLMUL_Allowed && CPUFeature_AVX_Allowed
110 #define CPUFeature_CMOV_Allowed \
111 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_CMOV]
112 #define CPUFeature_CMOVx87_Allowed \
113 CPUFeature_CMOV_Allowed && CPUFeature_x87_Allowed
114 #define CPUFeature_CX16_Allowed \
115 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_CX16]
116 #define CPUFeature_CX8_Allowed \
117 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_CX8]
118 #define CPUFeature_E3DNOW_Allowed \
119 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_E3DNOW]
120 #define CPUFeature_EMMX_Allowed \
121 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_EMMX]
122 #define CPUFeature_EMMXSSE_Allowed \
123 CPUFeature_EMMX_Allowed || CPUFeature_SSE_Allowed
124 #define CPUFeature_F16C_Allowed \
125 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_F16C]
126 #define CPUFeature_FMA_Allowed \
127 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_FMA]
128 #define CPUFeature_FMA4_Allowed \
129 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_FMA4]
130 #define CPUFeature_FXSR_Allowed \
131 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_FXSR]
132 #define CPUFeature_LAHF_Allowed \
133 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_LAHF]
134 #define CPUFeature_LM_Allowed \
135 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_LM]
136 #define CPUFeature_LWP_Allowed \
137 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_LWP]
138 /*
139 * We allow lzcnt unconditionally
140 * See http://code.google.com/p/nativeclient/issues/detail?id=2869
141 */
142 #define CPUFeature_LZCNT_Allowed TRUE
143 #define CPUFeature_MMX_Allowed \
144 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_MMX]
145 #define CPUFeature_MON_Allowed \
146 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_MON]
147 #define CPUFeature_MOVBE_Allowed \
148 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_MOVBE]
149 #define CPUFeature_OSXSAVE_Allowed \
150 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_OSXSAVE]
151 #define CPUFeature_POPCNT_Allowed \
152 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_POPCNT]
153 #define CPUFeature_PRE_Allowed \
154 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_PRE]
155 #define CPUFeature_SSE_Allowed \
156 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_SSE]
157 #define CPUFeature_SSE2_Allowed \
158 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_SSE2]
159 #define CPUFeature_SSE3_Allowed \
160 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_SSE3]
161 #define CPUFeature_SSE41_Allowed \
162 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_SSE41]
163 #define CPUFeature_SSE42_Allowed \
164 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_SSE42]
165 #define CPUFeature_SSE4A_Allowed \
166 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_SSE4A]
167 #define CPUFeature_SSSE3_Allowed \
168 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_SSSE3]
169 #define CPUFeature_TBM_Allowed \
170 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_TBM]
171 #define CPUFeature_TSC_Allowed \
172 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_TSC]
173 /*
174 * We allow tzcnt unconditionally
175 * See http://code.google.com/p/nativeclient/issues/detail?id=2869
176 */
177 #define CPUFeature_TZCNT_Allowed TRUE
178 #define CPUFeature_x87_Allowed \
179 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_x87]
180 #define CPUFeature_XOP_Allowed \
181 kValidatorCPUIDFeatures.data[NaClCPUFeatureX86_XOP]
182
183 /* Remember some information about instruction for further processing. */
184 #define GET_REX_PREFIX() rex_prefix
185 #define SET_REX_PREFIX(P) rex_prefix = (P)
186 #define GET_VEX_PREFIX2() vex_prefix2
187 #define SET_VEX_PREFIX2(P) vex_prefix2 = (P)
188 #define GET_VEX_PREFIX3() vex_prefix3
189 #define SET_VEX_PREFIX3(P) vex_prefix3 = (P)
190 #define SET_MODRM_BASE(N) base = (N)
191 #define SET_MODRM_INDEX(N) index = (N)
192
/*
 * Classification of how a written operand interacts with the sandboxing
 * rules.  Values are packed (together with the register name) into
 * operand_states by the SET_OPERAND_NAME/SET_OPERAND_TYPE_* macros, three
 * bits per operand starting at bit 5 of each operand's byte.
 */
enum OperandKind {
  OPERAND_SANDBOX_IRRELEVANT = 0,
  /*
   * Currently we do not distinguish 8bit and 16bit modifications from
   * OPERAND_SANDBOX_UNRESTRICTED to match the behavior of the old validator.
   *
   * 8bit operands must be distinguished from other types because the REX prefix
   * regulates the choice between %ah and %spl, as well as %ch and %bpl.
   */
  OPERAND_SANDBOX_8BIT,
  OPERAND_SANDBOX_RESTRICTED,
  OPERAND_SANDBOX_UNRESTRICTED
};
206
207 #define SET_OPERAND_NAME(N, S) operand_states |= ((S) << ((N) << 3))
208 #define SET_OPERAND_TYPE(N, T) SET_OPERAND_TYPE_ ## T(N)
209 #define SET_OPERAND_TYPE_OPERAND_TYPE_8_BIT(N) \
210 operand_states |= OPERAND_SANDBOX_8BIT << (5 + ((N) << 3))
211 #define SET_OPERAND_TYPE_OPERAND_TYPE_16_BIT(N) \
212 operand_states |= OPERAND_SANDBOX_UNRESTRICTED << (5 + ((N) << 3))
213 #define SET_OPERAND_TYPE_OPERAND_TYPE_32_BIT(N) \
214 operand_states |= OPERAND_SANDBOX_RESTRICTED << (5 + ((N) << 3))
215 #define SET_OPERAND_TYPE_OPERAND_TYPE_64_BIT(N) \
216 operand_states |= OPERAND_SANDBOX_UNRESTRICTED << (5 + ((N) << 3))
217 #define CHECK_OPERAND(N, S, T) \
218 ((operand_states & (0xff << ((N) << 3))) == ((S | (T << 5)) << ((N) << 3)))
219
220 /* Ignore this information for now. */
221 #define SET_DATA16_PREFIX(S)
222 #define SET_REPZ_PREFIX(S)
223 #define SET_REPNZ_PREFIX(S)
224 #define SET_MODRM_SCALE(S)
225 #define SET_DISP_PTR(P)
226 #define SET_IMM_PTR(P)
227 #define SET_IMM2_PTR(P)
228
229 /*
230 * Collect information about anyfields (offsets and immediates).
231 * Note: we use += below instead of |=. This means two immediate fields will
232 * be treated as one. It's not important for safety.
233 */
234 #define SET_DISP_TYPE(T) SET_DISP_TYPE_##T
235 #define SET_DISP_TYPE_DISPNONE
236 #define SET_DISP_TYPE_DISP8 (instruction_info_collected += DISPLACEMENT_8BIT)
237 #define SET_DISP_TYPE_DISP32 (instruction_info_collected += DISPLACEMENT_32BIT)
238 #define SET_IMM_TYPE(T) SET_IMM_TYPE_##T
239 /* imm2 field is a flag, not accumulator, like with other immediates */
240 #define SET_IMM_TYPE_IMM2 (instruction_info_collected |= IMMEDIATE_2BIT)
241 #define SET_IMM_TYPE_IMM8 (instruction_info_collected += IMMEDIATE_8BIT)
242 #define SET_IMM_TYPE_IMM16 (instruction_info_collected += IMMEDIATE_16BIT)
243 #define SET_IMM_TYPE_IMM32 (instruction_info_collected += IMMEDIATE_32BIT)
244 #define SET_IMM_TYPE_IMM64 (instruction_info_collected += IMMEDIATE_64BIT)
245 #define SET_IMM2_TYPE(T) SET_IMM2_TYPE_##T
246 #define SET_IMM2_TYPE_IMM8 \
247 (instruction_info_collected += SECOND_IMMEDIATE_8BIT)
248 #define SET_IMM2_TYPE_IMM16 \
249 (instruction_info_collected += SECOND_IMMEDIATE_16BIT)
250 21
251 #define BITMAP_WORD_NAME BITMAP_WORD_NAME1(NACL_HOST_WORDSIZE) 22 #define BITMAP_WORD_NAME BITMAP_WORD_NAME1(NACL_HOST_WORDSIZE)
252 #define BITMAP_WORD_NAME1(size) BITMAP_WORD_NAME2(size) 23 #define BITMAP_WORD_NAME1(size) BITMAP_WORD_NAME2(size)
253 #define BITMAP_WORD_NAME2(size) uint##size##_t 24 #define BITMAP_WORD_NAME2(size) uint##size##_t
254 25
255 typedef BITMAP_WORD_NAME bitmap_word; 26 typedef BITMAP_WORD_NAME bitmap_word;
256 27
257 static INLINE bitmap_word *BitmapAllocate(size_t indexes) { 28 static INLINE bitmap_word *BitmapAllocate(size_t indexes) {
258 bitmap_word *bitmap; 29 NACL_COMPILE_TIME_ASSERT((NACL_HOST_WORDSIZE / 8) == sizeof(bitmap_word));
259 size_t byte_count = ((indexes + NACL_HOST_WORDSIZE - 1) / NACL_HOST_WORDSIZE)* 30 size_t word_count = ((indexes + NACL_HOST_WORDSIZE - 1) / NACL_HOST_WORDSIZE);
260 sizeof *bitmap; 31 return calloc(sizeof(bitmap_word), word_count);
261 bitmap = malloc(byte_count);
262 if (bitmap != NULL) {
263 memset(bitmap, 0, byte_count);
264 }
265 return bitmap;
266 } 32 }
267 33
268 static FORCEINLINE int BitmapIsBitSet(bitmap_word *bitmap, size_t index) { 34 static FORCEINLINE int BitmapIsBitSet(bitmap_word *bitmap, size_t index) {
269 return (bitmap[index / NACL_HOST_WORDSIZE] & 35 return (bitmap[index / NACL_HOST_WORDSIZE] &
270 (((bitmap_word)1) << (index % NACL_HOST_WORDSIZE))) != 0; 36 (((bitmap_word)1) << (index % NACL_HOST_WORDSIZE))) != 0;
271 } 37 }
272 38
273 static FORCEINLINE void BitmapSetBit(bitmap_word *bitmap, size_t index) { 39 static FORCEINLINE void BitmapSetBit(bitmap_word *bitmap, size_t index) {
274 bitmap[index / NACL_HOST_WORDSIZE] |= 40 bitmap[index / NACL_HOST_WORDSIZE] |=
275 ((bitmap_word)1) << (index % NACL_HOST_WORDSIZE); 41 ((bitmap_word)1) << (index % NACL_HOST_WORDSIZE);
(...skipping 19 matching lines...) Expand all
295 ((((bitmap_word)1) << bits) - 1) << (index % NACL_HOST_WORDSIZE); 61 ((((bitmap_word)1) << bits) - 1) << (index % NACL_HOST_WORDSIZE);
296 } 62 }
297 63
298 /* All the bits must be in a single 32-bit bundle. */ 64 /* All the bits must be in a single 32-bit bundle. */
299 static FORCEINLINE void BitmapClearBits(bitmap_word *bitmap, 65 static FORCEINLINE void BitmapClearBits(bitmap_word *bitmap,
300 size_t index, size_t bits) { 66 size_t index, size_t bits) {
301 bitmap[index / NACL_HOST_WORDSIZE] &= 67 bitmap[index / NACL_HOST_WORDSIZE] &=
302 ~(((((bitmap_word)1) << bits) - 1) << (index % NACL_HOST_WORDSIZE)); 68 ~(((((bitmap_word)1) << bits) - 1) << (index % NACL_HOST_WORDSIZE));
303 } 69 }
304 70
/* Mark the destination of a jump instruction and make an early validity check:
 * to jump outside given code region, the target address must be aligned.
 *
 * Returns TRUE iff the jump passes the early validity check.
 */
static FORCEINLINE int MarkJumpTarget(size_t jump_dest,
                                      bitmap_word *jump_dests,
                                      size_t size) {
  /* Bundle-aligned destinations are always acceptable, even outside the code
   * region, so they are not recorded for the later cross-check against
   * valid_targets (see ProcessInvalidJumpTargets). */
  if ((jump_dest & kBundleMask) == 0) {
    return TRUE;
  }
  /* An unaligned destination outside the code region can never be valid. */
  if (jump_dest >= size) {
    return FALSE;
  }
  /* Record the destination; final validity is decided later by comparing
   * jump_dests against valid_targets. */
  BitmapSetBit(jump_dests, jump_dest);
  return TRUE;
}
322
323 /*
324 * Mark the given address as valid jump target address.
325 */
326 static FORCEINLINE void MarkValidJumpTarget(size_t address,
327 bitmap_word *valid_targets) {
328 BitmapSetBit(valid_targets, address);
329 }
330
/*
 * Mark the given address as invalid jump target address (that is: unmark it).
 * Thin wrapper over BitmapClearBit kept for readability at call sites.
 */
static FORCEINLINE void UnmarkValidJumpTarget(size_t address,
                                              bitmap_word *valid_targets) {
  BitmapClearBit(valid_targets, address);
}
338
339 /*
340 * Mark the given addresses as invalid jump target addresses (that is: unmark
341 * them).
342 */
343 static FORCEINLINE void UnmarkValidJumpTargets(size_t address,
344 size_t bytes,
345 bitmap_word *valid_targets) {
346 BitmapClearBits(valid_targets, address, bytes);
347 }
348
/*
 * Report every recorded jump destination that is not a valid jump target.
 *
 * data            start of the code region (callback receives data + offset)
 * size            size of the code region in bytes
 * valid_targets   bitmap of addresses that may be jumped to
 * jump_dests      bitmap of addresses that jump instructions actually target
 * user_callback   invoked once per bad target with BAD_JUMP_TARGET
 * callback_data   opaque pointer forwarded to user_callback
 *
 * Returns the AND of all callback results; TRUE when there are no bad
 * targets at all.
 */
static INLINE Bool ProcessInvalidJumpTargets(
    const uint8_t *data,
    size_t size,
    bitmap_word *valid_targets,
    bitmap_word *jump_dests,
    ValidationCallbackFunc user_callback,
    void *callback_data) {
  size_t elements = (size + NACL_HOST_WORDSIZE - 1) / NACL_HOST_WORDSIZE;
  size_t i, j;
  Bool result = TRUE;

  for (i = 0; i < elements ; i++) {
    /* Fast path: compare a whole word of targets at once and only fall back
     * to the per-bit scan when this word contains at least one violation. */
    bitmap_word jump_dest_mask = jump_dests[i];
    bitmap_word valid_target_mask = valid_targets[i];
    if ((jump_dest_mask & ~valid_target_mask) != 0) {
      for (j = i * NACL_HOST_WORDSIZE; j < (i + 1) * NACL_HOST_WORDSIZE; j++)
        if (BitmapIsBitSet(jump_dests, j) &&
            !BitmapIsBitSet(valid_targets, j)) {
          /* Begin and end of the "instruction" are the same byte here. */
          result &= user_callback(data + j,
                                  data + j,
                                  BAD_JUMP_TARGET,
                                  callback_data);
        }
    }
  }

  return result;
}
377
378
/*
 * Process rel8_operand. Note: rip points to the beginning of the next
 * instruction here and x86 encoding guarantees rel8 field is the last one
 * in a current instruction.
 */
static FORCEINLINE void Rel8Operand(const uint8_t *rip,
                                    const uint8_t* codeblock_start,
                                    bitmap_word *jump_dests,
                                    size_t jumpdests_size,
                                    uint32_t *instruction_info_collected) {
  /* Reinterpret the last instruction byte as a signed 8-bit displacement. */
  int8_t offset = (uint8_t) (rip[-1]);
  /* rel8 is relative to the next instruction, i.e. to rip itself;
   * (rip - codeblock_start) is the offset of that point within the code. */
  size_t jump_dest = offset + (rip - codeblock_start);

  if (MarkJumpTarget(jump_dest, jump_dests, jumpdests_size))
    *instruction_info_collected |= RELATIVE_8BIT;
  else
    *instruction_info_collected |= RELATIVE_8BIT | DIRECT_JUMP_OUT_OF_RANGE;
}
397
/*
 * Process rel32_operand. Note: rip points to the beginning of the next
 * instruction here and x86 encoding guarantees rel32 field is the last one
 * in a current instruction.
 */
static FORCEINLINE void Rel32Operand(const uint8_t *rip,
                                     const uint8_t* codeblock_start,
                                     bitmap_word *jump_dests,
                                     size_t jumpdests_size,
                                     uint32_t *instruction_info_collected) {
  /* Assemble the little-endian 32-bit displacement byte by byte (avoids an
   * unaligned load).  NOTE(review): the final uint32_t-to-int32_t conversion
   * is implementation-defined for values >= 2^31; all supported compilers
   * wrap as expected — confirm if porting. */
  int32_t offset = (rip[-4] + 256U * (rip[-3] + 256U * (
                    rip[-2] + 256U * ((uint32_t) rip[-1]))));
  /* rel32 is relative to the next instruction, i.e. to rip itself. */
  size_t jump_dest = offset + (rip - codeblock_start);

  if (MarkJumpTarget(jump_dest, jump_dests, jumpdests_size))
    *instruction_info_collected |= RELATIVE_32BIT;
  else
    *instruction_info_collected |= RELATIVE_32BIT | DIRECT_JUMP_OUT_OF_RANGE;
}
417
418 static INLINE void CheckAccess(ptrdiff_t instruction_start,
419 enum OperandName base,
420 enum OperandName index,
421 uint8_t restricted_register,
422 bitmap_word *valid_targets,
423 uint32_t *instruction_info_collected) {
424 if ((base == REG_RIP) || (base == REG_R15) ||
425 (base == REG_RSP) || (base == REG_RBP)) {
426 if ((index == NO_REG) || (index == REG_RIZ))
427 { /* do nothing. */ }
428 else if (index == restricted_register)
429 BitmapClearBit(valid_targets, instruction_start),
430 *instruction_info_collected |= RESTRICTED_REGISTER_USED;
431 else
432 *instruction_info_collected |= UNRESTRICTED_INDEX_REGISTER;
433 } else {
434 *instruction_info_collected |= FORBIDDEN_BASE_REGISTER;
435 }
436 }
437
438
439 static INLINE void Process0Operands(enum OperandName *restricted_register,
440 uint32_t *instruction_info_collected) {
441 /* Restricted %rsp or %rbp must be processed by appropriate nacl-special
442 * instruction, not with regular instruction. */
443 if (*restricted_register == REG_RSP) {
444 *instruction_info_collected |= RESTRICTED_RSP_UNPROCESSED;
445 } else if (*restricted_register == REG_RBP) {
446 *instruction_info_collected |= RESTRICTED_RBP_UNPROCESSED;
447 }
448 *restricted_register = NO_REG;
449 }
450
/*
 * Process an instruction that writes one register operand (operand 0 in
 * operand_states), flagging writes to the protected registers
 * %r15/%rbp/%rsp.  This variant is for instructions whose 32-bit forms do
 * NOT zero-extend usefully, so a restricted write does not produce a new
 * restricted register.
 */
static INLINE void Process1Operand(enum OperandName *restricted_register,
                                   uint32_t *instruction_info_collected,
                                   uint8_t rex_prefix,
                                   uint32_t operand_states) {
  /* Restricted %rsp or %rbp must be processed by appropriate nacl-special
   * instruction, not with regular instruction. */
  if (*restricted_register == REG_RSP) {
    *instruction_info_collected |= RESTRICTED_RSP_UNPROCESSED;
  } else if (*restricted_register == REG_RBP) {
    *instruction_info_collected |= RESTRICTED_RBP_UNPROCESSED;
  }
  *restricted_register = NO_REG;
  /* Any write to %r15 is forbidden. */
  if (CHECK_OPERAND(0, REG_R15, OPERAND_SANDBOX_8BIT) ||
      CHECK_OPERAND(0, REG_R15, OPERAND_SANDBOX_RESTRICTED) ||
      CHECK_OPERAND(0, REG_R15, OPERAND_SANDBOX_UNRESTRICTED)) {
    *instruction_info_collected |= R15_MODIFIED;
  /* An 8-bit write only touches %bpl/%spl when a REX prefix is present
   * (without REX the same encoding names %ch/%ah — see enum OperandKind). */
  } else if ((CHECK_OPERAND(0, REG_RBP, OPERAND_SANDBOX_8BIT) && rex_prefix) ||
             CHECK_OPERAND(0, REG_RBP, OPERAND_SANDBOX_RESTRICTED) ||
             CHECK_OPERAND(0, REG_RBP, OPERAND_SANDBOX_UNRESTRICTED)) {
    *instruction_info_collected |= BPL_MODIFIED;
  } else if ((CHECK_OPERAND(0, REG_RSP, OPERAND_SANDBOX_8BIT) && rex_prefix) ||
             CHECK_OPERAND(0, REG_RSP, OPERAND_SANDBOX_RESTRICTED) ||
             CHECK_OPERAND(0, REG_RSP, OPERAND_SANDBOX_UNRESTRICTED)) {
    *instruction_info_collected |= SPL_MODIFIED;
  }
}
477
/*
 * Like Process1Operand, but for instructions whose 32-bit form zero-extends
 * the result: a 32-bit (OPERAND_SANDBOX_RESTRICTED) write produces a new
 * restricted register instead of being reported outright, which is why the
 * RESTRICTED cases for %rbp/%rsp are absent from the chains below.
 */
static INLINE void Process1OperandZeroExtends(
    enum OperandName *restricted_register,
    uint32_t *instruction_info_collected,
    uint8_t rex_prefix,
    uint32_t operand_states) {
  /* Restricted %rsp or %rbp must be processed by appropriate nacl-special
   * instruction, not with regular instruction. */
  if (*restricted_register == REG_RSP) {
    *instruction_info_collected |= RESTRICTED_RSP_UNPROCESSED;
  } else if (*restricted_register == REG_RBP) {
    *instruction_info_collected |= RESTRICTED_RBP_UNPROCESSED;
  }
  *restricted_register = NO_REG;
  /* Any write to %r15 is forbidden. */
  if (CHECK_OPERAND(0, REG_R15, OPERAND_SANDBOX_8BIT) ||
      CHECK_OPERAND(0, REG_R15, OPERAND_SANDBOX_RESTRICTED) ||
      CHECK_OPERAND(0, REG_R15, OPERAND_SANDBOX_UNRESTRICTED)) {
    *instruction_info_collected |= R15_MODIFIED;
  /* 8-bit writes touch %bpl/%spl only under a REX prefix. */
  } else if ((CHECK_OPERAND(0, REG_RBP, OPERAND_SANDBOX_8BIT) && rex_prefix) ||
             CHECK_OPERAND(0, REG_RBP, OPERAND_SANDBOX_UNRESTRICTED)) {
    *instruction_info_collected |= BPL_MODIFIED;
  } else if ((CHECK_OPERAND(0, REG_RSP, OPERAND_SANDBOX_8BIT) && rex_prefix) ||
             CHECK_OPERAND(0, REG_RSP, OPERAND_SANDBOX_UNRESTRICTED)) {
    *instruction_info_collected |= SPL_MODIFIED;
  /* Take 2 bits of operand type from operand_states as *restricted_register,
   * make sure operand_states denotes a register (4th bit == 0). */
  } else if ((operand_states & 0x70) == (OPERAND_SANDBOX_RESTRICTED << 5)) {
    *restricted_register = operand_states & 0x0f;
  }
}
507
/*
 * Process an instruction that writes two register operands (operands 0 and 1
 * in operand_states), flagging writes to the protected registers
 * %r15/%rbp/%rsp in either position.  This variant is for instructions whose
 * 32-bit forms do NOT zero-extend usefully, so no new restricted register is
 * produced.
 */
static INLINE void Process2Operands(enum OperandName *restricted_register,
                                    uint32_t *instruction_info_collected,
                                    uint8_t rex_prefix,
                                    uint32_t operand_states) {
  /* Restricted %rsp or %rbp must be processed by appropriate nacl-special
   * instruction, not with regular instruction. */
  if (*restricted_register == REG_RSP) {
    *instruction_info_collected |= RESTRICTED_RSP_UNPROCESSED;
  } else if (*restricted_register == REG_RBP) {
    *instruction_info_collected |= RESTRICTED_RBP_UNPROCESSED;
  }
  *restricted_register = NO_REG;
  /* Any write to %r15 through either operand is forbidden. */
  if (CHECK_OPERAND(0, REG_R15, OPERAND_SANDBOX_8BIT) ||
      CHECK_OPERAND(0, REG_R15, OPERAND_SANDBOX_RESTRICTED) ||
      CHECK_OPERAND(0, REG_R15, OPERAND_SANDBOX_UNRESTRICTED) ||
      CHECK_OPERAND(1, REG_R15, OPERAND_SANDBOX_8BIT) ||
      CHECK_OPERAND(1, REG_R15, OPERAND_SANDBOX_RESTRICTED) ||
      CHECK_OPERAND(1, REG_R15, OPERAND_SANDBOX_UNRESTRICTED)) {
    *instruction_info_collected |= R15_MODIFIED;
  /* 8-bit writes touch %bpl/%spl only under a REX prefix. */
  } else if ((CHECK_OPERAND(0, REG_RBP, OPERAND_SANDBOX_8BIT) && rex_prefix) ||
             CHECK_OPERAND(0, REG_RBP, OPERAND_SANDBOX_RESTRICTED) ||
             CHECK_OPERAND(0, REG_RBP, OPERAND_SANDBOX_UNRESTRICTED) ||
             (CHECK_OPERAND(1, REG_RBP, OPERAND_SANDBOX_8BIT) && rex_prefix) ||
             CHECK_OPERAND(1, REG_RBP, OPERAND_SANDBOX_RESTRICTED) ||
             CHECK_OPERAND(1, REG_RBP, OPERAND_SANDBOX_UNRESTRICTED)) {
    *instruction_info_collected |= BPL_MODIFIED;
  } else if ((CHECK_OPERAND(0, REG_RSP, OPERAND_SANDBOX_8BIT) && rex_prefix) ||
             CHECK_OPERAND(0, REG_RSP, OPERAND_SANDBOX_RESTRICTED) ||
             CHECK_OPERAND(0, REG_RSP, OPERAND_SANDBOX_UNRESTRICTED) ||
             (CHECK_OPERAND(1, REG_RSP, OPERAND_SANDBOX_8BIT) && rex_prefix) ||
             CHECK_OPERAND(1, REG_RSP, OPERAND_SANDBOX_RESTRICTED) ||
             CHECK_OPERAND(1, REG_RSP, OPERAND_SANDBOX_UNRESTRICTED)) {
    *instruction_info_collected |= SPL_MODIFIED;
  }
}
543
/*
 * Like Process2Operands, but for instructions whose 32-bit form zero-extends
 * the result: a 32-bit (OPERAND_SANDBOX_RESTRICTED) write to a register other
 * than %r15/%rbp/%rsp produces a new restricted register instead of being
 * reported, which is why the RESTRICTED cases for %rbp/%rsp are absent from
 * the chains below.
 */
static INLINE void Process2OperandsZeroExtends(
    enum OperandName *restricted_register,
    uint32_t *instruction_info_collected,
    uint8_t rex_prefix,
    uint32_t operand_states) {
  /* Restricted %rsp or %rbp must be processed by appropriate nacl-special
   * instruction, not with regular instruction. */
  if (*restricted_register == REG_RSP) {
    *instruction_info_collected |= RESTRICTED_RSP_UNPROCESSED;
  } else if (*restricted_register == REG_RBP) {
    *instruction_info_collected |= RESTRICTED_RBP_UNPROCESSED;
  }
  *restricted_register = NO_REG;
  /* Any write to %r15 through either operand is forbidden. */
  if (CHECK_OPERAND(0, REG_R15, OPERAND_SANDBOX_8BIT) ||
      CHECK_OPERAND(0, REG_R15, OPERAND_SANDBOX_RESTRICTED) ||
      CHECK_OPERAND(0, REG_R15, OPERAND_SANDBOX_UNRESTRICTED) ||
      CHECK_OPERAND(1, REG_R15, OPERAND_SANDBOX_8BIT) ||
      CHECK_OPERAND(1, REG_R15, OPERAND_SANDBOX_RESTRICTED) ||
      CHECK_OPERAND(1, REG_R15, OPERAND_SANDBOX_UNRESTRICTED)) {
    *instruction_info_collected |= R15_MODIFIED;
  /* 8-bit writes touch %bpl/%spl only under a REX prefix. */
  } else if ((CHECK_OPERAND(0, REG_RBP, OPERAND_SANDBOX_8BIT) && rex_prefix) ||
             CHECK_OPERAND(0, REG_RBP, OPERAND_SANDBOX_UNRESTRICTED) ||
             (CHECK_OPERAND(1, REG_RBP, OPERAND_SANDBOX_8BIT) && rex_prefix) ||
             CHECK_OPERAND(1, REG_RBP, OPERAND_SANDBOX_UNRESTRICTED)) {
    *instruction_info_collected |= BPL_MODIFIED;
  } else if ((CHECK_OPERAND(0, REG_RSP, OPERAND_SANDBOX_8BIT) && rex_prefix) ||
             CHECK_OPERAND(0, REG_RSP, OPERAND_SANDBOX_UNRESTRICTED) ||
             (CHECK_OPERAND(1, REG_RSP, OPERAND_SANDBOX_8BIT) && rex_prefix) ||
             CHECK_OPERAND(1, REG_RSP, OPERAND_SANDBOX_UNRESTRICTED)) {
    *instruction_info_collected |= SPL_MODIFIED;
  /* Take 2 bits of operand type from operand_states as *restricted_register,
   * make sure operand_states denotes a register (4th bit == 0). */
  } else if ((operand_states & 0x70) == (OPERAND_SANDBOX_RESTRICTED << 5)) {
    *restricted_register = operand_states & 0x0f;
    /* Operand 1 may simultaneously hold a restricted write to %rsp/%rbp,
     * which still requires the nacl-special follow-up instruction. */
    if (CHECK_OPERAND(1, REG_RSP, OPERAND_SANDBOX_RESTRICTED)) {
      *instruction_info_collected |= RESTRICTED_RSP_UNPROCESSED;
    } else if (CHECK_OPERAND(1, REG_RBP, OPERAND_SANDBOX_RESTRICTED)) {
      *instruction_info_collected |= RESTRICTED_RBP_UNPROCESSED;
    }
  /* Take 2 bits of operand type from operand_states as *restricted_register,
   * make sure operand_states denotes a register (12th bit == 0). */
  } else if ((operand_states & 0x7000) == (OPERAND_SANDBOX_RESTRICTED << 13)) {
    *restricted_register = (operand_states & 0x0f00) >> 8;
  }
}
589
/*
 * This function merges “dangerous” instruction with sandboxing instructions to
 * get a “superinstruction” and unmarks in-between jump targets.
 *
 * sandbox_instructions_size  total byte length of the sandboxing prefix
 * instruction_start          in/out: rewound to the superinstruction's start
 * data                       start of the code region (for offset computation)
 * valid_targets              bitmap of valid jump targets, adjusted in place
 */
static INLINE void ExpandSuperinstructionBySandboxingBytes(
    size_t sandbox_instructions_size,
    const uint8_t **instruction_start,
    const uint8_t *data,
    bitmap_word *valid_targets) {
  *instruction_start -= sandbox_instructions_size;
  /*
   * We need to unmark start of the “dangerous” instruction itself, too, but we
   * don't need to mark the beginning of the whole “superinstruction” - that's
   * why we move start by one byte and don't change the length.
   */
  UnmarkValidJumpTargets((*instruction_start + 1 - data),
                         sandbox_instructions_size,
                         valid_targets);
}
609
/*
 * Return TRUE if naclcall or nacljmp uses the same register in all three
 * instructions.
 *
 * This version is for the case where “add %src_register, %dst_register” with
 * dst in RM field and src in REG field of ModR/M byte is used.
 *
 * There are five possible forms:
 *
 *   0: 83 eX e0             and    $~0x1f,E86
 *   3: 4? 01 fX             add    RBASE,R86
 *   6: ff eX                jmpq   *R86
 *      ↑                       ↑
 *      instruction_start       current_position
 *
 *   0: 4? 83 eX e0          and    $~0x1f,E86
 *   4: 4? 01 fX             add    RBASE,R86
 *   7: ff eX                jmpq   *R86
 *      ↑                       ↑
 *      instruction_start       current_position
 *
 *   0: 83 eX e0             and    $~0x1f,E86
 *   3: 4? 01 fX             add    RBASE,R86
 *   6: 4? ff eX             jmpq   *R86
 *      ↑                       ↑
 *      instruction_start       current_position
 *
 *   0: 4? 83 eX e0          and    $~0x1f,E86
 *   4: 4? 01 fX             add    RBASE,R86
 *   7: 4? ff eX             jmpq   *R86
 *      ↑                       ↑
 *      instruction_start       current_position
 *
 *   0: 4? 83 eX e0          and    $~0x1f,E64
 *   4: 4? 01 fX             add    RBASE,R64
 *   7: 4? ff eX             jmpq   *R64
 *      ↑                       ↑
 *      instruction_start       current_position
 *
 * We don't care about “?” (they are checked by DFA).
 *
 * In every form above instruction_start[-5] is the ModR/M byte of the “and”,
 * instruction_start[-1] is the ModR/M byte of the “add”, and
 * current_position[0] is the ModR/M byte of the call/jmp itself.
 */
static INLINE Bool VerifyNaclCallOrJmpAddToRM(const uint8_t *instruction_start,
                                              const uint8_t *current_position) {
  return
  RMFromModRM(instruction_start[-5]) == RMFromModRM(instruction_start[-1]) &&
  RMFromModRM(instruction_start[-5]) == RMFromModRM(current_position[0]);
}
657
/*
 * Return TRUE if naclcall or nacljmp uses the same register in all three
 * instructions.
 *
 * This version is for the case where “add %src_register, %dst_register” with
 * dst in REG field and src in RM field of ModR/M byte is used.
 *
 * There are five possible forms:
 *
 *   0: 83 eX e0             and    $~0x1f,E86
 *   3: 4? 03 Xf             add    RBASE,R86
 *   6: ff eX                jmpq   *R86
 *      ↑                       ↑
 *      instruction_start       current_position
 *
 *   0: 4? 83 eX e0          and    $~0x1f,E86
 *   4: 4? 03 Xf             add    RBASE,R86
 *   7: ff eX                jmpq   *R86
 *      ↑                       ↑
 *      instruction_start       current_position
 *
 *   0: 83 eX e0             and    $~0x1f,E86
 *   3: 4? 03 Xf             add    RBASE,R86
 *   6: 4? ff eX             jmpq   *R86
 *      ↑                       ↑
 *      instruction_start       current_position
 *
 *   0: 4? 83 eX e0          and    $~0x1f,E86
 *   4: 4? 03 Xf             add    RBASE,R86
 *   7: 4? ff eX             jmpq   *R86
 *      ↑                       ↑
 *      instruction_start       current_position
 *
 *   0: 4? 83 eX e0          and    $~0x1f,E64
 *   4: 4? 03 Xf             add    RBASE,R64
 *   7: 4? ff eX             jmpq   *R64
 *      ↑                       ↑
 *      instruction_start       current_position
 *
 * We don't care about “?” (they are checked by DFA).
 *
 * Here the “add” keeps its destination in the REG field, hence the
 * RegFromModRM in the middle comparison (contrast VerifyNaclCallOrJmpAddToRM).
 */
static INLINE Bool VerifyNaclCallOrJmpAddToReg(
    const uint8_t *instruction_start,
    const uint8_t *current_position) {
  return
  RMFromModRM(instruction_start[-5]) == RegFromModRM(instruction_start[-1]) &&
  RMFromModRM(instruction_start[-5]) == RMFromModRM(current_position[0]);
}
706
/*
 * This function checks that naclcall or nacljmp are correct (that is: three
 * component instructions match) and if that is true then it merges call or jmp
 * with a sandboxing to get a “superinstruction” and removes in-between jump
 * targets. If it's not true then it triggers “unrecognized instruction” error
 * condition.
 *
 * This version is for the case where “add with dst register in RM field”
 * (opcode 0x01) and “add without REX prefix” is used.
 *
 * There are two possible forms:
 *
 *   0: 83 eX e0             and    $~0x1f,E86
 *   3: 4? 01 fX             add    RBASE,R86
 *   6: ff eX                jmpq   *R86
 *      ↑                       ↑
 *      instruction_start       current_position
 *
 *   0: 83 eX e0             and    $~0x1f,E86
 *   3: 4? 01 fX             add    RBASE,R86
 *   6: 4? ff eX             jmpq   *R86
 *      ↑                       ↑
 *      instruction_start       current_position
 */
static INLINE void ProcessNaclCallOrJmpAddToRMNoRex(
    uint32_t *instruction_info_collected,
    const uint8_t **instruction_start,
    const uint8_t *current_position,
    const uint8_t *data,
    bitmap_word *valid_targets) {
  /* Valid only when all three instructions operate on the same register;
   * the sandboxing prefix is 3 bytes of “and” plus 3 bytes of “add”. */
  if (VerifyNaclCallOrJmpAddToRM(*instruction_start, current_position))
    ExpandSuperinstructionBySandboxingBytes(
        3 /* and */ + 3 /* add */, instruction_start, data, valid_targets);
  else
    *instruction_info_collected |= UNRECOGNIZED_INSTRUCTION;
}
743
/*
 * This function checks that naclcall or nacljmp are correct (that is: three
 * component instructions match) and if that is true then it merges call or jmp
 * with a sandboxing to get a “superinstruction” and removes in-between jump
 * targets.  If it's not true then it triggers “unrecognized instruction” error
 * condition by setting the corresponding bit in *instruction_info_collected.
 *
 * This version is for the case where “add with dst register in REG field”
 * (opcode 0x03) and “add without REX prefix” is used, hence the merged
 * sandboxing prefix is 3 (and) + 3 (add) = 6 bytes long.
 *
 * There are two possible forms:
 *
 *   0: 83 eX e0          and    $~0x1f,E86
 *   3: 4? 03 Xf          add    RBASE,R86
 *   6: ff eX             jmpq   *R86
 *      ↑                        ↑
 *      instruction_start        current_position
 *
 *   0: 83 eX e0          and    $~0x1f,E86
 *   3: 4? 03 Xf          add    RBASE,R86
 *   6: 4? ff eX          jmpq   *R86
 *      ↑                        ↑
 *      instruction_start        current_position
 */
static INLINE void ProcessNaclCallOrJmpAddToRegNoRex(
    uint32_t *instruction_info_collected,
    const uint8_t **instruction_start,
    const uint8_t *current_position,
    const uint8_t *data,
    bitmap_word *valid_targets) {
  if (VerifyNaclCallOrJmpAddToReg(*instruction_start, current_position))
    ExpandSuperinstructionBySandboxingBytes(
        3 /* and */ + 3 /* add */, instruction_start, data, valid_targets);
  else
    *instruction_info_collected |= UNRECOGNIZED_INSTRUCTION;
}
780
/*
 * This function checks that naclcall or nacljmp are correct (that is: three
 * component instructions match) and if that is true then it merges call or jmp
 * with a sandboxing to get a “superinstruction” and removes in-between jump
 * targets.  If it's not true then it triggers “unrecognized instruction” error
 * condition by setting the corresponding bit in *instruction_info_collected.
 *
 * This version is for the case where “add with dst register in RM field”
 * (opcode 0x01) and “add with REX prefix” is used (note the “4? 83” forms
 * below and the 4-byte “and”), hence the merged sandboxing prefix is
 * 4 (and) + 3 (add) = 7 bytes long.
 *
 * There are three possible forms:
 *
 *   0: 4? 83 eX e0       and    $~0x1f,E86
 *   4: 4? 01 fX          add    RBASE,R86
 *   7: ff eX             jmpq   *R86
 *      ↑                        ↑
 *      instruction_start        current_position
 *
 *   0: 4? 83 eX e0       and    $~0x1f,E86
 *   4: 4? 01 fX          add    RBASE,R86
 *   7: 4? ff eX          jmpq   *R86
 *      ↑                        ↑
 *      instruction_start        current_position
 *
 *   0: 4? 83 eX e0       and    $~0x1f,E64
 *   4: 4? 01 fX          add    RBASE,R64
 *   7: 4? ff eX          jmpq   *R64
 *      ↑                        ↑
 *      instruction_start        current_position
 */
static INLINE void ProcessNaclCallOrJmpAddToRMWithRex(
    uint32_t *instruction_info_collected,
    const uint8_t **instruction_start,
    const uint8_t *current_position,
    const uint8_t *data,
    bitmap_word *valid_targets) {
  if (VerifyNaclCallOrJmpAddToRM(*instruction_start, current_position))
    ExpandSuperinstructionBySandboxingBytes(
        4 /* and */ + 3 /* add */, instruction_start, data, valid_targets);
  else
    *instruction_info_collected |= UNRECOGNIZED_INSTRUCTION;
}
823
/*
 * This function checks that naclcall or nacljmp are correct (that is: three
 * component instructions match) and if that is true then it merges call or jmp
 * with a sandboxing to get a “superinstruction” and removes in-between jump
 * targets.  If it's not true then it triggers “unrecognized instruction” error
 * condition by setting the corresponding bit in *instruction_info_collected.
 *
 * This version is for the case where “add with dst register in REG field”
 * (opcode 0x03) and “add with REX prefix” is used (note the “4? 83” forms
 * below and the 4-byte “and”), hence the merged sandboxing prefix is
 * 4 (and) + 3 (add) = 7 bytes long.
 *
 * There are three possible forms:
 *
 *   0: 4? 83 eX e0       and    $~0x1f,E86
 *   4: 4? 03 Xf          add    RBASE,R86
 *   7: ff eX             jmpq   *R86
 *      ↑                        ↑
 *      instruction_start        current_position
 *
 *   0: 4? 83 eX e0       and    $~0x1f,E86
 *   4: 4? 03 Xf          add    RBASE,R86
 *   7: 4? ff eX          jmpq   *R86
 *      ↑                        ↑
 *      instruction_start        current_position
 *
 *   0: 4? 83 eX e0       and    $~0x1f,E64
 *   4: 4? 03 Xf          add    RBASE,R64
 *   7: 4? ff eX          jmpq   *R64
 *      ↑                        ↑
 *      instruction_start        current_position
 */
static INLINE void ProcessNaclCallOrJmpAddToRegWithRex(
    uint32_t *instruction_info_collected,
    const uint8_t **instruction_start,
    const uint8_t *current_position,
    const uint8_t *data,
    bitmap_word *valid_targets) {
  if (VerifyNaclCallOrJmpAddToReg(*instruction_start, current_position))
    ExpandSuperinstructionBySandboxingBytes(
        4 /* and */ + 3 /* add */, instruction_start, data, valid_targets);
  else
    *instruction_info_collected |= UNRECOGNIZED_INSTRUCTION;
}
866
#endif  /* NATIVE_CLIENT_SRC_TRUSTED_VALIDATOR_RAGEL_BITMAP_H_ */
OLDNEW
« no previous file with comments | « no previous file | src/trusted/validator_ragel/gen/validator_x86_32.c » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698