OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "sandbox/linux/seccomp-bpf/syscall.h" | 5 #include "sandbox/linux/seccomp-bpf/syscall.h" |
6 | 6 |
7 #include <asm/unistd.h> | 7 #include <asm/unistd.h> |
8 #include <errno.h> | 8 #include <errno.h> |
9 | 9 |
10 #include "base/basictypes.h" | 10 #include "base/basictypes.h" |
11 | 11 |
12 namespace sandbox { | 12 namespace sandbox { |
13 | 13 |
14 asm( // We need to be able to tell the kernel exactly where we made a | 14 namespace { |
15 // system call. The C++ compiler likes to sometimes clone or | 15 |
16 // inline code, which would inadvertently end up duplicating | 16 asm( // We need to be able to tell the kernel exactly where we made a |
mdempsky
2014/06/13 03:01:46
Nit: This first block of comments no longer aligns
jln (very slow on Chromium)
2014/06/13 06:39:58
Done.
| |
17 // the entry point. | 17 // system call. The C++ compiler likes to sometimes clone or |
18 // "gcc" can suppress code duplication with suitable function | 18 // inline code, which would inadvertently end up duplicating |
19 // attributes, but "clang" doesn't have this ability. | 19 // the entry point. |
20 // The "clang" developer mailing list suggested that the correct | 20 // "gcc" can suppress code duplication with suitable function |
21 // and portable solution is a file-scope assembly block. | 21 // attributes, but "clang" doesn't have this ability. |
22 // N.B. We do mark our code as a proper function so that backtraces | 22 // The "clang" developer mailing list suggested that the correct |
23 // work correctly. But we make absolutely no attempt to use the | 23 // and portable solution is a file-scope assembly block. |
24 // ABI's calling conventions for passing arguments. We will only | 24 // N.B. We do mark our code as a proper function so that backtraces |
25 // ever be called from assembly code and thus can pick more | 25 // work correctly. But we make absolutely no attempt to use the |
26 // suitable calling conventions. | 26 // ABI's calling conventions for passing arguments. We will only |
27 // ever be called from assembly code and thus can pick more | |
28 // suitable calling conventions. | |
27 #if defined(__i386__) | 29 #if defined(__i386__) |
28 ".text\n" | 30 ".text\n" |
29 ".align 16, 0x90\n" | 31 ".align 16, 0x90\n" |
30 ".type SyscallAsm, @function\n" | 32 ".type SyscallAsm, @function\n" |
31 "SyscallAsm:.cfi_startproc\n" | 33 "SyscallAsm:.cfi_startproc\n" |
32 // Check if "%eax" is negative. If so, do not attempt to make a | 34 // Check if "%eax" is negative. If so, do not attempt to make a |
33 // system call. Instead, compute the return address that is visible | 35 // system call. Instead, compute the return address that is visible |
34 // to the kernel after we execute "int $0x80". This address can be | 36 // to the kernel after we execute "int $0x80". This address can be |
35 // used as a marker that BPF code inspects. | 37 // used as a marker that BPF code inspects. |
36 "test %eax, %eax\n" | 38 "test %eax, %eax\n" |
37 "jge 1f\n" | 39 "jge 1f\n" |
38 // Always make sure that our code is position-independent, or | 40 // Always make sure that our code is position-independent, or |
39 // address space randomization might not work on i386. This means, | 41 // address space randomization might not work on i386. This means, |
40 // we can't use "lea", but instead have to rely on "call/pop". | 42 // we can't use "lea", but instead have to rely on "call/pop". |
41 "call 0f; .cfi_adjust_cfa_offset 4\n" | 43 "call 0f; .cfi_adjust_cfa_offset 4\n" |
42 "0:pop %eax; .cfi_adjust_cfa_offset -4\n" | 44 "0:pop %eax; .cfi_adjust_cfa_offset -4\n" |
43 "addl $2f-0b, %eax\n" | 45 "addl $2f-0b, %eax\n" |
44 "ret\n" | 46 "ret\n" |
45 // Save register that we don't want to clobber. On i386, we need to | 47 // Save register that we don't want to clobber. On i386, we need to |
46 // save relatively aggressively, as there are a couple of registers | 48 // save relatively aggressively, as there are a couple of registers |
47 // that are used internally (e.g. %ebx for position-independent | 49 // that are used internally (e.g. %ebx for position-independent |
48 // code, and %ebp for the frame pointer), and as we need to keep at | 50 // code, and %ebp for the frame pointer), and as we need to keep at |
49 // least a few registers available for the register allocator. | 51 // least a few registers available for the register allocator. |
50 "1:push %esi; .cfi_adjust_cfa_offset 4\n" | 52 "1:push %esi; .cfi_adjust_cfa_offset 4\n" |
51 "push %edi; .cfi_adjust_cfa_offset 4\n" | 53 "push %edi; .cfi_adjust_cfa_offset 4\n" |
52 "push %ebx; .cfi_adjust_cfa_offset 4\n" | 54 "push %ebx; .cfi_adjust_cfa_offset 4\n" |
53 "push %ebp; .cfi_adjust_cfa_offset 4\n" | 55 "push %ebp; .cfi_adjust_cfa_offset 4\n" |
54 // Copy entries from the array holding the arguments into the | 56 // Copy entries from the array holding the arguments into the |
55 // correct CPU registers. | 57 // correct CPU registers. |
56 "movl 0(%edi), %ebx\n" | 58 "movl 0(%edi), %ebx\n" |
57 "movl 4(%edi), %ecx\n" | 59 "movl 4(%edi), %ecx\n" |
58 "movl 8(%edi), %edx\n" | 60 "movl 8(%edi), %edx\n" |
59 "movl 12(%edi), %esi\n" | 61 "movl 12(%edi), %esi\n" |
60 "movl 20(%edi), %ebp\n" | 62 "movl 20(%edi), %ebp\n" |
61 "movl 16(%edi), %edi\n" | 63 "movl 16(%edi), %edi\n" |
62 // Enter the kernel. | 64 // Enter the kernel. |
63 "int $0x80\n" | 65 "int $0x80\n" |
64 // This is our "magic" return address that the BPF filter sees. | 66 // This is our "magic" return address that the BPF filter sees. |
65 "2:" | 67 "2:" |
66 // Restore any clobbered registers that we didn't declare to the | 68 // Restore any clobbered registers that we didn't declare to the |
67 // compiler. | 69 // compiler. |
68 "pop %ebp; .cfi_adjust_cfa_offset -4\n" | 70 "pop %ebp; .cfi_adjust_cfa_offset -4\n" |
69 "pop %ebx; .cfi_adjust_cfa_offset -4\n" | 71 "pop %ebx; .cfi_adjust_cfa_offset -4\n" |
70 "pop %edi; .cfi_adjust_cfa_offset -4\n" | 72 "pop %edi; .cfi_adjust_cfa_offset -4\n" |
71 "pop %esi; .cfi_adjust_cfa_offset -4\n" | 73 "pop %esi; .cfi_adjust_cfa_offset -4\n" |
72 "ret\n" | 74 "ret\n" |
73 ".cfi_endproc\n" | 75 ".cfi_endproc\n" |
74 "9:.size SyscallAsm, 9b-SyscallAsm\n" | 76 "9:.size SyscallAsm, 9b-SyscallAsm\n" |
75 #elif defined(__x86_64__) | 77 #elif defined(__x86_64__) |
76 ".text\n" | 78 ".text\n" |
77 ".align 16, 0x90\n" | 79 ".align 16, 0x90\n" |
78 ".type SyscallAsm, @function\n" | 80 ".type SyscallAsm, @function\n" |
79 "SyscallAsm:.cfi_startproc\n" | 81 "SyscallAsm:.cfi_startproc\n" |
80 // Check if "%rax" is negative. If so, do not attempt to make a | 82 // Check if "%rax" is negative. If so, do not attempt to make a |
81 // system call. Instead, compute the return address that is visible | 83 // system call. Instead, compute the return address that is visible |
82 // to the kernel after we execute "syscall". This address can be | 84 // to the kernel after we execute "syscall". This address can be |
83 // used as a marker that BPF code inspects. | 85 // used as a marker that BPF code inspects. |
84 "test %rax, %rax\n" | 86 "test %rax, %rax\n" |
85 "jge 1f\n" | 87 "jge 1f\n" |
86 // Always make sure that our code is position-independent, or the | 88 // Always make sure that our code is position-independent, or the |
87 // linker will throw a hissy fit on x86-64. | 89 // linker will throw a hissy fit on x86-64. |
88 "call 0f; .cfi_adjust_cfa_offset 8\n" | 90 "call 0f; .cfi_adjust_cfa_offset 8\n" |
89 "0:pop %rax; .cfi_adjust_cfa_offset -8\n" | 91 "0:pop %rax; .cfi_adjust_cfa_offset -8\n" |
90 "addq $2f-0b, %rax\n" | 92 "addq $2f-0b, %rax\n" |
91 "ret\n" | 93 "ret\n" |
92 // We declared all clobbered registers to the compiler. On x86-64, | 94 // We declared all clobbered registers to the compiler. On x86-64, |
93 // there really isn't much of a problem with register pressure. So, | 95 // there really isn't much of a problem with register pressure. So, |
94 // we can go ahead and directly copy the entries from the arguments | 96 // we can go ahead and directly copy the entries from the arguments |
95 // array into the appropriate CPU registers. | 97 // array into the appropriate CPU registers. |
96 "1:movq 0(%r12), %rdi\n" | 98 "1:movq 0(%r12), %rdi\n" |
97 "movq 8(%r12), %rsi\n" | 99 "movq 8(%r12), %rsi\n" |
98 "movq 16(%r12), %rdx\n" | 100 "movq 16(%r12), %rdx\n" |
99 "movq 24(%r12), %r10\n" | 101 "movq 24(%r12), %r10\n" |
100 "movq 32(%r12), %r8\n" | 102 "movq 32(%r12), %r8\n" |
101 "movq 40(%r12), %r9\n" | 103 "movq 40(%r12), %r9\n" |
102 // Enter the kernel. | 104 // Enter the kernel. |
103 "syscall\n" | 105 "syscall\n" |
104 // This is our "magic" return address that the BPF filter sees. | 106 // This is our "magic" return address that the BPF filter sees. |
105 "2:ret\n" | 107 "2:ret\n" |
106 ".cfi_endproc\n" | 108 ".cfi_endproc\n" |
107 "9:.size SyscallAsm, 9b-SyscallAsm\n" | 109 "9:.size SyscallAsm, 9b-SyscallAsm\n" |
108 #elif defined(__arm__) | 110 #elif defined(__arm__) |
109 // Throughout this file, we use the same mode (ARM vs. thumb) | 111 // Throughout this file, we use the same mode (ARM vs. thumb) |
110 // that the C++ compiler uses. This means, when transferring control | 112 // that the C++ compiler uses. This means, when transferring control |
111 // from C++ to assembly code, we do not need to switch modes (e.g. | 113 // from C++ to assembly code, we do not need to switch modes (e.g. |
112 // by using the "bx" instruction). It also means that our assembly | 114 // by using the "bx" instruction). It also means that our assembly |
113 // code should not be invoked directly from code that lives in | 115 // code should not be invoked directly from code that lives in |
114 // other compilation units, as we don't bother implementing thumb | 116 // other compilation units, as we don't bother implementing thumb |
115 // interworking. That's OK, as we don't make any of the assembly | 117 // interworking. That's OK, as we don't make any of the assembly |
116 // symbols public. They are all local to this file. | 118 // symbols public. They are all local to this file. |
117 ".text\n" | 119 ".text\n" |
118 ".align 2\n" | 120 ".align 2\n" |
119 ".type SyscallAsm, %function\n" | 121 ".type SyscallAsm, %function\n" |
120 #if defined(__thumb__) | 122 #if defined(__thumb__) |
121 ".thumb_func\n" | 123 ".thumb_func\n" |
122 #else | 124 #else |
123 ".arm\n" | 125 ".arm\n" |
124 #endif | 126 #endif |
125 "SyscallAsm:.fnstart\n" | 127 "SyscallAsm:.fnstart\n" |
126 "@ args = 0, pretend = 0, frame = 8\n" | 128 "@ args = 0, pretend = 0, frame = 8\n" |
127 "@ frame_needed = 1, uses_anonymous_args = 0\n" | 129 "@ frame_needed = 1, uses_anonymous_args = 0\n" |
128 #if defined(__thumb__) | 130 #if defined(__thumb__) |
129 ".cfi_startproc\n" | 131 ".cfi_startproc\n" |
130 "push {r7, lr}\n" | 132 "push {r7, lr}\n" |
131 ".cfi_offset 14, -4\n" | 133 ".cfi_offset 14, -4\n" |
132 ".cfi_offset 7, -8\n" | 134 ".cfi_offset 7, -8\n" |
133 "mov r7, sp\n" | 135 "mov r7, sp\n" |
134 ".cfi_def_cfa_register 7\n" | 136 ".cfi_def_cfa_register 7\n" |
135 ".cfi_def_cfa_offset 8\n" | 137 ".cfi_def_cfa_offset 8\n" |
136 #else | 138 #else |
137 "stmfd sp!, {fp, lr}\n" | 139 "stmfd sp!, {fp, lr}\n" |
138 "add fp, sp, #4\n" | 140 "add fp, sp, #4\n" |
139 #endif | 141 #endif |
140 // Check if "r0" is negative. If so, do not attempt to make a | 142 // Check if "r0" is negative. If so, do not attempt to make a |
141 // system call. Instead, compute the return address that is visible | 143 // system call. Instead, compute the return address that is visible |
142 // to the kernel after we execute "swi 0". This address can be | 144 // to the kernel after we execute "swi 0". This address can be |
143 // used as a marker that BPF code inspects. | 145 // used as a marker that BPF code inspects. |
144 "cmp r0, #0\n" | 146 "cmp r0, #0\n" |
145 "bge 1f\n" | 147 "bge 1f\n" |
146 "adr r0, 2f\n" | 148 "adr r0, 2f\n" |
147 "b 2f\n" | 149 "b 2f\n" |
148 // We declared (almost) all clobbered registers to the compiler. On | 150 // We declared (almost) all clobbered registers to the compiler. On |
149 // ARM there is no particular register pressure. So, we can go | 151 // ARM there is no particular register pressure. So, we can go |
150 // ahead and directly copy the entries from the arguments array | 152 // ahead and directly copy the entries from the arguments array |
151 // into the appropriate CPU registers. | 153 // into the appropriate CPU registers. |
152 "1:ldr r5, [r6, #20]\n" | 154 "1:ldr r5, [r6, #20]\n" |
153 "ldr r4, [r6, #16]\n" | 155 "ldr r4, [r6, #16]\n" |
154 "ldr r3, [r6, #12]\n" | 156 "ldr r3, [r6, #12]\n" |
155 "ldr r2, [r6, #8]\n" | 157 "ldr r2, [r6, #8]\n" |
156 "ldr r1, [r6, #4]\n" | 158 "ldr r1, [r6, #4]\n" |
157 "mov r7, r0\n" | 159 "mov r7, r0\n" |
158 "ldr r0, [r6, #0]\n" | 160 "ldr r0, [r6, #0]\n" |
159 // Enter the kernel | 161 // Enter the kernel |
160 "swi 0\n" | 162 "swi 0\n" |
161 // Restore the frame pointer. Also restore the program counter from | 163 // Restore the frame pointer. Also restore the program counter from |
162 // the link register; this makes us return to the caller. | 164 // the link register; this makes us return to the caller. |
163 #if defined(__thumb__) | 165 #if defined(__thumb__) |
164 "2:pop {r7, pc}\n" | 166 "2:pop {r7, pc}\n" |
165 ".cfi_endproc\n" | 167 ".cfi_endproc\n" |
166 #else | 168 #else |
167 "2:ldmfd sp!, {fp, pc}\n" | 169 "2:ldmfd sp!, {fp, pc}\n" |
168 #endif | 170 #endif |
169 ".fnend\n" | 171 ".fnend\n" |
170 "9:.size SyscallAsm, 9b-SyscallAsm\n" | 172 "9:.size SyscallAsm, 9b-SyscallAsm\n" |
171 #endif | 173 #endif |
172 ); // asm | 174 ); // asm |
173 | 175 |
174 intptr_t SandboxSyscall(int nr, | 176 } // namespace |
175 intptr_t p0, intptr_t p1, intptr_t p2, | 177 |
176 intptr_t p3, intptr_t p4, intptr_t p5) { | 178 intptr_t Syscall::Call(int nr, |
179 intptr_t p0, | |
180 intptr_t p1, | |
181 intptr_t p2, | |
182 intptr_t p3, | |
183 intptr_t p4, | |
184 intptr_t p5) { | |
177 // We rely on "intptr_t" to be the exact size as a "void *". This is | 185 // We rely on "intptr_t" to be the exact size as a "void *". This is |
178 // typically true, but just in case, we add a check. The language | 186 // typically true, but just in case, we add a check. The language |
179 // specification allows platforms some leeway in cases, where | 187 // specification allows platforms some leeway in cases, where |
180 // "sizeof(void *)" is not the same as "sizeof(void (*)())". We expect | 188 // "sizeof(void *)" is not the same as "sizeof(void (*)())". We expect |
181 // that this would only be an issue for IA64, which we are currently not | 189 // that this would only be an issue for IA64, which we are currently not |
182 // planning on supporting. And it is even possible that this would work | 190 // planning on supporting. And it is even possible that this would work |
183 // on IA64, but for lack of actual hardware, I cannot test. | 191 // on IA64, but for lack of actual hardware, I cannot test. |
184 COMPILE_ASSERT(sizeof(void *) == sizeof(intptr_t), | 192 COMPILE_ASSERT(sizeof(void*) == sizeof(intptr_t), |
185 pointer_types_and_intptr_must_be_exactly_the_same_size); | 193 pointer_types_and_intptr_must_be_exactly_the_same_size); |
186 | 194 |
187 const intptr_t args[6] = { p0, p1, p2, p3, p4, p5 }; | 195 const intptr_t args[6] = {p0, p1, p2, p3, p4, p5}; |
188 | 196 |
189 // Invoke our file-scope assembly code. The constraints have been picked | 197 // Invoke our file-scope assembly code. The constraints have been picked |
190 // carefully to match what the rest of the assembly code expects in input, | 198 // carefully to match what the rest of the assembly code expects in input, |
191 // output, and clobbered registers. | 199 // output, and clobbered registers. |
192 #if defined(__i386__) | 200 #if defined(__i386__) |
193 intptr_t ret = nr; | 201 intptr_t ret = nr; |
194 asm volatile( | 202 asm volatile( |
195 "call SyscallAsm\n" | 203 "call SyscallAsm\n" |
196 // N.B. These are not the calling conventions normally used by the ABI. | 204 // N.B. These are not the calling conventions normally used by the ABI. |
197 : "=a"(ret) | 205 : "=a"(ret) |
198 : "0"(ret), "D"(args) | 206 : "0"(ret), "D"(args) |
199 : "cc", "esp", "memory", "ecx", "edx"); | 207 : "cc", "esp", "memory", "ecx", "edx"); |
200 #elif defined(__x86_64__) | 208 #elif defined(__x86_64__) |
201 intptr_t ret = nr; | 209 intptr_t ret = nr; |
202 { | 210 { |
203 register const intptr_t *data __asm__("r12") = args; | 211 register const intptr_t* data __asm__("r12") = args; |
204 asm volatile( | 212 asm volatile( |
205 "lea -128(%%rsp), %%rsp\n" // Avoid red zone. | 213 "lea -128(%%rsp), %%rsp\n" // Avoid red zone. |
206 "call SyscallAsm\n" | 214 "call SyscallAsm\n" |
207 "lea 128(%%rsp), %%rsp\n" | 215 "lea 128(%%rsp), %%rsp\n" |
208 // N.B. These are not the calling conventions normally used by the ABI. | 216 // N.B. These are not the calling conventions normally used by the ABI. |
209 : "=a"(ret) | 217 : "=a"(ret) |
210 : "0"(ret), "r"(data) | 218 : "0"(ret), "r"(data) |
211 : "cc", "rsp", "memory", | 219 : "cc", |
212 "rcx", "rdi", "rsi", "rdx", "r8", "r9", "r10", "r11"); | 220 "rsp", |
221 "memory", | |
222 "rcx", | |
223 "rdi", | |
224 "rsi", | |
225 "rdx", | |
226 "r8", | |
227 "r9", | |
228 "r10", | |
229 "r11"); | |
213 } | 230 } |
214 #elif defined(__arm__) | 231 #elif defined(__arm__) |
215 intptr_t ret; | 232 intptr_t ret; |
216 { | 233 { |
217 register intptr_t inout __asm__("r0") = nr; | 234 register intptr_t inout __asm__("r0") = nr; |
218 register const intptr_t *data __asm__("r6") = args; | 235 register const intptr_t* data __asm__("r6") = args; |
219 asm volatile( | 236 asm volatile( |
220 "bl SyscallAsm\n" | 237 "bl SyscallAsm\n" |
221 // N.B. These are not the calling conventions normally used by the ABI. | 238 // N.B. These are not the calling conventions normally used by the ABI. |
222 : "=r"(inout) | 239 : "=r"(inout) |
223 : "0"(inout), "r"(data) | 240 : "0"(inout), "r"(data) |
224 : "cc", "lr", "memory", "r1", "r2", "r3", "r4", "r5" | 241 : "cc", |
242 "lr", | |
243 "memory", | |
244 "r1", | |
245 "r2", | |
246 "r3", | |
247 "r4", | |
248 "r5" | |
225 #if !defined(__thumb__) | 249 #if !defined(__thumb__) |
226 // In thumb mode, we cannot use "r7" as a general purpose register, as | 250 // In thumb mode, we cannot use "r7" as a general purpose register, as |
227 // it is our frame pointer. We have to manually manage and preserve it. | 251 // it is our frame pointer. We have to manually manage and preserve |
228 // In ARM mode, we have a dedicated frame pointer register and "r7" is | 252 // it. |
229 // thus available as a general purpose register. We don't preserve it, | 253 // In ARM mode, we have a dedicated frame pointer register and "r7" is |
230 // but instead mark it as clobbered. | 254 // thus available as a general purpose register. We don't preserve it, |
231 , "r7" | 255 // but instead mark it as clobbered. |
256 , | |
257 "r7" | |
232 #endif // !defined(__thumb__) | 258 #endif // !defined(__thumb__) |
233 ); | 259 ); |
234 ret = inout; | 260 ret = inout; |
235 } | 261 } |
236 #else | 262 #else |
237 errno = ENOSYS; | 263 #error "Unimplemented architecture" |
238 intptr_t ret = -1; | |
239 #endif | 264 #endif |
240 return ret; | 265 return ret; |
241 } | 266 } |
242 | 267 |
243 } // namespace sandbox | 268 } // namespace sandbox |
OLD | NEW |