| OLD | NEW |
| (Empty) | |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #include <errno.h> |
| 6 #include <stdarg.h> |
| 7 #include <asm/unistd.h> |
| 8 |
| 9 #include "sandbox/linux/seccomp-bpf/syscall.h" |
| 10 |
| 11 |
| 12 namespace playground2 { |
| 13 |
asm(  // We need to be able to tell the kernel exactly where we made a
      // system call. The C++ compiler likes to sometimes clone or
      // inline code, which would inadvertently end up duplicating
      // the entry point.
      // "gcc" can suppress code duplication with suitable function
      // attributes, but "clang" doesn't have this ability.
      // The "clang" developer mailing list suggested that the correct
      // and portable solution is a file-scope assembly block.
      // N.B. We do mark our code as a proper function so that backtraces
      // work correctly. But we make absolutely no attempt to use the
      // ABI's calling conventions for passing arguments. We will only
      // ever be called from assembly code and thus can pick more
      // suitable calling conventions.
#if defined(__i386__)
    ".text\n"
    ".align 16, 0x90\n"
    ".type SyscallAsm, @function\n"
    "SyscallAsm:.cfi_startproc\n"
    // Check if "%eax" is negative. If so, do not attempt to make a
    // system call. Instead, compute the return address that is visible
    // to the kernel after we execute "int $0x80". This address can be
    // used as a marker that BPF code inspects.
    "test %eax, %eax\n"
    "jge 1f\n"
    // Always make sure that our code is position-independent, or
    // address space randomization might not work on i386. This means,
    // we can't use "lea", but instead have to rely on "call/pop".
    "call 0f; .cfi_adjust_cfa_offset 4\n"
    "0:pop %eax; .cfi_adjust_cfa_offset -4\n"
    "addl $2f-0b, %eax\n"
    "ret\n"
    // Save register that we don't want to clobber. On i386, we need to
    // save relatively aggressively, as there are a couple of registers
    // that are used internally (e.g. %ebx for position-independent
    // code, and %ebp for the frame pointer), and as we need to keep at
    // least a few registers available for the register allocator.
    "1:push %esi; .cfi_adjust_cfa_offset 4\n"
    "push %edi; .cfi_adjust_cfa_offset 4\n"
    "push %ebx; .cfi_adjust_cfa_offset 4\n"
    "push %ebp; .cfi_adjust_cfa_offset 4\n"
    // Copy entries from the array holding the arguments into the
    // correct CPU registers.
    "movl 0(%edi), %ebx\n"
    "movl 4(%edi), %ecx\n"
    "movl 8(%edi), %edx\n"
    "movl 12(%edi), %esi\n"
    "movl 20(%edi), %ebp\n"
    "movl 16(%edi), %edi\n"
    // Enter the kernel.
    "int $0x80\n"
    // This is our "magic" return address that the BPF filter sees.
    "2:"
    // Restore any clobbered registers that we didn't declare to the
    // compiler.
    "pop %ebp; .cfi_adjust_cfa_offset -4\n"
    "pop %ebx; .cfi_adjust_cfa_offset -4\n"
    "pop %edi; .cfi_adjust_cfa_offset -4\n"
    "pop %esi; .cfi_adjust_cfa_offset -4\n"
    "ret\n"
    ".cfi_endproc\n"
    "9:.size SyscallAsm, 9b-SyscallAsm\n"
#elif defined(__x86_64__)
    ".text\n"
    ".align 16, 0x90\n"
    ".type SyscallAsm, @function\n"
    "SyscallAsm:.cfi_startproc\n"
    // Check if "%rax" is negative. If so, do not attempt to make a
    // system call. Instead, compute the return address that is visible
    // to the kernel after we execute "syscall". This address can be
    // used as a marker that BPF code inspects.
    "test %rax, %rax\n"
    "jge 1f\n"
    // Always make sure that our code is position-independent, or the
    // linker will throw a hissy fit on x86-64.
    "call 0f; .cfi_adjust_cfa_offset 8\n"
    "0:pop %rax; .cfi_adjust_cfa_offset -8\n"
    "addq $2f-0b, %rax\n"
    "ret\n"
    // We declared all clobbered registers to the compiler. On x86-64,
    // there really isn't much of a problem with register pressure. So,
    // we can go ahead and directly copy the entries from the arguments
    // array into the appropriate CPU registers.
    "1:movq 0(%r12), %rdi\n"
    "movq 8(%r12), %rsi\n"
    "movq 16(%r12), %rdx\n"
    "movq 24(%r12), %r10\n"
    "movq 32(%r12), %r8\n"
    "movq 40(%r12), %r9\n"
    // Enter the kernel.
    "syscall\n"
    // This is our "magic" return address that the BPF filter sees.
    "2:ret\n"
    ".cfi_endproc\n"
    "9:.size SyscallAsm, 9b-SyscallAsm\n"
#elif defined(__arm__)
    // Throughout this file, we use the same mode (ARM vs. thumb)
    // that the C++ compiler uses. This means, when transferring control
    // from C++ to assembly code, we do not need to switch modes (e.g.
    // by using the "bx" instruction). It also means that our assembly
    // code should not be invoked directly from code that lives in
    // other compilation units, as we don't bother implementing thumb
    // interworking. That's OK, as we don't make any of the assembly
    // symbols public. They are all local to this file.
    ".text\n"
    ".align 2\n"
    ".type SyscallAsm, %function\n"
#if defined(__thumb__)
    ".thumb_func\n"
#else
    ".arm\n"
#endif
    "SyscallAsm:.fnstart\n"
    "@ args = 0, pretend = 0, frame = 8\n"
    "@ frame_needed = 1, uses_anonymous_args = 0\n"
#if defined(__thumb__)
    ".cfi_startproc\n"
    "push {r7, lr}\n"
    ".cfi_offset 14, -4\n"
    ".cfi_offset 7, -8\n"
    "mov r7, sp\n"
    ".cfi_def_cfa_register 7\n"
    ".cfi_def_cfa_offset 8\n"
#else
    "stmfd sp!, {fp, lr}\n"
    "add fp, sp, #4\n"
#endif
    // Check if "r0" is negative. If so, do not attempt to make a
    // system call. Instead, compute the return address that is visible
    // to the kernel after we execute "swi 0". This address can be
    // used as a marker that BPF code inspects.
    "cmp r0, #0\n"
    "bge 1f\n"
    "ldr r0, =2f\n"
    "b 2f\n"
    // We declared (almost) all clobbered registers to the compiler. On
    // ARM there is no particular register pressure. So, we can go
    // ahead and directly copy the entries from the arguments array
    // into the appropriate CPU registers.
    "1:ldr r5, [r6, #20]\n"
    "ldr r4, [r6, #16]\n"
    "ldr r3, [r6, #12]\n"
    "ldr r2, [r6, #8]\n"
    "ldr r1, [r6, #4]\n"
    "mov r7, r0\n"
    "ldr r0, [r6, #0]\n"
    // Enter the kernel.
    "swi 0\n"
    // Restore the frame pointer. Also restore the program counter from
    // the link register; this makes us return to the caller.
#if defined(__thumb__)
    "2:pop {r7, pc}\n"
    ".cfi_endproc\n"
#else
    "2:ldmfd sp!, {fp, pc}\n"
#endif
    ".fnend\n"
    "9:.size SyscallAsm, 9b-SyscallAsm\n"
#endif
    );  // asm
| 173 |
// Makes a raw system call with up to six arguments, returning the kernel's
// raw return value (no errno translation is performed here). Passing a
// negative "nr" does not enter the kernel; instead SyscallAsm returns the
// "magic" post-syscall return address used as a marker by BPF filters.
intptr_t Syscall(int nr, ...) {
  // It is most convenient for the caller to pass a variadic list of arguments.
  // But this is difficult to handle in assembly code without making
  // assumptions about internal implementation details of "va_list". So, we
  // first use C code to copy all the arguments into an array, where they are
  // easily accessible to asm().
  // This is preferable over copying them into individual variables, which
  // can result in too much register pressure.
  void *args[6];
  va_list ap;

  // System calls take a system call number (typically passed in %eax or
  // %rax) and up to six arguments (passed in general-purpose CPU registers).
  //
  // On 32bit systems, all variadic arguments are passed on the stack as 32bit
  // quantities. We can use an arbitrary 32bit type to retrieve them with
  // va_arg() and then forward them to the kernel in the appropriate CPU
  // register. We do not need to know whether this is an integer or a pointer
  // value.
  //
  // On 64bit systems, variadic arguments can be either 32bit or 64bit wide,
  // which would seem to make it more important that we pass the correct type
  // to va_arg(). And we really can't know what this type is unless we have a
  // table with function signatures for all system calls.
  //
  // Fortunately, on x86-64 this is less critical. The first six function
  // arguments will be passed in CPU registers, no matter whether they were
  // named or variadic. This only leaves us with a single argument (if present)
  // that could be passed on the stack. And since x86-64 is little endian,
  // it will have the correct value both for 32bit and 64bit quantities.
  //
  // N.B. Because of how the x86-64 ABI works, it is possible that 32bit
  // quantities will have undefined garbage bits in the upper 32 bits of a
  // 64bit register. This is relatively unlikely for the first five system
  // call arguments, as the processor does automatic sign extensions and zero
  // filling so frequently, there rarely is garbage in CPU registers. But it
  // is quite likely for the last argument, which is passed on the stack.
  // That's generally OK, because the kernel has the correct function
  // signatures and knows to only inspect the LSB of a 32bit value.
  // But callers must be careful in cases, where the compiler cannot tell
  // the difference (e.g. when passing NULL to any system call, it must
  // always be cast to a pointer type).
  // The glibc implementation of syscall() has the exact same issues.
  // In the unlikely event that this ever becomes a problem, we could add
  // code that handles six-argument system calls specially. The number of
  // system calls that take six arguments and expect a 32bit value in the
  // sixth argument is very limited.
  va_start(ap, nr);
  args[0] = va_arg(ap, void *);
  args[1] = va_arg(ap, void *);
  args[2] = va_arg(ap, void *);
  args[3] = va_arg(ap, void *);
  args[4] = va_arg(ap, void *);
  args[5] = va_arg(ap, void *);
  va_end(ap);

  // Invoke our file-scope assembly code. The constraints have been picked
  // carefully to match what the rest of the assembly code expects in input,
  // output, and clobbered registers.
#if defined(__i386__)
  intptr_t ret = nr;
  asm volatile(
    "call SyscallAsm\n"
    // N.B. These are not the calling conventions normally used by the ABI.
    : "=a"(ret)
    : "0"(ret), "D"(args)
    : "esp", "memory", "ecx", "edx");
#elif defined(__x86_64__)
  intptr_t ret = nr;
  {
    register void **data __asm__("r12") = args;
    asm volatile(
      "call SyscallAsm\n"
      // N.B. These are not the calling conventions normally used by the ABI.
      : "=a"(ret)
      : "0"(ret), "r"(data)
      : "rsp", "memory",
        "rcx", "rdi", "rsi", "rdx", "r8", "r9", "r10", "r11");
  }
#elif defined(__arm__)
  intptr_t ret;
  {
    register intptr_t inout __asm__("r0") = nr;
    register void **data __asm__("r6") = args;
    asm volatile(
      "bl SyscallAsm\n"
      // N.B. These are not the calling conventions normally used by the ABI.
      : "=r"(inout)
      : "0"(inout), "r"(data)
      : "lr", "memory", "r1", "r2", "r3", "r4", "r5"
      // BUGFIX: this guard used to read "#if !defined(__arm__)", which is
      // always false inside this "#elif defined(__arm__)" branch, so "r7"
      // was never declared as clobbered even though the assembly above
      // overwrites it ("mov r7, r0") in ARM mode.
#if !defined(__thumb__)
      // In thumb mode, we cannot use "r7" as a general purpose register, as
      // it is our frame pointer. We have to manually manage and preserve it.
      // In ARM mode, we have a dedicated frame pointer register and "r7" is
      // thus available as a general purpose register. We don't preserve it,
      // but instead mark it as clobbered.
      , "r7"
#endif
      );
    ret = inout;
  }
#else
  // Unsupported architecture: report "function not implemented" via errno.
  errno = ENOSYS;
  intptr_t ret = -1;
#endif
  return ret;
}
| 281 |
| 282 } // namespace |
| OLD | NEW |