Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(517)

Side by Side Diff: base/android/linker/crazy_linker/src/crazy_linker_elf_loader.cpp

Issue 23717023: Android: Add chrome-specific dynamic linker. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Rename library Created 7 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
(Empty)
1 // Copyright (c) 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "crazy_linker_elf_loader.h"
6
7 #include <limits.h> // For PAGE_SIZE and PAGE_MASK
8
9 #include "crazy_linker_debug.h"
10 #include "linker_phdr.h"
11
12 #define PAGE_START(x) ((x) & PAGE_MASK)
13 #define PAGE_OFFSET(x) ((x) & ~PAGE_MASK)
14 #define PAGE_END(x) PAGE_START((x) + (PAGE_SIZE-1))
15
16 namespace crazy {
17
18 #define MAYBE_MAP_FLAG(x,from,to) (((x) & (from)) ? (to) : 0)
19 #define PFLAGS_TO_PROT(x) (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
20 MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
21 MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
22
// Default constructor. Initializes all members to empty/zero values.
// No file is opened and no memory is mapped until LoadAt() is called.
ElfLoader::ElfLoader()
    : fd_(),                      // library file descriptor (closed).
      path_(NULL),                // library path, set by LoadAt().
      phdr_num_(0),               // number of program header entries.
      phdr_mmap_(NULL),           // temporary mmap of the phdr table.
      phdr_table_(NULL),          // pointer into phdr_mmap_ at the table start.
      phdr_size_(0),              // byte size of the phdr_mmap_ mapping.
      file_offset_(0),            // page-aligned offset of the ELF image in the file.
      wanted_load_address_(0),    // fixed load address, or 0 for "anywhere".
      load_start_(NULL),          // start of the reserved address range.
      load_size_(0),              // size of the reserved address range.
      load_bias_(0),              // load_start_ minus the minimum p_vaddr.
      loaded_phdr_(NULL) {        // phdr table address inside the loaded image.
}
37
38 ElfLoader::~ElfLoader() {
39 if (phdr_mmap_) {
40 // Deallocate the temporary program header copy.
41 munmap(phdr_mmap_, phdr_size_);
42 }
43 }
44
45 bool ElfLoader::LoadAt(const char* lib_path,
46 off_t file_offset,
47 uintptr_t wanted_address,
48 Error* error) {
49
50 LOG("%s: lib_path='%s', file_offset=%p, load_address=%p\n",
51 __FUNCTION__, lib_path, file_offset, wanted_address);
52
53 // Check that the load address is properly page-aligned.
54 if (wanted_address != PAGE_START(wanted_address)) {
55 error->Format("Load address is not page aligned (%08x)",
56 wanted_address);
57 return false;
58 }
59 wanted_load_address_ = reinterpret_cast<void*>(wanted_address);
60
61 // Check that the file offset is also properly page-aligned.
62 if (file_offset != PAGE_START(file_offset)) {
63 error->Format("File offset is not page aligned (%08x)",
64 file_offset);
65 return false;
66 }
67 file_offset_ = file_offset;
68
69 // Open the file.
70 if (!fd_.OpenReadOnly(lib_path)) {
71 error->Format("Can't open file: %s", strerror(errno));
72 return false;
73 }
74
75 if (file_offset && fd_.SeekTo(file_offset) < 0) {
76 error->Format("Can't seek to file offset %08x: %s",
77 file_offset, strerror(errno));
78 return false;
79 }
80
81 path_ = lib_path;
82
83 if (!ReadElfHeader(error) ||
84 !ReadProgramHeader(error) ||
85 !ReserveAddressSpace(error)) {
86 return false;
87 }
88
89 if (!LoadSegments(error) || !FindPhdr(error)) {
90 // An error occured, cleanup the address space by un-mapping the
91 // range that was reserved by ReserveAddressSpace().
92 if (load_start_ && load_size_)
93 munmap(load_start_, load_size_);
94
95 return false;
96 }
97
98 return true;
99 }
100
101 bool ElfLoader::ReadElfHeader(Error* error) {
102 int ret = fd_.Read(&header_, sizeof(header_));
103 if (ret < 0) {
104 error->Format("Can't read file: %s", strerror(errno));
105 return false;
106 }
107 if (ret != static_cast<int>(sizeof(header_))) {
108 error->Set("File too small to be ELF");
109 return false;
110 }
111
112 if (header_.e_ident[EI_MAG0] != ELFMAG0 ||
113 header_.e_ident[EI_MAG1] != ELFMAG1 ||
114 header_.e_ident[EI_MAG2] != ELFMAG2 ||
115 header_.e_ident[EI_MAG3] != ELFMAG3) {
116 error->Set("Bad ELF magic");
117 return false;
118 }
119
120 if (header_.e_ident[EI_CLASS] != ELFCLASS32) {
121 error->Format("Not a 32-bit class: %d",
122 header_.e_ident[EI_CLASS]);
123 return false;
124 }
125 if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
126 error->Format("Not little-endian class: %d",
127 header_.e_ident[EI_DATA]);
128 return false;
129 }
130
131 if (header_.e_type != ET_DYN) {
132 error->Format("Not a shared library type: %d",
133 header_.e_type);
134 return false;
135 }
136
137 if (header_.e_version != EV_CURRENT) {
138 error->Format("Unexpected ELF version: %d",
139 header_.e_version);
140 return false;
141 }
142
143 if (header_.e_machine != ELF_MACHINE) {
144 error->Format("Unexpected ELF machine type: %d",
145 header_.e_machine);
146 return false;
147 }
148
149 return true;
150 }
151
// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
// Sets phdr_num_, phdr_size_, phdr_mmap_ and phdr_table_ on success.
// On failure, returns false and sets |error|.
bool ElfLoader::ReadProgramHeader(Error* error) {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, only accept program header tables smaller than 64 KB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ELF::Phdr)) {
    error->Format("Invalid program header count: %d", phdr_num_);
    return false;
  }

  // The table may not start on a page boundary, so map the whole page
  // range containing it and keep the in-page offset separately.
  ELF::Addr page_min = PAGE_START(header_.e_phoff);
  ELF::Addr page_max =
      PAGE_END(header_.e_phoff + (phdr_num_ * sizeof(ELF::Phdr)));
  ELF::Addr page_offset = PAGE_OFFSET(header_.e_phoff);

  phdr_size_ = page_max - page_min;

  // e_phoff is relative to the ELF image, so add file_offset_ to get the
  // real position within the file being mapped.
  void* mmap_result = fd_.Map(NULL,
                              phdr_size_,
                              PROT_READ,
                              MAP_PRIVATE,
                              page_min + file_offset_);
  if (mmap_result == MAP_FAILED) {
    error->Format("Phdr mmap failed: %s", strerror(errno));
    return false;
  }

  // phdr_mmap_ owns the mapping (released in the destructor);
  // phdr_table_ points at the first entry inside it.
  phdr_mmap_ = mmap_result;
  phdr_table_ = reinterpret_cast<ELF::Phdr*>(
      reinterpret_cast<char*>(mmap_result) + page_offset);
  return true;
}
185
186 // Reserve a virtual address range big enough to hold all loadable
187 // segments of a program header table. This is done by creating a
188 // private anonymous mmap() with PROT_NONE.
189 //
190 // This will use the wanted_load_address_ value,
191 bool ElfLoader::ReserveAddressSpace(Error* error) {
192 ELF::Addr min_vaddr;
193 load_size_ =
194 phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr, NULL);
195 if (load_size_ == 0) {
196 error->Set("No loadable segments");
197 return false;
198 }
199
200 uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
201 int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
202
203 // Support loading at a fixed address.
204 if (wanted_load_address_) {
205 addr = static_cast<uint8_t*>(wanted_load_address_);
206 mmap_flags |= MAP_FIXED;
207 }
208
209 LOG("%s: address=%p size=%p\n", __FUNCTION__, addr, load_size_);
210 void* start = mmap(addr, load_size_, PROT_NONE, mmap_flags, -1, 0);
211 if (start == MAP_FAILED) {
212 error->Format("Could not reserve %d bytes of address space",
213 load_size_);
214 return false;
215 }
216
217 load_start_ = start;
218 load_bias_ = reinterpret_cast<ELF::Addr>(start) - min_vaddr;
219 return true;
220 }
221
// Returns the address of the program header table as it appears in the loaded
// segments in memory. This is in contrast with 'phdr_table_' which
// is temporary and will be released before the library is relocated.
// On success, stores the result via CheckPhdr() into loaded_phdr_ and
// returns true; otherwise returns false and sets |error|.
bool ElfLoader::FindPhdr(Error* error) {
  const ELF::Phdr* phdr_limit = phdr_table_ + phdr_num_;

  // If there is a PT_PHDR, use it directly.
  for (const ELF::Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_PHDR) {
      // p_vaddr is link-time relative; add load_bias_ for the run-time address.
      return CheckPhdr(load_bias_ + phdr->p_vaddr, error);
    }
  }

  // Otherwise, check the first loadable segment. If its file offset
  // is 0, it starts with the ELF header, and we can trivially find the
  // loaded program header from it.
  for (const ELF::Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_LOAD) {
      if (phdr->p_offset == 0) {
        ELF::Addr elf_addr = load_bias_ + phdr->p_vaddr;
        // The loaded segment begins with a copy of the ELF header; its
        // e_phoff gives the table's offset from the header itself.
        const ELF::Ehdr* ehdr = (const ELF::Ehdr*)(void*)elf_addr;
        ELF::Addr offset = ehdr->e_phoff;
        return CheckPhdr((ELF::Addr)ehdr + offset, error);
      }
      // Only the first PT_LOAD can start at file offset 0; stop looking.
      break;
    }
  }

  error->Set("Can't find loaded program header");
  return false;
}
253
254 // Ensures that our program header is actually within a loadable
255 // segment. This should help catch badly-formed ELF files that
256 // would cause the linker to crash later when trying to access it.
257 bool ElfLoader::CheckPhdr(ELF::Addr loaded, Error* error) {
258 const ELF::Phdr* phdr_limit = phdr_table_ + phdr_num_;
259 ELF::Addr loaded_end = loaded + (phdr_num_ * sizeof(ELF::Phdr));
260 for (ELF::Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
261 if (phdr->p_type != PT_LOAD) {
262 continue;
263 }
264 ELF::Addr seg_start = phdr->p_vaddr + load_bias_;
265 ELF::Addr seg_end = phdr->p_filesz + seg_start;
266 if (seg_start <= loaded && loaded_end <= seg_end) {
267 loaded_phdr_ = reinterpret_cast<const ELF::Phdr*>(loaded);
268 return true;
269 }
270 }
271 error->Format("Loaded program header %x not in loadable segment",
272 loaded);
273 return false;
274 }
275
276 // Map all loadable segments in process' address space.
277 // This assumes you already called phdr_table_reserve_memory to
278 // reserve the address space range for the library.
279 bool ElfLoader::LoadSegments(Error* error) {
280 for (size_t i = 0; i < phdr_num_; ++i) {
281 const ELF::Phdr* phdr = &phdr_table_[i];
282
283 if (phdr->p_type != PT_LOAD) {
284 continue;
285 }
286
287 // Segment addresses in memory.
288 ELF::Addr seg_start = phdr->p_vaddr + load_bias_;
289 ELF::Addr seg_end = seg_start + phdr->p_memsz;
290
291 ELF::Addr seg_page_start = PAGE_START(seg_start);
292 ELF::Addr seg_page_end = PAGE_END(seg_end);
293
294 ELF::Addr seg_file_end = seg_start + phdr->p_filesz;
295
296 // File offsets.
297 ELF::Addr file_start = phdr->p_offset;
298 ELF::Addr file_end = file_start + phdr->p_filesz;
299
300 ELF::Addr file_page_start = PAGE_START(file_start);
301 ELF::Addr file_length = file_end - file_page_start;
302
303 LOG("%s: file_offset=%p file_length=%p start_address=%p end_address=%p\n",
304 __FUNCTION__, file_offset_ + file_page_start,
305 file_length, seg_page_start, seg_page_start + PAGE_END(file_length));
306
307 if (file_length != 0) {
308 void* seg_addr = fd_.Map((void*)seg_page_start,
309 file_length,
310 PFLAGS_TO_PROT(phdr->p_flags),
311 MAP_FIXED|MAP_PRIVATE,
312 file_page_start + file_offset_);
313 if (seg_addr == MAP_FAILED) {
314 error->Format("Could not map segment %d: %s", i, strerror(errno));
315 return false;
316 }
317 }
318
319 // if the segment is writable, and does not end on a page boundary,
320 // zero-fill it until the page limit.
321 if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
322 memset((void*)seg_file_end, 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
323 }
324
325 seg_file_end = PAGE_END(seg_file_end);
326
327 // seg_file_end is now the first page address after the file
328 // content. If seg_end is larger, we need to zero anything
329 // between them. This is done by using a private anonymous
330 // map for all extra pages.
331 if (seg_page_end > seg_file_end) {
332 void* zeromap = mmap((void*)seg_file_end,
333 seg_page_end - seg_file_end,
334 PFLAGS_TO_PROT(phdr->p_flags),
335 MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
336 -1,
337 0);
338 if (zeromap == MAP_FAILED) {
339 error->Format("Could not zero-fill gap: %s", strerror(errno));
340 return false;
341 }
342 }
343 }
344 return true;
345 }
346
347 } // namespace crazy
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698