OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <limits.h> // For LONG_MIN, LONG_MAX. | 5 #include <limits.h> // For LONG_MIN, LONG_MAX. |
6 | 6 |
7 #if V8_TARGET_ARCH_MIPS64 | 7 #if V8_TARGET_ARCH_MIPS64 |
8 | 8 |
9 #include "src/base/division-by-constant.h" | 9 #include "src/base/division-by-constant.h" |
10 #include "src/bootstrapper.h" | 10 #include "src/bootstrapper.h" |
(...skipping 4688 matching lines...)
4699 // Initialize the JSValue. | 4699 // Initialize the JSValue. |
4700 LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2); | 4700 LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2); |
4701 sd(scratch1, FieldMemOperand(result, HeapObject::kMapOffset)); | 4701 sd(scratch1, FieldMemOperand(result, HeapObject::kMapOffset)); |
4702 LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex); | 4702 LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex); |
4703 sd(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset)); | 4703 sd(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset)); |
4704 sd(scratch1, FieldMemOperand(result, JSObject::kElementsOffset)); | 4704 sd(scratch1, FieldMemOperand(result, JSObject::kElementsOffset)); |
4705 sd(value, FieldMemOperand(result, JSValue::kValueOffset)); | 4705 sd(value, FieldMemOperand(result, JSValue::kValueOffset)); |
4706 STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize); | 4706 STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize); |
4707 } | 4707 } |
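Note: the four sd/LoadRoot stores above fill the four pointer-size fields of a JSValue, which is exactly what the STATIC_ASSERT on kSize checks. A minimal C++ model of that layout, with a hypothetical struct name and the field order taken from the offset constants used above:

  struct JSValueLayoutSketch {
    void* map;         // HeapObject::kMapOffset
    void* properties;  // JSObject::kPropertiesOffset (empty fixed array here)
    void* elements;    // JSObject::kElementsOffset (also empty fixed array)
    void* value;       // JSValue::kValueOffset (the wrapped primitive)
  };
  static_assert(sizeof(JSValueLayoutSketch) == 4 * sizeof(void*),
                "mirrors STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize)");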
4708 | 4708 |
4709 | |
4710 void MacroAssembler::CopyBytes(Register src, | |
4711 Register dst, | |
4712 Register length, | |
4713 Register scratch) { | |
4714 Label align_loop_1, word_loop, byte_loop, byte_loop_1, done; | |
4715 | |
4716 // Align src before copying in word size chunks. | |
4717 Branch(&byte_loop, le, length, Operand(kPointerSize)); | |
4718 bind(&align_loop_1); | |
4719 And(scratch, src, kPointerSize - 1); | |
4720 Branch(&word_loop, eq, scratch, Operand(zero_reg)); | |
4721 lbu(scratch, MemOperand(src)); | |
4722 Daddu(src, src, 1); | |
4723 sb(scratch, MemOperand(dst)); | |
4724 Daddu(dst, dst, 1); | |
4725 Dsubu(length, length, Operand(1)); | |
4726 Branch(&align_loop_1, ne, length, Operand(zero_reg)); | |
4727 | |
4728 // Copy bytes in word size chunks. | |
4729 bind(&word_loop); | |
4730 if (emit_debug_code()) { | |
4731 And(scratch, src, kPointerSize - 1); | |
4732 Assert(eq, kExpectingAlignmentForCopyBytes, | |
4733 scratch, Operand(zero_reg)); | |
4734 } | |
4735 Branch(&byte_loop, lt, length, Operand(kPointerSize)); | |
4736 ld(scratch, MemOperand(src)); | |
4737 Daddu(src, src, kPointerSize); | |
4738 | |
4739 // TODO(kalmard) check if this can be optimized to use sw in most cases. | |
4740 // Can't use unaligned access - copy byte by byte. | |
4741 if (kArchEndian == kLittle) { | |
4742 sb(scratch, MemOperand(dst, 0)); | |
4743 dsrl(scratch, scratch, 8); | |
4744 sb(scratch, MemOperand(dst, 1)); | |
4745 dsrl(scratch, scratch, 8); | |
4746 sb(scratch, MemOperand(dst, 2)); | |
4747 dsrl(scratch, scratch, 8); | |
4748 sb(scratch, MemOperand(dst, 3)); | |
4749 dsrl(scratch, scratch, 8); | |
4750 sb(scratch, MemOperand(dst, 4)); | |
4751 dsrl(scratch, scratch, 8); | |
4752 sb(scratch, MemOperand(dst, 5)); | |
4753 dsrl(scratch, scratch, 8); | |
4754 sb(scratch, MemOperand(dst, 6)); | |
4755 dsrl(scratch, scratch, 8); | |
4756 sb(scratch, MemOperand(dst, 7)); | |
4757 } else { | |
4758 sb(scratch, MemOperand(dst, 7)); | |
4759 dsrl(scratch, scratch, 8); | |
4760 sb(scratch, MemOperand(dst, 6)); | |
4761 dsrl(scratch, scratch, 8); | |
4762 sb(scratch, MemOperand(dst, 5)); | |
4763 dsrl(scratch, scratch, 8); | |
4764 sb(scratch, MemOperand(dst, 4)); | |
4765 dsrl(scratch, scratch, 8); | |
4766 sb(scratch, MemOperand(dst, 3)); | |
4767 dsrl(scratch, scratch, 8); | |
4768 sb(scratch, MemOperand(dst, 2)); | |
4769 dsrl(scratch, scratch, 8); | |
4770 sb(scratch, MemOperand(dst, 1)); | |
4771 dsrl(scratch, scratch, 8); | |
4772 sb(scratch, MemOperand(dst, 0)); | |
4773 } | |
4774 Daddu(dst, dst, 8); | |
4775 | |
4776 Dsubu(length, length, Operand(kPointerSize)); | |
4777 Branch(&word_loop); | |
4778 | |
4779 // Copy the last bytes if any left. | |
4780 bind(&byte_loop); | |
4781 Branch(&done, eq, length, Operand(zero_reg)); | |
4782 bind(&byte_loop_1); | |
4783 lbu(scratch, MemOperand(src)); | |
4784 Daddu(src, src, 1); | |
4785 sb(scratch, MemOperand(dst)); | |
4786 Daddu(dst, dst, 1); | |
4787 Dsubu(length, length, Operand(1)); | |
4788 Branch(&byte_loop_1, ne, length, Operand(zero_reg)); | |
4789 bind(&done); | |
4790 } | |
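For readers skimming the deleted helper: CopyBytes copies single bytes until src is pointer-aligned (align_loop_1), then moves kPointerSize chunks by loading an aligned word and storing it byte by byte, since dst may be unaligned and the byte order depends on endianness, and finally drains any tail bytes one at a time (byte_loop). A minimal C++ sketch of the same strategy, assuming 8-byte pointers as on MIPS64 (function name hypothetical):

  #include <cstddef>
  #include <cstdint>
  #include <cstring>

  void CopyBytesSketch(const uint8_t* src, uint8_t* dst, size_t length) {
    const size_t kWord = sizeof(uintptr_t);  // kPointerSize == 8 on MIPS64.
    // Align src before copying in word-size chunks (align_loop_1).
    while (length > kWord &&
           (reinterpret_cast<uintptr_t>(src) & (kWord - 1)) != 0) {
      *dst++ = *src++;
      --length;
    }
    // word_loop: aligned load, byte-by-byte store because dst may be
    // unaligned. Little-endian order shown; the big-endian path stores the
    // same bytes in reverse.
    while (length >= kWord) {
      uintptr_t chunk;
      std::memcpy(&chunk, src, kWord);  // stands in for the aligned ld.
      for (size_t i = 0; i < kWord; ++i) {
        dst[i] = static_cast<uint8_t>(chunk >> (8 * i));  // sb + dsrl pairs.
      }
      src += kWord;
      dst += kWord;
      length -= kWord;
    }
    // byte_loop: drain the tail one byte at a time.
    while (length > 0) {
      *dst++ = *src++;
      --length;
    }
  }

The byte-wise stores on the word path are what the TODO(kalmard) comment refers to: the aligned load is cheap, but the unaligned destination forces eight sb instructions per word.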
4791 | |
4792 | |
4793 void MacroAssembler::InitializeFieldsWithFiller(Register current_address, | 4709 void MacroAssembler::InitializeFieldsWithFiller(Register current_address, |
4794 Register end_address, | 4710 Register end_address, |
4795 Register filler) { | 4711 Register filler) { |
4796 Label loop, entry; | 4712 Label loop, entry; |
4797 Branch(&entry); | 4713 Branch(&entry); |
4798 bind(&loop); | 4714 bind(&loop); |
4799 sd(filler, MemOperand(current_address)); | 4715 sd(filler, MemOperand(current_address)); |
4800 Daddu(current_address, current_address, kPointerSize); | 4716 Daddu(current_address, current_address, kPointerSize); |
4801 bind(&entry); | 4717 bind(&entry); |
4802 Branch(&loop, ult, current_address, Operand(end_address)); | 4718 Branch(&loop, ult, current_address, Operand(end_address)); |
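InitializeFieldsWithFiller is a bottom-tested fill loop: the initial Branch(&entry) jumps straight to the comparison, so an empty range stores nothing. In C++ terms, roughly (a sketch, treating the registers as pointer-size slots):

  void InitializeFieldsWithFillerSketch(void** current, void** end,
                                        void* filler) {
    // ult compares the addresses as unsigned values.
    while (current < end) {
      *current++ = filler;  // sd filler, then advance by kPointerSize.
    }
  }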
(...skipping 2503 matching lines...)
7306 if (mag.shift > 0) sra(result, result, mag.shift); | 7222 if (mag.shift > 0) sra(result, result, mag.shift); |
7307 srl(at, dividend, 31); | 7223 srl(at, dividend, 31); |
7308 Addu(result, result, Operand(at)); | 7224 Addu(result, result, Operand(at)); |
7309 } | 7225 } |
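The visible tail here is the standard fixup for signed division by a constant via a magic multiplier (cf. src/base/division-by-constant.h, included at the top of this file): after taking the shifted high half of the product, adding the dividend's sign bit (the srl by 31 plus Addu) rounds the quotient toward zero. A hedged C++ model of the whole sequence; the multiply and the overflow fixups are reconstructed from the standard Granlund-Montgomery algorithm, not from lines visible in this excerpt:

  #include <cstdint>

  // 'multiplier' and 'shift' correspond to mag.multiplier / mag.shift,
  // which V8 computes with base::SignedDivisionByConstant.
  int32_t TruncatingDivSketch(int32_t dividend, int32_t divisor,
                              int32_t multiplier, int shift) {
    int32_t result = static_cast<int32_t>(
        (static_cast<int64_t>(dividend) * multiplier) >> 32);  // high word.
    if (divisor > 0 && multiplier < 0) result += dividend;  // standard
    if (divisor < 0 && multiplier > 0) result -= dividend;  // fixups.
    if (shift > 0) result >>= shift;  // sra(result, result, mag.shift).
    result += static_cast<uint32_t>(dividend) >> 31;  // srl 31 + Addu.
    return result;
  }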
7310 | 7226 |
7311 | 7227 |
7312 } // namespace internal | 7228 } // namespace internal |
7313 } // namespace v8 | 7229 } // namespace v8 |
7314 | 7230 |
7315 #endif // V8_TARGET_ARCH_MIPS64 | 7231 #endif // V8_TARGET_ARCH_MIPS64 |