Chromium Code Reviews

Unified Diff: src/mips/code-stubs-mips.cc

Issue 10878047: Revert to code state of 3.13.1 plus r12350 (Closed) Base URL: https://v8.googlecode.com/svn/trunk
Patch Set: Created 8 years, 4 months ago
Index: src/mips/code-stubs-mips.cc
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index abc82a7c97989bea450941d78ada470bac4dfeb4..63e3b4454e4359dd7752a15b9159d9fed740e9b2 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -108,10 +108,10 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
? Context::FUNCTION_MAP_INDEX
: Context::STRICT_MODE_FUNCTION_MAP_INDEX;
- // Compute the function map in the current native context and set that
+ // Compute the function map in the current global context and set that
// as the map of the allocated object.
- __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset));
+ __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
__ lw(t1, MemOperand(a2, Context::SlotOffset(map_index)));
__ sw(t1, FieldMemOperand(v0, HeapObject::kMapOffset));
@@ -151,8 +151,8 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, t2, t3);
- // a2 holds native context, a1 points to fixed array of 3-element entries
- // (native context, optimized code, literals).
+ // a2 holds global context, a1 points to fixed array of 3-element entries
+ // (global context, optimized code, literals).
// The optimized code map must never be empty, so check the first elements.
Label install_optimized;
// Speculatively move code object into t0.
@@ -244,12 +244,12 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
__ sw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
// Set up the fixed slots, copy the global object from the previous context.
- __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ li(a1, Operand(Smi::FromInt(0)));
__ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
__ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
__ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
- __ sw(a2, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ sw(a2, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));
// Initialize the rest of the slots to undefined.
__ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
@@ -291,9 +291,9 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ li(a2, Operand(Smi::FromInt(length)));
__ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
- // If this block context is nested in the native context we get a smi
+ // If this block context is nested in the global context we get a smi
// sentinel instead of a function. The block context should get the
- // canonical empty function of the native context as its closure which
+ // canonical empty function of the global context as its closure which
// we still have to look up.
Label after_sentinel;
__ JumpIfNotSmi(a3, &after_sentinel);
@@ -302,16 +302,16 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ Assert(eq, message, a3, Operand(zero_reg));
}
__ lw(a3, GlobalObjectOperand());
- __ lw(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset));
+ __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalContextOffset));
__ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
__ bind(&after_sentinel);
// Set up the fixed slots, copy the global object from the previous context.
- __ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
__ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
__ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX));
__ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));
- __ sw(a2, ContextOperand(v0, Context::GLOBAL_OBJECT_INDEX));
+ __ sw(a2, ContextOperand(v0, Context::GLOBAL_INDEX));
// Initialize the rest of the slots to the hole value.
__ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
@@ -4646,14 +4646,14 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// v0 = address of new object(s) (tagged)
// a2 = argument count (tagged)
- // Get the arguments boilerplate from the current native context into t0.
+ // Get the arguments boilerplate from the current (global) context into t0.
const int kNormalOffset =
Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
const int kAliasedOffset =
Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
- __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
+ __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
Label skip2_ne, skip2_eq;
__ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
__ lw(t0, MemOperand(t0, kNormalOffset));
@@ -4841,9 +4841,9 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
static_cast<AllocationFlags>(TAG_OBJECT |
SIZE_IN_WORDS));
- // Get the arguments boilerplate from the current native context.
- __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
+ // Get the arguments boilerplate from the current (global) context.
+ __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
__ lw(t0, MemOperand(t0, Context::SlotOffset(
Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
@@ -5376,10 +5376,10 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// Set empty properties FixedArray.
// Set elements to point to FixedArray allocated right after the JSArray.
// Interleave operations for better latency.
- __ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
__ Addu(a3, v0, Operand(JSRegExpResult::kSize));
__ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array()));
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset));
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
__ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
__ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX));
__ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
@@ -5488,8 +5488,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ Branch(&call, ne, t0, Operand(at));
// Patch the receiver on the stack with the global receiver object.
- __ lw(a3,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ lw(a3, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalReceiverOffset));
__ sw(a3, MemOperand(sp, argc_ * kPointerSize));
__ bind(&call);
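
Note on the pattern restored by this revert: every hunk swaps the newer native-context lookup (Context::GLOBAL_OBJECT_INDEX followed by GlobalObject::kNativeContextOffset) back to the older global-context lookup (Context::GLOBAL_INDEX followed by GlobalObject::kGlobalContextOffset), leaving the rest of each stub unchanged. The sketch below is a minimal, standalone C++ illustration of that two-step pointer chase; the struct definitions and the main() harness are hypothetical stand-ins for V8's actual heap layout, which the real stubs access through MacroAssembler loads rather than plain pointers.

#include <cstddef>
#include <iostream>
#include <vector>

struct Context;

// Illustrative stand-in for V8's GlobalObject; the real field is read via
// GlobalObject::kGlobalContextOffset.
struct GlobalObject {
  Context* global_context;
};

// Illustrative stand-in for V8's Context; the real slot is read via
// Context::SlotOffset(Context::GLOBAL_INDEX).
struct Context {
  GlobalObject* global_object;
  std::vector<void*> slots;
};

// C++ analogue of the restored assembly sequence, e.g.:
//   __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
//   __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
//   __ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX));
void* LookupGlobalContextSlot(const Context* cp, std::size_t index) {
  GlobalObject* global = cp->global_object;          // load the global object
  Context* global_context = global->global_context;  // then its global context
  return global_context->slots[index];               // finally the wanted slot
}

int main() {
  GlobalObject global{};
  Context global_context{&global, {reinterpret_cast<void*>(0x1234)}};
  global.global_context = &global_context;
  Context current{&global, {}};                      // current (inner) context
  std::cout << LookupGlobalContextSlot(&current, 0) << "\n";
  return 0;
}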