| Index: src/ia32/lithium-codegen-ia32.cc
|
| diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
|
| index e03f73323d40968466de4162346693092d4dc77e..42e08d6dea340d459ef809edf6c1e14506ea15da 100644
|
| --- a/src/ia32/lithium-codegen-ia32.cc
|
| +++ b/src/ia32/lithium-codegen-ia32.cc
|
| @@ -595,6 +595,27 @@ void LCodeGen::CallCode(Handle<Code> code,
|
| }
|
|
|
|
|
| +void LCodeGen::CallCodeFromDeferred(Handle<Code> code,
|
| + RelocInfo::Mode mode,
|
| + LInstruction* instr) {
|
| + // Like CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT),
|
| +
|
| + ASSERT(instr != NULL);
|
| + LPointerMap* pointers = instr->pointer_map();
|
| + RecordPosition(pointers->position());
|
| + __ call(code, mode);
|
| + RecordSafepointWithRegisters(
|
| + instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
|
| +
|
| + // Signal that we don't inline smi code before these stubs in the
|
| + // optimizing code generator.
|
| + if (code->kind() == Code::BINARY_OP_IC ||
|
| + code->kind() == Code::COMPARE_IC) {
|
| + __ nop();
|
| + }
|
| +}
|
| +
|
| +
|
| void LCodeGen::CallRuntime(const Runtime::Function* fun,
|
| int argc,
|
| LInstruction* instr) {
|
| @@ -633,6 +654,16 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
|
| }
|
|
|
|
|
| +void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
|
| + int argc,
|
| + LInstruction* instr) {
|
| + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
|
| + __ CallRuntimeSaveDoubles(id);
|
| + RecordSafepointWithRegisters(
|
| + instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
|
| +}
|
| +
|
| +
|
| void LCodeGen::RegisterEnvironmentForDeoptimization(
|
| LEnvironment* environment, Safepoint::DeoptMode mode) {
|
| if (!environment->HasBeenRegistered()) {
|
| @@ -4057,6 +4088,54 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
|
| }
|
|
|
|
|
| +class DeferredTransitionElementsKind: public LDeferredCode {
|
| + public:
|
| + DeferredTransitionElementsKind(LCodeGen* codegen, LTransitionElementsKind* instr)
|
| + : LDeferredCode(codegen), instr_(instr) { }
|
| + virtual void Generate() { codegen()->DoDeferredTransitionElementsKind(instr_); }
|
| + virtual LInstruction* instr() { return instr_; }
|
| + private:
|
| + LTransitionElementsKind* instr_;
|
| +};
|
| +
|
| +
|
| +void LCodeGen::DoDeferredTransitionElementsKind(LTransitionElementsKind* instr) {
|
| + Register object_reg = ToRegister(instr->object());
|
| + Handle<Map> to_map = instr->transitioned_map();
|
| + ElementsKind to_kind = to_map->elements_kind();
|
| +
|
| + PushSafepointRegistersScope scope(this);
|
| +
|
| + /*
|
| + Handle<Code> code = IsFastDoubleElementsKind(to_kind)
|
| + ? isolate()->builtins()->TransitionElementsSmiToDouble()
|
| + : isolate()->builtins()->TransitionElementsDoubleToObject();
|
| +
|
| + __ mov(ebx, to_map);
|
| + if (!edx.is(object_reg)) {
|
| + __ mov(edx, object_reg);
|
| + }
|
| +
|
| +
|
| + CallCodeFromDeferred(code,
|
| + RelocInfo::CODE_TARGET,
|
| + instr);
|
| + // RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
|
| + */
|
| +
|
| + Runtime::FunctionId function_id = IsFastDoubleElementsKind(to_kind)
|
| + ? Runtime::kTransitionElementsSmiToDouble
|
| + : Runtime::kTransitionElementsDoubleToObject;
|
| +
|
| + __ push(object_reg);
|
| + CallRuntimeFromDeferred(function_id, 1, instr);
|
| +
|
| + // __ AssertSmi(eax);
|
| + // __ SmiUntag(eax);
|
| + __ StoreToSafepointRegisterSlot(object_reg, eax);
|
| +}
|
| +
|
| +
|
| void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
|
| Register object_reg = ToRegister(instr->object());
|
| Register new_map_reg = ToRegister(instr->new_map_temp());
|
| @@ -4073,6 +4152,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
|
| is_simple_map_transition ? Label::kNear : Label::kFar;
|
| __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
|
| __ j(not_equal, &not_applicable, branch_distance);
|
| +
|
| if (is_simple_map_transition) {
|
| Register object_reg = ToRegister(instr->object());
|
| Handle<Map> map = instr->hydrogen()->transitioned_map();
|
| @@ -4083,24 +4163,14 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
|
| __ RecordWriteForMap(object_reg, to_map, new_map_reg,
|
| ToRegister(instr->temp()),
|
| kDontSaveFPRegs);
|
| - } else if (IsFastSmiElementsKind(from_kind) &&
|
| - IsFastDoubleElementsKind(to_kind)) {
|
| - __ mov(new_map_reg, to_map);
|
| - Register fixed_object_reg = ToRegister(instr->temp());
|
| - ASSERT(fixed_object_reg.is(edx));
|
| - ASSERT(new_map_reg.is(ebx));
|
| - __ mov(fixed_object_reg, object_reg);
|
| - CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
|
| - RelocInfo::CODE_TARGET, instr);
|
| - } else if (IsFastDoubleElementsKind(from_kind) &&
|
| - IsFastObjectElementsKind(to_kind)) {
|
| - __ mov(new_map_reg, to_map);
|
| - Register fixed_object_reg = ToRegister(instr->temp());
|
| - ASSERT(fixed_object_reg.is(edx));
|
| - ASSERT(new_map_reg.is(ebx));
|
| - __ mov(fixed_object_reg, object_reg);
|
| - CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
|
| - RelocInfo::CODE_TARGET, instr);
|
| + } else if ((IsFastSmiElementsKind(from_kind) &&
|
| + IsFastDoubleElementsKind(to_kind)) ||
|
| + (IsFastDoubleElementsKind(from_kind) &&
|
| + IsFastObjectElementsKind(to_kind))) {
|
| + DeferredTransitionElementsKind* deferred =
|
| + new(zone()) DeferredTransitionElementsKind(this, instr);
|
| + __ jmp(deferred->entry());
|
| + __ bind(deferred->exit());
|
| } else {
|
| UNREACHABLE();
|
| }
|
| @@ -5165,27 +5235,42 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
|
|
|
| void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
|
| ASSERT(ToRegister(instr->context()).is(esi));
|
| - int size = instr->hydrogen()->total_size();
|
| - ElementsKind boilerplate_elements_kind =
|
| - instr->hydrogen()->boilerplate()->GetElementsKind();
|
| -
|
| - // Deopt if the literal boilerplate ElementsKind is of a type different than
|
| - // the expected one. The check isn't necessary if the boilerplate has already
|
| - // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
|
| - if (CanTransitionToMoreGeneralFastElementsKind(
|
| - boilerplate_elements_kind, true)) {
|
| - __ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
|
| - __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
|
| - // Load the map's "bit field 2". We only need the first byte,
|
| - // but the following masking takes care of that anyway.
|
| - __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
|
| - // Retrieve elements_kind from bit field 2.
|
| - __ and_(ecx, Map::kElementsKindMask);
|
| - __ cmp(ecx, boilerplate_elements_kind << Map::kElementsKindShift);
|
| - DeoptimizeIf(not_equal, instr->environment());
|
| + Handle<JSObject> result = instr->hydrogen()->boilerplate();
|
| +
|
| + if (instr->hydrogen()->TransitionRequested()) {
|
| + ElementsKind to_kind = instr->hydrogen()->TransitionTo();
|
| + result = Runtime::DeepCopyBoilerplate(isolate(),
|
| + instr->hydrogen()->boilerplate());
|
| + CHECK(!result.is_null());
|
| + // Now transition the fresh copy to the target elements kind.
|
| + CHECK(!JSObject::TransitionElementsKind(result, to_kind).is_null());
|
| + } else {
|
| + // Deopt if the literal boilerplate ElementsKind is of a type different than
|
| + // the expected one. The check isn't necessary if the boilerplate has
|
| + // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
|
| + ElementsKind boilerplate_elements_kind = result->GetElementsKind();
|
| + if (CanTransitionToMoreGeneralFastElementsKind(
|
| + boilerplate_elements_kind, true)) {
|
| + __ LoadHeapObject(ebx, result);
|
| + __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
|
| + // Load the map's "bit field 2". We only need the first byte,
|
| + // but the following masking takes care of that anyway.
|
| + __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
|
| + // Retrieve elements_kind from bit field 2.
|
| + __ and_(ecx, Map::kElementsKindMask);
|
| + __ cmp(ecx, boilerplate_elements_kind << Map::kElementsKindShift);
|
| + DeoptimizeIf(not_equal, instr->environment());
|
| + }
|
| }
|
|
|
| - // Allocate all objects that are part of the literal in one big
|
| + // We need to compute the size now.
|
| + int size = 0;
|
| + int max_properties = HFastLiteral::kMaxLiteralProperties;
|
| + HFastLiteral::IsFastLiteral(result,
|
| + HFastLiteral::kMaxLiteralDepth,
|
| + &max_properties,
|
| + &size);
|
| +
|
| // allocation. This avoids multiple limit checks.
|
| Label allocated, runtime_allocate;
|
| __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
|
| @@ -5197,8 +5282,8 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
|
|
|
| __ bind(&allocated);
|
| int offset = 0;
|
| - __ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
|
| - EmitDeepCopy(instr->hydrogen()->boilerplate(), eax, ebx, &offset);
|
| + __ LoadHeapObject(ebx, result);
|
| + EmitDeepCopy(result, eax, ebx, &offset);
|
| ASSERT_EQ(size, offset);
|
| }
|
|
|
|
|