Diffstat (limited to 'V8Binding/v8/src/ia32')
-rw-r--r-- | V8Binding/v8/src/ia32/codegen-ia32.cc                | 18
-rw-r--r-- | V8Binding/v8/src/ia32/ic-ia32.cc                      | 38
-rw-r--r-- | V8Binding/v8/src/ia32/macro-assembler-ia32.cc         |  9
-rw-r--r-- | V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.cc  | 10
-rw-r--r-- | V8Binding/v8/src/ia32/register-allocator-ia32-inl.h   |  8
-rw-r--r-- | V8Binding/v8/src/ia32/stub-cache-ia32.cc              |  6
6 files changed, 52 insertions, 37 deletions
diff --git a/V8Binding/v8/src/ia32/codegen-ia32.cc b/V8Binding/v8/src/ia32/codegen-ia32.cc
index 6d1dc2d..457b22f 100644
--- a/V8Binding/v8/src/ia32/codegen-ia32.cc
+++ b/V8Binding/v8/src/ia32/codegen-ia32.cc
@@ -3857,7 +3857,7 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
     s = s->outer_scope();
   }
 
-  if (s->is_eval_scope()) {
+  if (s != NULL && s->is_eval_scope()) {
     // Loop up the context chain. There is no frame effect so it is
     // safe to use raw labels here.
     Label next, fast;
@@ -4351,7 +4351,7 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
            FieldOperand(elements.reg(), JSObject::kElementsOffset));
 
     // Write to the indexed properties array.
-    int offset = i * kPointerSize + Array::kHeaderSize;
+    int offset = i * kPointerSize + FixedArray::kHeaderSize;
     __ mov(FieldOperand(elements.reg(), offset), prop_value.reg());
 
     // Update the write barrier for the array address.
@@ -5388,12 +5388,6 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
   } else {
     Load(node->expression());
     switch (op) {
-      case Token::NOT:
-      case Token::DELETE:
-      case Token::TYPEOF:
-        UNREACHABLE();  // handled above
-        break;
-
       case Token::SUB: {
         bool overwrite =
             (node->AsBinaryOperation() != NULL &&
@@ -5448,6 +5442,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
       }
 
       default:
+        // NOT, DELETE, TYPEOF, and VOID are handled outside the
+        // switch.
         UNREACHABLE();
     }
   }
@@ -6309,7 +6305,7 @@ void Reference::GetValue(TypeofState typeof_state) {
       __ mov(index.reg(), key.reg());
       __ sar(index.reg(), kSmiTagSize);
       __ cmp(index.reg(),
-             FieldOperand(elements.reg(), Array::kLengthOffset));
+             FieldOperand(elements.reg(), FixedArray::kLengthOffset));
       deferred->Branch(above_equal);
 
       // Load and check that the result is not the hole. We could
@@ -6323,7 +6319,7 @@ void Reference::GetValue(TypeofState typeof_state) {
       __ mov(value.reg(), Operand(elements.reg(),
                                   index.reg(),
                                   times_4,
-                                  Array::kHeaderSize - kHeapObjectTag));
+                                  FixedArray::kHeaderSize - kHeapObjectTag));
       elements.Unuse();
       index.Unuse();
       __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
@@ -6495,7 +6491,7 @@ void Reference::SetValue(InitState init_state) {
      __ mov(Operand(tmp.reg(),
                     key.reg(),
                     times_2,
-                    Array::kHeaderSize - kHeapObjectTag),
+                    FixedArray::kHeaderSize - kHeapObjectTag),
             value.reg());
 
      __ IncrementCounter(&Counters::keyed_store_inline, 1);
diff --git a/V8Binding/v8/src/ia32/ic-ia32.cc b/V8Binding/v8/src/ia32/ic-ia32.cc
index 90e0fd1..d64dee1 100644
--- a/V8Binding/v8/src/ia32/ic-ia32.cc
+++ b/V8Binding/v8/src/ia32/ic-ia32.cc
@@ -43,6 +43,10 @@ namespace internal {
 
 
 // Helper function used to load a property from a dictionary backing storage.
+// This function may return false negatives, so miss_label
+// must always call a backup property load that is complete.
+// This function is safe to call if the receiver has fast properties,
+// or if name is not a symbol, and will jump to the miss_label in that case.
 static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
                                    Register r0, Register r1, Register r2,
                                    Register name) {
@@ -56,7 +60,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
   //
   // r2 - used to hold the capacity of the property dictionary.
   //
-  // name - holds the name of the property and is unchanges.
+  // name - holds the name of the property and is unchanged.
 
   Label done;
 
@@ -89,7 +93,8 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
 
   // Compute the capacity mask.
   const int kCapacityOffset =
-      Array::kHeaderSize + StringDictionary::kCapacityIndex * kPointerSize;
+      StringDictionary::kHeaderSize +
+      StringDictionary::kCapacityIndex * kPointerSize;
   __ mov(r2, FieldOperand(r0, kCapacityOffset));
   __ shr(r2, kSmiTagSize);  // convert smi to int
   __ dec(r2);
@@ -99,7 +104,8 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
   // cover ~93% of loads from dictionaries.
   static const int kProbes = 4;
   const int kElementsStartOffset =
-      Array::kHeaderSize + StringDictionary::kElementsStartIndex * kPointerSize;
+      StringDictionary::kHeaderSize +
+      StringDictionary::kElementsStartIndex * kPointerSize;
   for (int i = 0; i < kProbes; i++) {
     // Compute the masked index: (hash + i + i * i) & mask.
     __ mov(r1, FieldOperand(name, String::kLengthOffset));
@@ -153,6 +159,9 @@ static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, Label* miss,
 }
 
 
+// The offset from the inlined patch site to the start of the
+// inlined load instruction. It is 7 bytes (test eax, imm) plus
+// 6 bytes (jne slow_label).
 const int LoadIC::kOffsetToLoadInstruction = 13;
 
 
@@ -263,21 +272,28 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
          Immediate(Factory::hash_table_map()));
   __ j(equal, &slow, not_taken);
   // Check that the key (index) is within bounds.
-  __ cmp(eax, FieldOperand(ecx, Array::kLengthOffset));
+  __ cmp(eax, FieldOperand(ecx, FixedArray::kLengthOffset));
   __ j(below, &fast, taken);
   // Slow case: Load name and receiver from stack and jump to runtime.
   __ bind(&slow);
   __ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
   KeyedLoadIC::Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
 
-  // Check if the key is a symbol that is not an array index.
+  __ bind(&check_string);
+  // The key is not a smi.
+  // Is it a string?
+  __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
+  __ j(above_equal, &slow);
+  // Is the string an array index, with cached numeric value?
   __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
   __ test(ebx, Immediate(String::kIsArrayIndexMask));
   __ j(not_zero, &index_string, not_taken);
-  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
-  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+
+  // If the string is a symbol, do a quick inline probe of the receiver's
+  // dictionary, if it exists.
+  __ movzx_b(ebx, FieldOperand(edx, Map::kInstanceTypeOffset));
   __ test(ebx, Immediate(kIsSymbolMask));
-  __ j(not_zero, &slow, not_taken);
+  __ j(zero, &slow, not_taken);
   // Probe the dictionary leaving result in ecx.
   GenerateDictionaryLoad(masm, &slow, ebx, ecx, edx, eax);
   GenerateCheckNonObjectOrLoaded(masm, &slow, ecx, edx);
@@ -301,7 +317,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   __ jmp(&index_int);
   // Fast case: Do the load.
   __ bind(&fast);
-  __ mov(eax, Operand(ecx, eax, times_4, Array::kHeaderSize - kHeapObjectTag));
+  __ mov(eax,
+         Operand(ecx, eax, times_4, FixedArray::kHeaderSize - kHeapObjectTag));
   __ cmp(Operand(eax), Immediate(Factory::the_hole_value()));
   // In case the loaded value is the_hole we have to consult GetProperty
   // to ensure the prototype chain is searched.
@@ -419,7 +436,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   // eax: value
   // ecx: FixedArray
   // ebx: index (as a smi)
-  __ mov(Operand(ecx, ebx, times_2, Array::kHeaderSize - kHeapObjectTag), eax);
+  __ mov(Operand(ecx, ebx, times_2, FixedArray::kHeaderSize - kHeapObjectTag),
+         eax);
   // Update write barrier for the elements array address.
   __ mov(edx, Operand(eax));
   __ RecordWrite(ecx, 0, edx, ebx);
diff --git a/V8Binding/v8/src/ia32/macro-assembler-ia32.cc b/V8Binding/v8/src/ia32/macro-assembler-ia32.cc
index 479b8ca..fae1525 100644
--- a/V8Binding/v8/src/ia32/macro-assembler-ia32.cc
+++ b/V8Binding/v8/src/ia32/macro-assembler-ia32.cc
@@ -79,7 +79,7 @@ static void RecordWriteHelper(MacroAssembler* masm,
   // Add the page header, array header, and array body size to the page
   // address.
   masm->add(Operand(object), Immediate(Page::kObjectStartOffset
-                                       + Array::kHeaderSize));
+                                       + FixedArray::kHeaderSize));
   masm->add(object, Operand(scratch));
 
 
@@ -199,9 +199,10 @@ void MacroAssembler::RecordWrite(Register object, int offset,
     lea(dst, Operand(object, offset));
   } else {
     // array access: calculate the destination address in the same manner as
-    // KeyedStoreIC::GenerateGeneric
-    lea(dst,
-        Operand(object, dst, times_2, Array::kHeaderSize - kHeapObjectTag));
+    // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
+    // into an array of words.
+    lea(dst, Operand(object, dst, times_2,
+                     FixedArray::kHeaderSize - kHeapObjectTag));
   }
   // If we are already generating a shared stub, not inlining the
   // record write code isn't going to save us any memory.
diff --git a/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.cc b/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.cc
index 04a5390..c5d7c05 100644
--- a/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -1073,10 +1073,12 @@ int RegExpMacroAssemblerIA32::CaseInsensitiveCompareUC16(Address byte_offset1,
     unibrow::uchar c1 = substring1[i];
     unibrow::uchar c2 = substring2[i];
     if (c1 != c2) {
-      canonicalize.get(c1, '\0', &c1);
-      if (c1 != c2) {
-        canonicalize.get(c2, '\0', &c2);
-        if (c1 != c2) {
+      unibrow::uchar s1[1] = { c1 };
+      canonicalize.get(c1, '\0', s1);
+      if (s1[0] != c2) {
+        unibrow::uchar s2[1] = { c2 };
+        canonicalize.get(c2, '\0', s2);
+        if (s1[0] != s2[0]) {
           return 0;
         }
       }
diff --git a/V8Binding/v8/src/ia32/register-allocator-ia32-inl.h b/V8Binding/v8/src/ia32/register-allocator-ia32-inl.h
index ddee472..99ae6eb 100644
--- a/V8Binding/v8/src/ia32/register-allocator-ia32-inl.h
+++ b/V8Binding/v8/src/ia32/register-allocator-ia32-inl.h
@@ -49,7 +49,7 @@ bool RegisterAllocator::IsReserved(Register reg) {
 
 int RegisterAllocator::ToNumber(Register reg) {
   ASSERT(reg.is_valid() && !IsReserved(reg));
-  static int numbers[] = {
+  const int kNumbers[] = {
     0,   // eax
     2,   // ecx
     3,   // edx
@@ -59,14 +59,14 @@ int RegisterAllocator::ToNumber(Register reg) {
     -1,  // esi
     4    // edi
   };
-  return numbers[reg.code()];
+  return kNumbers[reg.code()];
 }
 
 
 Register RegisterAllocator::ToRegister(int num) {
   ASSERT(num >= 0 && num < kNumRegisters);
-  static Register registers[] = { eax, ebx, ecx, edx, edi };
-  return registers[num];
+  const Register kRegisters[] = { eax, ebx, ecx, edx, edi };
+  return kRegisters[num];
 }
 
 
diff --git a/V8Binding/v8/src/ia32/stub-cache-ia32.cc b/V8Binding/v8/src/ia32/stub-cache-ia32.cc
index 0a887d5..e47ad1c 100644
--- a/V8Binding/v8/src/ia32/stub-cache-ia32.cc
+++ b/V8Binding/v8/src/ia32/stub-cache-ia32.cc
@@ -266,15 +266,13 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
     __ mov(dst, FieldOperand(src, offset));
   } else {
     // Calculate the offset into the properties array.
-    int offset = index * kPointerSize + Array::kHeaderSize;
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
     __ mov(dst, FieldOperand(src, JSObject::kPropertiesOffset));
     __ mov(dst, FieldOperand(dst, offset));
   }
 }
 
 
-
-
 void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
   ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
   Code* code = NULL;
@@ -349,7 +347,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
     __ RecordWrite(receiver_reg, offset, name_reg, scratch);
   } else {
     // Write to the properties array.
-    int offset = index * kPointerSize + Array::kHeaderSize;
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
     // Get the properties array (optimistically).
     __ mov(scratch, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
     __ mov(FieldOperand(scratch, offset), eax);
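The dictionary probe that GenerateDictionaryLoad emits (see the ic-ia32.cc hunks above) checks at most kProbes = 4 slots, computing each masked index as (hash + i + i * i) & mask and falling through to the miss label, and from there to the runtime, when the name is not found. The C++ sketch below only illustrates that probe schedule; FindEntry and the flat keys array are hypothetical stand-ins rather than the StringDictionary layout, and the real code is emitted as ia32 assembly.

    #include <stdint.h>

    // Illustrative sketch of the quadratic probe schedule used by the
    // inline dictionary load; not V8 source.
    static const int kProbes = 4;

    int FindEntry(const uint32_t* keys,  // hypothetical key table
                  uint32_t capacity,     // must be a power of two
                  uint32_t hash,
                  uint32_t name) {
      uint32_t mask = capacity - 1;
      for (int i = 0; i < kProbes; i++) {
        // Same masked index the generated code computes.
        uint32_t index = (hash + i + i * i) & mask;
        if (keys[index] == name) return static_cast<int>(index);
      }
      return -1;  // not found within kProbes attempts; treated as a miss
    }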
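The comment added in MacroAssembler::RecordWrite, "Multiply a smi by 2 to get an offset into an array of words", and the matching times_2 operands in KeyedStoreIC::GenerateGeneric rely on the ia32 smi encoding: a smi stores its integer value shifted left by one tag bit, so scaling the smi by 2 gives the byte offset of the element in an array of 4-byte words. A minimal sketch of that arithmetic, assuming the ia32 constants kSmiTagSize == 1 and kPointerSize == 4:

    #include <assert.h>

    int main() {
      const int kSmiTagSize = 1;   // ia32: value is shifted left by one tag bit
      const int kPointerSize = 4;  // ia32 word size

      for (int value = 0; value < 1000; value++) {
        int smi = value << kSmiTagSize;  // how the index is represented as a smi
        int byte_offset = smi * 2;       // what the times_2 scale factor computes
        assert(byte_offset == value * kPointerSize);
      }
      return 0;
    }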