115 files changed, 4919 insertions(+), 868 deletions(-)
diff --git a/V8Binding/v8/AUTHORS b/V8Binding/v8/AUTHORS index bfe58a2..5c5ae4e 100644 --- a/V8Binding/v8/AUTHORS +++ b/V8Binding/v8/AUTHORS @@ -17,3 +17,4 @@ Paolo Giarrusso <p.giarrusso@gmail.com> Rafal Krypa <rafal@krypa.net> Rene Rebe <rene@exactcode.de> Ryan Dahl <coldredlemur@gmail.com> +Patrick Gansterer <paroga@paroga.com> diff --git a/V8Binding/v8/ChangeLog b/V8Binding/v8/ChangeLog index 7ea34a5..b07e7cc 100644 --- a/V8Binding/v8/ChangeLog +++ b/V8Binding/v8/ChangeLog @@ -1,3 +1,22 @@ +2009-09-02: Version 1.3.9 + + Optimized stack guard checks on ARM. + + Optimized API operations by inlining more in the API. + + Optimized creation of objects from simple constructor functions. + + Enabled a number of missing optimizations in the 64-bit port. + + Implemented native-code support for regular expressions on ARM. + + Stopped using the 'sahf' instruction on 64-bit machines that do + not support it. + + Fixed a bug in the support for forceful termination of JavaScript + execution. + + 2009-08-26: Version 1.3.8 Changed the handling of idle notifications to allow idle diff --git a/V8Binding/v8/SConstruct b/V8Binding/v8/SConstruct index 4d1792f..ddd0190 100644 --- a/V8Binding/v8/SConstruct +++ b/V8Binding/v8/SConstruct @@ -99,12 +99,7 @@ LIBRARY_FLAGS = { 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING'], 'CPPPATH': [join(root_dir, 'src')], 'regexp:native': { - 'arch:ia32' : { 'CPPDEFINES': ['V8_NATIVE_REGEXP'] - }, - 'arch:x64' : { - 'CPPDEFINES': ['V8_NATIVE_REGEXP'] - } }, 'mode:debug': { 'CPPDEFINES': ['V8_ENABLE_CHECKS'] @@ -263,6 +258,10 @@ V8_EXTRA_FLAGS = { 'all': { 'WARNINGFLAGS': ['/WX', '/wd4355', '/wd4800'] }, + 'library:shared': { + 'CPPDEFINES': ['BUILDING_V8_SHARED'], + 'LIBS': ['winmm', 'ws2_32'] + }, 'arch:ia32': { 'WARNINGFLAGS': ['/W3'] }, diff --git a/V8Binding/v8/include/v8.h b/V8Binding/v8/include/v8.h index 346050d..8cd49f8 100644 --- a/V8Binding/v8/include/v8.h +++ b/V8Binding/v8/include/v8.h @@ -979,8 +979,9 @@ class V8EXPORT String : public Primitive { public: explicit Utf8Value(Handle<v8::Value> obj); ~Utf8Value(); - char* operator*() const { return str_; } - int length() { return length_; } + char* operator*() { return str_; } + const char* operator*() const { return str_; } + int length() const { return length_; } private: char* str_; int length_; @@ -1001,8 +1002,9 @@ class V8EXPORT String : public Primitive { public: explicit AsciiValue(Handle<v8::Value> obj); ~AsciiValue(); - char* operator*() const { return str_; } - int length() { return length_; } + char* operator*() { return str_; } + const char* operator*() const { return str_; } + int length() const { return length_; } private: char* str_; int length_; @@ -1023,7 +1025,8 @@ class V8EXPORT String : public Primitive { explicit Value(Handle<v8::Value> obj); ~Value(); uint16_t* operator*() const { return str_; } - int length() { return length_; } + const uint16_t* operator*() { return str_; } + int length() const { return length_; } private: uint16_t* str_; int length_; diff --git a/V8Binding/v8/src/SConscript b/V8Binding/v8/src/SConscript index 6a38c1a..fee3fab 100755 --- a/V8Binding/v8/src/SConscript +++ b/V8Binding/v8/src/SConscript @@ -63,32 +63,22 @@ SOURCES = { 'arm/register-allocator-arm.cc', 'arm/stub-cache-arm.cc', 'arm/virtual-frame-arm.cc' ], - 'arch:ia32': { - 'all': [ - 'ia32/assembler-ia32.cc', 'ia32/builtins-ia32.cc', 'ia32/cfg-ia32.cc', - 'ia32/codegen-ia32.cc', 'ia32/cpu-ia32.cc', 'ia32/disasm-ia32.cc', - 'ia32/debug-ia32.cc', 'ia32/frames-ia32.cc', 'ia32/ic-ia32.cc', - 
'ia32/jump-target-ia32.cc', 'ia32/macro-assembler-ia32.cc', - 'ia32/register-allocator-ia32.cc', 'ia32/stub-cache-ia32.cc', - 'ia32/virtual-frame-ia32.cc' - ], - 'regexp:native': [ - 'ia32/regexp-macro-assembler-ia32.cc', - ] - }, - 'arch:x64': { - 'all': [ - 'x64/assembler-x64.cc', 'x64/builtins-x64.cc', 'x64/cfg-x64.cc', - 'x64/codegen-x64.cc', 'x64/cpu-x64.cc', 'x64/disasm-x64.cc', - 'x64/debug-x64.cc', 'x64/frames-x64.cc', 'x64/ic-x64.cc', - 'x64/jump-target-x64.cc', 'x64/macro-assembler-x64.cc', - 'x64/register-allocator-x64.cc', - 'x64/stub-cache-x64.cc', 'x64/virtual-frame-x64.cc' - ], - 'regexp:native': [ - 'x64/regexp-macro-assembler-x64.cc' - ] - }, + 'arch:ia32': [ + 'ia32/assembler-ia32.cc', 'ia32/builtins-ia32.cc', 'ia32/cfg-ia32.cc', + 'ia32/codegen-ia32.cc', 'ia32/cpu-ia32.cc', 'ia32/disasm-ia32.cc', + 'ia32/debug-ia32.cc', 'ia32/frames-ia32.cc', 'ia32/ic-ia32.cc', + 'ia32/jump-target-ia32.cc', 'ia32/macro-assembler-ia32.cc', + 'ia32/regexp-macro-assembler-ia32.cc', 'ia32/register-allocator-ia32.cc', + 'ia32/stub-cache-ia32.cc', 'ia32/virtual-frame-ia32.cc' + ], + 'arch:x64': [ + 'x64/assembler-x64.cc', 'x64/builtins-x64.cc', 'x64/cfg-x64.cc', + 'x64/codegen-x64.cc', 'x64/cpu-x64.cc', 'x64/disasm-x64.cc', + 'x64/debug-x64.cc', 'x64/frames-x64.cc', 'x64/ic-x64.cc', + 'x64/jump-target-x64.cc', 'x64/macro-assembler-x64.cc', + 'x64/regexp-macro-assembler-x64.cc', 'x64/register-allocator-x64.cc', + 'x64/stub-cache-x64.cc', 'x64/virtual-frame-x64.cc' + ], 'simulator:arm': ['arm/simulator-arm.cc'], 'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'], 'os:linux': ['platform-linux.cc', 'platform-posix.cc'], diff --git a/V8Binding/v8/src/api.cc b/V8Binding/v8/src/api.cc index d92a0e0..1128d3e 100644 --- a/V8Binding/v8/src/api.cc +++ b/V8Binding/v8/src/api.cc @@ -75,7 +75,7 @@ namespace v8 { i::V8::FatalProcessOutOfMemory(NULL); \ } \ bool call_depth_is_zero = thread_local.CallDepthIsZero(); \ - i::Top::OptionalRescheduleException(call_depth_is_zero, false); \ + i::Top::OptionalRescheduleException(call_depth_is_zero); \ return value; \ } \ } while (false) @@ -427,7 +427,7 @@ void Context::Enter() { i::Handle<i::Context> env = Utils::OpenHandle(this); thread_local.EnterContext(env); - thread_local.SaveContext(i::GlobalHandles::Create(i::Top::context())); + thread_local.SaveContext(i::Top::context()); i::Top::set_context(*env); } @@ -441,9 +441,8 @@ void Context::Exit() { } // Content of 'last_context' could be NULL. - i::Handle<i::Object> last_context = thread_local.RestoreContext(); - i::Top::set_context(static_cast<i::Context*>(*last_context)); - i::GlobalHandles::Destroy(last_context.location()); + i::Context* last_context = thread_local.RestoreContext(); + i::Top::set_context(last_context); } @@ -3700,19 +3699,21 @@ char* HandleScopeImplementer::RestoreThreadHelper(char* storage) { } -void HandleScopeImplementer::Iterate( - ObjectVisitor* v, - List<i::Object**>* blocks, - v8::ImplementationUtilities::HandleScopeData* handle_data) { +void HandleScopeImplementer::IterateThis(ObjectVisitor* v) { // Iterate over all handles in the blocks except for the last. - for (int i = blocks->length() - 2; i >= 0; --i) { - Object** block = blocks->at(i); + for (int i = Blocks()->length() - 2; i >= 0; --i) { + Object** block = Blocks()->at(i); v->VisitPointers(block, &block[kHandleBlockSize]); } // Iterate over live handles in the last block (if any). 
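The v8.h hunk above makes String::Utf8Value, AsciiValue and Value const-correct: operator*() gains a const overload and length() becomes const. A minimal caller-side sketch of what that enables (hypothetical helper against the 1.3.x API shown in the hunk):

    #include <stdio.h>
    #include <v8.h>

    // With the const overloads added above, a Utf8Value can now be read
    // through a const reference; previously neither operator* nor length()
    // was callable on a const object.
    static void PrintUtf8(const v8::String::Utf8Value& value) {
      const char* str = *value;               // const operator* from the patch
      printf("%.*s\n", value.length(), str);  // length() is now const
    }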
- if (!blocks->is_empty()) { - v->VisitPointers(blocks->last(), handle_data->next); + if (!Blocks()->is_empty()) { + v->VisitPointers(Blocks()->last(), handle_scope_data_.next); + } + + if (!saved_contexts_.is_empty()) { + Object** start = reinterpret_cast<Object**>(&saved_contexts_.first()); + v->VisitPointers(start, start + saved_contexts_.length()); } } @@ -3720,18 +3721,15 @@ void HandleScopeImplementer::Iterate( void HandleScopeImplementer::Iterate(ObjectVisitor* v) { v8::ImplementationUtilities::HandleScopeData* current = v8::ImplementationUtilities::CurrentHandleScope(); - Iterate(v, thread_local.Blocks(), current); + thread_local.handle_scope_data_ = *current; + thread_local.IterateThis(v); } char* HandleScopeImplementer::Iterate(ObjectVisitor* v, char* storage) { HandleScopeImplementer* thread_local = reinterpret_cast<HandleScopeImplementer*>(storage); - List<internal::Object**>* blocks_of_archived_thread = thread_local->Blocks(); - v8::ImplementationUtilities::HandleScopeData* handle_data_of_archived_thread = - &thread_local->handle_scope_data_; - Iterate(v, blocks_of_archived_thread, handle_data_of_archived_thread); - + thread_local->IterateThis(v); return storage + ArchiveSpacePerThread(); } diff --git a/V8Binding/v8/src/api.h b/V8Binding/v8/src/api.h index ca8f523..9ae6307 100644 --- a/V8Binding/v8/src/api.h +++ b/V8Binding/v8/src/api.h @@ -352,8 +352,8 @@ class HandleScopeImplementer { // contexts have been entered. inline Handle<Object> LastEnteredContext(); - inline void SaveContext(Handle<Object> context); - inline Handle<Object> RestoreContext(); + inline void SaveContext(Context* context); + inline Context* RestoreContext(); inline bool HasSavedContexts(); inline List<internal::Object**>* Blocks() { return &blocks; } @@ -368,14 +368,12 @@ class HandleScopeImplementer { // Used as a stack to keep track of entered contexts. List<Handle<Object> > entered_contexts_; // Used as a stack to keep track of saved contexts. - List<Handle<Object> > saved_contexts_; + List<Context*> saved_contexts_; bool ignore_out_of_memory; // This is only used for threading support. 
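With api.h now keeping saved contexts as raw Context* values in a List<Context*> (instead of one global handle per Enter/Exit), the iteration added above has to expose the list's backing store to the GC visitor directly, or the saved contexts could be collected out from under the stack. A generic sketch of that pattern, with illustrative names rather than the real V8 types:

    #include <vector>

    struct ObjectVisitor {
      virtual void VisitPointers(void** start, void** end) = 0;
      virtual ~ObjectVisitor() {}
    };

    template <typename T>
    struct RootStack {
      std::vector<T*> items;
      // Hand the raw pointer array to the visitor as a block of GC roots,
      // just as the saved_contexts_ block above does with
      // &saved_contexts_.first().
      void Iterate(ObjectVisitor* v) {
        if (!items.empty()) {
          void** start = reinterpret_cast<void**>(&items.front());
          v->VisitPointers(start, start + items.size());
        }
      }
    };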
v8::ImplementationUtilities::HandleScopeData handle_scope_data_; - static void Iterate(ObjectVisitor* v, - List<internal::Object**>* blocks, - v8::ImplementationUtilities::HandleScopeData* handle_data); + void IterateThis(ObjectVisitor* v); char* RestoreThreadHelper(char* from); char* ArchiveThreadHelper(char* to); @@ -386,12 +384,12 @@ class HandleScopeImplementer { static const int kHandleBlockSize = v8::internal::KB - 2; // fit in one page -void HandleScopeImplementer::SaveContext(Handle<Object> context) { +void HandleScopeImplementer::SaveContext(Context* context) { saved_contexts_.Add(context); } -Handle<Object> HandleScopeImplementer::RestoreContext() { +Context* HandleScopeImplementer::RestoreContext() { return saved_contexts_.RemoveLast(); } diff --git a/V8Binding/v8/src/arm/assembler-arm-inl.h b/V8Binding/v8/src/arm/assembler-arm-inl.h index 4dda7ec..cb5faa2 100644 --- a/V8Binding/v8/src/arm/assembler-arm-inl.h +++ b/V8Binding/v8/src/arm/assembler-arm-inl.h @@ -204,7 +204,7 @@ void Assembler::CheckBuffer() { if (buffer_space() <= kGap) { GrowBuffer(); } - if (pc_offset() > next_buffer_check_) { + if (pc_offset() >= next_buffer_check_) { CheckConstPool(false, true); } } diff --git a/V8Binding/v8/src/arm/assembler-arm.cc b/V8Binding/v8/src/arm/assembler-arm.cc index 3ed99f9..8bd06db 100644 --- a/V8Binding/v8/src/arm/assembler-arm.cc +++ b/V8Binding/v8/src/arm/assembler-arm.cc @@ -329,19 +329,30 @@ const int kEndOfChain = -4; int Assembler::target_at(int pos) { Instr instr = instr_at(pos); + if ((instr & ~Imm24Mask) == 0) { + // Emitted label constant, not part of a branch. + return instr - (Code::kHeaderSize - kHeapObjectTag); + } ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24 int imm26 = ((instr & Imm24Mask) << 8) >> 6; if ((instr & CondMask) == nv && (instr & B24) != 0) // blx uses bit 24 to encode bit 2 of imm26 imm26 += 2; - return pos + 8 + imm26; + return pos + kPcLoadDelta + imm26; } void Assembler::target_at_put(int pos, int target_pos) { - int imm26 = target_pos - pos - 8; Instr instr = instr_at(pos); + if ((instr & ~Imm24Mask) == 0) { + ASSERT(target_pos == kEndOfChain || target_pos >= 0); + // Emitted label constant, not part of a branch. + // Make label relative to Code* of generated Code object. 
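target_at() and target_at_put() above overload the instruction slot: a word whose bits outside Imm24Mask are all zero is an emitted label constant rather than a branch, and it stores the label position relative to the Code header. A round-trip check of that encoding, using assumed values for the two constants:

    #include <cassert>

    const int kCodeHeaderSize = 32;  // assumed stand-in for Code::kHeaderSize
    const int kHeapObjectTag = 1;    // heap-object tag from the patch

    // target_at_put(): bias the label position so it is relative to the
    // tagged Code* pointer of the generated code object.
    int EncodeLabelConstant(int target_pos) {
      return target_pos + (kCodeHeaderSize - kHeapObjectTag);
    }

    // target_at(): recover the original position from the emitted word.
    int DecodeLabelConstant(int instr) {
      return instr - (kCodeHeaderSize - kHeapObjectTag);
    }

    int main() {
      assert(DecodeLabelConstant(EncodeLabelConstant(100)) == 100);
      return 0;
    }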
+ instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag)); + return; + } + int imm26 = target_pos - (pos + kPcLoadDelta); ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24 if ((instr & CondMask) == nv) { // blx uses bit 24 to encode bit 2 of imm26 @@ -368,41 +379,45 @@ void Assembler::print(Label* L) { while (l.is_linked()) { PrintF("@ %d ", l.pos()); Instr instr = instr_at(l.pos()); - ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx - int cond = instr & CondMask; - const char* b; - const char* c; - if (cond == nv) { - b = "blx"; - c = ""; + if ((instr & ~Imm24Mask) == 0) { + PrintF("value\n"); } else { - if ((instr & B24) != 0) - b = "bl"; - else - b = "b"; - - switch (cond) { - case eq: c = "eq"; break; - case ne: c = "ne"; break; - case hs: c = "hs"; break; - case lo: c = "lo"; break; - case mi: c = "mi"; break; - case pl: c = "pl"; break; - case vs: c = "vs"; break; - case vc: c = "vc"; break; - case hi: c = "hi"; break; - case ls: c = "ls"; break; - case ge: c = "ge"; break; - case lt: c = "lt"; break; - case gt: c = "gt"; break; - case le: c = "le"; break; - case al: c = ""; break; - default: - c = ""; - UNREACHABLE(); + ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx + int cond = instr & CondMask; + const char* b; + const char* c; + if (cond == nv) { + b = "blx"; + c = ""; + } else { + if ((instr & B24) != 0) + b = "bl"; + else + b = "b"; + + switch (cond) { + case eq: c = "eq"; break; + case ne: c = "ne"; break; + case hs: c = "hs"; break; + case lo: c = "lo"; break; + case mi: c = "mi"; break; + case pl: c = "pl"; break; + case vs: c = "vs"; break; + case vc: c = "vc"; break; + case hi: c = "hi"; break; + case ls: c = "ls"; break; + case ge: c = "ge"; break; + case lt: c = "lt"; break; + case gt: c = "gt"; break; + case le: c = "le"; break; + case al: c = ""; break; + default: + c = ""; + UNREACHABLE(); + } } + PrintF("%s%s\n", b, c); } - PrintF("%s%s\n", b, c); next(&l); } } else { @@ -670,8 +685,23 @@ int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) { // Block the emission of the constant pool, since the branch instruction must // be emitted at the pc offset recorded by the label BlockConstPoolBefore(pc_offset() + kInstrSize); + return target_pos - (pc_offset() + kPcLoadDelta); +} - return target_pos - pc_offset() - 8; + +void Assembler::label_at_put(Label* L, int at_offset) { + int target_pos; + if (L->is_bound()) { + target_pos = L->pos(); + } else { + if (L->is_linked()) { + target_pos = L->pos(); // L's link + } else { + target_pos = kEndOfChain; + } + L->link_to(at_offset); + instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag)); + } } diff --git a/V8Binding/v8/src/arm/assembler-arm.h b/V8Binding/v8/src/arm/assembler-arm.h index b3ebb8b..63f0447 100644 --- a/V8Binding/v8/src/arm/assembler-arm.h +++ b/V8Binding/v8/src/arm/assembler-arm.h @@ -39,7 +39,7 @@ #ifndef V8_ARM_ASSEMBLER_ARM_H_ #define V8_ARM_ASSEMBLER_ARM_H_ - +#include <stdio.h> #include "assembler.h" namespace v8 { @@ -165,9 +165,10 @@ enum Coprocessor { enum Condition { eq = 0 << 28, // Z set equal. ne = 1 << 28, // Z clear not equal. - cs = 2 << 28, // C set unsigned higher or same. + nz = 1 << 28, // Z clear not zero. + cs = 2 << 28, // C set carry set. hs = 2 << 28, // C set unsigned higher or same. - cc = 3 << 28, // C clear unsigned lower. + cc = 3 << 28, // C clear carry clear. lo = 3 << 28, // C clear unsigned lower. mi = 4 << 28, // N set negative. pl = 5 << 28, // N clear positive or zero. 
@@ -420,6 +421,10 @@ class Assembler : public Malloced { // Manages the jump elimination optimization if the second parameter is true. int branch_offset(Label* L, bool jump_elimination_allowed); + // Puts a labels target address at the given position. + // The high 8 bits are set to zero. + void label_at_put(Label* L, int at_offset); + // Return the address in the constant pool of the code target address used by // the branch/call instruction at pc. INLINE(static Address target_address_address_at(Address pc)); @@ -435,6 +440,10 @@ class Assembler : public Malloced { // to jump to. static const int kPatchReturnSequenceAddressOffset = 1; + // Difference between address of current opcode and value read from pc + // register. + static const int kPcLoadDelta = 8; + // --------------------------------------------------------------------------- // Code generation @@ -784,6 +793,8 @@ class Assembler : public Malloced { // Record reloc info for current pc_ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0); + + friend class RegExpMacroAssemblerARM; }; } } // namespace v8::internal diff --git a/V8Binding/v8/src/arm/builtins-arm.cc b/V8Binding/v8/src/arm/builtins-arm.cc index daf2378..920110f 100644 --- a/V8Binding/v8/src/arm/builtins-arm.cc +++ b/V8Binding/v8/src/arm/builtins-arm.cc @@ -1,4 +1,4 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Copyright 2006-2009 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -88,23 +88,200 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // Enter a construct frame. __ EnterConstructFrame(); - // Preserve the two incoming parameters + // Preserve the two incoming parameters on the stack. __ mov(r0, Operand(r0, LSL, kSmiTagSize)); - __ push(r0); // smi-tagged arguments count - __ push(r1); // constructor function + __ push(r0); // Smi-tagged arguments count. + __ push(r1); // Constructor function. + + // Use r7 for holding undefined which is used in several places below. + __ LoadRoot(r7, Heap::kUndefinedValueRootIndex); + + // Try to allocate the object without transitioning into C code. If any of the + // preconditions is not met, the code bails out to the runtime call. + Label rt_call, allocated; + if (FLAG_inline_new) { + Label undo_allocation; +#ifdef ENABLE_DEBUGGER_SUPPORT + ExternalReference debug_step_in_fp = + ExternalReference::debug_step_in_fp_address(); + __ mov(r2, Operand(debug_step_in_fp)); + __ ldr(r2, MemOperand(r2)); + __ tst(r2, r2); + __ b(nz, &rt_call); +#endif + + // Load the initial map and verify that it is in fact a map. + // r1: constructor function + // r7: undefined + __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); + __ tst(r2, Operand(kSmiTagMask)); + __ b(eq, &rt_call); + __ CompareObjectType(r2, r3, r4, MAP_TYPE); + __ b(ne, &rt_call); + + // Check that the constructor is not constructing a JSFunction (see comments + // in Runtime_NewObject in runtime.cc). In which case the initial map's + // instance type would be JS_FUNCTION_TYPE. + // r1: constructor function + // r2: initial map + // r7: undefined + __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE); + __ b(eq, &rt_call); + + // Now allocate the JSObject on the heap. 
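kPcLoadDelta, introduced above, names the ARM pipeline quirk that reading pc yields the address of the current instruction plus 8. The offset formula shared by branch_offset() and target_at() then follows directly; as plain arithmetic:

    #include <cassert>

    const int kPcLoadDelta = 8;  // pc reads two instructions ahead on ARM

    // Immediate encoded by a branch at byte offset 'pos' whose target is at
    // byte offset 'target_pos' (the target_pos - (pos + kPcLoadDelta) form
    // used in the patch).
    int BranchOffset(int pos, int target_pos) {
      return target_pos - (pos + kPcLoadDelta);
    }

    int main() {
      // A branch at offset 0 to a target at offset 8 encodes 0: the implicit
      // pc advance alone covers the distance.
      assert(BranchOffset(0, 8) == 0);
      return 0;
    }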
+ // r1: constructor function + // r2: initial map + // r7: undefined + __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset)); + __ AllocateObjectInNewSpace(r3, r4, r5, r6, &rt_call, NO_ALLOCATION_FLAGS); + + // Allocated the JSObject, now initialize the fields. Map is set to initial + // map and properties and elements are set to empty fixed array. + // r1: constructor function + // r2: initial map + // r3: object size + // r4: JSObject (not tagged) + // r7: undefined + __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex); + __ mov(r5, r4); + ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); + __ str(r2, MemOperand(r5, kPointerSize, PostIndex)); + ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset); + __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); + ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset); + __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); + + // Fill all the in-object properties with undefined. + // r1: constructor function + // r2: initial map + // r3: object size (in words) + // r4: JSObject (not tagged) + // r5: First in-object property of JSObject (not tagged) + // r7: undefined + __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object. + ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize); + { Label loop, entry; + __ b(&entry); + __ bind(&loop); + __ str(r7, MemOperand(r5, kPointerSize, PostIndex)); + __ bind(&entry); + __ cmp(r5, Operand(r6)); + __ b(lt, &loop); + } + + // Add the object tag to make the JSObject real, so that we can continue and + // jump into the continuation code at any time from now on. Any failures + // need to undo the allocation, so that the heap is in a consistent state + // and verifiable. + __ add(r4, r4, Operand(kHeapObjectTag)); + + // Check if a non-empty properties array is needed. Continue with allocated + // object if not fall through to runtime call if it is. + // r1: constructor function + // r4: JSObject + // r5: start of next object (not tagged) + // r7: undefined + __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset)); + // The field instance sizes contains both pre-allocated property fields and + // in-object properties. + __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset)); + __ and_(r6, + r0, + Operand(0x000000FF << Map::kPreAllocatedPropertyFieldsByte * 8)); + __ add(r3, r3, Operand(r6, LSR, Map::kPreAllocatedPropertyFieldsByte * 8)); + __ and_(r6, r0, Operand(0x000000FF << Map::kInObjectPropertiesByte * 8)); + __ sub(r3, r3, Operand(r6, LSR, Map::kInObjectPropertiesByte * 8), SetCC); + + // Done if no extra properties are to be allocated. + __ b(eq, &allocated); + __ Assert(pl, "Property allocation count failed."); + + // Scale the number of elements by pointer size and add the header for + // FixedArrays to the start of the next object calculation from above. + // r1: constructor + // r3: number of elements in properties array + // r4: JSObject + // r5: start of next object + // r7: undefined + __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize)); + __ AllocateObjectInNewSpace(r0, + r5, + r6, + r2, + &undo_allocation, + RESULT_CONTAINS_TOP); + + // Initialize the FixedArray. 
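The property-count computation above pulls two byte-wide fields (pre-allocated property fields and in-object properties) out of the map's packed instance-sizes word with an AND followed by a logical shift. The same extraction in C++, with the byte indices as assumptions for the sketch:

    #include <cstdint>

    const int kPreAllocatedPropertyFieldsByte = 2;  // assumed byte index
    const int kInObjectPropertiesByte = 3;          // assumed byte index

    // Mirrors the and_()/LSR pair in the stub: isolate the byte in place,
    // then shift it down to a small integer.
    int ByteField(uint32_t instance_sizes, int byte_index) {
      uint32_t masked = instance_sizes & (0xFFu << (byte_index * 8));
      return static_cast<int>(masked >> (byte_index * 8));
    }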
+ // r1: constructor + // r3: number of elements in properties array + // r4: JSObject + // r5: FixedArray (not tagged) + // r7: undefined + __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex); + __ mov(r2, r5); + ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); + __ str(r6, MemOperand(r2, kPointerSize, PostIndex)); + ASSERT_EQ(1 * kPointerSize, Array::kLengthOffset); + __ str(r3, MemOperand(r2, kPointerSize, PostIndex)); + + // Initialize the fields to undefined. + // r1: constructor function + // r2: First element of FixedArray (not tagged) + // r3: number of elements in properties array + // r4: JSObject + // r5: FixedArray (not tagged) + // r7: undefined + __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object. + ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize); + { Label loop, entry; + __ b(&entry); + __ bind(&loop); + __ str(r7, MemOperand(r2, kPointerSize, PostIndex)); + __ bind(&entry); + __ cmp(r2, Operand(r6)); + __ b(lt, &loop); + } + + // Store the initialized FixedArray into the properties field of + // the JSObject + // r1: constructor function + // r4: JSObject + // r5: FixedArray (not tagged) + __ add(r5, r5, Operand(kHeapObjectTag)); // Add the heap tag. + __ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset)); + + // Continue with JSObject being successfully allocated + // r1: constructor function + // r4: JSObject + __ jmp(&allocated); + + // Undo the setting of the new top so that the heap is verifiable. For + // example, the map's unused properties potentially do not match the + // allocated objects unused properties. + // r4: JSObject (previous new top) + __ bind(&undo_allocation); + __ UndoAllocationInNewSpace(r4, r5); + } - // Allocate the new receiver object. + // Allocate the new receiver object using the runtime call. + // r1: constructor function + __ bind(&rt_call); __ push(r1); // argument for Runtime_NewObject __ CallRuntime(Runtime::kNewObject, 1); - __ push(r0); // save the receiver + __ mov(r4, r0); + + // Receiver for constructor call allocated. + // r4: JSObject + __ bind(&allocated); + __ push(r4); // Push the function and the allocated receiver from the stack. // sp[0]: receiver (newly allocated object) // sp[1]: constructor function // sp[2]: number of arguments (smi-tagged) __ ldr(r1, MemOperand(sp, kPointerSize)); - __ push(r1); // function - __ push(r0); // receiver + __ push(r1); // Constructor function. + __ push(r4); // Receiver. // Reload the number of arguments from the stack. // r1: constructor function @@ -194,6 +371,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ LeaveConstructFrame(); __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1)); __ add(sp, sp, Operand(kPointerSize)); + __ IncrementCounter(&Counters::constructed_objects, 1, r1, r2); __ Jump(lr); } diff --git a/V8Binding/v8/src/arm/codegen-arm.cc b/V8Binding/v8/src/arm/codegen-arm.cc index 5c8b777..4c87e06 100644 --- a/V8Binding/v8/src/arm/codegen-arm.cc +++ b/V8Binding/v8/src/arm/codegen-arm.cc @@ -176,7 +176,8 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) { } #endif - // Allocate space for locals and initialize them. + // Allocate space for locals and initialize them. This also checks + // for stack overflow. frame_->AllocateStackSlots(); // Initialize the function return target after the locals are set // up, because it needs the expected frame height from the frame. @@ -278,7 +279,6 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) { frame_->CallRuntime(Runtime::kTraceEnter, 0); // Ignore the return value. 
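Both fill loops in the construct stub above use the jump-to-entry shape: branch straight to the comparison, so a zero-length range performs no store and the body needs no separate guard. The equivalent C++ loop:

    // Test-first fill, matching the b(&entry)/str/cmp/b(lt) pattern in the
    // stub: 'field' plays r5 (or r2), 'end' plays r6, 'value' plays r7.
    void FillWithValue(void** field, void** end, void* value) {
      while (field < end) {
        *field++ = value;  // str with post-index
      }
    }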
} - CheckStack(); // Compile the body of the function in a vanilla state. Don't // bother compiling all the code if the scope has an illegal @@ -1111,9 +1111,18 @@ void CodeGenerator::CheckStack() { if (FLAG_check_stack) { Comment cmnt(masm_, "[ check stack"); __ LoadRoot(ip, Heap::kStackLimitRootIndex); - __ cmp(sp, Operand(ip)); + // Put the lr setup instruction in the delay slot. The 'sizeof(Instr)' is + // added to the implicit 8 byte offset that always applies to operations + // with pc and gives a return address 12 bytes down. + masm_->add(lr, pc, Operand(sizeof(Instr))); + masm_->cmp(sp, Operand(ip)); StackCheckStub stub; - __ CallStub(&stub, lo); // Call the stub if lower. + // Call the stub if lower. + masm_->mov(pc, + Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()), + RelocInfo::CODE_TARGET), + LeaveCC, + lo); } } @@ -4936,36 +4945,21 @@ void CompareStub::Generate(MacroAssembler* masm) { static void AllocateHeapNumber( MacroAssembler* masm, Label* need_gc, // Jump here if young space is full. - Register result_reg, // The tagged address of the new heap number. - Register allocation_top_addr_reg, // A scratch register. + Register result, // The tagged address of the new heap number. + Register scratch1, // A scratch register. Register scratch2) { // Another scratch register. - ExternalReference allocation_top = - ExternalReference::new_space_allocation_top_address(); - ExternalReference allocation_limit = - ExternalReference::new_space_allocation_limit_address(); - - // allocat := the address of the allocation top variable. - __ mov(allocation_top_addr_reg, Operand(allocation_top)); - // result_reg := the old allocation top. - __ ldr(result_reg, MemOperand(allocation_top_addr_reg)); - // scratch2 := the address of the allocation limit. - __ mov(scratch2, Operand(allocation_limit)); - // scratch2 := the allocation limit. - __ ldr(scratch2, MemOperand(scratch2)); - // result_reg := the new allocation top. - __ add(result_reg, result_reg, Operand(HeapNumber::kSize)); - // Compare new new allocation top and limit. - __ cmp(result_reg, Operand(scratch2)); - // Branch if out of space in young generation. - __ b(hi, need_gc); - // Store new allocation top. - __ str(result_reg, MemOperand(allocation_top_addr_reg)); // store new top - // Tag and adjust back to start of new object. - __ sub(result_reg, result_reg, Operand(HeapNumber::kSize - kHeapObjectTag)); - // Get heap number map into scratch2. - __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex); - // Store heap number map in new object. - __ str(scratch2, FieldMemOperand(result_reg, HeapObject::kMapOffset)); + // Allocate an object in the heap for the heap number and tag it as a heap + // object. + __ AllocateObjectInNewSpace(HeapNumber::kSize / kPointerSize, + result, + scratch1, + scratch2, + need_gc, + TAG_OBJECT); + + // Get heap number map and store it in the allocated object. 
+ __ LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex); + __ str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset)); } @@ -5673,9 +5667,9 @@ void UnarySubStub::Generate(MacroAssembler* masm) { __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); } else { AllocateHeapNumber(masm, &slow, r1, r2, r3); - __ ldr(r2, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); - __ str(r2, FieldMemOperand(r1, HeapNumber::kMantissaOffset)); + __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); + __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset)); __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset)); __ mov(r0, Operand(r1)); diff --git a/V8Binding/v8/src/arm/disasm-arm.cc b/V8Binding/v8/src/arm/disasm-arm.cc index 0abe35b..2638409 100644 --- a/V8Binding/v8/src/arm/disasm-arm.cc +++ b/V8Binding/v8/src/arm/disasm-arm.cc @@ -119,6 +119,7 @@ class Decoder { void DecodeType5(Instr* instr); void DecodeType6(Instr* instr); void DecodeType7(Instr* instr); + void DecodeUnconditional(Instr* instr); const disasm::NameConverter& converter_; v8::internal::Vector<char> out_buffer_; @@ -774,6 +775,67 @@ void Decoder::DecodeType7(Instr* instr) { } +void Decoder::DecodeUnconditional(Instr* instr) { + if (instr->Bits(7, 4) == 0xB && instr->Bits(27, 25) == 0 && instr->HasL()) { + Format(instr, "'memop'h'pu 'rd, "); + bool immediate = instr->HasB(); + switch (instr->PUField()) { + case 0: { + // Post index, negative. + if (instr->HasW()) { + Unknown(instr); + break; + } + if (immediate) { + Format(instr, "['rn], #-'imm12"); + } else { + Format(instr, "['rn], -'rm"); + } + break; + } + case 1: { + // Post index, positive. + if (instr->HasW()) { + Unknown(instr); + break; + } + if (immediate) { + Format(instr, "['rn], #+'imm12"); + } else { + Format(instr, "['rn], +'rm"); + } + break; + } + case 2: { + // Pre index or offset, negative. + if (immediate) { + Format(instr, "['rn, #-'imm12]'w"); + } else { + Format(instr, "['rn, -'rm]'w"); + } + break; + } + case 3: { + // Pre index or offset, positive. + if (immediate) { + Format(instr, "['rn, #+'imm12]'w"); + } else { + Format(instr, "['rn, +'rm]'w"); + } + break; + } + default: { + // The PU field is a 2-bit field. + UNREACHABLE(); + break; + } + } + return; + } + Format(instr, "break 'msg"); +} + + // Disassemble the instruction at *instr_ptr into the output buffer. int Decoder::InstructionDecode(byte* instr_ptr) { Instr* instr = Instr::At(instr_ptr); @@ -782,7 +844,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) { "%08x ", instr->InstructionBits()); if (instr->ConditionField() == special_condition) { - Format(instr, "break 'msg"); + DecodeUnconditional(instr); return Instr::kInstrSize; } switch (instr->TypeField()) { diff --git a/V8Binding/v8/src/arm/macro-assembler-arm.cc b/V8Binding/v8/src/arm/macro-assembler-arm.cc index 65c2a3e..c77209e 100644 --- a/V8Binding/v8/src/arm/macro-assembler-arm.cc +++ b/V8Binding/v8/src/arm/macro-assembler-arm.cc @@ -768,11 +768,139 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, } +void MacroAssembler::AllocateObjectInNewSpace(int object_size, + Register result, + Register scratch1, + Register scratch2, + Label* gc_required, + AllocationFlags flags) { + ASSERT(!result.is(scratch1)); + ASSERT(!scratch1.is(scratch2)); + + // Load address of new object into result and allocation top address into + // scratch1. 
+ ExternalReference new_space_allocation_top = + ExternalReference::new_space_allocation_top_address(); + mov(scratch1, Operand(new_space_allocation_top)); + if ((flags & RESULT_CONTAINS_TOP) == 0) { + ldr(result, MemOperand(scratch1)); + } else { +#ifdef DEBUG + // Assert that result actually contains top on entry. scratch2 is used + // immediately below so this use of scratch2 does not cause difference with + // respect to register content between debug and release mode. + ldr(scratch2, MemOperand(scratch1)); + cmp(result, scratch2); + Check(eq, "Unexpected allocation top"); +#endif + } + + // Calculate new top and bail out if new space is exhausted. Use result + // to calculate the new top. + ExternalReference new_space_allocation_limit = + ExternalReference::new_space_allocation_limit_address(); + mov(scratch2, Operand(new_space_allocation_limit)); + ldr(scratch2, MemOperand(scratch2)); + add(result, result, Operand(object_size * kPointerSize)); + cmp(result, Operand(scratch2)); + b(hi, gc_required); + + // Update allocation top. result temporarily holds the new top, + str(result, MemOperand(scratch1)); + + // Tag and adjust back to start of new object. + if ((flags & TAG_OBJECT) != 0) { + sub(result, result, Operand((object_size * kPointerSize) - + kHeapObjectTag)); + } else { + sub(result, result, Operand(object_size * kPointerSize)); + } +} + + +void MacroAssembler::AllocateObjectInNewSpace(Register object_size, + Register result, + Register scratch1, + Register scratch2, + Label* gc_required, + AllocationFlags flags) { + ASSERT(!result.is(scratch1)); + ASSERT(!scratch1.is(scratch2)); + + // Load address of new object into result and allocation top address into + // scratch1. + ExternalReference new_space_allocation_top = + ExternalReference::new_space_allocation_top_address(); + mov(scratch1, Operand(new_space_allocation_top)); + if ((flags & RESULT_CONTAINS_TOP) == 0) { + ldr(result, MemOperand(scratch1)); + } else { +#ifdef DEBUG + // Assert that result actually contains top on entry. scratch2 is used + // immediately below so this use of scratch2 does not cause difference with + // respect to register content between debug and release mode. + ldr(scratch2, MemOperand(scratch1)); + cmp(result, scratch2); + Check(eq, "Unexpected allocation top"); +#endif + } + + // Calculate new top and bail out if new space is exhausted. Use result + // to calculate the new top. Object size is in words so a shift is required to + // get the number of bytes + ExternalReference new_space_allocation_limit = + ExternalReference::new_space_allocation_limit_address(); + mov(scratch2, Operand(new_space_allocation_limit)); + ldr(scratch2, MemOperand(scratch2)); + add(result, result, Operand(object_size, LSL, kPointerSizeLog2)); + cmp(result, Operand(scratch2)); + b(hi, gc_required); + + // Update allocation top. result temporarily holds the new top, + str(result, MemOperand(scratch1)); + + // Adjust back to start of new object. + sub(result, result, Operand(object_size, LSL, kPointerSizeLog2)); + + // Tag object if requested. + if ((flags & TAG_OBJECT) != 0) { + add(result, result, Operand(kHeapObjectTag)); + } +} + + +void MacroAssembler::UndoAllocationInNewSpace(Register object, + Register scratch) { + ExternalReference new_space_allocation_top = + ExternalReference::new_space_allocation_top_address(); + + // Make sure the object has no tag before resetting top. + and_(object, object, Operand(~kHeapObjectTagMask)); +#ifdef DEBUG + // Check that the object un-allocated is below the current top. 
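Both AllocateObjectInNewSpace() variants above implement the classic bump-pointer fast path: load the current top, add the object size, compare against the limit, branch to the gc_required label on overflow, and publish the new top. A minimal sketch of that logic (single space, no allocation flags, illustrative names):

    #include <cstddef>

    char* allocation_top;    // new-space allocation top
    char* allocation_limit;  // new-space allocation limit

    // Returns the untagged start of the new object, or nullptr when the
    // caller must garbage-collect first (the stub's gc_required branch).
    void* AllocateInNewSpace(size_t size_in_bytes) {
      char* result = allocation_top;
      char* new_top = result + size_in_bytes;
      if (new_top > allocation_limit) return nullptr;  // b(hi, gc_required)
      allocation_top = new_top;                        // publish the new top
      return result;
    }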
+ mov(scratch, Operand(new_space_allocation_top)); + ldr(scratch, MemOperand(scratch)); + cmp(object, scratch); + Check(lt, "Undo allocation of non allocated memory"); +#endif + // Write the address of the object to un-allocate as the current top. + mov(scratch, Operand(new_space_allocation_top)); + str(object, MemOperand(scratch)); +} + + void MacroAssembler::CompareObjectType(Register function, Register map, Register type_reg, InstanceType type) { ldr(map, FieldMemOperand(function, HeapObject::kMapOffset)); + CompareInstanceType(map, type_reg, type); +} + + +void MacroAssembler::CompareInstanceType(Register map, + Register type_reg, + InstanceType type) { ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); cmp(type_reg, Operand(type)); } @@ -1022,4 +1150,5 @@ void MacroAssembler::Abort(const char* msg) { // will not return here } + } } // namespace v8::internal diff --git a/V8Binding/v8/src/arm/macro-assembler-arm.h b/V8Binding/v8/src/arm/macro-assembler-arm.h index e4758cc..ad4b174 100644 --- a/V8Binding/v8/src/arm/macro-assembler-arm.h +++ b/V8Binding/v8/src/arm/macro-assembler-arm.h @@ -1,4 +1,4 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Copyright 2006-2009 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -38,34 +38,11 @@ namespace internal { const Register cp = { 8 }; // JavaScript context pointer -// Helper types to make boolean flag easier to read at call-site. -enum InvokeFlag { - CALL_FUNCTION, - JUMP_FUNCTION -}; - enum InvokeJSFlags { CALL_JS, JUMP_JS }; -enum ExitJSFlag { - RETURN, - DO_NOT_RETURN -}; - -enum CodeLocation { - IN_JAVASCRIPT, - IN_JS_ENTRY, - IN_C_ENTRY -}; - -enum HandlerType { - TRY_CATCH_HANDLER, - TRY_FINALLY_HANDLER, - JS_ENTRY_HANDLER -}; - // MacroAssembler implements a collection of frequently used macros. class MacroAssembler: public Assembler { @@ -188,6 +165,32 @@ class MacroAssembler: public Assembler { // --------------------------------------------------------------------------- + // Allocation support + + // Allocate an object in new space. The object_size is specified in words (not + // bytes). If the new space is exhausted control continues at the gc_required + // label. The allocated object is returned in result. If the flag + // tag_allocated_object is true the result is tagged as as a heap object. + void AllocateObjectInNewSpace(int object_size, + Register result, + Register scratch1, + Register scratch2, + Label* gc_required, + AllocationFlags flags); + void AllocateObjectInNewSpace(Register object_size, + Register result, + Register scratch1, + Register scratch2, + Label* gc_required, + AllocationFlags flags); + + // Undo allocation in new space. The object passed and objects allocated after + // it will no longer be allocated. The caller must make sure that no pointers + // are left to the object(s) no longer allocated as they would be invalid when + // allocation is undone. + void UndoAllocationInNewSpace(Register object, Register scratch); + + // --------------------------------------------------------------------------- // Support functions. // Try to get function prototype of a function and puts the value in @@ -206,12 +209,21 @@ class MacroAssembler: public Assembler { // It leaves the map in the map register (unless the type_reg and map register // are the same register). 
It leaves the heap object in the heap_object // register unless the heap_object register is the same register as one of the - // other // registers. + // other registers. void CompareObjectType(Register heap_object, Register map, Register type_reg, InstanceType type); + // Compare instance type in a map. map contains a valid map object whose + // object type should be compared with the given type. This both + // sets the flags and leaves the object type in the type_reg register. It + // leaves the heap object in the heap_object register unless the heap_object + // register is the same register as type_reg. + void CompareInstanceType(Register map, + Register type_reg, + InstanceType type); + inline void BranchOnSmi(Register value, Label* smi_label) { tst(value, Operand(kSmiTagMask)); b(eq, smi_label); diff --git a/V8Binding/v8/src/arm/regexp-macro-assembler-arm.cc b/V8Binding/v8/src/arm/regexp-macro-assembler-arm.cc index 78ebc7e..2e75a61 100644 --- a/V8Binding/v8/src/arm/regexp-macro-assembler-arm.cc +++ b/V8Binding/v8/src/arm/regexp-macro-assembler-arm.cc @@ -1,4 +1,4 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Copyright 2009 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -26,19 +26,1205 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "v8.h" +#include "unicode.h" +#include "log.h" #include "ast.h" +#include "regexp-stack.h" +#include "macro-assembler.h" #include "regexp-macro-assembler.h" +#include "arm/macro-assembler-arm.h" #include "arm/regexp-macro-assembler-arm.h" namespace v8 { namespace internal { -RegExpMacroAssemblerARM::RegExpMacroAssemblerARM() { - UNIMPLEMENTED(); +#ifdef V8_NATIVE_REGEXP +/* + * This assembler uses the following register assignment convention + * - r5 : Pointer to current code object (Code*) including heap object tag. + * - r6 : Current position in input, as negative offset from end of string. + * Please notice that this is the byte offset, not the character offset! + * - r7 : Currently loaded character. Must be loaded using + * LoadCurrentCharacter before using any of the dispatch methods. + * - r8 : points to tip of backtrack stack + * - r9 : Unused, might be used by C code and expected unchanged. + * - r10 : End of input (points to byte after last character in input). + * - r11 : Frame pointer. Used to access arguments, local variables and + * RegExp registers. + * - r12 : IP register, used by assembler. Very volatile. + * - r13/sp : points to tip of C stack. + * + * The remaining registers are free for computations. + * + * Each call to a public method should retain this convention. + * The stack will have the following structure: + * - stack_area_base (High end of the memory area to use as + * backtracking stack) + * - at_start (if 1, start at start of string, if 0, don't) + * --- sp when called --- + * - link address + * - backup of registers r4..r11 + * - int* capture_array (int[num_saved_registers_], for output). + * - end of input (Address of end of string) + * - start of input (Address of first character in string) + * --- frame pointer ---- + * - void* input_string (location of a handle containing the string) + * - Offset of location before start of input (effectively character + * position -1). Used to initialize capture registers to a non-position. 
+ * - register 0 (Only positions must be stored in the first + * - register 1 num_saved_registers_ registers) + * - ... + * - register num_registers-1 + * --- sp --- + * + * The first num_saved_registers_ registers are initialized to point to + * "character -1" in the string (i.e., char_size() bytes before the first + * character of the string). The remaining registers start out as garbage. + * + * The data up to the return address must be placed there by the calling + * code, by calling the code entry as cast to a function with the signature: + * int (*match)(String* input_string, + * Address start, + * Address end, + * int* capture_output_array, + * bool at_start, + * byte* stack_area_base) + * The call is performed by NativeRegExpMacroAssembler::Execute() + * (in regexp-macro-assembler.cc). + */ + +#define __ ACCESS_MASM(masm_) + +RegExpMacroAssemblerARM::RegExpMacroAssemblerARM( + Mode mode, + int registers_to_save) + : masm_(new MacroAssembler(NULL, kRegExpCodeSize)), + mode_(mode), + num_registers_(registers_to_save), + num_saved_registers_(registers_to_save), + entry_label_(), + start_label_(), + success_label_(), + backtrack_label_(), + exit_label_() { + ASSERT_EQ(0, registers_to_save % 2); + __ jmp(&entry_label_); // We'll write the entry code later. + EmitBacktrackConstantPool(); + __ bind(&start_label_); // And then continue from here. } -RegExpMacroAssemblerARM::~RegExpMacroAssemblerARM() {} +RegExpMacroAssemblerARM::~RegExpMacroAssemblerARM() { + delete masm_; + // Unuse labels in case we throw away the assembler without calling GetCode. + entry_label_.Unuse(); + start_label_.Unuse(); + success_label_.Unuse(); + backtrack_label_.Unuse(); + exit_label_.Unuse(); + check_preempt_label_.Unuse(); + stack_overflow_label_.Unuse(); +} + + +int RegExpMacroAssemblerARM::stack_limit_slack() { + return RegExpStack::kStackLimitSlack; +} + + +void RegExpMacroAssemblerARM::AdvanceCurrentPosition(int by) { + if (by != 0) { + Label inside_string; + __ add(current_input_offset(), + current_input_offset(), Operand(by * char_size())); + } +} + + +void RegExpMacroAssemblerARM::AdvanceRegister(int reg, int by) { + ASSERT(reg >= 0); + ASSERT(reg < num_registers_); + if (by != 0) { + __ ldr(r0, register_location(reg)); + __ add(r0, r0, Operand(by)); + __ str(r0, register_location(reg)); + } +} + + +void RegExpMacroAssemblerARM::Backtrack() { + CheckPreemption(); + // Pop Code* offset from backtrack stack, add Code* and jump to location. + Pop(r0); + __ add(pc, r0, Operand(r5)); +} + + +void RegExpMacroAssemblerARM::Bind(Label* label) { + __ bind(label); +} + + +void RegExpMacroAssemblerARM::CheckCharacter(uint32_t c, Label* on_equal) { + __ cmp(current_character(), Operand(c)); + BranchOrBacktrack(eq, on_equal); +} + + +void RegExpMacroAssemblerARM::CheckCharacterGT(uc16 limit, Label* on_greater) { + __ cmp(current_character(), Operand(limit)); + BranchOrBacktrack(gt, on_greater); +} + + +void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) { + Label not_at_start; + // Did we start the match at the start of the string at all? + __ ldr(r0, MemOperand(frame_pointer(), kAtStart)); + __ cmp(r0, Operand(0)); + BranchOrBacktrack(eq, ¬_at_start); + + // If we did, are we still at the start of the input? 
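The block comment above pins down the calling convention for generated regexp code. A caller-side sketch of that six-argument contract, with the pointer types simplified to builtins (the real call is made by NativeRegExpMacroAssembler::Execute):

    // Function-pointer type matching the signature documented above.
    typedef int (*RegExpEntry)(void* input_string,     // String* handle slot
                               char* input_start,      // first character
                               char* input_end,        // one past the last
                               int* capture_output,    // int[num_saved_registers_]
                               int at_start,           // 1 if matching from position 0
                               char* stack_area_base); // backtrack stack high end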
+ __ ldr(r1, MemOperand(frame_pointer(), kInputStart)); + __ add(r0, end_of_input_address(), Operand(current_input_offset())); + __ cmp(r0, r1); + BranchOrBacktrack(eq, on_at_start); + __ bind(¬_at_start); +} + + +void RegExpMacroAssemblerARM::CheckNotAtStart(Label* on_not_at_start) { + // Did we start the match at the start of the string at all? + __ ldr(r0, MemOperand(frame_pointer(), kAtStart)); + __ cmp(r0, Operand(0)); + BranchOrBacktrack(eq, on_not_at_start); + // If we did, are we still at the start of the input? + __ ldr(r1, MemOperand(frame_pointer(), kInputStart)); + __ add(r0, end_of_input_address(), Operand(current_input_offset())); + __ cmp(r0, r1); + BranchOrBacktrack(ne, on_not_at_start); +} + + +void RegExpMacroAssemblerARM::CheckCharacterLT(uc16 limit, Label* on_less) { + __ cmp(current_character(), Operand(limit)); + BranchOrBacktrack(lt, on_less); +} + + +void RegExpMacroAssemblerARM::CheckCharacters(Vector<const uc16> str, + int cp_offset, + Label* on_failure, + bool check_end_of_string) { + if (on_failure == NULL) { + // Instead of inlining a backtrack for each test, (re)use the global + // backtrack target. + on_failure = &backtrack_label_; + } + + if (check_end_of_string) { + // Is last character of required match inside string. + CheckPosition(cp_offset + str.length() - 1, on_failure); + } + + __ add(r0, end_of_input_address(), Operand(current_input_offset())); + if (cp_offset != 0) { + int byte_offset = cp_offset * char_size(); + __ add(r0, r0, Operand(byte_offset)); + } + + // r0 : Address of characters to match against str. + int stored_high_byte = 0; + for (int i = 0; i < str.length(); i++) { + if (mode_ == ASCII) { + __ ldrb(r1, MemOperand(r0, char_size(), PostIndex)); + ASSERT(str[i] <= String::kMaxAsciiCharCode); + __ cmp(r1, Operand(str[i])); + } else { + __ ldrh(r1, MemOperand(r0, char_size(), PostIndex)); + uc16 match_char = str[i]; + int match_high_byte = (match_char >> 8); + if (match_high_byte == 0) { + __ cmp(r1, Operand(str[i])); + } else { + if (match_high_byte != stored_high_byte) { + __ mov(r2, Operand(match_high_byte)); + stored_high_byte = match_high_byte; + } + __ add(r3, r2, Operand(match_char & 0xff)); + __ cmp(r1, r3); + } + } + BranchOrBacktrack(ne, on_failure); + } +} + + +void RegExpMacroAssemblerARM::CheckGreedyLoop(Label* on_equal) { + __ ldr(r0, MemOperand(backtrack_stackpointer(), 0)); + __ cmp(current_input_offset(), r0); + __ add(backtrack_stackpointer(), + backtrack_stackpointer(), Operand(kPointerSize), LeaveCC, eq); + BranchOrBacktrack(eq, on_equal); +} + + +void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase( + int start_reg, + Label* on_no_match) { + Label fallthrough; + __ ldr(r0, register_location(start_reg)); // Index of start of capture + __ ldr(r1, register_location(start_reg + 1)); // Index of end of capture + __ sub(r1, r1, r0, SetCC); // Length of capture. + + // If length is zero, either the capture is empty or it is not participating. + // In either case succeed immediately. + __ b(eq, &fallthrough); + + // Check that there are enough characters left in the input. + __ cmn(r1, Operand(current_input_offset())); + BranchOrBacktrack(gt, on_no_match); + + if (mode_ == ASCII) { + Label success; + Label fail; + Label loop_check; + + // r0 - offset of start of capture + // r1 - length of capture + __ add(r0, r0, Operand(end_of_input_address())); + __ add(r2, end_of_input_address(), Operand(current_input_offset())); + __ add(r1, r0, Operand(r1)); + + // r0 - Address of start of capture. 
+ // r1 - Address of end of capture + // r2 - Address of current input position. + + Label loop; + __ bind(&loop); + __ ldrb(r3, MemOperand(r0, char_size(), PostIndex)); + __ ldrb(r4, MemOperand(r2, char_size(), PostIndex)); + __ cmp(r4, r3); + __ b(eq, &loop_check); + + // Mismatch, try case-insensitive match (converting letters to lower-case). + __ orr(r3, r3, Operand(0x20)); // Convert capture character to lower-case. + __ orr(r4, r4, Operand(0x20)); // Also convert input character. + __ cmp(r4, r3); + __ b(ne, &fail); + __ sub(r3, r3, Operand('a')); + __ cmp(r3, Operand('z' - 'a')); // Is r3 a lowercase letter? + __ b(hi, &fail); + + + __ bind(&loop_check); + __ cmp(r0, r1); + __ b(lt, &loop); + __ jmp(&success); + + __ bind(&fail); + BranchOrBacktrack(al, on_no_match); + + __ bind(&success); + // Compute new value of character position after the matched part. + __ sub(current_input_offset(), r2, end_of_input_address()); + } else { + ASSERT(mode_ == UC16); + int argument_count = 3; + FrameAlign(argument_count, r2); + + // r0 - offset of start of capture + // r1 - length of capture + + // Put arguments into arguments registers. + // Parameters are + // r0: Address byte_offset1 - Address captured substring's start. + // r1: Address byte_offset2 - Address of current character position. + // r2: size_t byte_length - length of capture in bytes(!) + + // Address of start of capture. + __ add(r0, r0, Operand(end_of_input_address())); + // Length of capture. + __ mov(r2, Operand(r1)); + // Save length in callee-save register for use on return. + __ mov(r4, Operand(r1)); + // Address of current input position. + __ add(r1, current_input_offset(), Operand(end_of_input_address())); + + ExternalReference function = + ExternalReference::re_case_insensitive_compare_uc16(); + CallCFunction(function, argument_count); + + // Check if function returned non-zero for success or zero for failure. + __ cmp(r0, Operand(0)); + BranchOrBacktrack(eq, on_no_match); + // On success, increment position by length of capture. + __ add(current_input_offset(), current_input_offset(), Operand(r4)); + } + + __ bind(&fallthrough); +} + + +void RegExpMacroAssemblerARM::CheckNotBackReference( + int start_reg, + Label* on_no_match) { + Label fallthrough; + Label success; + + // Find length of back-referenced capture. + __ ldr(r0, register_location(start_reg)); + __ ldr(r1, register_location(start_reg + 1)); + __ sub(r1, r1, r0, SetCC); // Length to check. + // Succeed on empty capture (including no capture). + __ b(eq, &fallthrough); + + // Check that there are enough characters left in the input. + __ cmn(r1, Operand(current_input_offset())); + BranchOrBacktrack(gt, on_no_match); + + // Compute pointers to match string and capture string + __ add(r0, r0, Operand(end_of_input_address())); + __ add(r2, end_of_input_address(), Operand(current_input_offset())); + __ add(r1, r1, Operand(r0)); + + Label loop; + __ bind(&loop); + if (mode_ == ASCII) { + __ ldrb(r3, MemOperand(r0, char_size(), PostIndex)); + __ ldrb(r4, MemOperand(r2, char_size(), PostIndex)); + } else { + ASSERT(mode_ == UC16); + __ ldrh(r3, MemOperand(r0, char_size(), PostIndex)); + __ ldrh(r4, MemOperand(r2, char_size(), PostIndex)); + } + __ cmp(r3, r4); + BranchOrBacktrack(ne, on_no_match); + __ cmp(r0, r1); + __ b(lt, &loop); + + // Move current character position to position after match. 
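The ASCII branch of CheckNotBackReferenceIgnoreCase above folds both characters with an OR of 0x20 and only then range-checks the folded value, because the fold also maps some punctuation pairs onto each other. The same test in C++:

    // ASCII-only case-insensitive equality, as in the capture loop above:
    // OR with 0x20 maps 'A'..'Z' onto 'a'..'z'; the range check rejects
    // non-letter pairs (e.g. '[' and '{') that the fold would conflate.
    bool EqualIgnoreCaseAscii(unsigned char a, unsigned char b) {
      if (a == b) return true;
      unsigned char fa = a | 0x20;
      unsigned char fb = b | 0x20;
      if (fa != fb) return false;
      return static_cast<unsigned>(fa - 'a') <= 'z' - 'a';  // letters only
    }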
+ __ sub(current_input_offset(), r2, end_of_input_address()); + __ bind(&fallthrough); +} + + +void RegExpMacroAssemblerARM::CheckNotRegistersEqual(int reg1, + int reg2, + Label* on_not_equal) { + __ ldr(r0, register_location(reg1)); + __ ldr(r1, register_location(reg2)); + __ cmp(r0, r1); + BranchOrBacktrack(ne, on_not_equal); +} + + +void RegExpMacroAssemblerARM::CheckNotCharacter(uint32_t c, + Label* on_not_equal) { + __ cmp(current_character(), Operand(c)); + BranchOrBacktrack(ne, on_not_equal); +} + + +void RegExpMacroAssemblerARM::CheckCharacterAfterAnd(uint32_t c, + uint32_t mask, + Label* on_equal) { + __ and_(r0, current_character(), Operand(mask)); + __ cmp(r0, Operand(c)); + BranchOrBacktrack(eq, on_equal); +} + + +void RegExpMacroAssemblerARM::CheckNotCharacterAfterAnd(uint32_t c, + uint32_t mask, + Label* on_not_equal) { + __ and_(r0, current_character(), Operand(mask)); + __ cmp(r0, Operand(c)); + BranchOrBacktrack(ne, on_not_equal); +} + + +void RegExpMacroAssemblerARM::CheckNotCharacterAfterMinusAnd( + uc16 c, + uc16 minus, + uc16 mask, + Label* on_not_equal) { + ASSERT(minus < String::kMaxUC16CharCode); + __ sub(r0, current_character(), Operand(minus)); + __ and_(r0, r0, Operand(mask)); + __ cmp(r0, Operand(c)); + BranchOrBacktrack(ne, on_not_equal); +} + + +bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type, + int cp_offset, + bool check_offset, + Label* on_no_match) { + // Range checks (c in min..max) are generally implemented by an unsigned + // (c - min) <= (max - min) check + switch (type) { + case 's': + // Match space-characters + if (mode_ == ASCII) { + // ASCII space characters are '\t'..'\r' and ' '. + if (check_offset) { + LoadCurrentCharacter(cp_offset, on_no_match); + } else { + LoadCurrentCharacterUnchecked(cp_offset, 1); + } + Label success; + __ cmp(current_character(), Operand(' ')); + __ b(eq, &success); + // Check range 0x09..0x0d + __ sub(r0, current_character(), Operand('\t')); + __ cmp(r0, Operand('\r' - '\t')); + BranchOrBacktrack(hi, on_no_match); + __ bind(&success); + return true; + } + return false; + case 'S': + // Match non-space characters. + if (check_offset) { + LoadCurrentCharacter(cp_offset, on_no_match, 1); + } else { + LoadCurrentCharacterUnchecked(cp_offset, 1); + } + if (mode_ == ASCII) { + // ASCII space characters are '\t'..'\r' and ' '. 
+ __ cmp(current_character(), Operand(' ')); + BranchOrBacktrack(eq, on_no_match); + __ sub(r0, current_character(), Operand('\t')); + __ cmp(r0, Operand('\r' - '\t')); + BranchOrBacktrack(ls, on_no_match); + return true; + } + return false; + case 'd': + // Match ASCII digits ('0'..'9') + if (check_offset) { + LoadCurrentCharacter(cp_offset, on_no_match, 1); + } else { + LoadCurrentCharacterUnchecked(cp_offset, 1); + } + __ sub(r0, current_character(), Operand('0')); + __ cmp(current_character(), Operand('9' - '0')); + BranchOrBacktrack(hi, on_no_match); + return true; + case 'D': + // Match non ASCII-digits + if (check_offset) { + LoadCurrentCharacter(cp_offset, on_no_match, 1); + } else { + LoadCurrentCharacterUnchecked(cp_offset, 1); + } + __ sub(r0, current_character(), Operand('0')); + __ cmp(r0, Operand('9' - '0')); + BranchOrBacktrack(ls, on_no_match); + return true; + case '.': { + // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029) + if (check_offset) { + LoadCurrentCharacter(cp_offset, on_no_match, 1); + } else { + LoadCurrentCharacterUnchecked(cp_offset, 1); + } + __ eor(r0, current_character(), Operand(0x01)); + // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c + __ sub(r0, r0, Operand(0x0b)); + __ cmp(r0, Operand(0x0c - 0x0b)); + BranchOrBacktrack(ls, on_no_match); + if (mode_ == UC16) { + // Compare original value to 0x2028 and 0x2029, using the already + // computed (current_char ^ 0x01 - 0x0b). I.e., check for + // 0x201d (0x2028 - 0x0b) or 0x201e. + __ sub(r0, r0, Operand(0x2028 - 0x0b)); + __ cmp(r0, Operand(1)); + BranchOrBacktrack(ls, on_no_match); + } + return true; + } + case '*': + // Match any character. + if (check_offset) { + CheckPosition(cp_offset, on_no_match); + } + return true; + // No custom implementation (yet): w, W, s(UC16), S(UC16). + default: + return false; + } +} + + +void RegExpMacroAssemblerARM::Fail() { + __ mov(r0, Operand(FAILURE)); + __ jmp(&exit_label_); +} + + +Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) { + // Finalize code - write the entry point code now we know how many + // registers we need. + + // Entry code: + __ bind(&entry_label_); + // Push Link register. + // Push arguments + // Save callee-save registers. + // Start new stack frame. + // Order here should correspond to order of offset constants in header file. + RegList registers_to_retain = r4.bit() | r5.bit() | r6.bit() | + r7.bit() | r8.bit() | r9.bit() | r10.bit() | fp.bit(); + RegList argument_registers = r0.bit() | r1.bit() | r2.bit() | r3.bit(); + __ stm(db_w, sp, argument_registers | registers_to_retain | lr.bit()); + // Set frame pointer just above the arguments. + __ add(frame_pointer(), sp, Operand(4 * kPointerSize)); + __ push(r0); // Make room for "position - 1" constant (value is irrelevant). + + // Check if we have space on the stack for registers. + Label stack_limit_hit; + Label stack_ok; + + ExternalReference stack_guard_limit = + ExternalReference::address_of_stack_guard_limit(); + __ mov(r0, Operand(stack_guard_limit)); + __ ldr(r0, MemOperand(r0)); + __ sub(r0, sp, r0, SetCC); + // Handle it if the stack pointer is already below the stack limit. + __ b(ls, &stack_limit_hit); + // Check if there is room for the variable number of registers above + // the stack limit. + __ cmp(r0, Operand(num_registers_ * kPointerSize)); + __ b(hs, &stack_ok); + // Exit with OutOfMemory exception. There is not enough space on the stack + // for our working registers. 
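The entry code above performs two stack checks: first that sp is still above the guard limit at all, then that the remaining headroom can hold the regexp register array (on the first failure the real code calls the stack-guard helper rather than failing outright). The two comparisons, expressed directly:

    #include <cstdint>

    bool StackHasRoom(uintptr_t sp, uintptr_t limit, int num_registers) {
      if (sp <= limit) return false;  // b(ls, &stack_limit_hit)
      // b(hs, &stack_ok): headroom must cover num_registers_ * kPointerSize.
      return (sp - limit) >=
             static_cast<uintptr_t>(num_registers) * sizeof(void*);
    }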
+ __ mov(r0, Operand(EXCEPTION)); + __ jmp(&exit_label_); + + __ bind(&stack_limit_hit); + CallCheckStackGuardState(r0); + __ cmp(r0, Operand(0)); + // If returned value is non-zero, we exit with the returned value as result. + __ b(ne, &exit_label_); + + __ bind(&stack_ok); + + // Allocate space on stack for registers. + __ sub(sp, sp, Operand(num_registers_ * kPointerSize)); + // Load string end. + __ ldr(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd)); + // Load input start. + __ ldr(r0, MemOperand(frame_pointer(), kInputStart)); + // Find negative length (offset of start relative to end). + __ sub(current_input_offset(), r0, end_of_input_address()); + // Set r0 to address of char before start of input + // (effectively string position -1). + __ sub(r0, current_input_offset(), Operand(char_size())); + // Store this value in a local variable, for use when clearing + // position registers. + __ str(r0, MemOperand(frame_pointer(), kInputStartMinusOne)); + if (num_saved_registers_ > 0) { // Always is, if generated from a regexp. + // Fill saved registers with initial value = start offset - 1 + + // Address of register 0. + __ add(r1, frame_pointer(), Operand(kRegisterZero)); + __ mov(r2, Operand(num_saved_registers_)); + Label init_loop; + __ bind(&init_loop); + __ str(r0, MemOperand(r1, kPointerSize, NegPostIndex)); + __ sub(r2, r2, Operand(1), SetCC); + __ b(ne, &init_loop); + } + + // Initialize backtrack stack pointer. + __ ldr(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd)); + // Initialize code pointer register + __ mov(code_pointer(), Operand(masm_->CodeObject())); + // Load previous char as initial value of current character register. + Label at_start; + __ ldr(r0, MemOperand(frame_pointer(), kAtStart)); + __ cmp(r0, Operand(0)); + __ b(ne, &at_start); + LoadCurrentCharacterUnchecked(-1, 1); // Load previous char. + __ jmp(&start_label_); + __ bind(&at_start); + __ mov(current_character(), Operand('\n')); + __ jmp(&start_label_); + + + // Exit code: + if (success_label_.is_linked()) { + // Save captures when successful. + __ bind(&success_label_); + if (num_saved_registers_ > 0) { + // copy captures to output + __ ldr(r1, MemOperand(frame_pointer(), kInputStart)); + __ ldr(r0, MemOperand(frame_pointer(), kRegisterOutput)); + __ sub(r1, end_of_input_address(), r1); + // r1 is length of input in bytes. + if (mode_ == UC16) { + __ mov(r1, Operand(r1, LSR, 1)); + } + // r1 is length of input in characters. + + ASSERT_EQ(0, num_saved_registers_ % 2); + // Always an even number of capture registers. This allows us to + // unroll the loop once to add an operation between a load of a register + // and the following use of that register. + for (int i = 0; i < num_saved_registers_; i += 2) { + __ ldr(r2, register_location(i)); + __ ldr(r3, register_location(i + 1)); + if (mode_ == UC16) { + __ add(r2, r1, Operand(r2, ASR, 1)); + __ add(r3, r1, Operand(r3, ASR, 1)); + } else { + __ add(r2, r1, Operand(r2)); + __ add(r3, r1, Operand(r3)); + } + __ str(r2, MemOperand(r0, kPointerSize, PostIndex)); + __ str(r3, MemOperand(r0, kPointerSize, PostIndex)); + } + } + __ mov(r0, Operand(SUCCESS)); + } + // Exit and return r0 + __ bind(&exit_label_); + // Skip sp past regexp registers and local variables.. + __ mov(sp, frame_pointer()); + // Restore registers r4..r11 and return (restoring lr to pc). + __ ldm(ia_w, sp, registers_to_retain | pc.bit()); + + // Backtrack code (branch target for conditional backtracks). 
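The capture-copying loop in the exit code above rebases register values, which the matcher keeps as negative byte offsets from the end of the input, into the character indices the caller expects. The same arithmetic in a standalone sketch (names invented for the example):

#include <cstdio>

static int RebaseCapture(int reg_value, int input_length_in_chars,
                         bool uc16) {
  // In UC16 mode offsets are in bytes and always even, so the ASR-by-one
  // in the generated code is an exact division by the character size.
  int chars = uc16 ? reg_value / 2 : reg_value;
  return input_length_in_chars + chars;  // add r2, r1, ...
}

int main() {
  // "abc" fully matched in ASCII mode: offsets -3 and 0 become 0 and 3.
  printf("%d %d\n",
         RebaseCapture(-3, 3, false),
         RebaseCapture(0, 3, false));
  return 0;
}

The backtrack target announced by the comment above is bound next.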
+ if (backtrack_label_.is_linked()) { + __ bind(&backtrack_label_); + Backtrack(); + } + + Label exit_with_exception; + + // Preempt-code + if (check_preempt_label_.is_linked()) { + SafeCallTarget(&check_preempt_label_); + + CallCheckStackGuardState(r0); + __ cmp(r0, Operand(0)); + // If returning non-zero, we should end execution with the given + // result as return value. + __ b(ne, &exit_label_); + + // String might have moved: Reload end of string from frame. + __ ldr(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd)); + SafeReturn(); + } + + // Backtrack stack overflow code. + if (stack_overflow_label_.is_linked()) { + SafeCallTarget(&stack_overflow_label_); + // Reached if the backtrack-stack limit has been hit. + + Label grow_failed; + + // Call GrowStack(backtrack_stackpointer()) + int num_arguments = 2; + FrameAlign(num_arguments, r0); + __ mov(r0, backtrack_stackpointer()); + __ add(r1, frame_pointer(), Operand(kStackHighEnd)); + ExternalReference grow_stack = + ExternalReference::re_grow_stack(); + CallCFunction(grow_stack, num_arguments); + // If return NULL, we have failed to grow the stack, and + // must exit with a stack-overflow exception. + __ cmp(r0, Operand(0)); + __ b(eq, &exit_with_exception); + // Otherwise use return value as new stack pointer. + __ mov(backtrack_stackpointer(), r0); + // Restore saved registers and continue. + SafeReturn(); + } + + if (exit_with_exception.is_linked()) { + // If any of the code above needed to exit with an exception. + __ bind(&exit_with_exception); + // Exit with Result EXCEPTION(-1) to signal thrown exception. + __ mov(r0, Operand(EXCEPTION)); + __ jmp(&exit_label_); + } + + CodeDesc code_desc; + masm_->GetCode(&code_desc); + Handle<Code> code = Factory::NewCode(code_desc, + NULL, + Code::ComputeFlags(Code::REGEXP), + masm_->CodeObject()); + LOG(RegExpCodeCreateEvent(*code, *source)); + return Handle<Object>::cast(code); +} + + +void RegExpMacroAssemblerARM::GoTo(Label* to) { + BranchOrBacktrack(al, to); +} + + +void RegExpMacroAssemblerARM::IfRegisterGE(int reg, + int comparand, + Label* if_ge) { + __ ldr(r0, register_location(reg)); + __ cmp(r0, Operand(comparand)); + BranchOrBacktrack(ge, if_ge); +} + + +void RegExpMacroAssemblerARM::IfRegisterLT(int reg, + int comparand, + Label* if_lt) { + __ ldr(r0, register_location(reg)); + __ cmp(r0, Operand(comparand)); + BranchOrBacktrack(lt, if_lt); +} -}} // namespace v8::internal +void RegExpMacroAssemblerARM::IfRegisterEqPos(int reg, + Label* if_eq) { + __ ldr(r0, register_location(reg)); + __ cmp(r0, Operand(current_input_offset())); + BranchOrBacktrack(eq, if_eq); +} + + +RegExpMacroAssembler::IrregexpImplementation + RegExpMacroAssemblerARM::Implementation() { + return kARMImplementation; +} + + +void RegExpMacroAssemblerARM::LoadCurrentCharacter(int cp_offset, + Label* on_end_of_input, + bool check_bounds, + int characters) { + ASSERT(cp_offset >= -1); // ^ and \b can look behind one character. + ASSERT(cp_offset < (1<<30)); // Be sane! 
(And ensure negation works) + if (check_bounds) { + CheckPosition(cp_offset + characters - 1, on_end_of_input); + } + LoadCurrentCharacterUnchecked(cp_offset, characters); +} + + +void RegExpMacroAssemblerARM::PopCurrentPosition() { + Pop(current_input_offset()); +} + + +void RegExpMacroAssemblerARM::PopRegister(int register_index) { + Pop(r0); + __ str(r0, register_location(register_index)); +} + + +static bool is_valid_memory_offset(int value) { + if (value < 0) value = -value; + return value < (1<<12); +} + + +void RegExpMacroAssemblerARM::PushBacktrack(Label* label) { + if (label->is_bound()) { + int target = label->pos(); + __ mov(r0, Operand(target + Code::kHeaderSize - kHeapObjectTag)); + } else { + int constant_offset = GetBacktrackConstantPoolEntry(); + masm_->label_at_put(label, constant_offset); + // Reading pc-relative is based on the address 8 bytes ahead of + // the current opcode. + unsigned int offset_of_pc_register_read = + masm_->pc_offset() + Assembler::kPcLoadDelta; + int pc_offset_of_constant = + constant_offset - offset_of_pc_register_read; + ASSERT(pc_offset_of_constant < 0); + if (is_valid_memory_offset(pc_offset_of_constant)) { + masm_->BlockConstPoolBefore(masm_->pc_offset() + Assembler::kInstrSize); + __ ldr(r0, MemOperand(pc, pc_offset_of_constant)); + } else { + // Not a 12-bit offset, so it needs to be loaded from the constant + // pool. + masm_->BlockConstPoolBefore( + masm_->pc_offset() + 2 * Assembler::kInstrSize); + __ mov(r0, Operand(pc_offset_of_constant + Assembler::kInstrSize)); + __ ldr(r0, MemOperand(pc, r0)); + } + } + Push(r0); + CheckStackLimit(); +} + + +void RegExpMacroAssemblerARM::PushCurrentPosition() { + Push(current_input_offset()); +} + + +void RegExpMacroAssemblerARM::PushRegister(int register_index, + StackCheckFlag check_stack_limit) { + __ ldr(r0, register_location(register_index)); + Push(r0); + if (check_stack_limit) CheckStackLimit(); +} + + +void RegExpMacroAssemblerARM::ReadCurrentPositionFromRegister(int reg) { + __ ldr(current_input_offset(), register_location(reg)); +} + + +void RegExpMacroAssemblerARM::ReadStackPointerFromRegister(int reg) { + __ ldr(backtrack_stackpointer(), register_location(reg)); + __ ldr(r0, MemOperand(frame_pointer(), kStackHighEnd)); + __ add(backtrack_stackpointer(), backtrack_stackpointer(), Operand(r0)); +} + + +void RegExpMacroAssemblerARM::SetRegister(int register_index, int to) { + ASSERT(register_index >= num_saved_registers_); // Reserved for positions! 
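PushBacktrack above leans on ARM pc-relative addressing, where a read of pc observes the address of the current instruction plus 8 (Assembler::kPcLoadDelta). A worked example of the offset computation, with assembler positions invented for illustration:

#include <cstdio>

int main() {
  const int kPcLoadDelta = 8;  // pc reads as current instruction + 8
  int constant_offset = 64;    // pool entry emitted earlier in the code
  int pc_offset = 256;         // position of the ldr being assembled
  int pc_relative = constant_offset - (pc_offset + kPcLoadDelta);
  // ldr r0, [pc, #imm] encodes immediates below 4096 in magnitude,
  // which is what is_valid_memory_offset() above tests.
  printf("offset %d, encodable: %d\n",
         pc_relative, pc_relative > -(1 << 12));
  return 0;
}

SetRegister then continues below, materializing the immediate before storing it into the register's frame slot.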
+ __ mov(r0, Operand(to)); + __ str(r0, register_location(register_index)); +} + + +void RegExpMacroAssemblerARM::Succeed() { + __ jmp(&success_label_); +} + + +void RegExpMacroAssemblerARM::WriteCurrentPositionToRegister(int reg, + int cp_offset) { + if (cp_offset == 0) { + __ str(current_input_offset(), register_location(reg)); + } else { + __ add(r0, current_input_offset(), Operand(cp_offset * char_size())); + __ str(r0, register_location(reg)); + } +} + + +void RegExpMacroAssemblerARM::ClearRegisters(int reg_from, int reg_to) { + ASSERT(reg_from <= reg_to); + __ ldr(r0, MemOperand(frame_pointer(), kInputStartMinusOne)); + for (int reg = reg_from; reg <= reg_to; reg++) { + __ str(r0, register_location(reg)); + } +} + + +void RegExpMacroAssemblerARM::WriteStackPointerToRegister(int reg) { + __ ldr(r1, MemOperand(frame_pointer(), kStackHighEnd)); + __ sub(r0, backtrack_stackpointer(), r1); + __ str(r0, register_location(reg)); +} + + +// Private methods: + +void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) { + int num_arguments = 3; + FrameAlign(num_arguments, scratch); + // RegExp code frame pointer. + __ mov(r2, frame_pointer()); + // Code* of self. + __ mov(r1, Operand(masm_->CodeObject())); + // r0 becomes return address pointer. + ExternalReference stack_guard_check = + ExternalReference::re_check_stack_guard_state(); + CallCFunctionUsingStub(stack_guard_check, num_arguments); +} + + +// Helper function for reading a value out of a stack frame. +template <typename T> +static T& frame_entry(Address re_frame, int frame_offset) { + return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset)); +} + + +int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address, + Code* re_code, + Address re_frame) { + if (StackGuard::IsStackOverflow()) { + Top::StackOverflow(); + return EXCEPTION; + } + + // If not real stack overflow the stack guard was used to interrupt + // execution for another purpose. + + // Prepare for possible GC. + HandleScope handles; + Handle<Code> code_handle(re_code); + + Handle<String> subject(frame_entry<String*>(re_frame, kInputString)); + // Current string. + bool is_ascii = subject->IsAsciiRepresentation(); + + ASSERT(re_code->instruction_start() <= *return_address); + ASSERT(*return_address <= + re_code->instruction_start() + re_code->instruction_size()); + + Object* result = Execution::HandleStackGuardInterrupt(); + + if (*code_handle != re_code) { // Return address no longer valid + int delta = *code_handle - re_code; + // Overwrite the return address on the stack. + *return_address += delta; + } + + if (result->IsException()) { + return EXCEPTION; + } + + // String might have changed. + if (subject->IsAsciiRepresentation() != is_ascii) { + // If we changed between an ASCII and an UC16 string, the specialized + // code cannot be used, and we need to restart regexp matching from + // scratch (including, potentially, compiling a new version of the code). + return RETRY; + } + + // Otherwise, the content of the string might have moved. It must still + // be a sequential or external string with the same content. + // Update the start and end pointers in the stack frame to the current + // location (whether it has actually moved or not). + ASSERT(StringShape(*subject).IsSequential() || + StringShape(*subject).IsExternal()); + + // The original start address of the characters to match. 
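The frame_entry helper defined above is the entire interface between this C++ handler and the raw RegExp frame: an offset from the frame pointer becomes a typed lvalue that can be read or assigned. A simplified self-contained sketch (the real helper goes through Memory::int32_at, and the slot offset here is made up):

#include <cstdint>
#include <cstdio>

template <typename T>
static T& frame_entry(char* re_frame, int frame_offset) {
  return *reinterpret_cast<T*>(re_frame + frame_offset);
}

int main() {
  int32_t frame_words[16] = {};
  char* re_frame = reinterpret_cast<char*>(frame_words);
  const int kInputStart = 8;  // hypothetical slot offset
  frame_entry<int32_t>(re_frame, kInputStart) = 1234;  // write the slot
  printf("%d\n", frame_entry<int32_t>(re_frame, kInputStart));  // 1234
  return 0;
}

The code below uses exactly this helper to fetch the input pointers and, when the string has moved, to rewrite them in place.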
+ const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart); + + // Find the current start address of the same character at the current string + // position. + int start_index = frame_entry<int>(re_frame, kStartIndex); + const byte* new_address = StringCharacterPosition(*subject, start_index); + + if (start_address != new_address) { + // If there is a difference, update the object pointer and start and end + // addresses in the RegExp stack frame to match the new value. + const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd); + int byte_length = end_address - start_address; + frame_entry<const String*>(re_frame, kInputString) = *subject; + frame_entry<const byte*>(re_frame, kInputStart) = new_address; + frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length; + } + + return 0; +} + + +MemOperand RegExpMacroAssemblerARM::register_location(int register_index) { + ASSERT(register_index < (1<<30)); + if (num_registers_ <= register_index) { + num_registers_ = register_index + 1; + } + return MemOperand(frame_pointer(), + kRegisterZero - register_index * kPointerSize); +} + + +void RegExpMacroAssemblerARM::CheckPosition(int cp_offset, + Label* on_outside_input) { + __ cmp(current_input_offset(), Operand(-cp_offset * char_size())); + BranchOrBacktrack(ge, on_outside_input); +} + + +void RegExpMacroAssemblerARM::BranchOrBacktrack(Condition condition, + Label* to) { + if (condition == al) { // Unconditional. + if (to == NULL) { + Backtrack(); + return; + } + __ jmp(to); + return; + } + if (to == NULL) { + __ b(condition, &backtrack_label_); + return; + } + __ b(condition, to); +} + + +void RegExpMacroAssemblerARM::SafeCall(Label* to, Condition cond) { + __ bl(to, cond); +} + + +void RegExpMacroAssemblerARM::SafeReturn() { + __ pop(lr); + __ add(pc, lr, Operand(masm_->CodeObject())); +} + + +void RegExpMacroAssemblerARM::SafeCallTarget(Label* name) { + __ bind(name); + __ sub(lr, lr, Operand(masm_->CodeObject())); + __ push(lr); +} + + +void RegExpMacroAssemblerARM::Push(Register source) { + ASSERT(!source.is(backtrack_stackpointer())); + __ str(source, + MemOperand(backtrack_stackpointer(), kPointerSize, NegPreIndex)); +} + + +void RegExpMacroAssemblerARM::Pop(Register target) { + ASSERT(!target.is(backtrack_stackpointer())); + __ ldr(target, + MemOperand(backtrack_stackpointer(), kPointerSize, PostIndex)); +} + + +void RegExpMacroAssemblerARM::CheckPreemption() { + // Check for preemption. 
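SafeCallTarget and SafeReturn above deliver the GC-safety promised in the header: return addresses sit on the stack only as offsets relative to the code object, so a collection that moves the code cannot strand them. The arithmetic, with assumed addresses:

#include <cstdint>
#include <cstdio>

int main() {
  uintptr_t code_object = 0x40000;          // code address at call time
  uintptr_t lr = 0x40123;                   // absolute return address
  uintptr_t stored = lr - code_object;      // SafeCallTarget: sub lr, lr, code
  code_object = 0x80000;                    // GC relocates the code object
  uintptr_t resume = stored + code_object;  // SafeReturn: add pc, lr, code
  printf("resume at 0x%lx\n", static_cast<unsigned long>(resume));  // 0x80123
  return 0;
}

CheckPreemption, continued below, loads the stack-guard limit and SafeCalls the preempt handler whenever sp is at or below it.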
+ ExternalReference stack_guard_limit = + ExternalReference::address_of_stack_guard_limit(); + __ mov(r0, Operand(stack_guard_limit)); + __ ldr(r0, MemOperand(r0)); + __ cmp(sp, r0); + SafeCall(&check_preempt_label_, ls); +} + + +void RegExpMacroAssemblerARM::CheckStackLimit() { + if (FLAG_check_stack) { + ExternalReference stack_limit = + ExternalReference::address_of_regexp_stack_limit(); + __ mov(r0, Operand(stack_limit)); + __ ldr(r0, MemOperand(r0)); + __ cmp(backtrack_stackpointer(), Operand(r0)); + SafeCall(&stack_overflow_label_, ls); + } +} + + +void RegExpMacroAssemblerARM::EmitBacktrackConstantPool() { + __ CheckConstPool(false, false); + __ BlockConstPoolBefore( + masm_->pc_offset() + kBacktrackConstantPoolSize * Assembler::kInstrSize); + backtrack_constant_pool_offset_ = masm_->pc_offset(); + for (int i = 0; i < kBacktrackConstantPoolSize; i++) { + __ emit(0); + } + + backtrack_constant_pool_capacity_ = kBacktrackConstantPoolSize; +} + + +int RegExpMacroAssemblerARM::GetBacktrackConstantPoolEntry() { + while (backtrack_constant_pool_capacity_ > 0) { + int offset = backtrack_constant_pool_offset_; + backtrack_constant_pool_offset_ += kPointerSize; + backtrack_constant_pool_capacity_--; + if (masm_->pc_offset() - offset < 2 * KB) { + return offset; + } + } + Label new_pool_skip; + __ jmp(&new_pool_skip); + EmitBacktrackConstantPool(); + __ bind(&new_pool_skip); + int offset = backtrack_constant_pool_offset_; + backtrack_constant_pool_offset_ += kPointerSize; + backtrack_constant_pool_capacity_--; + return offset; +} + + +void RegExpMacroAssemblerARM::FrameAlign(int num_arguments, Register scratch) { + int frameAlignment = OS::ActivationFrameAlignment(); + // Up to four simple arguments are passed in registers r0..r3. + int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4; + if (frameAlignment != 0) { + // Make stack end at alignment and make room for num_arguments - 4 words + // and the original value of sp. + __ mov(scratch, sp); + __ sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); + ASSERT(IsPowerOf2(frameAlignment)); + __ and_(sp, sp, Operand(-frameAlignment)); + __ str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); + } else { + __ sub(sp, sp, Operand(stack_passed_arguments * kPointerSize)); + } +} + + +void RegExpMacroAssemblerARM::CallCFunction(ExternalReference function, + int num_arguments) { + __ mov(r5, Operand(function)); + // Just call directly. The function called cannot cause a GC, or + // allow preemption, so the return address in the link register + // stays correct. + __ Call(r5); + int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4; + if (OS::ActivationFrameAlignment() > kIntSize) { + __ ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize)); + } else { + __ add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize))); + } + __ mov(code_pointer(), Operand(masm_->CodeObject())); +} + + +void RegExpMacroAssemblerARM::CallCFunctionUsingStub( + ExternalReference function, + int num_arguments) { + // Must pass all arguments in registers. The stub pushes on the stack. 
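FrameAlign above rounds sp down to OS::ActivationFrameAlignment() and stores the original sp just above the stack-passed arguments, which is why CallCFunction can restore it afterwards with a single load. The same bookkeeping in C++, with alignment and addresses assumed for illustration:

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kPointerSize = 4;
  const uint32_t frame_alignment = 8;  // OS::ActivationFrameAlignment()
  int num_arguments = 6;               // r0..r3 plus two words on the stack
  int stack_passed = (num_arguments <= 4) ? 0 : num_arguments - 4;

  uint32_t sp = 0x7ffff00c;
  uint32_t original_sp = sp;
  sp -= (stack_passed + 1) * kPointerSize;  // argument words + saved-sp slot
  sp &= ~(frame_alignment - 1);             // and_ sp, sp, -alignment
  uint32_t saved_sp_slot = sp + stack_passed * kPointerSize;

  printf("sp=0x%x, original sp 0x%x stored at 0x%x\n",
         sp, original_sp, saved_sp_slot);
  return 0;
}

CallCFunctionUsingStub, which continues below, avoids this dance for the stack-guard call by keeping every argument in a register.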
+ ASSERT(num_arguments <= 4); + __ mov(r5, Operand(function)); + RegExpCEntryStub stub; + __ CallStub(&stub); + if (OS::ActivationFrameAlignment() != 0) { + __ ldr(sp, MemOperand(sp, 0)); + } + __ mov(code_pointer(), Operand(masm_->CodeObject())); +} + + +void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset, + int characters) { + Register offset = current_input_offset(); + if (cp_offset != 0) { + __ add(r0, current_input_offset(), Operand(cp_offset * char_size())); + offset = r0; + } + // We assume that we cannot do unaligned loads on ARM, so this function + // must only be used to load a single character at a time. + ASSERT(characters == 1); + if (mode_ == ASCII) { + __ ldrb(current_character(), MemOperand(end_of_input_address(), offset)); + } else { + ASSERT(mode_ == UC16); + __ ldrh(current_character(), MemOperand(end_of_input_address(), offset)); + } +} + + +void RegExpCEntryStub::Generate(MacroAssembler* masm_) { + int stack_alignment = OS::ActivationFrameAlignment(); + if (stack_alignment < kPointerSize) stack_alignment = kPointerSize; + // Stack is already aligned for call, so decrement by alignment + // to make room for storing the link register. + __ str(lr, MemOperand(sp, stack_alignment, NegPreIndex)); + __ mov(r0, sp); + __ Call(r5); + __ ldr(pc, MemOperand(sp, stack_alignment, PostIndex)); +} + +#undef __ + +#endif // V8_NATIVE_REGEXP + +}} // namespace v8::internal diff --git a/V8Binding/v8/src/arm/regexp-macro-assembler-arm.h b/V8Binding/v8/src/arm/regexp-macro-assembler-arm.h index de55183..0711ac1 100644 --- a/V8Binding/v8/src/arm/regexp-macro-assembler-arm.h +++ b/V8Binding/v8/src/arm/regexp-macro-assembler-arm.h @@ -31,12 +31,238 @@ namespace v8 { namespace internal { + +#ifndef V8_NATIVE_REGEXP class RegExpMacroAssemblerARM: public RegExpMacroAssembler { public: RegExpMacroAssemblerARM(); virtual ~RegExpMacroAssemblerARM(); }; +#else +class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler { + public: + RegExpMacroAssemblerARM(Mode mode, int registers_to_save); + virtual ~RegExpMacroAssemblerARM(); + virtual int stack_limit_slack(); + virtual void AdvanceCurrentPosition(int by); + virtual void AdvanceRegister(int reg, int by); + virtual void Backtrack(); + virtual void Bind(Label* label); + virtual void CheckAtStart(Label* on_at_start); + virtual void CheckCharacter(uint32_t c, Label* on_equal); + virtual void CheckCharacterAfterAnd(uint32_t c, + uint32_t mask, + Label* on_equal); + virtual void CheckCharacterGT(uc16 limit, Label* on_greater); + virtual void CheckCharacterLT(uc16 limit, Label* on_less); + virtual void CheckCharacters(Vector<const uc16> str, + int cp_offset, + Label* on_failure, + bool check_end_of_string); + // A "greedy loop" is a loop that is both greedy and with a simple + // body. It has a particularly simple implementation. 
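A quick illustration of the "greedy loop" idea named in the comment above: when the loop body consumes exactly one character and tracks no captures, backtracking needs no per-iteration state, because backing off is plain position arithmetic. A toy matcher for /x*y/ in that style (plain C++, not V8's implementation):

#include <cstdio>

static int MatchXStarY(const char* subject) {
  int pos = 0;
  while (subject[pos] == 'x') pos++;  // greedy body: one char per pass
  for (; pos >= 0; pos--) {           // backtrack by arithmetic alone
    if (subject[pos] == 'y') return pos;
  }
  return -1;                          // no match
}

int main() {
  printf("%d %d\n", MatchXStarY("xxxy"), MatchXStarY("xxx"));  // 3 -1
  return 0;
}

CheckGreedyLoop, declared next, is the hook the Irregexp compiler uses for this pattern: only the loop entry position needs to live on the backtrack stack.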
+ virtual void CheckGreedyLoop(Label* on_tos_equals_current_position); + virtual void CheckNotAtStart(Label* on_not_at_start); + virtual void CheckNotBackReference(int start_reg, Label* on_no_match); + virtual void CheckNotBackReferenceIgnoreCase(int start_reg, + Label* on_no_match); + virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal); + virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal); + virtual void CheckNotCharacterAfterAnd(uint32_t c, + uint32_t mask, + Label* on_not_equal); + virtual void CheckNotCharacterAfterMinusAnd(uc16 c, + uc16 minus, + uc16 mask, + Label* on_not_equal); + // Checks whether the given offset from the current position is before + // the end of the string. + virtual void CheckPosition(int cp_offset, Label* on_outside_input); + virtual bool CheckSpecialCharacterClass(uc16 type, + int cp_offset, + bool check_offset, + Label* on_no_match); + virtual void Fail(); + virtual Handle<Object> GetCode(Handle<String> source); + virtual void GoTo(Label* label); + virtual void IfRegisterGE(int reg, int comparand, Label* if_ge); + virtual void IfRegisterLT(int reg, int comparand, Label* if_lt); + virtual void IfRegisterEqPos(int reg, Label* if_eq); + virtual IrregexpImplementation Implementation(); + virtual void LoadCurrentCharacter(int cp_offset, + Label* on_end_of_input, + bool check_bounds = true, + int characters = 1); + virtual void PopCurrentPosition(); + virtual void PopRegister(int register_index); + virtual void PushBacktrack(Label* label); + virtual void PushCurrentPosition(); + virtual void PushRegister(int register_index, + StackCheckFlag check_stack_limit); + virtual void ReadCurrentPositionFromRegister(int reg); + virtual void ReadStackPointerFromRegister(int reg); + virtual void SetRegister(int register_index, int to); + virtual void Succeed(); + virtual void WriteCurrentPositionToRegister(int reg, int cp_offset); + virtual void ClearRegisters(int reg_from, int reg_to); + virtual void WriteStackPointerToRegister(int reg); + + // Called from RegExp if the stack-guard is triggered. + // If the code object is relocated, the return address is fixed before + // returning. + static int CheckStackGuardState(Address* return_address, + Code* re_code, + Address re_frame); + private: + // Offsets from frame_pointer() of function parameters and stored registers. + static const int kFramePointer = 0; + + // Above the frame pointer - Stored registers and stack passed parameters. + // Register 4..11. + static const int kStoredRegisters = kFramePointer; + // Return address (stored from link register, read into pc on return). + static const int kReturnAddress = kStoredRegisters + 8 * kPointerSize; + // Stack parameters placed by caller. + static const int kRegisterOutput = kReturnAddress + kPointerSize; + static const int kAtStart = kRegisterOutput + kPointerSize; + static const int kStackHighEnd = kAtStart + kPointerSize; + + // Below the frame pointer. + // Register parameters stored by setup code. + static const int kInputEnd = kFramePointer - kPointerSize; + static const int kInputStart = kInputEnd - kPointerSize; + static const int kStartIndex = kInputStart - kPointerSize; + static const int kInputString = kStartIndex - kPointerSize; + // When adding local variables remember to push space for them in + // the frame in GetCode. + static const int kInputStartMinusOne = kInputString - kPointerSize; + // First register address. Following registers are below it on the stack. 
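A quick consistency check of the frame constants above (a sketch assuming 4-byte pointers, as on ARM): everything at positive offsets was pushed by the entry code in GetCode, everything below the frame pointer is parameters and regexp registers.

#include <cstdio>

int main() {
  const int kPointerSize = 4;
  const int kFramePointer = 0;
  const int kStoredRegisters = kFramePointer;
  const int kReturnAddress = kStoredRegisters + 8 * kPointerSize;  // 32
  const int kRegisterOutput = kReturnAddress + kPointerSize;       // 36
  const int kAtStart = kRegisterOutput + kPointerSize;             // 40
  const int kStackHighEnd = kAtStart + kPointerSize;               // 44
  const int kInputEnd = kFramePointer - kPointerSize;              // -4
  const int kInputStart = kInputEnd - kPointerSize;                // -8
  const int kStartIndex = kInputStart - kPointerSize;              // -12
  const int kInputString = kStartIndex - kPointerSize;             // -16
  const int kInputStartMinusOne = kInputString - kPointerSize;     // -20
  printf("%d %d %d %d %d %d\n", kReturnAddress, kStackHighEnd,
         kInputEnd, kStartIndex, kInputString, kInputStartMinusOne);
  return 0;
}

kRegisterZero, defined next, anchors the downward-growing register area one word below kInputStartMinusOne.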
+  static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+
+  // Initial size of code buffer.
+  static const size_t kRegExpCodeSize = 1024;
+
+  static const int kBacktrackConstantPoolSize = 4;
+
+  // Load a number of characters at the given offset from the
+  // current position, into the current-character register.
+  void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
+
+  // Check whether preemption has been requested.
+  void CheckPreemption();
+
+  // Check whether we are exceeding the stack limit on the backtrack stack.
+  void CheckStackLimit();
+
+  void EmitBacktrackConstantPool();
+  int GetBacktrackConstantPoolEntry();
+
+
+  // Generate a call to CheckStackGuardState.
+  void CallCheckStackGuardState(Register scratch);
+
+  // The frame-pointer-relative location of a regexp register.
+  MemOperand register_location(int register_index);
+
+  // Register holding the current input position as negative offset from
+  // the end of the string.
+  inline Register current_input_offset() { return r6; }
+
+  // The register containing the current character after LoadCurrentCharacter.
+  inline Register current_character() { return r7; }
+
+  // Register holding address of the end of the input string.
+  inline Register end_of_input_address() { return r10; }
+
+  // Register holding the frame address. Local variables, parameters and
+  // regexp registers are addressed relative to this.
+  inline Register frame_pointer() { return fp; }
+
+  // The register containing the backtrack stack top. Provides a meaningful
+  // name to the register.
+  inline Register backtrack_stackpointer() { return r8; }
+
+  // Register holding pointer to the current code object.
+  inline Register code_pointer() { return r5; }
+
+  // Byte size of chars in the string to match (decided by the Mode argument).
+  inline int char_size() { return static_cast<int>(mode_); }
+
+  // Equivalent to a conditional branch to the label, unless the label
+  // is NULL, in which case it is a conditional Backtrack.
+  void BranchOrBacktrack(Condition condition, Label* to);
+
+  // Call and return internally in the generated code in a way that
+  // is GC-safe (i.e., doesn't leave absolute code addresses on the stack).
+  inline void SafeCall(Label* to, Condition cond = al);
+  inline void SafeReturn();
+  inline void SafeCallTarget(Label* name);
+
+  // Pushes the value of a register on the backtrack stack. Decrements the
+  // stack pointer by a word size and stores the register's value there.
+  inline void Push(Register source);
+
+  // Pops a value from the backtrack stack. Reads the word at the stack pointer
+  // and increments it by a word size.
+  inline void Pop(Register target);
+
+  // Before calling a C-function from generated code, align arguments on stack.
+  // After aligning the frame, non-register arguments must be stored in
+  // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
+  // are word sized.
+  // Some compilers/platforms require the stack to be aligned when calling
+  // C++ code.
+  // Needs a scratch register to do some arithmetic. This register will be
+  // trashed.
+  inline void FrameAlign(int num_arguments, Register scratch);
+
+  // Calls a C function and cleans up the space for arguments allocated
+  // by FrameAlign. The called function is not allowed to trigger a garbage
+  // collection.
+  inline void CallCFunction(ExternalReference function,
+                            int num_arguments);
+
+  // Calls a C function and cleans up the frame alignment done by
+  // FrameAlign.
+  // The called function *is* allowed to trigger a garbage
+  // collection, but may not take more than four arguments (no arguments
+  // passed on the stack), and the first argument will be a pointer to the
+  // return address.
+  inline void CallCFunctionUsingStub(ExternalReference function,
+                                     int num_arguments);
+
+
+  MacroAssembler* masm_;
+
+  // Which mode to generate code for (ASCII or UC16).
+  Mode mode_;
+
+  // One greater than maximal register index actually used.
+  int num_registers_;
+
+  // Number of registers to output at the end (the saved registers
+  // are always 0..num_saved_registers_-1).
+  int num_saved_registers_;
+
+  // Manage a small pre-allocated pool for writing label targets to,
+  // used when pushing backtrack addresses.
+  int backtrack_constant_pool_offset_;
+  int backtrack_constant_pool_capacity_;
+
+  // Labels used internally.
+  Label entry_label_;
+  Label start_label_;
+  Label success_label_;
+  Label backtrack_label_;
+  Label exit_label_;
+  Label check_preempt_label_;
+  Label stack_overflow_label_;
+};
+
+
+#endif  // V8_NATIVE_REGEXP
+
+
 }} // namespace v8::internal
 #endif // V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
diff --git a/V8Binding/v8/src/arm/simulator-arm.cc b/V8Binding/v8/src/arm/simulator-arm.cc
index d12ddbf..7d0ee24 100644
--- a/V8Binding/v8/src/arm/simulator-arm.cc
+++ b/V8Binding/v8/src/arm/simulator-arm.cc
@@ -26,7 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include <stdlib.h>
-
+#include <cstdarg>
 #include "v8.h"
 #include "disasm.h"
@@ -70,7 +70,7 @@ class Debugger {
   Simulator* sim_;
-  bool GetValue(char* desc, int32_t* value);
+  bool GetValue(const char* desc, int32_t* value);
   // Set or delete a breakpoint. Returns true if successful.
   bool SetBreakpoint(Instr* breakpc);
@@ -132,6 +132,8 @@ void Debugger::Stop(Instr* instr) {
 #endif
+// The order of these is important, see the handling of the 'print all'
+// debugger command.
static const char* reg_names[] = { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", @@ -147,7 +149,7 @@ static int reg_nums[] = { 0, 1, 2, 3, 11, 10}; -static int RegNameToRegNum(char* name) { +static int RegNameToRegNum(const char* name) { int reg = 0; while (*reg_names[reg] != 0) { if (strcmp(reg_names[reg], name) == 0) { @@ -159,7 +161,7 @@ static int RegNameToRegNum(char* name) { } -bool Debugger::GetValue(char* desc, int32_t* value) { +bool Debugger::GetValue(const char* desc, int32_t* value) { int regnum = RegNameToRegNum(desc); if (regnum >= 0) { if (regnum == 15) { @@ -246,7 +248,7 @@ void Debugger::Debug() { v8::internal::EmbeddedVector<char, 256> buffer; dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(sim_->get_pc())); - PrintF(" 0x%x %s\n", sim_->get_pc(), buffer.start()); + PrintF(" 0x%08x %s\n", sim_->get_pc(), buffer.start()); last_pc = sim_->get_pc(); } char* line = ReadLine("sim> "); @@ -270,13 +272,28 @@ void Debugger::Debug() { } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) { if (args == 2) { int32_t value; - if (GetValue(arg1, &value)) { - PrintF("%s: %d 0x%x\n", arg1, value, value); + if (strcmp(arg1, "all") == 0) { + for (int i = 0; i <= 15; i++) { + if (GetValue(reg_names[i], &value)) { + if (i <= 10) { + PrintF("%3s: 0x%08x %d\n", reg_names[i], value, value); + } else { + PrintF("%3s: 0x%08x %d\n", + reg_names[15 + 16 - i], + value, + value); + } + } + } } else { - PrintF("%s unrecognized\n", arg1); + if (GetValue(arg1, &value)) { + PrintF("%s: 0x%08x %d \n", arg1, value, value); + } else { + PrintF("%s unrecognized\n", arg1); + } } } else { - PrintF("print value\n"); + PrintF("print <register>\n"); } } else if ((strcmp(cmd, "po") == 0) || (strcmp(cmd, "printobject") == 0)) { @@ -286,14 +303,17 @@ void Debugger::Debug() { Object* obj = reinterpret_cast<Object*>(value); USE(obj); PrintF("%s: \n", arg1); -#if defined(DEBUG) +#ifdef DEBUG obj->PrintLn(); -#endif // defined(DEBUG) +#else + obj->ShortPrint(); + PrintF("\n"); +#endif } else { PrintF("%s unrecognized\n", arg1); } } else { - PrintF("printobject value\n"); + PrintF("printobject <value>\n"); } } else if (strcmp(cmd, "disasm") == 0) { disasm::NameConverter converter; @@ -325,7 +345,7 @@ void Debugger::Debug() { while (cur < end) { dasm.InstructionDecode(buffer, cur); - PrintF(" 0x%x %s\n", cur, buffer.start()); + PrintF(" 0x%08x %s\n", cur, buffer.start()); cur += Instr::kInstrSize; } } else if (strcmp(cmd, "gdb") == 0) { @@ -343,7 +363,7 @@ void Debugger::Debug() { PrintF("%s unrecognized\n", arg1); } } else { - PrintF("break addr\n"); + PrintF("break <address>\n"); } } else if (strcmp(cmd, "del") == 0) { if (!DeleteBreakpoint(NULL)) { @@ -362,6 +382,30 @@ void Debugger::Debug() { } else { PrintF("Not at debugger stop."); } + } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) { + PrintF("cont\n"); + PrintF(" continue execution (alias 'c')\n"); + PrintF("stepi\n"); + PrintF(" step one instruction (alias 'si')\n"); + PrintF("print <register>\n"); + PrintF(" print register content (alias 'p')\n"); + PrintF(" use register name 'all' to print all registers\n"); + PrintF("printobject <register>\n"); + PrintF(" print an object from a register (alias 'po')\n"); + PrintF("flags\n"); + PrintF(" print flags\n"); + PrintF("disasm [<instructions>]\n"); + PrintF("disasm [[<address>] <instructions>]\n"); + PrintF(" disassemble code, default is 10 instructions from pc\n"); + PrintF("gdb\n"); + PrintF(" enter gdb\n"); + PrintF("break <address>\n"); + PrintF(" 
set a break point on the address\n"); + PrintF("del\n"); + PrintF(" delete the breakpoint\n"); + PrintF("unstop\n"); + PrintF(" ignore the stop instruction at the current location"); + PrintF(" from now on\n"); } else { PrintF("Unknown command: %s\n", cmd); } @@ -576,7 +620,7 @@ int Simulator::ReadW(int32_t addr, Instr* instr) { intptr_t* ptr = reinterpret_cast<intptr_t*>(addr); return *ptr; } - PrintF("Unaligned read at %x\n", addr); + PrintF("Unaligned read at 0x%08x\n", addr); UNIMPLEMENTED(); return 0; } @@ -588,7 +632,7 @@ void Simulator::WriteW(int32_t addr, int value, Instr* instr) { *ptr = value; return; } - PrintF("Unaligned write at %x, pc=%p\n", addr, instr); + PrintF("Unaligned write at 0x%08x, pc=%p\n", addr, instr); UNIMPLEMENTED(); } @@ -598,7 +642,7 @@ uint16_t Simulator::ReadHU(int32_t addr, Instr* instr) { uint16_t* ptr = reinterpret_cast<uint16_t*>(addr); return *ptr; } - PrintF("Unaligned read at %x, pc=%p\n", addr, instr); + PrintF("Unaligned unsigned halfword read at 0x%08x, pc=%p\n", addr, instr); UNIMPLEMENTED(); return 0; } @@ -609,7 +653,7 @@ int16_t Simulator::ReadH(int32_t addr, Instr* instr) { int16_t* ptr = reinterpret_cast<int16_t*>(addr); return *ptr; } - PrintF("Unaligned read at %x\n", addr); + PrintF("Unaligned signed halfword read at 0x%08x\n", addr); UNIMPLEMENTED(); return 0; } @@ -621,7 +665,7 @@ void Simulator::WriteH(int32_t addr, uint16_t value, Instr* instr) { *ptr = value; return; } - PrintF("Unaligned write at %x, pc=%p\n", addr, instr); + PrintF("Unaligned unsigned halfword write at 0x%08x, pc=%p\n", addr, instr); UNIMPLEMENTED(); } @@ -632,7 +676,7 @@ void Simulator::WriteH(int32_t addr, int16_t value, Instr* instr) { *ptr = value; return; } - PrintF("Unaligned write at %x, pc=%p\n", addr, instr); + PrintF("Unaligned halfword write at 0x%08x, pc=%p\n", addr, instr); UNIMPLEMENTED(); } @@ -671,7 +715,7 @@ uintptr_t Simulator::StackLimit() const { // Unsupported instructions use Format to print an error and stop execution. 
void Simulator::Format(Instr* instr, const char* format) { - PrintF("Simulator found unsupported instruction:\n 0x%x: %s\n", + PrintF("Simulator found unsupported instruction:\n 0x%08x: %s\n", instr, format); UNIMPLEMENTED(); } @@ -1416,8 +1460,12 @@ void Simulator::DecodeType01(Instr* instr) { case CMN: { if (instr->HasS()) { - Format(instr, "cmn'cond 'rn, 'shift_rm"); - Format(instr, "cmn'cond 'rn, 'imm"); + // Format(instr, "cmn'cond 'rn, 'shift_rm"); + // Format(instr, "cmn'cond 'rn, 'imm"); + alu_out = rn_val + shifter_operand; + SetNZFlags(alu_out); + SetCFlag(!CarryFrom(rn_val, shifter_operand)); + SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, true)); } else { ASSERT(type == 0); int rm = instr->RmField(); @@ -1566,6 +1614,7 @@ void Simulator::DecodeType2(Instr* instr) { void Simulator::DecodeType3(Instr* instr) { + ASSERT(instr->Bit(4) == 0); int rd = instr->RdField(); int rn = instr->RnField(); int32_t rn_val = get_register(rn); @@ -1605,7 +1654,12 @@ void Simulator::DecodeType3(Instr* instr) { } } if (instr->HasB()) { - UNIMPLEMENTED(); + if (instr->HasL()) { + uint8_t byte = ReadB(addr); + set_register(rd, byte); + } else { + UNIMPLEMENTED(); + } } else { if (instr->HasL()) { set_register(rd, ReadW(addr, instr)); @@ -1630,12 +1684,13 @@ void Simulator::DecodeType4(Instr* instr) { void Simulator::DecodeType5(Instr* instr) { // Format(instr, "b'l'cond 'target"); - int off = (instr->SImmed24Field() << 2) + 8; - intptr_t pc = get_pc(); + int off = (instr->SImmed24Field() << 2); + intptr_t pc_address = get_pc(); if (instr->HasLink()) { - set_register(lr, pc + Instr::kInstrSize); + set_register(lr, pc_address + Instr::kInstrSize); } - set_pc(pc+off); + int pc_reg = get_register(pc); + set_pc(pc_reg + off); } @@ -1654,14 +1709,76 @@ void Simulator::DecodeType7(Instr* instr) { } -// Executes the current instruction. -void Simulator::InstructionDecode(Instr* instr) { - pc_modified_ = false; - if (instr->ConditionField() == special_condition) { +void Simulator::DecodeUnconditional(Instr* instr) { + if (instr->Bits(7, 4) == 0x0B && instr->Bits(27, 25) == 0 && instr->HasL()) { + // Load halfword instruction, either register or immediate offset. + int rd = instr->RdField(); + int rn = instr->RnField(); + int32_t rn_val = get_register(rn); + int32_t addr = 0; + int32_t offset; + if (instr->Bit(22) == 0) { + // Register offset. + int rm = instr->RmField(); + offset = get_register(rm); + } else { + // Immediate offset + offset = instr->Bits(3, 0) + (instr->Bits(11, 8) << 4); + } + switch (instr->PUField()) { + case 0: { + // Post index, negative. + ASSERT(!instr->HasW()); + addr = rn_val; + rn_val -= offset; + set_register(rn, rn_val); + break; + } + case 1: { + // Post index, positive. + ASSERT(!instr->HasW()); + addr = rn_val; + rn_val += offset; + set_register(rn, rn_val); + break; + } + case 2: { + // Pre index or offset, negative. + rn_val -= offset; + addr = rn_val; + if (instr->HasW()) { + set_register(rn, rn_val); + } + break; + } + case 3: { + // Pre index or offset, positive. + rn_val += offset; + addr = rn_val; + if (instr->HasW()) { + set_register(rn, rn_val); + } + break; + } + default: { + // The PU field is a 2-bit field. + UNREACHABLE(); + break; + } + } + // Not sign extending, so load as unsigned. + uint16_t halfword = ReadH(addr, instr); + set_register(rd, halfword); + } else { Debugger dbg(this); dbg.Stop(instr); - return; } +} + + +// Executes the current instruction. 
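The DecodeType5 rewrite above stops hard-coding the ARM pipeline's +8 into the branch offset and instead reads it back out of the pc register, which the simulator keeps consistent with hardware via get_register(pc). Worked numbers, with the branch address invented for the example:

#include <cstdio>

int main() {
  const int kPipelineAhead = 8;  // get_register(pc) yields address + 8
  int simmed24 = -2;             // signed 24-bit immediate from the opcode
  int off = simmed24 * 4;        // word offset to byte offset: -8
  unsigned branch_address = 0x1000;
  unsigned pc_reg = branch_address + kPipelineAhead;
  printf("target = 0x%x\n", pc_reg + off);  // 0x1000: a branch-to-self
  return 0;
}

InstructionDecode, which follows, is also where the new DecodeUnconditional path is dispatched for instructions whose condition field holds the special value.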
+void Simulator::InstructionDecode(Instr* instr) { + pc_modified_ = false; if (::v8::internal::FLAG_trace_sim) { disasm::NameConverter converter; disasm::Disassembler dasm(converter); @@ -1669,9 +1786,11 @@ void Simulator::InstructionDecode(Instr* instr) { v8::internal::EmbeddedVector<char, 256> buffer; dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr)); - PrintF(" 0x%x %s\n", instr, buffer.start()); + PrintF(" 0x%08x %s\n", instr, buffer.start()); } - if (ConditionallyExecute(instr)) { + if (instr->ConditionField() == special_condition) { + DecodeUnconditional(instr); + } else if (ConditionallyExecute(instr)) { switch (instr->TypeField()) { case 0: case 1: { @@ -1747,19 +1866,35 @@ void Simulator::Execute() { } -Object* Simulator::Call(int32_t entry, int32_t p0, int32_t p1, int32_t p2, - int32_t p3, int32_t p4) { - // Setup parameters - set_register(r0, p0); - set_register(r1, p1); - set_register(r2, p2); - set_register(r3, p3); - intptr_t* stack_pointer = reinterpret_cast<intptr_t*>(get_register(sp)); - *(--stack_pointer) = p4; - set_register(sp, reinterpret_cast<int32_t>(stack_pointer)); +int32_t Simulator::Call(byte* entry, int argument_count, ...) { + va_list parameters; + va_start(parameters, argument_count); + // Setup arguments + + // First four arguments passed in registers. + ASSERT(argument_count >= 4); + set_register(r0, va_arg(parameters, int32_t)); + set_register(r1, va_arg(parameters, int32_t)); + set_register(r2, va_arg(parameters, int32_t)); + set_register(r3, va_arg(parameters, int32_t)); + + // Remaining arguments passed on stack. + int original_stack = get_register(sp); + // Compute position of stack on entry to generated code. + int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t)); + if (OS::ActivationFrameAlignment() != 0) { + entry_stack &= -OS::ActivationFrameAlignment(); + } + // Store remaining arguments on stack, from low to high memory. + intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack); + for (int i = 4; i < argument_count; i++) { + stack_argument[i - 4] = va_arg(parameters, int32_t); + } + va_end(parameters); + set_register(sp, entry_stack); // Prepare to execute the code at entry - set_register(pc, entry); + set_register(pc, reinterpret_cast<int32_t>(entry)); // Put down marker for end of simulation. The simulator will stop simulation // when the PC reaches this value. By saving the "end simulation" value into // the LR the simulation stops when returning to this call point. @@ -1793,14 +1928,14 @@ Object* Simulator::Call(int32_t entry, int32_t p0, int32_t p1, int32_t p2, Execute(); // Check that the callee-saved registers have been preserved. - CHECK_EQ(get_register(r4), callee_saved_value); - CHECK_EQ(get_register(r5), callee_saved_value); - CHECK_EQ(get_register(r6), callee_saved_value); - CHECK_EQ(get_register(r7), callee_saved_value); - CHECK_EQ(get_register(r8), callee_saved_value); - CHECK_EQ(get_register(r9), callee_saved_value); - CHECK_EQ(get_register(r10), callee_saved_value); - CHECK_EQ(get_register(r11), callee_saved_value); + CHECK_EQ(callee_saved_value, get_register(r4)); + CHECK_EQ(callee_saved_value, get_register(r5)); + CHECK_EQ(callee_saved_value, get_register(r6)); + CHECK_EQ(callee_saved_value, get_register(r7)); + CHECK_EQ(callee_saved_value, get_register(r8)); + CHECK_EQ(callee_saved_value, get_register(r9)); + CHECK_EQ(callee_saved_value, get_register(r10)); + CHECK_EQ(callee_saved_value, get_register(r11)); // Restore callee-saved registers with the original value. 
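The new variadic Simulator::Call mirrors the calling convention the generated code expects: the first four integer arguments travel in r0..r3, the rest go onto the re-aligned stack from low to high memory. A standalone sketch of walking such an argument list (not the simulator itself):

#include <cstdarg>
#include <cstdio>

static void ShowCall(int argument_count, ...) {
  va_list parameters;
  va_start(parameters, argument_count);
  for (int i = 0; i < argument_count; i++) {
    int value = va_arg(parameters, int);
    printf("arg %d = %d -> %s\n", i, value, i < 4 ? "r0..r3" : "stack");
  }
  va_end(parameters);
}

int main() {
  ShowCall(7, 10, 11, 12, 13, 14, 15, 16);  // regexp entry takes 7 args
  return 0;
}

The restore sequence below then puts the callee-saved registers back and verifies sp before restoring the original stack.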
set_register(r4, r4_val); @@ -1812,8 +1947,12 @@ Object* Simulator::Call(int32_t entry, int32_t p0, int32_t p1, int32_t p2, set_register(r10, r10_val); set_register(r11, r11_val); - int result = get_register(r0); - return reinterpret_cast<Object*>(result); + // Pop stack passed arguments. + CHECK_EQ(entry_stack, get_register(sp)); + set_register(sp, original_stack); + + int32_t result = get_register(r0); + return result; } } } // namespace assembler::arm diff --git a/V8Binding/v8/src/arm/simulator-arm.h b/V8Binding/v8/src/arm/simulator-arm.h index 15b92a5..3917d6a 100644 --- a/V8Binding/v8/src/arm/simulator-arm.h +++ b/V8Binding/v8/src/arm/simulator-arm.h @@ -40,7 +40,7 @@ // When running without a simulator we call the entry directly. #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \ - reinterpret_cast<Object*>(entry(p0, p1, p2, p3, p4)) + (entry(p0, p1, p2, p3, p4)) // Calculated the stack limit beyond which we will throw stack overflow errors. // This macro must be called from a C++ method. It relies on being able to take @@ -49,13 +49,20 @@ #define GENERATED_CODE_STACK_LIMIT(limit) \ (reinterpret_cast<uintptr_t>(this) - limit) + +// Call the generated regexp code directly. The entry function pointer should +// expect seven int/pointer sized arguments and return an int. +#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \ + entry(p0, p1, p2, p3, p4, p5, p6) + #else // defined(__arm__) // When running with the simulator transition into simulated execution at this // point. #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \ - assembler::arm::Simulator::current()->Call((int32_t)entry, (int32_t)p0, \ - (int32_t)p1, (int32_t)p2, (int32_t)p3, (int32_t)p4) + reinterpret_cast<Object*>( \ + assembler::arm::Simulator::current()->Call(FUNCTION_ADDR(entry), 5, \ + p0, p1, p2, p3, p4)) // The simulator has its own stack. Thus it has a different stack limit from // the C-based native code. @@ -63,6 +70,10 @@ (assembler::arm::Simulator::current()->StackLimit()) +#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \ + assembler::arm::Simulator::current()->Call( \ + FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6) + #include "constants-arm.h" @@ -109,11 +120,10 @@ class Simulator { // Call on program start. static void Initialize(); - // V8 generally calls into generated code with 5 parameters. This is a - // convenience function, which sets up the simulator state and grabs the - // result on return. - v8::internal::Object* Call(int32_t entry, int32_t p0, int32_t p1, - int32_t p2, int32_t p3, int32_t p4); + // V8 generally calls into generated JS code with 5 parameters and into + // generated RegExp code with 7 parameters. This is a convenience function, + // which sets up the simulator state and grabs the result on return. + int32_t Call(byte* entry, int argument_count, ...); private: enum special_values { @@ -174,6 +184,7 @@ class Simulator { void DecodeType5(Instr* instr); void DecodeType6(Instr* instr); void DecodeType7(Instr* instr); + void DecodeUnconditional(Instr* instr); // Executes one instruction. void InstructionDecode(Instr* instr); diff --git a/V8Binding/v8/src/arm/stub-cache-arm.cc b/V8Binding/v8/src/arm/stub-cache-arm.cc index 1581428..88d3303 100644 --- a/V8Binding/v8/src/arm/stub-cache-arm.cc +++ b/V8Binding/v8/src/arm/stub-cache-arm.cc @@ -791,7 +791,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object, __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); // Jump to the cached code (tail call). 
- __ IncrementCounter(&Counters::call_global_inline, 1, r1, r3); + __ IncrementCounter(&Counters::call_global_inline, 1, r2, r3); ASSERT(function->is_compiled()); Handle<Code> code(function->code()); ParameterCount expected(function->shared()->formal_parameter_count()); @@ -1344,7 +1344,138 @@ Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object, Object* ConstructStubCompiler::CompileConstructStub( SharedFunctionInfo* shared) { - // Not implemented yet - just jump to generic stub. + // ----------- S t a t e ------------- + // -- r0 : argc + // -- r1 : constructor + // -- lr : return address + // -- [sp] : last argument + // ----------------------------------- + Label generic_stub_call; + + // Use r7 for holding undefined which is used in several places below. + __ LoadRoot(r7, Heap::kUndefinedValueRootIndex); + +#ifdef ENABLE_DEBUGGER_SUPPORT + // Check to see whether there are any break points in the function code. If + // there are jump to the generic constructor stub which calls the actual + // code for the function thereby hitting the break points. + __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); + __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset)); + __ cmp(r2, r7); + __ b(ne, &generic_stub_call); +#endif + + // Load the initial map and verify that it is in fact a map. + // r1: constructor function + // r7: undefined + __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); + __ tst(r2, Operand(kSmiTagMask)); + __ b(eq, &generic_stub_call); + __ CompareObjectType(r2, r3, r4, MAP_TYPE); + __ b(ne, &generic_stub_call); + +#ifdef DEBUG + // Cannot construct functions this way. + // r0: argc + // r1: constructor function + // r2: initial map + // r7: undefined + __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE); + __ Check(ne, "Function constructed by construct stub."); +#endif + + // Now allocate the JSObject in new space. + // r0: argc + // r1: constructor function + // r2: initial map + // r7: undefined + __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset)); + __ AllocateObjectInNewSpace(r3, + r4, + r5, + r6, + &generic_stub_call, + NO_ALLOCATION_FLAGS); + + // Allocated the JSObject, now initialize the fields. Map is set to initial + // map and properties and elements are set to empty fixed array. + // r0: argc + // r1: constructor function + // r2: initial map + // r3: object size (in words) + // r4: JSObject (not tagged) + // r7: undefined + __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex); + __ mov(r5, r4); + ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); + __ str(r2, MemOperand(r5, kPointerSize, PostIndex)); + ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset); + __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); + ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset); + __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); + + // Calculate the location of the first argument. The stack contains only the + // argc arguments. + __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2)); + + // Fill all the in-object properties with undefined. + // r0: argc + // r1: first argument + // r3: object size (in words) + // r4: JSObject (not tagged) + // r5: First in-object property of JSObject (not tagged) + // r7: undefined + // Fill the initialized properties with a constant value or a passed argument + // depending on the this.x = ...; assignment in the function. 
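The loop that follows finds passed arguments through r1, which the code above pointed just past the argument area (r1 = sp + argc * kPointerSize), so argument n sits at r1 - (n + 1) * kPointerSize. A worked example with an assumed stack address:

#include <cstdio>

int main() {
  const int kPointerSize = 4;
  int argc = 3;
  unsigned sp = 0x1000;                    // last argument at [sp]
  unsigned r1 = sp + argc * kPointerSize;  // add r1, sp, r0 LSL 2
  for (int n = 0; n < argc; n++) {
    printf("argument %d at 0x%x\n", n, r1 - (n + 1) * kPointerSize);
  }
  return 0;  // argument 0 at 0x1008, 1 at 0x1004, 2 at 0x1000
}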
+  for (int i = 0; i < shared->this_property_assignments_count(); i++) {
+    if (shared->IsThisPropertyAssignmentArgument(i)) {
+      Label not_passed, next;
+      // Check if the argument assigned to the property is actually passed.
+      int arg_number = shared->GetThisPropertyAssignmentArgument(i);
+      __ cmp(r0, Operand(arg_number));
+      __ b(le, &not_passed);
+      // Argument passed - find it on the stack.
+      __ ldr(r2, MemOperand(r1, (arg_number + 1) * -kPointerSize));
+      __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
+      __ b(&next);
+      __ bind(&not_passed);
+      // Set the property to undefined.
+      __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
+      __ bind(&next);
+    } else {
+      // Set the property to the constant value.
+      Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
+      __ mov(r2, Operand(constant));
+      __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
+    }
+  }
+
+  // Fill the unused in-object property fields with undefined.
+  for (int i = shared->this_property_assignments_count();
+       i < shared->CalculateInObjectProperties();
+       i++) {
+    __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
+  }
+
+  // r0: argc
+  // r4: JSObject (not tagged)
+  // Move argc to r1 and the JSObject to return to r0 and tag it.
+  __ mov(r1, r0);
+  __ mov(r0, r4);
+  __ orr(r0, r0, Operand(kHeapObjectTag));
+
+  // r0: JSObject
+  // r1: argc
+  // Remove caller arguments and receiver from the stack and return.
+  __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2));
+  __ add(sp, sp, Operand(kPointerSize));
+  __ IncrementCounter(&Counters::constructed_objects, 1, r1, r2);
+  __ IncrementCounter(&Counters::constructed_objects_stub, 1, r1, r2);
+  __ Jump(lr);
+
+  // Jump to the generic stub in case the specialized code cannot handle the
+  // construction.
+  __ bind(&generic_stub_call);
   Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
   Handle<Code> generic_construct_stub(code);
   __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
diff --git a/V8Binding/v8/src/arm/virtual-frame-arm.cc b/V8Binding/v8/src/arm/virtual-frame-arm.cc
index 9795860..5b5c870 100644
--- a/V8Binding/v8/src/arm/virtual-frame-arm.cc
+++ b/V8Binding/v8/src/arm/virtual-frame-arm.cc
@@ -141,9 +141,26 @@ void VirtualFrame::AllocateStackSlots() {
     Adjust(count);
     // Initialize stack slots with 'undefined' value.
     __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-    for (int i = 0; i < count; i++) {
-      __ push(ip);
-    }
+  }
+  if (FLAG_check_stack) {
+    __ LoadRoot(r2, Heap::kStackLimitRootIndex);
+  }
+  for (int i = 0; i < count; i++) {
+    __ push(ip);
+  }
+  if (FLAG_check_stack) {
+    // Put the lr setup instruction in the delay slot. The 'sizeof(Instr)' is
+    // added to the implicit 8 byte offset that always applies to operations
+    // with pc and gives a return address 12 bytes down.
+    masm()->add(lr, pc, Operand(sizeof(Instr)));
+    masm()->cmp(sp, Operand(r2));
+    StackCheckStub stub;
+    // Call the stub if lower.
+    masm()->mov(pc,
+                Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
+                        RelocInfo::CODE_TARGET),
+                LeaveCC,
+                lo);
+  }
 }
diff --git a/V8Binding/v8/src/assembler.cc b/V8Binding/v8/src/assembler.cc
index 546490e..3563ebd 100644
--- a/V8Binding/v8/src/assembler.cc
+++ b/V8Binding/v8/src/assembler.cc
@@ -42,6 +42,20 @@
 #include "serialize.h"
 #include "stub-cache.h"
 #include "regexp-stack.h"
+#include "ast.h"
+#include "regexp-macro-assembler.h"
+// Include native regexp-macro-assembler.
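The FLAG_check_stack sequence added to AllocateStackSlots above builds its own return address: reading pc yields the current instruction plus 8, so adding sizeof(Instr) makes lr point 12 bytes down, to the instruction just after the conditional mov into pc that performs the call. A worked example with invented addresses:

#include <cstdio>

int main() {
  const unsigned kInstrSize = 4;
  unsigned add_lr_addr = 0x2000;        // add lr, pc, #4
  unsigned cmp_addr = add_lr_addr + 4;  // cmp sp, r2
  unsigned call_addr = cmp_addr + 4;    // mov pc, <stub>, lo
  unsigned lr = (add_lr_addr + 8) + kInstrSize;
  printf("returns to 0x%x, call site 0x%x\n", lr, call_addr);
  // returns to 0x200c, i.e. the instruction after the call at 0x2008
  return 0;
}

The assembler.cc hunk then resumes below with the architecture-conditional include.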
+#ifdef V8_NATIVE_REGEXP +#if V8_TARGET_ARCH_IA32 +#include "ia32/regexp-macro-assembler-ia32.h" +#elif V8_TARGET_ARCH_X64 +#include "x64/regexp-macro-assembler-x64.h" +#elif V8_TARGET_ARCH_ARM +#include "arm/regexp-macro-assembler-arm.h" +#else // Unknown architecture. +#error "Unknown architecture." +#endif // Target architecture. +#endif // V8_NATIVE_REGEXP namespace v8 { namespace internal { @@ -597,6 +611,34 @@ ExternalReference ExternalReference::new_space_allocation_limit_address() { return ExternalReference(Heap::NewSpaceAllocationLimitAddress()); } +#ifdef V8_NATIVE_REGEXP + +ExternalReference ExternalReference::re_check_stack_guard_state() { + Address function; +#ifdef V8_TARGET_ARCH_X64 + function = FUNCTION_ADDR(RegExpMacroAssemblerX64::CheckStackGuardState); +#elif V8_TARGET_ARCH_IA32 + function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState); +#elif V8_TARGET_ARCH_ARM + function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState); +#else + UNREACHABLE("Unexpected architecture"); +#endif + return ExternalReference(Redirect(function)); +} + +ExternalReference ExternalReference::re_grow_stack() { + return ExternalReference( + Redirect(FUNCTION_ADDR(NativeRegExpMacroAssembler::GrowStack))); +} + +ExternalReference ExternalReference::re_case_insensitive_compare_uc16() { + return ExternalReference(Redirect( + FUNCTION_ADDR(NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16))); +} + +#endif + static double add_two_doubles(double x, double y) { return x + y; diff --git a/V8Binding/v8/src/assembler.h b/V8Binding/v8/src/assembler.h index e217918..827389a 100644 --- a/V8Binding/v8/src/assembler.h +++ b/V8Binding/v8/src/assembler.h @@ -431,6 +431,19 @@ class ExternalReference BASE_EMBEDDED { static ExternalReference debug_step_in_fp_address(); #endif +#ifdef V8_NATIVE_REGEXP + // C functions called from RegExp generated code. + + // Function NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16() + static ExternalReference re_case_insensitive_compare_uc16(); + + // Function RegExpMacroAssembler*::CheckStackGuardState() + static ExternalReference re_check_stack_guard_state(); + + // Function NativeRegExpMacroAssembler::GrowStack() + static ExternalReference re_grow_stack(); +#endif + // This lets you register a function that rewrites all external references. // Used by the ARM simulator to catch calls to external references. 
static void set_redirector(ExternalReferenceRedirector* redirector) { diff --git a/V8Binding/v8/src/code-stubs.h b/V8Binding/v8/src/code-stubs.h index 76ec787..ae86c20 100644 --- a/V8Binding/v8/src/code-stubs.h +++ b/V8Binding/v8/src/code-stubs.h @@ -57,6 +57,7 @@ class CodeStub BASE_EMBEDDED { SetProperty, // ARM only InvokeBuiltin, // ARM only JSExit, // ARM only + RegExpCEntry, // ARM only NUMBER_OF_IDS }; diff --git a/V8Binding/v8/src/d8.cc b/V8Binding/v8/src/d8.cc index 7082280..e4658b1 100644 --- a/V8Binding/v8/src/d8.cc +++ b/V8Binding/v8/src/d8.cc @@ -159,8 +159,7 @@ Handle<Value> Shell::Write(const Arguments& args) { printf(" "); } v8::String::Utf8Value str(args[i]); - const char* cstr = ToCString(str); - printf("%s", cstr); + fwrite(*str, sizeof(**str), str.length(), stdout); } return Undefined(); } @@ -180,15 +179,15 @@ Handle<Value> Shell::Read(const Arguments& args) { Handle<Value> Shell::ReadLine(const Arguments& args) { - char line_buf[256]; - if (fgets(line_buf, sizeof(line_buf), stdin) == NULL) { - return ThrowException(String::New("Error reading line")); + i::SmartPointer<char> line(i::ReadLine("")); + if (*line == NULL) { + return Null(); } - int len = strlen(line_buf); - if (line_buf[len - 1] == '\n') { + size_t len = strlen(*line); + if (len > 0 && line[len - 1] == '\n') { --len; } - return String::New(line_buf, len); + return String::New(*line, len); } diff --git a/V8Binding/v8/src/d8.js b/V8Binding/v8/src/d8.js index 2d52170..7249eca 100644 --- a/V8Binding/v8/src/d8.js +++ b/V8Binding/v8/src/d8.js @@ -1143,7 +1143,7 @@ function DebugResponseDetails(response) { * @constructor */ function ProtocolPackage(json) { - this.packet_ = eval('(' + json + ')'); + this.packet_ = JSON.parse(json); this.refs_ = []; if (this.packet_.refs) { for (var i = 0; i < this.packet_.refs.length; i++) { diff --git a/V8Binding/v8/src/debug.cc b/V8Binding/v8/src/debug.cc index faeb29b..cfbadf3 100644 --- a/V8Binding/v8/src/debug.cc +++ b/V8Binding/v8/src/debug.cc @@ -75,6 +75,9 @@ BreakLocationIterator::BreakLocationIterator(Handle<DebugInfo> debug_info, BreakLocatorType type) { debug_info_ = debug_info; type_ = type; + // Get the stub early to avoid possible GC during iterations. We may need + // this stub to detect debugger calls generated from debugger statements. + debug_break_stub_ = RuntimeStub(Runtime::kDebugBreak, 0).GetCode(); reloc_iterator_ = NULL; reloc_iterator_original_ = NULL; Reset(); // Initialize the rest of the member variables. @@ -126,6 +129,10 @@ void BreakLocationIterator::Next() { return; } if (code->kind() == Code::STUB) { + if (IsDebuggerStatement()) { + break_point_++; + return; + } if (type_ == ALL_BREAK_LOCATIONS) { if (Debug::IsBreakStub(code)) { break_point_++; @@ -238,7 +245,7 @@ void BreakLocationIterator::SetBreakPoint(Handle<Object> break_point_object) { if (!HasBreakPoint()) { SetDebugBreak(); } - ASSERT(IsDebugBreak()); + ASSERT(IsDebugBreak() || IsDebuggerStatement()); // Set the break point information. DebugInfo::SetBreakPoint(debug_info_, code_position(), position(), statement_position(), @@ -258,6 +265,11 @@ void BreakLocationIterator::ClearBreakPoint(Handle<Object> break_point_object) { void BreakLocationIterator::SetOneShot() { + // Debugger statement always calls debugger. No need to modify it. + if (IsDebuggerStatement()) { + return; + } + // If there is a real break point here no more to do. 
if (HasBreakPoint()) { ASSERT(IsDebugBreak()); @@ -270,6 +282,11 @@ void BreakLocationIterator::SetOneShot() { void BreakLocationIterator::ClearOneShot() { + // Debugger statement always calls debugger. No need to modify it. + if (IsDebuggerStatement()) { + return; + } + // If there is a real break point here no more to do. if (HasBreakPoint()) { ASSERT(IsDebugBreak()); @@ -283,6 +300,11 @@ void BreakLocationIterator::ClearOneShot() { void BreakLocationIterator::SetDebugBreak() { + // Debugger statement always calls debugger. No need to modify it. + if (IsDebuggerStatement()) { + return; + } + // If there is already a break point here just return. This might happen if // the same code is flooded with break points twice. Flooding the same // function twice might happen when stepping in a function with an exception @@ -303,6 +325,11 @@ void BreakLocationIterator::SetDebugBreak() { void BreakLocationIterator::ClearDebugBreak() { + // Debugger statement always calls debugger. No need to modify it. + if (IsDebuggerStatement()) { + return; + } + if (RelocInfo::IsJSReturn(rmode())) { // Restore the frame exit code. ClearDebugBreakAtReturn(); @@ -317,10 +344,10 @@ void BreakLocationIterator::ClearDebugBreak() { void BreakLocationIterator::PrepareStepIn() { HandleScope scope; - // Step in can only be prepared if currently positioned on an IC call or - // construct call. + // Step in can only be prepared if currently positioned on an IC call, + // construct call or CallFunction stub call. Address target = rinfo()->target_address(); - Code* code = Code::GetCodeFromTargetAddress(target); + Handle<Code> code(Code::GetCodeFromTargetAddress(target)); if (code->is_call_stub()) { // Step in through IC call is handled by the runtime system. Therefore make // sure that the any current IC is cleared and the runtime system is @@ -334,11 +361,29 @@ void BreakLocationIterator::PrepareStepIn() { rinfo()->set_target_address(stub->entry()); } } else { +#ifdef DEBUG + // All the following stuff is needed only for assertion checks so the code + // is wrapped in ifdef. + Handle<Code> maybe_call_function_stub = code; + if (IsDebugBreak()) { + Address original_target = original_rinfo()->target_address(); + maybe_call_function_stub = + Handle<Code>(Code::GetCodeFromTargetAddress(original_target)); + } + bool is_call_function_stub = + (maybe_call_function_stub->kind() == Code::STUB && + maybe_call_function_stub->major_key() == CodeStub::CallFunction); + // Step in through construct call requires no changes to the running code. // Step in through getters/setters should already be prepared as well // because caller of this function (Debug::PrepareStep) is expected to // flood the top frame's function with one shot breakpoints. - ASSERT(RelocInfo::IsConstructCall(rmode()) || code->is_inline_cache_stub()); + // Step in through CallFunction stub should also be prepared by caller of + // this function (Debug::PrepareStep) which should flood target function + // with breakpoints. 
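Stepping into a CallFunction stub target (handled in Debug::PrepareStep further below) relies on the layout of the expression stack at the call site: the callee sits below the receiver and the pushed arguments. A simplified sketch of the index arithmetic; the frame type here is invented for illustration:

#include <vector>

struct FrameSketch {
  // Expression stack, bottom to top; back() is the top of the stack.
  std::vector<void*> expressions;

  // Stack, top to bottom: argN ... arg0, receiver, function to call,
  // so the callee is argc + 2 slots below the top.
  void* CalleeOfCallFunctionStub(int argc) const {
    int expressions_count = static_cast<int>(expressions.size());
    return expressions[expressions_count - 2 - argc];
  }
};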
+ ASSERT(RelocInfo::IsConstructCall(rmode()) || code->is_inline_cache_stub() + || is_call_function_stub); +#endif } } @@ -409,6 +454,21 @@ void BreakLocationIterator::ClearDebugBreakAtIC() { } +bool BreakLocationIterator::IsDebuggerStatement() { + if (RelocInfo::IsCodeTarget(rmode())) { + Address target = original_rinfo()->target_address(); + Code* code = Code::GetCodeFromTargetAddress(target); + if (code->kind() == Code::STUB) { + CodeStub::Major major_key = code->major_key(); + if (major_key == CodeStub::Runtime) { + return (*debug_break_stub_ == code); + } + } + } + return false; +} + + Object* BreakLocationIterator::BreakPointObjects() { return debug_info_->GetBreakPointObjects(code_position()); } @@ -661,7 +721,7 @@ bool Debug::CompileDebuggerScript(int index) { // Check for caught exceptions. if (caught_exception) { Handle<Object> message = MessageHandler::MakeMessageObject( - "error_loading_debugger", NULL, HandleVector<Object>(&result, 1), + "error_loading_debugger", NULL, Vector<Handle<Object> >::empty(), Handle<String>()); MessageHandler::ReportMessage(NULL, message); return false; @@ -1092,6 +1152,7 @@ void Debug::PrepareStep(StepAction step_action, int step_count) { bool is_call_target = false; bool is_load_or_store = false; bool is_inline_cache_stub = false; + Handle<Code> call_function_stub; if (RelocInfo::IsCodeTarget(it.rinfo()->rmode())) { Address target = it.rinfo()->target_address(); Code* code = Code::GetCodeFromTargetAddress(target); @@ -1102,6 +1163,22 @@ void Debug::PrepareStep(StepAction step_action, int step_count) { is_inline_cache_stub = true; is_load_or_store = !is_call_target; } + + // Check if target code is CallFunction stub. + Code* maybe_call_function_stub = code; + // If there is a breakpoint at this line look at the original code to + // check if it is a CallFunction stub. + if (it.IsDebugBreak()) { + Address original_target = it.original_rinfo()->target_address(); + maybe_call_function_stub = + Code::GetCodeFromTargetAddress(original_target); + } + if (maybe_call_function_stub->kind() == Code::STUB && + maybe_call_function_stub->major_key() == CodeStub::CallFunction) { + // Save reference to the code as we may need it to find out arguments + // count for 'step in' later. + call_function_stub = Handle<Code>(maybe_call_function_stub); + } } // If this is the last break code target step out is the only possibility. @@ -1114,7 +1191,8 @@ void Debug::PrepareStep(StepAction step_action, int step_count) { JSFunction* function = JSFunction::cast(frames_it.frame()->function()); FloodWithOneShot(Handle<SharedFunctionInfo>(function->shared())); } - } else if (!(is_inline_cache_stub || RelocInfo::IsConstructCall(it.rmode())) + } else if (!(is_inline_cache_stub || RelocInfo::IsConstructCall(it.rmode()) || + !call_function_stub.is_null()) || step_action == StepNext || step_action == StepMin) { // Step next or step min. @@ -1126,6 +1204,45 @@ void Debug::PrepareStep(StepAction step_action, int step_count) { debug_info->code()->SourceStatementPosition(frame->pc()); thread_local_.last_fp_ = frame->fp(); } else { + // If it's CallFunction stub ensure target function is compiled and flood + // it with one shot breakpoints. + if (!call_function_stub.is_null()) { + // Find out number of arguments from the stub minor key. + // Reverse lookup required as the minor key cannot be retrieved + // from the code object. 
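The reverse lookup that follows is needed because a code object does not store its minor key; only the combined stub key does. A sketch of how such a key can pack major and minor parts into one small integer; the bit width here is an assumption for illustration, not V8's exact layout:

static const int kMajorBits = 5;  // Assumed width of the major-key field.

static inline int KeyFromMajorAndMinor(int major, int minor) {
  return (minor << kMajorBits) | major;
}

static inline int MajorKeyFromKey(int key) {
  return key & ((1 << kMajorBits) - 1);
}

static inline int MinorKeyFromKey(int key) {
  return key >> kMajorBits;
}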
+ Handle<Object> obj( + Heap::code_stubs()->SlowReverseLookup(*call_function_stub)); + ASSERT(*obj != Heap::undefined_value()); + ASSERT(obj->IsSmi()); + // Get the STUB key and extract major and minor key. + uint32_t key = Smi::cast(*obj)->value(); + // Argc in the stub is the number of arguments passed - not the + // expected arguments of the called function. + int call_function_arg_count = CodeStub::MinorKeyFromKey(key); + ASSERT(call_function_stub->major_key() == + CodeStub::MajorKeyFromKey(key)); + + // Find target function on the expression stack. + // Expression stack looks like this (top to bottom): + // argN + // ... + // arg0 + // Receiver + // Function to call + int expressions_count = frame->ComputeExpressionsCount(); + ASSERT(expressions_count - 2 - call_function_arg_count >= 0); + Object* fun = frame->GetExpression( + expressions_count - 2 - call_function_arg_count); + if (fun->IsJSFunction()) { + Handle<JSFunction> js_function(JSFunction::cast(fun)); + // Don't step into builtins. + if (!js_function->IsBuiltin()) { + // It will also compile the target function if it's not compiled yet. + FloodWithOneShot(Handle<SharedFunctionInfo>(js_function->shared())); + } + } + } + + // Fill the current function with one-shot break points even for step in on + // a call target as the function called might be a native function for + // which step in will not stop. It also prepares for stepping in @@ -2001,9 +2118,7 @@ void Debugger::ProcessDebugEvent(v8::DebugEvent event, event_listener_data_.location() }; Handle<Object> result = Execution::TryCall(fun, Top::global(), argc, argv, &caught_exception); - if (caught_exception) { - // Silently ignore exceptions from debug event listeners. - } + // Silently ignore exceptions from debug event listeners. } } } diff --git a/V8Binding/v8/src/debug.h b/V8Binding/v8/src/debug.h index 5b0273a..38789e1 100644 --- a/V8Binding/v8/src/debug.h +++ b/V8Binding/v8/src/debug.h @@ -119,6 +119,8 @@ class BreakLocationIterator { return reloc_iterator_original_->rinfo()->rmode(); } + bool IsDebuggerStatement(); + protected: bool RinfoDone() const; void RinfoNext(); @@ -128,6 +130,7 @@ class BreakLocationIterator { int position_; int statement_position_; Handle<DebugInfo> debug_info_; + Handle<Code> debug_break_stub_; RelocIterator* reloc_iterator_; RelocIterator* reloc_iterator_original_; diff --git a/V8Binding/v8/src/execution.cc b/V8Binding/v8/src/execution.cc index 18c5bb8..04ec905 100644 --- a/V8Binding/v8/src/execution.cc +++ b/V8Binding/v8/src/execution.cc @@ -156,9 +156,12 @@ Handle<Object> Execution::TryCall(Handle<JSFunction> func, ASSERT(catcher.HasCaught()); ASSERT(Top::has_pending_exception()); ASSERT(Top::external_caught_exception()); - bool is_bottom_call = HandleScopeImplementer::instance()->CallDepthIsZero(); - Top::OptionalRescheduleException(is_bottom_call, true); - result = v8::Utils::OpenHandle(*catcher.Exception()); + if (Top::pending_exception() == Heap::termination_exception()) { + result = Factory::termination_exception(); + } else { + result = v8::Utils::OpenHandle(*catcher.Exception()); + } + Top::OptionalRescheduleException(true); } ASSERT(!Top::has_pending_exception()); diff --git a/V8Binding/v8/src/globals.h b/V8Binding/v8/src/globals.h index 3b8ee92..efe0127 100644 --- a/V8Binding/v8/src/globals.h +++ b/V8Binding/v8/src/globals.h @@ -47,7 +47,14 @@ namespace internal { #define V8_HOST_ARCH_ARM 1 #define V8_HOST_ARCH_32_BIT 1 #else -#error Your architecture was not detected as supported by v8 +#error Your host architecture was not detected as 
supported by v8 +#endif + +#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32) +#define V8_TARGET_CAN_READ_UNALIGNED 1 +#elif V8_TARGET_ARCH_ARM +#else +#error Your target architecture is not supported by v8 #endif // Support for alternative bool type. This is only enabled if the code is diff --git a/V8Binding/v8/src/heap.cc b/V8Binding/v8/src/heap.cc index 3b2bd40..c29815e 100644 --- a/V8Binding/v8/src/heap.cc +++ b/V8Binding/v8/src/heap.cc @@ -39,6 +39,9 @@ #include "scanner.h" #include "scopeinfo.h" #include "v8threads.h" +#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP +#include "regexp-macro-assembler.h" +#endif namespace v8 { namespace internal { @@ -254,6 +257,7 @@ void Heap::ReportStatisticsAfterGC() { void Heap::GarbageCollectionPrologue() { + TranscendentalCache::Clear(); gc_count_++; #ifdef DEBUG ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC); @@ -1320,6 +1324,14 @@ void Heap::CreateCEntryStub() { } +#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP +void Heap::CreateRegExpCEntryStub() { + RegExpCEntryStub stub; + set_re_c_entry_code(*stub.GetCode()); +} +#endif + + void Heap::CreateCEntryDebugBreakStub() { CEntryDebugBreakStub stub; set_c_entry_debug_break_code(*stub.GetCode()); @@ -1356,6 +1368,9 @@ void Heap::CreateFixedStubs() { Heap::CreateCEntryDebugBreakStub(); Heap::CreateJSEntryStub(); Heap::CreateJSConstructEntryStub(); +#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP + Heap::CreateRegExpCEntryStub(); +#endif } @@ -3253,15 +3268,13 @@ bool Heap::Setup(bool create_heap_objects) { void Heap::SetStackLimit(intptr_t limit) { - // We don't use the stack limit in the roots array on x86-64 yet, but since - // pointers are generally out of range of Smis we should set the value either. -#if !V8_HOST_ARCH_64_BIT + // On 64 bit machines, pointers are generally out of range of Smis. We write + // something that looks like an out of range Smi to the GC. + // Set up the special root array entry containing the stack guard. // This is actually an address, but the tag makes the GC ignore it. - set_stack_limit(Smi::FromInt(limit >> kSmiTagSize)); -#else - set_stack_limit(Smi::FromInt(0)); -#endif + roots_[kStackLimitRootIndex] = + reinterpret_cast<Object*>((limit & ~kSmiTagMask) | kSmiTag); } @@ -3974,4 +3987,30 @@ bool Heap::GarbageCollectionGreedyCheck() { } #endif + +TranscendentalCache::TranscendentalCache(TranscendentalCache::Type t) + : type_(t) { + uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't + uint32_t in1 = 0xffffffffu; // generated by the FPU. + for (int i = 0; i < kCacheSize; i++) { + elements_[i].in[0] = in0; + elements_[i].in[1] = in1; + elements_[i].output = NULL; + } +} + + +TranscendentalCache* TranscendentalCache::caches_[kNumberOfCaches]; + + +void TranscendentalCache::Clear() { + for (int i = 0; i < kNumberOfCaches; i++) { + if (caches_[i] != NULL) { + delete caches_[i]; + caches_[i] = NULL; + } + } +} + + } } // namespace v8::internal diff --git a/V8Binding/v8/src/heap.h b/V8Binding/v8/src/heap.h index 212dfa7..028dd11 100644 --- a/V8Binding/v8/src/heap.h +++ b/V8Binding/v8/src/heap.h @@ -28,15 +28,31 @@ #ifndef V8_HEAP_H_ #define V8_HEAP_H_ +#include <math.h> + #include "zone-inl.h" + namespace v8 { namespace internal { // Defines all the roots in Heap. -#define STRONG_ROOT_LIST(V) \ - V(Map, meta_map, MetaMap) \ +#define UNCONDITIONAL_STRONG_ROOT_LIST(V) \ + /* Cluster the most popular ones in a few cache lines here at the top. 
*/ \ + V(Smi, stack_limit, StackLimit) \ + V(Object, undefined_value, UndefinedValue) \ + V(Object, the_hole_value, TheHoleValue) \ + V(Object, null_value, NullValue) \ + V(Object, true_value, TrueValue) \ + V(Object, false_value, FalseValue) \ V(Map, heap_number_map, HeapNumberMap) \ + V(Map, global_context_map, GlobalContextMap) \ + V(Map, fixed_array_map, FixedArrayMap) \ + V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \ + V(Map, meta_map, MetaMap) \ + V(Object, termination_exception, TerminationException) \ + V(Map, hash_table_map, HashTableMap) \ + V(FixedArray, empty_fixed_array, EmptyFixedArray) \ V(Map, short_string_map, ShortStringMap) \ V(Map, medium_string_map, MediumStringMap) \ V(Map, long_string_map, LongStringMap) \ @@ -95,11 +111,8 @@ namespace internal { V(Map, undetectable_long_ascii_string_map, UndetectableLongAsciiStringMap) \ V(Map, byte_array_map, ByteArrayMap) \ V(Map, pixel_array_map, PixelArrayMap) \ - V(Map, fixed_array_map, FixedArrayMap) \ - V(Map, hash_table_map, HashTableMap) \ V(Map, context_map, ContextMap) \ V(Map, catch_context_map, CatchContextMap) \ - V(Map, global_context_map, GlobalContextMap) \ V(Map, code_map, CodeMap) \ V(Map, oddball_map, OddballMap) \ V(Map, global_property_cell_map, GlobalPropertyCellMap) \ @@ -109,17 +122,9 @@ namespace internal { V(Map, one_pointer_filler_map, OnePointerFillerMap) \ V(Map, two_pointer_filler_map, TwoPointerFillerMap) \ V(Object, nan_value, NanValue) \ - V(Object, undefined_value, UndefinedValue) \ - V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \ - V(Object, termination_exception, TerminationException) \ V(Object, minus_zero_value, MinusZeroValue) \ - V(Object, null_value, NullValue) \ - V(Object, true_value, TrueValue) \ - V(Object, false_value, FalseValue) \ V(String, empty_string, EmptyString) \ - V(FixedArray, empty_fixed_array, EmptyFixedArray) \ V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \ - V(Object, the_hole_value, TheHoleValue) \ V(Map, neander_map, NeanderMap) \ V(JSObject, message_listeners, MessageListeners) \ V(Proxy, prototype_accessors, PrototypeAccessors) \ @@ -133,8 +138,14 @@ namespace internal { V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \ V(FixedArray, natives_source_cache, NativesSourceCache) \ V(Object, last_script_id, LastScriptId) \ - V(Smi, stack_limit, StackLimit) +#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP +#define STRONG_ROOT_LIST(V) \ + UNCONDITIONAL_STRONG_ROOT_LIST(V) \ + V(Code, re_c_entry_code, RegExpCEntryCode) +#else +#define STRONG_ROOT_LIST(V) UNCONDITIONAL_STRONG_ROOT_LIST(V) +#endif #define ROOT_LIST(V) \ STRONG_ROOT_LIST(V) \ @@ -1024,6 +1035,8 @@ class Heap : public AllStatic { static void CreateCEntryDebugBreakStub(); static void CreateJSEntryStub(); static void CreateJSConstructEntryStub(); + static void CreateRegExpCEntryStub(); + static void CreateFixedStubs(); static Object* CreateOddball(Map* map, @@ -1509,6 +1522,91 @@ class GCTracer BASE_EMBEDDED { int previous_marked_count_; }; + +class TranscendentalCache { + public: + enum Type {ACOS, ASIN, ATAN, COS, EXP, LOG, SIN, TAN, kNumberOfCaches}; + + explicit TranscendentalCache(Type t); + + // Returns a heap number with f(input), where f is a math function specified + // by the 'type' argument. 
+ static inline Object* Get(Type type, double input) { + TranscendentalCache* cache = caches_[type]; + if (cache == NULL) { + caches_[type] = cache = new TranscendentalCache(type); + } + return cache->Get(input); + } + + // The cache contains raw Object pointers. This method disposes of + // them before a garbage collection. + static void Clear(); + + private: + inline Object* Get(double input) { + Converter c; + c.dbl = input; + int hash = Hash(c); + Element e = elements_[hash]; + if (e.in[0] == c.integers[0] && + e.in[1] == c.integers[1]) { + ASSERT(e.output != NULL); + return e.output; + } + double answer = Calculate(input); + Object* heap_number = Heap::AllocateHeapNumber(answer); + if (!heap_number->IsFailure()) { + elements_[hash].in[0] = c.integers[0]; + elements_[hash].in[1] = c.integers[1]; + elements_[hash].output = heap_number; + } + return heap_number; + } + + inline double Calculate(double input) { + switch (type_) { + case ACOS: + return acos(input); + case ASIN: + return asin(input); + case ATAN: + return atan(input); + case COS: + return cos(input); + case EXP: + return exp(input); + case LOG: + return log(input); + case SIN: + return sin(input); + case TAN: + return tan(input); + default: + return 0.0; // Never happens. + } + } + static const int kCacheSize = 512; + struct Element { + uint32_t in[2]; + Object* output; + }; + union Converter { + double dbl; + uint32_t integers[2]; + }; + inline static int Hash(const Converter& c) { + uint32_t hash = (c.integers[0] ^ c.integers[1]); + hash ^= hash >> 16; + hash ^= hash >> 8; + return (hash & (kCacheSize - 1)); + } + static TranscendentalCache* caches_[kNumberOfCaches]; + Element elements_[kCacheSize]; + Type type_; +}; + + } } // namespace v8::internal #endif // V8_HEAP_H_ diff --git a/V8Binding/v8/src/ia32/builtins-ia32.cc b/V8Binding/v8/src/ia32/builtins-ia32.cc index 55dc92d..7793e49 100644 --- a/V8Binding/v8/src/ia32/builtins-ia32.cc +++ b/V8Binding/v8/src/ia32/builtins-ia32.cc @@ -1,4 +1,4 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Copyright 2006-2009 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -129,11 +129,12 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // eax: initial map __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset)); __ shl(edi, kPointerSizeLog2); - // Make sure that the maximum heap object size will never cause us - // problem here, because it is always greater than the maximum - // instance size that can be represented in a byte. - ASSERT(Heap::MaxObjectSizeInPagedSpace() >= JSObject::kMaxInstanceSize); - __ AllocateObjectInNewSpace(edi, ebx, edi, no_reg, &rt_call, false); + __ AllocateObjectInNewSpace(edi, + ebx, + edi, + no_reg, + &rt_call, + NO_ALLOCATION_FLAGS); // Allocated the JSObject, now initialize the fields. 
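The TranscendentalCache above keys each entry on the raw 64-bit pattern of the input double, which is why a NaN bit pattern the FPU never produces can serve as the empty-slot sentinel. Its hash, extracted as a standalone function:

#include <stdint.h>

static const int kCacheSize = 512;  // Must stay a power of two for the mask.

static int HashDouble(double input) {
  union {
    double dbl;
    uint32_t integers[2];
  } c;
  c.dbl = input;
  // Fold the two 32-bit halves, then fold high bits into the index range.
  uint32_t hash = c.integers[0] ^ c.integers[1];
  hash ^= hash >> 16;
  hash ^= hash >> 8;
  return static_cast<int>(hash & (kCacheSize - 1));
}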
// eax: initial map // ebx: JSObject @@ -188,8 +189,6 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // ebx: JSObject // edi: start of next object (will be start of FixedArray) // edx: number of elements in properties array - ASSERT(Heap::MaxObjectSizeInPagedSpace() > - (FixedArray::kHeaderSize + 255*kPointerSize)); __ AllocateObjectInNewSpace(FixedArray::kHeaderSize, times_pointer_size, edx, @@ -197,7 +196,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { ecx, no_reg, &undo_allocation, - true); + RESULT_CONTAINS_TOP); // Initialize the FixedArray. // ebx: JSObject @@ -245,10 +244,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { } // Allocate the new receiver object using the runtime call. - // edi: function (constructor) __ bind(&rt_call); // Must restore edi (constructor) before calling runtime. __ mov(edi, Operand(esp, 0)); + // edi: function (constructor) __ push(edi); __ CallRuntime(Runtime::kNewObject, 1); __ mov(ebx, Operand(eax)); // store result in ebx diff --git a/V8Binding/v8/src/ia32/codegen-ia32.cc b/V8Binding/v8/src/ia32/codegen-ia32.cc index a9face1..c2728d7 100644 --- a/V8Binding/v8/src/ia32/codegen-ia32.cc +++ b/V8Binding/v8/src/ia32/codegen-ia32.cc @@ -6954,12 +6954,11 @@ void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm, scratch1, scratch2, need_gc, - false); + TAG_OBJECT); - // Set the map and tag the result. - __ mov(Operand(result, HeapObject::kMapOffset), + // Set the map. + __ mov(FieldOperand(result, HeapObject::kMapOffset), Immediate(Factory::heap_number_map())); - __ or_(Operand(result), Immediate(kHeapObjectTag)); } diff --git a/V8Binding/v8/src/ia32/ic-ia32.cc b/V8Binding/v8/src/ia32/ic-ia32.cc index fa9b8a2..e39808b 100644 --- a/V8Binding/v8/src/ia32/ic-ia32.cc +++ b/V8Binding/v8/src/ia32/ic-ia32.cc @@ -604,7 +604,7 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) { __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset)); __ movzx_b(eax, FieldOperand(ebx, Map::kInstanceTypeOffset)); __ cmp(eax, FIRST_JS_OBJECT_TYPE); - __ j(less, &miss, not_taken); + __ j(below, &miss, not_taken); // If this assert fails, we have to check upper bound too. ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); diff --git a/V8Binding/v8/src/ia32/macro-assembler-ia32.cc b/V8Binding/v8/src/ia32/macro-assembler-ia32.cc index 754b74a..241275d 100644 --- a/V8Binding/v8/src/ia32/macro-assembler-ia32.cc +++ b/V8Binding/v8/src/ia32/macro-assembler-ia32.cc @@ -1,4 +1,4 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Copyright 2006-2009 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -620,18 +620,22 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, } -void MacroAssembler::LoadAllocationTopHelper( - Register result, - Register result_end, - Register scratch, - bool result_contains_top_on_entry) { +void MacroAssembler::LoadAllocationTopHelper(Register result, + Register result_end, + Register scratch, + AllocationFlags flags) { ExternalReference new_space_allocation_top = ExternalReference::new_space_allocation_top_address(); // Just return if allocation top is already known. - if (result_contains_top_on_entry) { + if ((flags & RESULT_CONTAINS_TOP) != 0) { // No use of scratch if allocation top is provided. ASSERT(scratch.is(no_reg)); +#ifdef DEBUG + // Assert that result actually contains top on entry. 
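The bool-to-flags refactor running through these macro-assembler changes exists so call sites name their intent; the values mirror the AllocationFlags enum added to macro-assembler.h later in this patch. How the bits combine and are tested:

enum AllocationFlags {
  NO_ALLOCATION_FLAGS = 0,
  TAG_OBJECT = 1 << 0,           // Return the result already heap-tagged.
  RESULT_CONTAINS_TOP = 1 << 1   // result register already holds the top.
};

static bool ShouldTagResult(int flags) {
  return (flags & TAG_OBJECT) != 0;
}

// Example: an allocation that both trusts the register and wants tagging.
static const int kBothFlags = TAG_OBJECT | RESULT_CONTAINS_TOP;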
+ cmp(result, Operand::StaticVariable(new_space_allocation_top)); + Check(equal, "Unexpected allocation top"); +#endif return; } @@ -659,20 +663,17 @@ void MacroAssembler::UpdateAllocationTopHelper(Register result_end, } } -void MacroAssembler::AllocateObjectInNewSpace( - int object_size, - Register result, - Register result_end, - Register scratch, - Label* gc_required, - bool result_contains_top_on_entry) { + +void MacroAssembler::AllocateObjectInNewSpace(int object_size, + Register result, + Register result_end, + Register scratch, + Label* gc_required, + AllocationFlags flags) { ASSERT(!result.is(result_end)); // Load address of new object into result. - LoadAllocationTopHelper(result, - result_end, - scratch, - result_contains_top_on_entry); + LoadAllocationTopHelper(result, result_end, scratch, flags); // Calculate new top and bail out if new space is exhausted. ExternalReference new_space_allocation_limit = @@ -683,25 +684,26 @@ void MacroAssembler::AllocateObjectInNewSpace( // Update allocation top. UpdateAllocationTopHelper(result_end, scratch); + + // Tag result if requested. + if ((flags & TAG_OBJECT) != 0) { + or_(Operand(result), Immediate(kHeapObjectTag)); + } } -void MacroAssembler::AllocateObjectInNewSpace( - int header_size, - ScaleFactor element_size, - Register element_count, - Register result, - Register result_end, - Register scratch, - Label* gc_required, - bool result_contains_top_on_entry) { +void MacroAssembler::AllocateObjectInNewSpace(int header_size, + ScaleFactor element_size, + Register element_count, + Register result, + Register result_end, + Register scratch, + Label* gc_required, + AllocationFlags flags) { ASSERT(!result.is(result_end)); // Load address of new object into result. - LoadAllocationTopHelper(result, - result_end, - scratch, - result_contains_top_on_entry); + LoadAllocationTopHelper(result, result_end, scratch, flags); // Calculate new top and bail out if new space is exhausted. ExternalReference new_space_allocation_limit = @@ -712,24 +714,24 @@ void MacroAssembler::AllocateObjectInNewSpace( // Update allocation top. UpdateAllocationTopHelper(result_end, scratch); + + // Tag result if requested. + if ((flags & TAG_OBJECT) != 0) { + or_(Operand(result), Immediate(kHeapObjectTag)); + } } -void MacroAssembler::AllocateObjectInNewSpace( - Register object_size, - Register result, - Register result_end, - Register scratch, - Label* gc_required, - bool result_contains_top_on_entry) { +void MacroAssembler::AllocateObjectInNewSpace(Register object_size, + Register result, + Register result_end, + Register scratch, + Label* gc_required, + AllocationFlags flags) { ASSERT(!result.is(result_end)); // Load address of new object into result. - LoadAllocationTopHelper(result, - result_end, - scratch, - result_contains_top_on_entry); - + LoadAllocationTopHelper(result, result_end, scratch, flags); // Calculate new top and bail out if new space is exhausted. ExternalReference new_space_allocation_limit = @@ -743,6 +745,11 @@ void MacroAssembler::AllocateObjectInNewSpace( // Update allocation top. UpdateAllocationTopHelper(result_end, scratch); + + // Tag result if requested. + if ((flags & TAG_OBJECT) != 0) { + or_(Operand(result), Immediate(kHeapObjectTag)); + } } diff --git a/V8Binding/v8/src/ia32/macro-assembler-ia32.h b/V8Binding/v8/src/ia32/macro-assembler-ia32.h index f10ec16..21b2eb5 100644 --- a/V8Binding/v8/src/ia32/macro-assembler-ia32.h +++ b/V8Binding/v8/src/ia32/macro-assembler-ia32.h @@ -1,4 +1,4 @@ -// Copyright 2006-2008 the V8 project authors. 
All rights reserved. +// Copyright 2006-2009 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -37,25 +37,6 @@ namespace internal { class JumpTarget; -// Helper types to make flags easier to read at call sites. -enum InvokeFlag { - CALL_FUNCTION, - JUMP_FUNCTION -}; - -enum CodeLocation { - IN_JAVASCRIPT, - IN_JS_ENTRY, - IN_C_ENTRY -}; - -enum HandlerType { - TRY_CATCH_HANDLER, - TRY_FINALLY_HANDLER, - JS_ENTRY_HANDLER -}; - - // MacroAssembler implements a collection of frequently used macros. class MacroAssembler: public Assembler { public: @@ -201,7 +182,7 @@ class MacroAssembler: public Assembler { Register result_end, Register scratch, Label* gc_required, - bool result_contains_top_on_entry); + AllocationFlags flags); void AllocateObjectInNewSpace(int header_size, ScaleFactor element_size, @@ -210,14 +191,14 @@ class MacroAssembler: public Assembler { Register result_end, Register scratch, Label* gc_required, - bool result_contains_top_on_entry); + AllocationFlags flags); void AllocateObjectInNewSpace(Register object_size, Register result, Register result_end, Register scratch, Label* gc_required, - bool result_contains_top_on_entry); + AllocationFlags flags); // Undo allocation in new space. The object passed and objects allocated after // it will no longer be allocated. Make sure that no pointers are left to the @@ -350,7 +331,7 @@ class MacroAssembler: public Assembler { void LoadAllocationTopHelper(Register result, Register result_end, Register scratch, - bool result_contains_top_on_entry); + AllocationFlags flags); void UpdateAllocationTopHelper(Register result_end, Register scratch); }; diff --git a/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.cc b/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.cc index bc81076..7af4e89 100644 --- a/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.cc +++ b/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.cc @@ -102,6 +102,7 @@ RegExpMacroAssemblerIA32::RegExpMacroAssemblerIA32( success_label_(), backtrack_label_(), exit_label_() { + ASSERT_EQ(0, registers_to_save % 2); __ jmp(&entry_label_); // We'll write the entry code later. __ bind(&start_label_); // And then continue from here. } @@ -337,8 +338,9 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase( __ add(edx, Operand(esi)); __ mov(Operand(esp, 0 * kPointerSize), edx); - Address function_address = FUNCTION_ADDR(&CaseInsensitiveCompareUC16); - CallCFunction(function_address, argument_count); + ExternalReference compare = + ExternalReference::re_case_insensitive_compare_uc16(); + CallCFunction(compare, argument_count); // Pop original values before reacting on result value. __ pop(ebx); __ pop(backtrack_stackpointer()); @@ -745,7 +747,8 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) { __ lea(eax, Operand(ebp, kStackHighEnd)); __ mov(Operand(esp, 1 * kPointerSize), eax); __ mov(Operand(esp, 0 * kPointerSize), backtrack_stackpointer()); - CallCFunction(FUNCTION_ADDR(&GrowStack), num_arguments); + ExternalReference grow_stack = ExternalReference::re_grow_stack(); + CallCFunction(grow_stack, num_arguments); // If return NULL, we have failed to grow the stack, and // must exit with a stack-overflow exception. 
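On success, growing the backtrack stack moves its contents, so the caller's stack pointer must be rebased to keep the same offset from the new base (the full GrowStack implementation now lives in regexp-macro-assembler.cc further below). A worked example with made-up addresses:

#include <stdint.h>
#include <assert.h>

int main() {
  uintptr_t old_base = 0x2000;     // Backtrack stack grows downward.
  uintptr_t old_pointer = 0x1f80;  // 0x80 bytes currently in use.
  uintptr_t new_base = 0x9000;     // After reallocating at double capacity.
  uintptr_t content_size = old_base - old_pointer;
  uintptr_t new_pointer = new_base - content_size;
  assert(new_pointer == 0x8f80);   // Same 0x80 bytes in use at the new spot.
  return 0;
}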
__ or_(eax, Operand(eax)); @@ -817,7 +820,9 @@ void RegExpMacroAssemblerIA32::LoadCurrentCharacter(int cp_offset, int characters) { ASSERT(cp_offset >= -1); // ^ and \b can look behind one character. ASSERT(cp_offset < (1<<30)); // Be sane! (And ensure negation works) - CheckPosition(cp_offset + characters - 1, on_end_of_input); + if (check_bounds) { + CheckPosition(cp_offset + characters - 1, on_end_of_input); + } LoadCurrentCharacterUnchecked(cp_offset, characters); } @@ -913,7 +918,9 @@ void RegExpMacroAssemblerIA32::CallCheckStackGuardState(Register scratch) { // Next address on the stack (will be address of return address). __ lea(eax, Operand(esp, -kPointerSize)); __ mov(Operand(esp, 0 * kPointerSize), eax); - CallCFunction(FUNCTION_ADDR(&CheckStackGuardState), num_arguments); + ExternalReference check_stack_guard = + ExternalReference::re_check_stack_guard_state(); + CallCFunction(check_stack_guard, num_arguments); } @@ -996,22 +1003,6 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address, } -Address RegExpMacroAssemblerIA32::GrowStack(Address stack_pointer, - Address* stack_base) { - size_t size = RegExpStack::stack_capacity(); - Address old_stack_base = RegExpStack::stack_base(); - ASSERT(old_stack_base == *stack_base); - ASSERT(stack_pointer <= old_stack_base); - ASSERT(static_cast<size_t>(old_stack_base - stack_pointer) <= size); - Address new_stack_base = RegExpStack::EnsureCapacity(size * 2); - if (new_stack_base == NULL) { - return NULL; - } - *stack_base = new_stack_base; - return new_stack_base - (old_stack_base - stack_pointer); -} - - Operand RegExpMacroAssemblerIA32::register_location(int register_index) { ASSERT(register_index < (1<<30)); if (num_registers_ <= register_index) { @@ -1135,9 +1126,9 @@ void RegExpMacroAssemblerIA32::FrameAlign(int num_arguments, Register scratch) { } -void RegExpMacroAssemblerIA32::CallCFunction(Address function_address, +void RegExpMacroAssemblerIA32::CallCFunction(ExternalReference function, int num_arguments) { - __ mov(Operand(eax), Immediate(reinterpret_cast<int32_t>(function_address))); + __ mov(Operand(eax), Immediate(function)); __ call(Operand(eax)); if (OS::ActivationFrameAlignment() != 0) { __ mov(esp, Operand(esp, num_arguments * kPointerSize)); @@ -1172,6 +1163,10 @@ void RegExpMacroAssemblerIA32::LoadCurrentCharacterUnchecked(int cp_offset, } +void RegExpCEntryStub::Generate(MacroAssembler* masm_) { + __ int3(); // Unused on ia32. +} + #undef __ #endif // V8_NATIVE_REGEXP diff --git a/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.h b/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.h index d114392..5ffd462 100644 --- a/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.h +++ b/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.h @@ -107,6 +107,13 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler { virtual void ClearRegisters(int reg_from, int reg_to); virtual void WriteStackPointerToRegister(int reg); + // Called from RegExp if the stack-guard is triggered. + // If the code object is relocated, the return address is fixed before + // returning. + static int CheckStackGuardState(Address* return_address, + Code* re_code, + Address re_frame); + private: // Offsets from ebp of function parameters and stored registers. static const int kFramePointer = 0; @@ -144,23 +151,9 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler { // Check whether we are exceeding the stack limit on the backtrack stack. 
void CheckStackLimit(); - // Called from RegExp if the stack-guard is triggered. - // If the code object is relocated, the return address is fixed before - // returning. - static int CheckStackGuardState(Address* return_address, - Code* re_code, - Address re_frame); - // Generate a call to CheckStackGuardState. void CallCheckStackGuardState(Register scratch); - // Called from RegExp if the backtrack stack limit is hit. - // Tries to expand the stack. Returns the new stack-pointer if - // successful, and updates the stack_top address, or returns 0 if unable - // to grow the stack. - // This function must not trigger a garbage collection. - static Address GrowStack(Address stack_pointer, Address* stack_top); - // The ebp-relative location of a regexp register. Operand register_location(int register_index); @@ -209,7 +202,7 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler { // by FrameAlign. The called function is not allowed to trigger a garbage // collection, since that might move the code and invalidate the return // address (unless this is somehow accounted for). - inline void CallCFunction(Address function_address, int num_arguments); + inline void CallCFunction(ExternalReference function, int num_arguments); MacroAssembler* masm_; diff --git a/V8Binding/v8/src/ia32/simulator-ia32.h b/V8Binding/v8/src/ia32/simulator-ia32.h index 4d02c03..3bed268 100644 --- a/V8Binding/v8/src/ia32/simulator-ia32.h +++ b/V8Binding/v8/src/ia32/simulator-ia32.h @@ -44,4 +44,9 @@ (reinterpret_cast<uintptr_t>(this) >= limit ? \ reinterpret_cast<uintptr_t>(this) - limit : 0) +// Call the generated regexp code directly. The entry function pointer should +// expect seven int/pointer sized arguments and return an int. +#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \ + entry(p0, p1, p2, p3, p4, p5, p6) + #endif // V8_IA32_SIMULATOR_IA32_H_ diff --git a/V8Binding/v8/src/ia32/stub-cache-ia32.cc b/V8Binding/v8/src/ia32/stub-cache-ia32.cc index f599f79..74c982f 100644 --- a/V8Binding/v8/src/ia32/stub-cache-ia32.cc +++ b/V8Binding/v8/src/ia32/stub-cache-ia32.cc @@ -1783,29 +1783,29 @@ Object* ConstructStubCompiler::CompileConstructStub( // ebx: initial map __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset)); __ shl(ecx, kPointerSizeLog2); - // Make sure that the maximum heap object size will never cause us - // problems here. - ASSERT(Heap::MaxObjectSizeInPagedSpace() >= JSObject::kMaxInstanceSize); - __ AllocateObjectInNewSpace(ecx, edx, ecx, no_reg, &generic_stub_call, false); + __ AllocateObjectInNewSpace(ecx, + edx, + ecx, + no_reg, + &generic_stub_call, + NO_ALLOCATION_FLAGS); // Allocated the JSObject, now initialize the fields and add the heap tag. // ebx: initial map - // edx: JSObject + // edx: JSObject (untagged) __ mov(Operand(edx, JSObject::kMapOffset), ebx); __ mov(ebx, Factory::empty_fixed_array()); __ mov(Operand(edx, JSObject::kPropertiesOffset), ebx); __ mov(Operand(edx, JSObject::kElementsOffset), ebx); - __ or_(Operand(edx), Immediate(kHeapObjectTag)); // Push the allocated object to the stack. This is the object that will be - // returned. + // returned (after it is tagged). __ push(edx); // eax: argc - // edx: JSObject + // edx: JSObject (untagged) // Load the address of the first in-object property into edx. __ lea(edx, Operand(edx, JSObject::kHeaderSize)); - __ xor_(Operand(edx), Immediate(kHeapObjectTag)); // Clear heap object tag. // Calculate the location of the first argument. 
The stack contains the // allocated object and the return address on top of the argc arguments. __ lea(ecx, Operand(esp, eax, times_4, 1 * kPointerSize)); @@ -1846,9 +1846,10 @@ Object* ConstructStubCompiler::CompileConstructStub( __ mov(Operand(edx, i * kPointerSize), edi); } - // Move argc to ebx and retreive the JSObject to return. + // Move argc to ebx and retrieve and tag the JSObject to return. __ mov(ebx, eax); __ pop(eax); + __ or_(Operand(eax), Immediate(kHeapObjectTag)); // Remove caller arguments and receiver from the stack and return. __ pop(ecx); diff --git a/V8Binding/v8/src/jsregexp.cc b/V8Binding/v8/src/jsregexp.cc index 06208aa..e518662 100644 --- a/V8Binding/v8/src/jsregexp.cc +++ b/V8Binding/v8/src/jsregexp.cc @@ -51,6 +51,7 @@ #include "x64/macro-assembler-x64.h" #include "x64/regexp-macro-assembler-x64.h" #elif V8_TARGET_ARCH_ARM +#include "arm/macro-assembler-arm.h" #include "arm/regexp-macro-assembler-arm.h" #else #error Unsupported target architecture. @@ -419,9 +420,7 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp, Handle<FixedArray> regexp(FixedArray::cast(jsregexp->data())); #ifdef V8_NATIVE_REGEXP -#ifdef V8_TARGET_ARCH_ARM - UNIMPLEMENTED(); -#else // Native regexp supported. + OffsetsVector captures(number_of_capture_registers); int* captures_vector = captures.vector(); NativeRegExpMacroAssembler::Result res; @@ -455,9 +454,9 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp, SetCapture(*array, i, captures_vector[i]); SetCapture(*array, i + 1, captures_vector[i + 1]); } -#endif // Native regexp supported. #else // ! V8_NATIVE_REGEXP + bool is_ascii = subject->IsAsciiRepresentation(); if (!EnsureCompiledIrregexp(jsregexp, is_ascii)) { return Handle<Object>::null(); @@ -487,6 +486,7 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp, SetCapture(*array, i, register_vector[i]); SetCapture(*array, i + 1, register_vector[i + 1]); } + #endif // V8_NATIVE_REGEXP SetLastCaptureCount(*array, number_of_capture_registers); @@ -1723,6 +1723,8 @@ bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler, GetQuickCheckDetails(details, compiler, 0, trace->at_start() == Trace::FALSE); if (details->cannot_match()) return false; if (!details->Rationalize(compiler->ascii())) return false; + ASSERT(details->characters() == 1 || + compiler->macro_assembler()->CanReadUnaligned()); uint32_t mask = details->mask(); uint32_t value = details->value(); @@ -2522,20 +2524,20 @@ void LoopChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) { int ChoiceNode::CalculatePreloadCharacters(RegExpCompiler* compiler) { int preload_characters = EatsAtLeast(4, 0); -#ifdef V8_HOST_CAN_READ_UNALIGNED - bool ascii = compiler->ascii(); - if (ascii) { - if (preload_characters > 4) preload_characters = 4; - // We can't preload 3 characters because there is no machine instruction - // to do that. We can't just load 4 because we could be reading - // beyond the end of the string, which could cause a memory fault. - if (preload_characters == 3) preload_characters = 2; + if (compiler->macro_assembler()->CanReadUnaligned()) { + bool ascii = compiler->ascii(); + if (ascii) { + if (preload_characters > 4) preload_characters = 4; + // We can't preload 3 characters because there is no machine instruction + // to do that. We can't just load 4 because we could be reading + // beyond the end of the string, which could cause a memory fault. 
+ if (preload_characters == 3) preload_characters = 2; + } else { + if (preload_characters > 2) preload_characters = 2; + } } else { - if (preload_characters > 2) preload_characters = 2; + if (preload_characters > 1) preload_characters = 1; } -#else - if (preload_characters > 1) preload_characters = 1; -#endif return preload_characters; } @@ -4470,16 +4472,12 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(RegExpCompileData* data, is_ascii ? NativeRegExpMacroAssembler::ASCII : NativeRegExpMacroAssembler::UC16; -#ifdef V8_TARGET_ARCH_IA32 - RegExpMacroAssemblerIA32 macro_assembler(mode, - (data->capture_count + 1) * 2); -#endif -#ifdef V8_TARGET_ARCH_X64 - RegExpMacroAssemblerX64 macro_assembler(mode, - (data->capture_count + 1) * 2); -#endif -#ifdef V8_TARGET_ARCH_ARM - UNIMPLEMENTED(); +#if V8_TARGET_ARCH_IA32 + RegExpMacroAssemblerIA32 macro_assembler(mode, (data->capture_count + 1) * 2); +#elif V8_TARGET_ARCH_X64 + RegExpMacroAssemblerX64 macro_assembler(mode, (data->capture_count + 1) * 2); +#elif V8_TARGET_ARCH_ARM + RegExpMacroAssemblerARM macro_assembler(mode, (data->capture_count + 1) * 2); #endif #else // ! V8_NATIVE_REGEXP diff --git a/V8Binding/v8/src/list.h b/V8Binding/v8/src/list.h index b6c06d8..dd7ea1c 100644 --- a/V8Binding/v8/src/list.h +++ b/V8Binding/v8/src/list.h @@ -62,9 +62,8 @@ class List { return data_[i]; } inline T& at(int i) const { return operator[](i); } - inline T& last() const { - return at(length_ - 1); - } + inline T& last() const { return at(length_ - 1); } + inline T& first() const { return at(0); } INLINE(bool is_empty() const) { return length_ == 0; } INLINE(int length() const) { return length_; } diff --git a/V8Binding/v8/src/macro-assembler.h b/V8Binding/v8/src/macro-assembler.h index 983802e..5631dec 100644 --- a/V8Binding/v8/src/macro-assembler.h +++ b/V8Binding/v8/src/macro-assembler.h @@ -28,6 +28,40 @@ #ifndef V8_MACRO_ASSEMBLER_H_ #define V8_MACRO_ASSEMBLER_H_ + +// Helper types to make boolean flags easier to read at call sites. +enum InvokeFlag { + CALL_FUNCTION, + JUMP_FUNCTION +}; + + +enum CodeLocation { + IN_JAVASCRIPT, + IN_JS_ENTRY, + IN_C_ENTRY +}; + + +enum HandlerType { + TRY_CATCH_HANDLER, + TRY_FINALLY_HANDLER, + JS_ENTRY_HANDLER +}; + + +// Flags used for the AllocateObjectInNewSpace functions. +enum AllocationFlags { + // No special flags. + NO_ALLOCATION_FLAGS = 0, + // Return the pointer to the allocated object already tagged as a heap object. + TAG_OBJECT = 1 << 0, + // The content of the result register already contains the allocation top in + // new space. + RESULT_CONTAINS_TOP = 1 << 1 }; + + #if V8_TARGET_ARCH_IA32 #include "assembler.h" #include "ia32/assembler-ia32.h" diff --git a/V8Binding/v8/src/mark-compact.cc b/V8Binding/v8/src/mark-compact.cc index d139093..e682fe2 100644 --- a/V8Binding/v8/src/mark-compact.cc +++ b/V8Binding/v8/src/mark-compact.cc @@ -41,6 +41,7 @@ namespace internal { bool MarkCompactCollector::force_compaction_ = false; bool MarkCompactCollector::compacting_collection_ = false; +bool MarkCompactCollector::compact_on_next_gc_ = false; int MarkCompactCollector::previous_marked_count_ = 0; GCTracer* MarkCompactCollector::tracer_ = NULL; @@ -104,35 +105,15 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) { // variable. tracer_ = tracer; - static const int kFragmentationLimit = 50; // Percent. 
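The CalculatePreloadCharacters() rewrite above turns a compile-time #ifdef into a runtime CanReadUnaligned() query. Its decision table as a standalone function:

static int PreloadCharacters(int wanted, bool can_read_unaligned, bool ascii) {
  int preload = wanted;
  if (can_read_unaligned) {
    if (ascii) {
      if (preload > 4) preload = 4;
      // No 3-byte load exists, and loading 4 could read past the string end.
      if (preload == 3) preload = 2;
    } else {
      if (preload > 2) preload = 2;
    }
  } else {
    if (preload > 1) preload = 1;
  }
  return preload;
}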
#ifdef DEBUG ASSERT(state_ == IDLE); state_ = PREPARE_GC; #endif ASSERT(!FLAG_always_compact || !FLAG_never_compact); - compacting_collection_ = FLAG_always_compact || force_compaction_; - - // We compact the old generation if it gets too fragmented (ie, we could - // recover an expected amount of space by reclaiming the waste and free - // list blocks). We always compact when the flag --gc-global is true - // because objects do not get promoted out of new space on non-compacting - // GCs. - if (!compacting_collection_) { - int old_gen_recoverable = 0; - int old_gen_used = 0; - - OldSpaces spaces; - while (OldSpace* space = spaces.next()) { - old_gen_recoverable += space->Waste() + space->AvailableFree(); - old_gen_used += space->Size(); - } - int old_gen_fragmentation = - static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used); - if (old_gen_fragmentation > kFragmentationLimit) { - compacting_collection_ = true; - } - } + compacting_collection_ = + FLAG_always_compact || force_compaction_ || compact_on_next_gc_; + compact_on_next_gc_ = false; if (FLAG_never_compact) compacting_collection_ = false; if (FLAG_collect_maps) CreateBackPointers(); @@ -173,6 +154,31 @@ void MarkCompactCollector::Finish() { // GC, because it relies on the new address of certain old space // objects (empty string, illegal builtin). StubCache::Clear(); + + // If we've just compacted old space there's no reason to check the + // fragmentation limit. Just return. + if (HasCompacted()) return; + + // We compact the old generation on the next GC if it has gotten too + // fragmented (ie, we could recover an expected amount of space by + // reclaiming the waste and free list blocks). + static const int kFragmentationLimit = 15; // Percent. + static const int kFragmentationAllowed = 1 * MB; // Absolute. + int old_gen_recoverable = 0; + int old_gen_used = 0; + + OldSpaces spaces; + while (OldSpace* space = spaces.next()) { + old_gen_recoverable += space->Waste() + space->AvailableFree(); + old_gen_used += space->Size(); + } + + int old_gen_fragmentation = + static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used); + if (old_gen_fragmentation > kFragmentationLimit && + old_gen_recoverable > kFragmentationAllowed) { + compact_on_next_gc_ = true; + } } diff --git a/V8Binding/v8/src/mark-compact.h b/V8Binding/v8/src/mark-compact.h index 0bd212e..2da2b1f 100644 --- a/V8Binding/v8/src/mark-compact.h +++ b/V8Binding/v8/src/mark-compact.h @@ -130,6 +130,9 @@ class MarkCompactCollector: public AllStatic { // Global flag indicating whether spaces were compacted on the last GC. static bool compacting_collection_; + // Global flag indicating whether spaces will be compacted on the next GC. + static bool compact_on_next_gc_; + // The number of objects left marked at the end of the last completed full // GC (expected to be zero). 
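The new trigger in Finish() above defers compaction to the next GC and requires both a relative and an absolute threshold. In numbers: with 40 MB of old-generation space and 8 MB recoverable, fragmentation is 8 * 100 / 40 = 20%, above the 15% limit, and 8 MB clears the 1 MB floor, so the next GC compacts. The same check as a function:

static const int MB = 1024 * 1024;

static bool ShouldCompactNextGC(int old_gen_recoverable, int old_gen_used) {
  static const int kFragmentationLimit = 15;        // Percent.
  static const int kFragmentationAllowed = 1 * MB;  // Absolute bytes.
  int fragmentation =
      static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
  return fragmentation > kFragmentationLimit &&
         old_gen_recoverable > kFragmentationAllowed;
}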
static int previous_marked_count_; diff --git a/V8Binding/v8/src/messages.js b/V8Binding/v8/src/messages.js index 8328fe5..255e544 100644 --- a/V8Binding/v8/src/messages.js +++ b/V8Binding/v8/src/messages.js @@ -163,7 +163,7 @@ function FormatMessage(message) { illegal_break: "Illegal break statement", illegal_continue: "Illegal continue statement", illegal_return: "Illegal return statement", - error_loading_debugger: "Error loading debugger %0", + error_loading_debugger: "Error loading debugger", no_input_to_regexp: "No input to %0", result_not_primitive: "Result of %0 must be a primitive, was %1", invalid_json: "String '%0' is not valid JSON", diff --git a/V8Binding/v8/src/objects-debug.cc b/V8Binding/v8/src/objects-debug.cc index ef4aae5..9fc9b1d 100644 --- a/V8Binding/v8/src/objects-debug.cc +++ b/V8Binding/v8/src/objects-debug.cc @@ -769,11 +769,14 @@ void JSRegExp::JSRegExpVerify() { FixedArray* arr = FixedArray::cast(data()); Object* ascii_data = arr->get(JSRegExp::kIrregexpASCIICodeIndex); - ASSERT(ascii_data->IsTheHole() - || (is_native ? ascii_data->IsCode() : ascii_data->IsByteArray())); + // TheHole : Not compiled yet. + // JSObject: Compilation error. + // Code/ByteArray: Compiled code. + ASSERT(ascii_data->IsTheHole() || ascii_data->IsJSObject() || + (is_native ? ascii_data->IsCode() : ascii_data->IsByteArray())); Object* uc16_data = arr->get(JSRegExp::kIrregexpUC16CodeIndex); - ASSERT(uc16_data->IsTheHole() - || (is_native ? uc16_data->IsCode() : uc16_data->IsByteArray())); + ASSERT(uc16_data->IsTheHole() || uc16_data->IsJSObject() || + (is_native ? uc16_data->IsCode() : uc16_data->IsByteArray())); ASSERT(arr->get(JSRegExp::kIrregexpCaptureCountIndex)->IsSmi()); ASSERT(arr->get(JSRegExp::kIrregexpMaxRegisterCountIndex)->IsSmi()); break; diff --git a/V8Binding/v8/src/objects.h b/V8Binding/v8/src/objects.h index 4b89899..bd8ca51 100644 --- a/V8Binding/v8/src/objects.h +++ b/V8Binding/v8/src/objects.h @@ -2634,7 +2634,7 @@ class Code: public HeapObject { // the layout of the code object into account. int ExecutableSize() { // Check that the assumptions about the layout of the code object holds. - ASSERT_EQ(instruction_start() - address(), + ASSERT_EQ(static_cast<int>(instruction_start() - address()), Code::kHeaderSize); return instruction_size() + Code::kHeaderSize; } @@ -2891,8 +2891,12 @@ class Map: public HeapObject { // Byte offsets within kInstanceSizesOffset. static const int kInstanceSizeOffset = kInstanceSizesOffset + 0; - static const int kInObjectPropertiesOffset = kInstanceSizesOffset + 1; - static const int kPreAllocatedPropertyFieldsOffset = kInstanceSizesOffset + 2; + static const int kInObjectPropertiesByte = 1; + static const int kInObjectPropertiesOffset = + kInstanceSizesOffset + kInObjectPropertiesByte; + static const int kPreAllocatedPropertyFieldsByte = 2; + static const int kPreAllocatedPropertyFieldsOffset = + kInstanceSizesOffset + kPreAllocatedPropertyFieldsByte; // The byte at position 3 is not in use at the moment. // Byte offsets within kInstanceAttributesOffset attributes. @@ -3528,9 +3532,13 @@ class JSRegExp: public JSObject { static const int kAtomDataSize = kAtomPatternIndex + 1; - // Irregexp compiled code or bytecode for ASCII. + // Irregexp compiled code or bytecode for ASCII. If compilation + // fails, this field holds an exception object that should be + // thrown if the regexp is used again. static const int kIrregexpASCIICodeIndex = kDataIndex; - // Irregexp compiled code or bytecode for UC16. 
+ // Irregexp compiled code or bytecode for UC16. If compilation + // fails, this field holds an exception object that should be + // thrown if the regexp is used again. static const int kIrregexpUC16CodeIndex = kDataIndex + 1; // Maximal number of registers used by either ASCII or UC16. // Only used to check that there is enough stack space diff --git a/V8Binding/v8/src/parser.cc b/V8Binding/v8/src/parser.cc index 5cc6341..0abb9ed 100644 --- a/V8Binding/v8/src/parser.cc +++ b/V8Binding/v8/src/parser.cc @@ -2397,12 +2397,6 @@ Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) { // WithStatement :: // 'with' '(' Expression ')' Statement - // We do not allow the use of 'with' statements in the internal JS - // code. If 'with' statements were allowed, the simplified setup of - // the runtime context chain would allow access to properties in the - // global object from within a 'with' statement. - ASSERT(extension_ != NULL || !Bootstrapper::IsActive()); - Expect(Token::WITH, CHECK_OK); Expect(Token::LPAREN, CHECK_OK); Expression* expr = ParseExpression(true, CHECK_OK); @@ -3088,9 +3082,6 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) { Handle<String> name = callee->name(); Variable* var = top_scope_->Lookup(name); if (var == NULL) { - // We do not allow direct calls to 'eval' in our internal - // JS files. Use builtin functions instead. - ASSERT(extension_ != NULL || !Bootstrapper::IsActive()); top_scope_->RecordEvalCall(); is_potentially_direct_eval = true; } diff --git a/V8Binding/v8/src/platform-linux.cc b/V8Binding/v8/src/platform-linux.cc index 6ec5070..cb93afb 100644 --- a/V8Binding/v8/src/platform-linux.cc +++ b/V8Binding/v8/src/platform-linux.cc @@ -56,6 +56,8 @@ #include "v8.h" #include "platform.h" +#include "top.h" +#include "v8threads.h" namespace v8 { @@ -580,6 +582,7 @@ Semaphore* OS::CreateSemaphore(int count) { #ifdef ENABLE_LOGGING_AND_PROFILING static Sampler* active_sampler_ = NULL; +static pthread_t vm_thread_ = 0; #if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__)) @@ -608,6 +611,30 @@ enum ArmRegisters {R15 = 15, R13 = 13, R11 = 11}; #endif +// A function that determines if a signal handler is called in the context +// of a VM thread. +// +// The problem is that the SIGPROF signal can be delivered to an arbitrary thread +// (see http://code.google.com/p/google-perftools/issues/detail?id=106#c2) +// So, if the signal is being handled in the context of a non-VM thread, +// it means that the VM thread is running, and trying to sample its stack can +// cause a crash. +static inline bool IsVmThread() { + // In the case of a single VM thread, this check is enough. + if (pthread_equal(pthread_self(), vm_thread_)) return true; + // If there are multiple threads that use the VM, they must have a thread id + // stored in TLS. To verify that the thread is really executing the VM, + // we check Top's data. Given that ThreadManager::RestoreThread first + // restores ThreadLocalTop from TLS, and only then erases the TLS value, + // reading Top::thread_id() should not be affected by races. 
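The single-thread fast path of IsVmThread() above reduces to recording the VM thread once and comparing thread identities later. Standalone:

#include <pthread.h>

static pthread_t vm_thread;

void RecordVmThread() {  // Called once, e.g. from Sampler::Start().
  vm_thread = pthread_self();
}

bool OnVmThread() {  // Mirrors the pthread_equal() fast path above.
  return pthread_equal(pthread_self(), vm_thread) != 0;
}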
+ if (ThreadManager::HasId() && !ThreadManager::IsArchived() && + ThreadManager::CurrentId() == Top::thread_id()) { + return true; + } + return false; +} + + static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) { USE(info); if (signal != SIGPROF) return; @@ -640,7 +667,8 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) { sample.fp = mcontext.arm_fp; #endif #endif - active_sampler_->SampleStack(&sample); + if (IsVmThread()) + active_sampler_->SampleStack(&sample); } // We always sample the VM state. @@ -678,6 +706,8 @@ void Sampler::Start() { // platforms. if (active_sampler_ != NULL) return; + vm_thread_ = pthread_self(); + // Request profiling signals. struct sigaction sa; sa.sa_sigaction = ProfilerSignalHandler; @@ -713,6 +743,7 @@ void Sampler::Stop() { active_ = false; } + #endif // ENABLE_LOGGING_AND_PROFILING } } // namespace v8::internal diff --git a/V8Binding/v8/src/platform-macos.cc b/V8Binding/v8/src/platform-macos.cc index c081064..a78142a 100644 --- a/V8Binding/v8/src/platform-macos.cc +++ b/V8Binding/v8/src/platform-macos.cc @@ -211,8 +211,17 @@ void OS::LogSharedLibraryAddresses() { for (unsigned int i = 0; i < images_count; ++i) { const mach_header* header = _dyld_get_image_header(i); if (header == NULL) continue; +#if V8_HOST_ARCH_X64 + uint64_t size; + char* code_ptr = getsectdatafromheader_64( + reinterpret_cast<const mach_header_64*>(header), + SEG_TEXT, + SECT_TEXT, + &size); +#else unsigned int size; char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size); +#endif if (code_ptr == NULL) continue; const uintptr_t slide = _dyld_get_image_vmaddr_slide(i); const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide; diff --git a/V8Binding/v8/src/regexp-macro-assembler-irregexp-inl.h b/V8Binding/v8/src/regexp-macro-assembler-irregexp-inl.h index 5074f21..b487468 100644 --- a/V8Binding/v8/src/regexp-macro-assembler-irregexp-inl.h +++ b/V8Binding/v8/src/regexp-macro-assembler-irregexp-inl.h @@ -38,6 +38,7 @@ namespace v8 { namespace internal { +#ifndef V8_NATIVE_REGEXP void RegExpMacroAssemblerIrregexp::Emit(uint32_t byte, uint32_t twenty_four_bits) { @@ -70,6 +71,7 @@ void RegExpMacroAssemblerIrregexp::Emit32(uint32_t word) { pc_ += 4; } +#endif // ! 
V8_NATIVE_REGEXP } } // namespace v8::internal diff --git a/V8Binding/v8/src/regexp-macro-assembler-irregexp.cc b/V8Binding/v8/src/regexp-macro-assembler-irregexp.cc index 21b622e..f9c7eee 100644 --- a/V8Binding/v8/src/regexp-macro-assembler-irregexp.cc +++ b/V8Binding/v8/src/regexp-macro-assembler-irregexp.cc @@ -36,6 +36,7 @@ namespace v8 { namespace internal { +#ifndef V8_NATIVE_REGEXP RegExpMacroAssemblerIrregexp::RegExpMacroAssemblerIrregexp(Vector<byte> buffer) : buffer_(buffer), @@ -458,5 +459,6 @@ void RegExpMacroAssemblerIrregexp::Expand() { } } +#endif // !V8_NATIVE_REGEXP } } // namespace v8::internal diff --git a/V8Binding/v8/src/regexp-macro-assembler-irregexp.h b/V8Binding/v8/src/regexp-macro-assembler-irregexp.h index dd64e7a..642a283 100644 --- a/V8Binding/v8/src/regexp-macro-assembler-irregexp.h +++ b/V8Binding/v8/src/regexp-macro-assembler-irregexp.h @@ -31,6 +31,7 @@ namespace v8 { namespace internal { +#ifndef V8_NATIVE_REGEXP class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler { public: @@ -133,6 +134,8 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler { DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpMacroAssemblerIrregexp); }; +#endif // !V8_NATIVE_REGEXP + } } // namespace v8::internal #endif // V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_ diff --git a/V8Binding/v8/src/regexp-macro-assembler-tracer.h b/V8Binding/v8/src/regexp-macro-assembler-tracer.h index 28434d7..28ca5f3 100644 --- a/V8Binding/v8/src/regexp-macro-assembler-tracer.h +++ b/V8Binding/v8/src/regexp-macro-assembler-tracer.h @@ -37,7 +37,7 @@ class RegExpMacroAssemblerTracer: public RegExpMacroAssembler { explicit RegExpMacroAssemblerTracer(RegExpMacroAssembler* assembler); virtual ~RegExpMacroAssemblerTracer(); virtual int stack_limit_slack() { return assembler_->stack_limit_slack(); } - + virtual bool CanReadUnaligned() { return assembler_->CanReadUnaligned(); } virtual void AdvanceCurrentPosition(int by); // Signed cp change. virtual void AdvanceRegister(int reg, int by); // r[reg] += by. virtual void Backtrack(); diff --git a/V8Binding/v8/src/regexp-macro-assembler.cc b/V8Binding/v8/src/regexp-macro-assembler.cc index 7f830fe..0d00cee 100644 --- a/V8Binding/v8/src/regexp-macro-assembler.cc +++ b/V8Binding/v8/src/regexp-macro-assembler.cc @@ -30,6 +30,13 @@ #include "assembler.h" #include "regexp-stack.h" #include "regexp-macro-assembler.h" +#if V8_TARGET_ARCH_ARM +#include "arm/simulator-arm.h" +#elif V8_TARGET_ARCH_IA32 +#include "ia32/simulator-ia32.h" +#elif V8_TARGET_ARCH_X64 +#include "x64/simulator-x64.h" +#endif namespace v8 { namespace internal { @@ -42,6 +49,15 @@ RegExpMacroAssembler::~RegExpMacroAssembler() { } +bool RegExpMacroAssembler::CanReadUnaligned() { +#ifdef V8_HOST_CAN_READ_UNALIGNED + return true; +#else + return false; +#endif +} + + #ifdef V8_NATIVE_REGEXP // Avoid unused code, e.g., on ARM. 
NativeRegExpMacroAssembler::NativeRegExpMacroAssembler() { @@ -51,6 +67,15 @@ NativeRegExpMacroAssembler::NativeRegExpMacroAssembler() { NativeRegExpMacroAssembler::~NativeRegExpMacroAssembler() { } + +bool NativeRegExpMacroAssembler::CanReadUnaligned() { +#ifdef V8_TARGET_CAN_READ_UNALIGNED + return true; +#else + return false; +#endif +} + const byte* NativeRegExpMacroAssembler::StringCharacterPosition( String* subject, int start_index) { @@ -162,13 +187,14 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute( RegExpStack stack; Address stack_base = RegExpStack::stack_base(); - int result = matcher_func(input, - start_offset, - input_start, - input_end, - output, - at_start_val, - stack_base); + int result = CALL_GENERATED_REGEXP_CODE(matcher_func, + input, + start_offset, + input_start, + input_end, + output, + at_start_val, + stack_base); ASSERT(result <= SUCCESS); ASSERT(result >= RETRY); @@ -213,5 +239,22 @@ int NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16( return 1; } + +Address NativeRegExpMacroAssembler::GrowStack(Address stack_pointer, + Address* stack_base) { + size_t size = RegExpStack::stack_capacity(); + Address old_stack_base = RegExpStack::stack_base(); + ASSERT(old_stack_base == *stack_base); + ASSERT(stack_pointer <= old_stack_base); + ASSERT(static_cast<size_t>(old_stack_base - stack_pointer) <= size); + Address new_stack_base = RegExpStack::EnsureCapacity(size * 2); + if (new_stack_base == NULL) { + return NULL; + } + *stack_base = new_stack_base; + intptr_t stack_content_size = old_stack_base - stack_pointer; + return new_stack_base - stack_content_size; +} + #endif // V8_NATIVE_REGEXP } } // namespace v8::internal diff --git a/V8Binding/v8/src/regexp-macro-assembler.h b/V8Binding/v8/src/regexp-macro-assembler.h index e590827..26aab2c 100644 --- a/V8Binding/v8/src/regexp-macro-assembler.h +++ b/V8Binding/v8/src/regexp-macro-assembler.h @@ -61,6 +61,7 @@ class RegExpMacroAssembler { // kCheckStackLimit flag to push operations (instead of kNoStackLimitCheck) // at least once for every stack_limit() pushes that are executed. virtual int stack_limit_slack() = 0; + virtual bool CanReadUnaligned(); virtual void AdvanceCurrentPosition(int by) = 0; // Signed cp change. virtual void AdvanceRegister(int reg, int by) = 0; // r[reg] += by. // Continues execution from the position pushed on the top of the backtrack @@ -182,6 +183,7 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler { NativeRegExpMacroAssembler(); virtual ~NativeRegExpMacroAssembler(); + virtual bool CanReadUnaligned(); static Result Match(Handle<Code> regexp, Handle<String> subject, @@ -195,6 +197,13 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler { Address byte_offset2, size_t byte_length); + // Called from RegExp if the backtrack stack limit is hit. + // Tries to expand the stack. Returns the new stack-pointer if + // successful, and updates the stack_top address, or returns 0 if unable + // to grow the stack. + // This function must not trigger a garbage collection. + static Address GrowStack(Address stack_pointer, Address* stack_top); + static const byte* StringCharacterPosition(String* subject, int start_index); static Result Execute(Code* code, @@ -205,7 +214,25 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler { int* output, bool at_start); }; + + +// Enter C code from generated RegExp code in a way that allows +// the C code to fix the return address in case of a GC. +// Currently only needed on ARM. 
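// Worked sketch of the GrowStack() rebasing above, with plain memory and
// assumed sizes. The backtrack stack grows downward from stack_base, so
// after doubling the block only the distance from the base has to be
// preserved; the returned pointer is the old stack pointer rebased into
// the new block:
#include <cstdio>
#include <cstdlib>
#include <cstring>

int main() {
  size_t capacity = 1024;
  char* old_block = static_cast<char*>(malloc(capacity));
  char* old_base  = old_block + capacity;   // stack base: high end of block
  char* sp        = old_base - 100;         // 100 bytes of backtrack data

  size_t used = old_base - sp;              // stack_content_size
  char* new_block = static_cast<char*>(malloc(capacity * 2));
  char* new_base  = new_block + capacity * 2;
  memcpy(new_base - used, sp, used);        // keep live contents at the top
  char* new_sp = new_base - used;           // what GrowStack() returns

  printf("used: before=%zu after=%zu\n",
         used, static_cast<size_t>(new_base - new_sp));
  free(old_block);
  free(new_block);
  return 0;
}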
+class RegExpCEntryStub: public CodeStub { + public: + RegExpCEntryStub() {} + virtual ~RegExpCEntryStub() {} + void Generate(MacroAssembler* masm); + + private: + Major MajorKey() { return RegExpCEntry; } + int MinorKey() { return 0; } + const char* GetName() { return "RegExpCEntryStub"; } +}; + #endif // V8_NATIVE_REGEXP + } } // namespace v8::internal #endif // V8_REGEXP_MACRO_ASSEMBLER_H_ diff --git a/V8Binding/v8/src/runtime.cc b/V8Binding/v8/src/runtime.cc index 213d9a3..c26783a 100644 --- a/V8Binding/v8/src/runtime.cc +++ b/V8Binding/v8/src/runtime.cc @@ -4058,7 +4058,7 @@ static Object* Runtime_Math_acos(Arguments args) { ASSERT(args.length() == 1); CONVERT_DOUBLE_CHECKED(x, args[0]); - return Heap::AllocateHeapNumber(acos(x)); + return TranscendentalCache::Get(TranscendentalCache::ACOS, x); } @@ -4067,7 +4067,7 @@ static Object* Runtime_Math_asin(Arguments args) { ASSERT(args.length() == 1); CONVERT_DOUBLE_CHECKED(x, args[0]); - return Heap::AllocateHeapNumber(asin(x)); + return TranscendentalCache::Get(TranscendentalCache::ASIN, x); } @@ -4076,7 +4076,7 @@ static Object* Runtime_Math_atan(Arguments args) { ASSERT(args.length() == 1); CONVERT_DOUBLE_CHECKED(x, args[0]); - return Heap::AllocateHeapNumber(atan(x)); + return TranscendentalCache::Get(TranscendentalCache::ATAN, x); } @@ -4117,7 +4117,7 @@ static Object* Runtime_Math_cos(Arguments args) { ASSERT(args.length() == 1); CONVERT_DOUBLE_CHECKED(x, args[0]); - return Heap::AllocateHeapNumber(cos(x)); + return TranscendentalCache::Get(TranscendentalCache::COS, x); } @@ -4126,7 +4126,7 @@ static Object* Runtime_Math_exp(Arguments args) { ASSERT(args.length() == 1); CONVERT_DOUBLE_CHECKED(x, args[0]); - return Heap::AllocateHeapNumber(exp(x)); + return TranscendentalCache::Get(TranscendentalCache::EXP, x); } @@ -4144,7 +4144,7 @@ static Object* Runtime_Math_log(Arguments args) { ASSERT(args.length() == 1); CONVERT_DOUBLE_CHECKED(x, args[0]); - return Heap::AllocateHeapNumber(log(x)); + return TranscendentalCache::Get(TranscendentalCache::LOG, x); } @@ -4232,7 +4232,7 @@ static Object* Runtime_Math_sin(Arguments args) { ASSERT(args.length() == 1); CONVERT_DOUBLE_CHECKED(x, args[0]); - return Heap::AllocateHeapNumber(sin(x)); + return TranscendentalCache::Get(TranscendentalCache::SIN, x); } @@ -4250,7 +4250,7 @@ static Object* Runtime_Math_tan(Arguments args) { ASSERT(args.length() == 1); CONVERT_DOUBLE_CHECKED(x, args[0]); - return Heap::AllocateHeapNumber(tan(x)); + return TranscendentalCache::Get(TranscendentalCache::TAN, x); } @@ -4612,7 +4612,7 @@ static JSObject* ComputeReceiverForNonGlobal(JSObject* holder) { static ObjectPair LoadContextSlotHelper(Arguments args, bool throw_error) { HandleScope scope; - ASSERT(args.length() == 2); + ASSERT_EQ(2, args.length()); if (!args[0]->IsContext() || !args[1]->IsString()) { return MakePair(Top::ThrowIllegalOperation(), NULL); diff --git a/V8Binding/v8/src/serialize.cc b/V8Binding/v8/src/serialize.cc index d2fd1e4..f65235a 100644 --- a/V8Binding/v8/src/serialize.cc +++ b/V8Binding/v8/src/serialize.cc @@ -734,6 +734,20 @@ void ExternalReferenceTable::PopulateTable() { UNCLASSIFIED, 17, "compare_doubles"); +#ifdef V8_NATIVE_REGEXP + Add(ExternalReference::re_case_insensitive_compare_uc16().address(), + UNCLASSIFIED, + 18, + "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()"); + Add(ExternalReference::re_check_stack_guard_state().address(), + UNCLASSIFIED, + 19, + "RegExpMacroAssembler*::CheckStackGuardState()"); + Add(ExternalReference::re_grow_stack().address(), + UNCLASSIFIED, 
+ 20, + "NativeRegExpMacroAssembler::GrowStack()"); +#endif } @@ -1119,6 +1133,11 @@ void Serializer::PutHeader() { #else writer_->PutC('0'); #endif +#ifdef V8_NATIVE_REGEXP + writer_->PutC('N'); +#else // Interpreted regexp + writer_->PutC('I'); +#endif // Write sizes of paged memory spaces. Allocate extra space for the old // and code spaces, because objects in new space will be promoted to them. writer_->PutC('S'); @@ -1182,19 +1201,25 @@ void Serializer::PutGlobalHandleStack(const List<Handle<Object> >& stack) { void Serializer::PutContextStack() { - List<Handle<Object> > contexts(2); + List<Context*> contexts(2); while (HandleScopeImplementer::instance()->HasSavedContexts()) { - Handle<Object> context = + Context* context = HandleScopeImplementer::instance()->RestoreContext(); contexts.Add(context); } for (int i = contexts.length() - 1; i >= 0; i--) { HandleScopeImplementer::instance()->SaveContext(contexts[i]); } - PutGlobalHandleStack(contexts); + writer_->PutC('C'); + writer_->PutC('['); + writer_->PutInt(contexts.length()); + if (!contexts.is_empty()) { + Object** start = reinterpret_cast<Object**>(&contexts.first()); + VisitPointers(start, start + contexts.length()); + } + writer_->PutC(']'); } - void Serializer::PutEncodedAddress(Address addr) { writer_->PutC('P'); writer_->PutAddress(addr); @@ -1238,7 +1263,7 @@ Address Serializer::PutObject(HeapObject* obj) { // Write out the object prologue: type, size, and simulated address of obj. writer_->PutC('['); - CHECK_EQ(0, size & kObjectAlignmentMask); + CHECK_EQ(0, static_cast<int>(size & kObjectAlignmentMask)); writer_->PutInt(type); writer_->PutInt(size >> kObjectAlignmentBits); PutEncodedAddress(addr); // encodes AllocationSpace @@ -1475,6 +1500,11 @@ void Deserializer::GetHeader() { // synchronization tags. if (reader_.GetC() != '0') FATAL("Snapshot contains synchronization tags."); #endif +#ifdef V8_NATIVE_REGEXP + reader_.ExpectC('N'); +#else // Interpreted regexp. + reader_.ExpectC('I'); +#endif // Ensure sufficient capacity in paged memory spaces to avoid growth // during deserialization. reader_.ExpectC('S'); @@ -1517,9 +1547,16 @@ void Deserializer::GetGlobalHandleStack(List<Handle<Object> >* stack) { void Deserializer::GetContextStack() { - List<Handle<Object> > entered_contexts(2); - GetGlobalHandleStack(&entered_contexts); - for (int i = 0; i < entered_contexts.length(); i++) { + reader_.ExpectC('C'); + CHECK_EQ(reader_.GetC(), '['); + int count = reader_.GetInt(); + List<Context*> entered_contexts(count); + if (count > 0) { + Object** start = reinterpret_cast<Object**>(&entered_contexts.first()); + VisitPointers(start, start + count); + } + reader_.ExpectC(']'); + for (int i = 0; i < count; i++) { HandleScopeImplementer::instance()->SaveContext(entered_contexts[i]); } } diff --git a/V8Binding/v8/src/spaces.cc b/V8Binding/v8/src/spaces.cc index 45e82f4..de9b233 100644 --- a/V8Binding/v8/src/spaces.cc +++ b/V8Binding/v8/src/spaces.cc @@ -2561,10 +2561,12 @@ void LargeObjectSpace::Verify() { ASSERT(map->IsMap()); ASSERT(Heap::map_space()->Contains(map)); - // We have only code, sequential strings, fixed arrays, and byte arrays - // in large object space. - ASSERT(object->IsCode() || object->IsSeqString() - || object->IsFixedArray() || object->IsByteArray()); + // We have only code, sequential strings, external strings + // (sequential strings that have been morphed into external + // strings), fixed arrays, and byte arrays in large object space. 
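// Sketch (assumed stream types, not V8's snapshot reader/writer): the
// 'N'/'I' byte added above makes the regexp configuration part of the
// snapshot format, so a native-regexp build refuses a snapshot produced
// by an interpreted-regexp build and vice versa:
#include <istream>
#include <ostream>
#include <stdexcept>

void PutRegExpTag(std::ostream& out, bool native_regexp) {
  out.put(native_regexp ? 'N' : 'I');            // as in Serializer::PutHeader()
}

void ExpectRegExpTag(std::istream& in, bool native_regexp) {
  if (in.get() != (native_regexp ? 'N' : 'I'))   // as in Deserializer::GetHeader()
    throw std::runtime_error("snapshot built with the other regexp engine");
}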
+ ASSERT(object->IsCode() || object->IsSeqString() || + object->IsExternalString() || object->IsFixedArray() || + object->IsByteArray()); // The object itself should look OK. object->Verify(); diff --git a/V8Binding/v8/src/stub-cache.cc b/V8Binding/v8/src/stub-cache.cc index a719f29..2906c22 100644 --- a/V8Binding/v8/src/stub-cache.cc +++ b/V8Binding/v8/src/stub-cache.cc @@ -1099,9 +1099,14 @@ Object* CallStubCompiler::GetCode(PropertyType type, String* name) { Object* ConstructStubCompiler::GetCode() { Code::Flags flags = Code::ComputeFlags(Code::STUB); - return GetCodeWithFlags(flags, "ConstructStub"); + Object* result = GetCodeWithFlags(flags, "ConstructStub"); + if (!result->IsFailure()) { + Code* code = Code::cast(result); + USE(code); + LOG(CodeCreateEvent(Logger::STUB_TAG, code, "ConstructStub")); + } + return result; } - } } // namespace v8::internal diff --git a/V8Binding/v8/src/top.cc b/V8Binding/v8/src/top.cc index 550703a..5c22bcf 100644 --- a/V8Binding/v8/src/top.cc +++ b/V8Binding/v8/src/top.cc @@ -855,23 +855,18 @@ void Top::TraceException(bool flag) { } -bool Top::OptionalRescheduleException(bool is_bottom_call, - bool force_clear_catchable) { +bool Top::OptionalRescheduleException(bool is_bottom_call) { // Allways reschedule out of memory exceptions. if (!is_out_of_memory()) { bool is_termination_exception = pending_exception() == Heap::termination_exception(); - // Do not reschedule the exception if this is the bottom call or - // if we are asked to clear catchable exceptions. Termination - // exceptions are not catchable and are only cleared if this is - // the bottom call. - bool clear_exception = is_bottom_call || - (force_clear_catchable && !is_termination_exception); + // Do not reschedule the exception if this is the bottom call. + bool clear_exception = is_bottom_call; if (is_termination_exception) { - thread_local_.external_caught_exception_ = false; if (is_bottom_call) { + thread_local_.external_caught_exception_ = false; clear_pending_exception(); return false; } diff --git a/V8Binding/v8/src/top.h b/V8Binding/v8/src/top.h index d4d73c2..5b3d6a0 100644 --- a/V8Binding/v8/src/top.h +++ b/V8Binding/v8/src/top.h @@ -157,8 +157,8 @@ class Top { // exceptions. If an exception was thrown and not handled by an external // handler the exception is scheduled to be rethrown when we return to running // JavaScript code. If an exception is scheduled true is returned. - static bool OptionalRescheduleException(bool is_bottom_call, - bool force_clear_catchable); + static bool OptionalRescheduleException(bool is_bottom_call); + static bool* external_caught_exception_address() { return &thread_local_.external_caught_exception_; diff --git a/V8Binding/v8/src/v8threads.cc b/V8Binding/v8/src/v8threads.cc index 8e0a8be..3022a7e 100644 --- a/V8Binding/v8/src/v8threads.cc +++ b/V8Binding/v8/src/v8threads.cc @@ -241,7 +241,10 @@ ThreadState* ThreadState::Next() { } -int ThreadManager::next_id_ = 0; +// Thread ids must start with 1, because in TLS having thread id 0 can't +// be distinguished from not having a thread id at all (since NULL is +// defined as 0.) 
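// Sketch (pthread TLS, assumed to mirror Thread::*ThreadLocal) of the
// comment above: the integer id is stored in the TLS slot cast to
// void*, so id 0 would read back exactly like "no id assigned"; hence
// last_id_ hands out 1, 2, 3, ...
#include <pthread.h>
#include <cstdint>
#include <cstdio>

static pthread_key_t thread_id_key;

static void SetThreadId(int id) {
  pthread_setspecific(thread_id_key,
                      reinterpret_cast<void*>(static_cast<intptr_t>(id)));
}

static bool HasThreadId() {
  // NULL means "never assigned", but a stored id of 0 would also read
  // back as NULL, which is the ambiguity the comment describes.
  return pthread_getspecific(thread_id_key) != NULL;
}

int main() {
  pthread_key_create(&thread_id_key, NULL);
  printf("%d\n", HasThreadId());  // 0: nothing assigned yet
  SetThreadId(1);                 // first id handed out is 1, never 0
  printf("%d\n", HasThreadId());  // 1
  return 0;
}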
+int ThreadManager::last_id_ = 0; Mutex* ThreadManager::mutex_ = OS::CreateMutex(); ThreadHandle ThreadManager::mutex_owner_(ThreadHandle::INVALID); ThreadHandle ThreadManager::lazily_archived_thread_(ThreadHandle::INVALID); @@ -250,7 +253,7 @@ ThreadState* ThreadManager::lazily_archived_thread_state_ = NULL; void ThreadManager::ArchiveThread() { ASSERT(!lazily_archived_thread_.IsValid()); - ASSERT(Thread::GetThreadLocal(thread_state_key) == NULL); + ASSERT(!IsArchived()); ThreadState* state = ThreadState::GetFree(); state->Unlink(); Thread::SetThreadLocal(thread_state_key, reinterpret_cast<void*>(state)); @@ -281,6 +284,11 @@ void ThreadManager::EagerlyArchiveThread() { } +bool ThreadManager::IsArchived() { + return Thread::HasThreadLocal(thread_state_key); +} + + void ThreadManager::Iterate(ObjectVisitor* v) { // Expecting no threads during serialization/deserialization for (ThreadState* state = ThreadState::FirstInUse(); @@ -321,15 +329,21 @@ int ThreadManager::CurrentId() { void ThreadManager::AssignId() { - if (!Thread::HasThreadLocal(thread_id_key)) { + if (!HasId()) { ASSERT(Locker::IsLocked()); - int thread_id = next_id_++; + int thread_id = ++last_id_; + ASSERT(thread_id > 0); // see the comment near last_id_ definition. Thread::SetThreadLocalInt(thread_id_key, thread_id); Top::set_thread_id(thread_id); } } +bool ThreadManager::HasId() { + return Thread::HasThreadLocal(thread_id_key); +} + + void ThreadManager::TerminateExecution(int thread_id) { for (ThreadState* state = ThreadState::FirstInUse(); state != NULL; diff --git a/V8Binding/v8/src/v8threads.h b/V8Binding/v8/src/v8threads.h index 3f81f57..f808e54 100644 --- a/V8Binding/v8/src/v8threads.h +++ b/V8Binding/v8/src/v8threads.h @@ -86,6 +86,7 @@ class ThreadManager : public AllStatic { static void ArchiveThread(); static bool RestoreThread(); + static bool IsArchived(); static void Iterate(ObjectVisitor* v); static void MarkCompactPrologue(bool is_compacting); @@ -94,6 +95,7 @@ class ThreadManager : public AllStatic { static int CurrentId(); static void AssignId(); + static bool HasId(); static void TerminateExecution(int thread_id); @@ -101,7 +103,7 @@ class ThreadManager : public AllStatic { private: static void EagerlyArchiveThread(); - static int next_id_; // V8 threads are identified through an integer. + static int last_id_; // V8 threads are identified through an integer. static Mutex* mutex_; static ThreadHandle mutex_owner_; static ThreadHandle lazily_archived_thread_; diff --git a/V8Binding/v8/src/version.cc b/V8Binding/v8/src/version.cc index d140470..88256d1 100644 --- a/V8Binding/v8/src/version.cc +++ b/V8Binding/v8/src/version.cc @@ -34,7 +34,7 @@ // cannot be changed without changing the SCons build script. #define MAJOR_VERSION 1 #define MINOR_VERSION 3 -#define BUILD_NUMBER 9 +#define BUILD_NUMBER 10 #define PATCH_LEVEL 0 #define CANDIDATE_VERSION true diff --git a/V8Binding/v8/src/x64/assembler-x64.cc b/V8Binding/v8/src/x64/assembler-x64.cc index a02557e..6304324 100644 --- a/V8Binding/v8/src/x64/assembler-x64.cc +++ b/V8Binding/v8/src/x64/assembler-x64.cc @@ -120,13 +120,23 @@ void CpuFeatures::Probe() { supported_ = kDefaultCpuFeatures | (1 << CPUID); { Scope fscope(CPUID); __ cpuid(); + // Move the result from ecx:edx to rdi. + __ movl(rdi, rdx); // Zero-extended to 64 bits. 
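// Sketch of the arithmetic this probe sequence computes (names are
// illustrative, not from the patch). CPUID(1) returns feature bits 0-31
// in edx and 32-63 in ecx; those are parked in rdi so that a second
// CPUID with eax = 0x80000001 can be run, whose ecx bit 0 reports
// SAHF/LAHF support in 64-bit mode. The final word matches
// rax = (rcx & 1) | (rdi & ~1) | (1 << CPUID) at the end of the hunk:
#include <cstdint>

uint64_t PackFeatureWord(uint32_t edx1, uint32_t ecx1, uint32_t ecx81) {
  uint64_t both_leaves = static_cast<uint64_t>(edx1) |
                         (static_cast<uint64_t>(ecx1) << 32);  // rdi above
  uint64_t sahf_bit = ecx81 & 1;  // reuses feature bit 0 (enum SAHF = 0)
  const int kCpuidBit = 10;       // CPUID assigned to a reserved bit
  return (both_leaves & ~1ULL) | sahf_bit | (1ULL << kCpuidBit);
}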
+ __ shl(rcx, Immediate(32)); + __ or_(rdi, rcx); + + // Get the sahf supported flag, from CPUID(0x80000001) + __ movq(rax, 0x80000001, RelocInfo::NONE); + __ cpuid(); } supported_ = kDefaultCpuFeatures; - // Move the result from ecx:edx to rax and make sure to mark the - // CPUID feature as supported. - __ movl(rax, rdx); // Zero-extended to 64 bits. - __ shl(rcx, Immediate(32)); + // Put the CPU flags in rax. + // rax = (rcx & 1) | (rdi & ~1) | (1 << CPUID). + __ movl(rax, Immediate(1)); + __ and_(rcx, rax); // Bit 0 is set if SAHF instruction supported. + __ not_(rax); + __ and_(rax, rdi); __ or_(rax, rcx); __ or_(rax, Immediate(1 << CPUID)); diff --git a/V8Binding/v8/src/x64/assembler-x64.h b/V8Binding/v8/src/x64/assembler-x64.h index 9d602b9..4d341c6 100644 --- a/V8Binding/v8/src/x64/assembler-x64.h +++ b/V8Binding/v8/src/x64/assembler-x64.h @@ -361,7 +361,12 @@ class CpuFeatures : public AllStatic { // Feature flags bit positions. They are mostly based on the CPUID spec. // (We assign CPUID itself to one of the currently reserved bits -- // feel free to change this if needed.) - enum Feature { SSE3 = 32, SSE2 = 26, CMOV = 15, RDTSC = 4, CPUID = 10 }; + enum Feature { SSE3 = 32, + SSE2 = 26, + CMOV = 15, + RDTSC = 4, + CPUID = 10, + SAHF = 0}; // Detect features of the target CPU. Set safe defaults if the serializer // is enabled (snapshots must be portable). static void Probe(); diff --git a/V8Binding/v8/src/x64/builtins-x64.cc b/V8Binding/v8/src/x64/builtins-x64.cc index 882b32d..fab71fa 100644 --- a/V8Binding/v8/src/x64/builtins-x64.cc +++ b/V8Binding/v8/src/x64/builtins-x64.cc @@ -139,9 +139,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { // Fill remaining expected arguments with undefined values. Label fill; - __ movq(kScratchRegister, - Factory::undefined_value(), - RelocInfo::EMBEDDED_OBJECT); + __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex); __ bind(&fill); __ incq(rcx); __ push(kScratchRegister); @@ -218,9 +216,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { __ testl(rbx, Immediate(kSmiTagMask)); __ j(zero, &call_to_object); - __ Cmp(rbx, Factory::null_value()); + __ CompareRoot(rbx, Heap::kNullValueRootIndex); __ j(equal, &use_global_receiver); - __ Cmp(rbx, Factory::undefined_value()); + __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex); __ j(equal, &use_global_receiver); __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx); @@ -386,9 +384,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { __ movq(rbx, Operand(rbp, kReceiverOffset)); __ testl(rbx, Immediate(kSmiTagMask)); __ j(zero, &call_to_object); - __ Cmp(rbx, Factory::null_value()); + __ CompareRoot(rbx, Heap::kNullValueRootIndex); __ j(equal, &use_global_receiver); - __ Cmp(rbx, Factory::undefined_value()); + __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex); __ j(equal, &use_global_receiver); // If given receiver is already a JavaScript object then there's no @@ -538,17 +536,18 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset)); __ shl(rdi, Immediate(kPointerSizeLog2)); // rdi: size of new object - // Make sure that the maximum heap object size will never cause us - // problem here, because it is always greater than the maximum - // instance size that can be represented in a byte. 
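// Assumed shape of the AllocationFlags set introduced above: the enum
// definition itself is outside this excerpt, but the three names used
// here and the (flags & TAG_OBJECT) tests later in the patch imply a
// plain bitmask:
enum AllocationFlags {
  NO_ALLOCATION_FLAGS = 0,
  RESULT_CONTAINS_TOP = 1 << 0,  // result register already holds alloc top
  TAG_OBJECT          = 1 << 1   // add kHeapObjectTag to the result
};
// Call sites then read as self-documenting bit tests, e.g.
//   if ((flags & TAG_OBJECT) != 0) addq(result, Immediate(kHeapObjectTag));
// instead of trailing true/false arguments whose meaning is invisible.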
- ASSERT(Heap::MaxObjectSizeInPagedSpace() >= (1 << kBitsPerByte)); - __ AllocateObjectInNewSpace(rdi, rbx, rdi, no_reg, &rt_call, false); + __ AllocateObjectInNewSpace(rdi, + rbx, + rdi, + no_reg, + &rt_call, + NO_ALLOCATION_FLAGS); // Allocated the JSObject, now initialize the fields. // rax: initial map // rbx: JSObject (not HeapObject tagged - the actual address). // rdi: start of next object __ movq(Operand(rbx, JSObject::kMapOffset), rax); - __ Move(rcx, Factory::empty_fixed_array()); + __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex); __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx); __ movq(Operand(rbx, JSObject::kElementsOffset), rcx); // Set extra fields in the newly allocated object. @@ -556,7 +555,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // rbx: JSObject // rdi: start of next object { Label loop, entry; - __ Move(rdx, Factory::undefined_value()); + __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex); __ lea(rcx, Operand(rbx, JSObject::kHeaderSize)); __ jmp(&entry); __ bind(&loop); @@ -597,8 +596,6 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // rbx: JSObject // rdi: start of next object (will be start of FixedArray) // rdx: number of elements in properties array - ASSERT(Heap::MaxObjectSizeInPagedSpace() > - (FixedArray::kHeaderSize + 255*kPointerSize)); __ AllocateObjectInNewSpace(FixedArray::kHeaderSize, times_pointer_size, rdx, @@ -606,14 +603,14 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { rax, no_reg, &undo_allocation, - true); + RESULT_CONTAINS_TOP); // Initialize the FixedArray. // rbx: JSObject // rdi: FixedArray // rdx: number of elements // rax: start of next object - __ Move(rcx, Factory::fixed_array_map()); + __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex); __ movq(Operand(rdi, JSObject::kMapOffset), rcx); // setup the map __ movl(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length @@ -623,7 +620,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // rax: start of next object // rdx: number of elements { Label loop, entry; - __ Move(rdx, Factory::undefined_value()); + __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex); __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize)); __ jmp(&entry); __ bind(&loop); @@ -797,6 +794,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, __ movq(rax, rcx); __ movq(rbx, r8); #endif // _WIN64 + + // Set up the roots register. + ExternalReference roots_address = ExternalReference::roots_address(); + __ movq(r13, roots_address); + // Current stack contents: // [rsp + 2 * kPointerSize ... 
]: Internal frame // [rsp + kPointerSize] : function diff --git a/V8Binding/v8/src/x64/cfg-x64.cc b/V8Binding/v8/src/x64/cfg-x64.cc index 34ddbbf..0b71d8e 100644 --- a/V8Binding/v8/src/x64/cfg-x64.cc +++ b/V8Binding/v8/src/x64/cfg-x64.cc @@ -71,8 +71,7 @@ void EntryNode::Compile(MacroAssembler* masm) { __ push(rdi); int count = CfgGlobals::current()->fun()->scope()->num_stack_slots(); if (count > 0) { - __ movq(kScratchRegister, Factory::undefined_value(), - RelocInfo::EMBEDDED_OBJECT); + __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex); for (int i = 0; i < count; i++) { __ push(kScratchRegister); } diff --git a/V8Binding/v8/src/x64/codegen-x64.cc b/V8Binding/v8/src/x64/codegen-x64.cc index f915a0c..d7e15aa 100644 --- a/V8Binding/v8/src/x64/codegen-x64.cc +++ b/V8Binding/v8/src/x64/codegen-x64.cc @@ -537,7 +537,6 @@ bool CodeGenerator::HasValidEntryRegisters() { && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0)) && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0)) && (allocator()->count(r15) == (frame()->is_used(r15) ? 1 : 0)) - && (allocator()->count(r13) == (frame()->is_used(r13) ? 1 : 0)) && (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0)); } #endif @@ -858,10 +857,7 @@ void DeferredStackCheck::Generate() { void CodeGenerator::CheckStack() { if (FLAG_check_stack) { DeferredStackCheck* deferred = new DeferredStackCheck; - ExternalReference stack_guard_limit = - ExternalReference::address_of_stack_guard_limit(); - __ movq(kScratchRegister, stack_guard_limit); - __ cmpq(rsp, Operand(kScratchRegister, 0)); + __ CompareRoot(rsp, Heap::kStackLimitRootIndex); deferred->Branch(below); deferred->BindExit(); } @@ -941,9 +937,7 @@ void CodeGenerator::VisitDeclaration(Declaration* node) { // 'undefined') because we may have a (legal) redeclaration and we // must not destroy the current value. if (node->mode() == Variable::CONST) { - __ movq(kScratchRegister, Factory::the_hole_value(), - RelocInfo::EMBEDDED_OBJECT); - frame_->EmitPush(kScratchRegister); + frame_->EmitPush(Heap::kTheHoleValueRootIndex); } else if (node->fun() != NULL) { Load(node->fun()); } else { @@ -1649,9 +1643,9 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) { frame_->EmitPop(rax); // rax: value to be iterated over - __ Cmp(rax, Factory::undefined_value()); + __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); exit.Branch(equal); - __ Cmp(rax, Factory::null_value()); + __ CompareRoot(rax, Heap::kNullValueRootIndex); exit.Branch(equal); // Stack layout in body: @@ -1687,7 +1681,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) { // Runtime::kGetPropertyNamesFast) __ movq(rdx, rax); __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset)); - __ Cmp(rcx, Factory::meta_map()); + __ CompareRoot(rcx, Heap::kMetaMapRootIndex); fixed_array.Branch(not_equal); // Get enum cache @@ -1756,7 +1750,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) { __ movq(rbx, rax); // If the property has been removed while iterating, we just skip it. - __ Cmp(rbx, Factory::null_value()); + __ CompareRoot(rbx, Heap::kNullValueRootIndex); node->continue_target()->Branch(equal); end_del_check.Bind(); @@ -2031,10 +2025,7 @@ void CodeGenerator::VisitTryFinally(TryFinally* node) { // Fake a top of stack value (unneeded when FALLING) and set the // state in ecx, then jump around the unlink blocks if any. 
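// The recurring rewrite in these codegen hunks: r13 now permanently
// holds the address of the heap's root list (loaded once in the entry
// trampoline above), so root constants become a single memory operand
// instead of 64-bit embedded pointers that each need a relocation
// entry. In sketch form, the MacroAssembler helpers defined later in
// this patch (kPointerSizeLog2 is 3 on x64):
//
//   LoadRoot(dst, i):    movq(dst, Operand(r13, i << kPointerSizeLog2));
//   CompareRoot(r, i):   cmpq(r,   Operand(r13, i << kPointerSizeLog2));
//   PushRoot(i):         push(Operand(r13, i << kPointerSizeLog2));
//
// Before: __ Cmp(rax, Factory::undefined_value());  // movabs + cmp + reloc
// After:  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);  // one cmpq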
- __ movq(kScratchRegister, - Factory::undefined_value(), - RelocInfo::EMBEDDED_OBJECT); - frame_->EmitPush(kScratchRegister); + frame_->EmitPush(Heap::kUndefinedValueRootIndex); __ movq(rcx, Immediate(Smi::FromInt(FALLING))); if (nof_unlinks > 0) { finally_block.Jump(); @@ -2079,10 +2070,7 @@ void CodeGenerator::VisitTryFinally(TryFinally* node) { frame_->EmitPush(rax); } else { // Fake TOS for targets that shadowed breaks and continues. - __ movq(kScratchRegister, - Factory::undefined_value(), - RelocInfo::EMBEDDED_OBJECT); - frame_->EmitPush(kScratchRegister); + frame_->EmitPush(Heap::kUndefinedValueRootIndex); } __ movq(rcx, Immediate(Smi::FromInt(JUMPING + i))); if (--nof_unlinks > 0) { @@ -2324,7 +2312,7 @@ void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) { // jump to the deferred code passing the literals array. DeferredRegExpLiteral* deferred = new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node); - __ Cmp(boilerplate.reg(), Factory::undefined_value()); + __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex); deferred->Branch(equal); deferred->BindExit(); literals.Unuse(); @@ -2395,7 +2383,7 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) { // If so, jump to the deferred code passing the literals array. DeferredObjectLiteral* deferred = new DeferredObjectLiteral(boilerplate.reg(), literals.reg(), node); - __ Cmp(boilerplate.reg(), Factory::undefined_value()); + __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex); deferred->Branch(equal); deferred->BindExit(); literals.Unuse(); @@ -2528,7 +2516,7 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) { // If so, jump to the deferred code passing the literals array. DeferredArrayLiteral* deferred = new DeferredArrayLiteral(boilerplate.reg(), literals.reg(), node); - __ Cmp(boilerplate.reg(), Factory::undefined_value()); + __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex); deferred->Branch(equal); deferred->BindExit(); literals.Unuse(); @@ -3486,7 +3474,7 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) { destination()->true_target()->Branch(zero); frame_->Spill(answer.reg()); __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset)); - __ Cmp(answer.reg(), Factory::heap_number_map()); + __ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex); answer.Unuse(); destination()->Split(equal); @@ -3505,14 +3493,14 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) { destination()->Split(below); // Unsigned byte comparison needed. 
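// Why "unsigned byte comparison needed" above, and why ic-x64.cc later
// in this patch changes j(less, ...) to j(below, ...): instance types
// live in an unsigned byte, and the signed condition codes misorder any
// value past 0x7F. Sketch with assumed boundary values:
#include <cstdio>

int main() {
  unsigned char type = 0x90;      // hypothetical instance type above 0x7F
  unsigned char boundary = 0x7F;  // hypothetical FIRST_JS_OBJECT_TYPE

  bool above_equal = type >= boundary;                      // unsigned: true
  bool greater_equal = static_cast<signed char>(type) >=
                       static_cast<signed char>(boundary);  // signed: false!
  printf("unsigned=%d signed=%d\n", above_equal, greater_equal);
  return 0;
}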
} else if (check->Equals(Heap::boolean_symbol())) { - __ Cmp(answer.reg(), Factory::true_value()); + __ CompareRoot(answer.reg(), Heap::kTrueValueRootIndex); destination()->true_target()->Branch(equal); - __ Cmp(answer.reg(), Factory::false_value()); + __ CompareRoot(answer.reg(), Heap::kFalseValueRootIndex); answer.Unuse(); destination()->Split(equal); } else if (check->Equals(Heap::undefined_symbol())) { - __ Cmp(answer.reg(), Factory::undefined_value()); + __ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex); destination()->true_target()->Branch(equal); __ testl(answer.reg(), Immediate(kSmiTagMask)); @@ -3537,7 +3525,7 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) { } else if (check->Equals(Heap::object_symbol())) { __ testl(answer.reg(), Immediate(kSmiTagMask)); destination()->false_target()->Branch(zero); - __ Cmp(answer.reg(), Factory::null_value()); + __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex); destination()->true_target()->Branch(equal); // It can be an undetectable object. @@ -3831,7 +3819,7 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) { __ bind(&slow_case); // Move the undefined value into the result register, which will // trigger the slow case. - __ Move(temp.reg(), Factory::undefined_value()); + __ LoadRoot(temp.reg(), Heap::kUndefinedValueRootIndex); __ bind(&end); frame_->Push(&temp); @@ -4274,15 +4262,15 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) { // Fast case checks. // 'false' => false. - __ Cmp(value.reg(), Factory::false_value()); + __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex); dest->false_target()->Branch(equal); // 'true' => true. - __ Cmp(value.reg(), Factory::true_value()); + __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex); dest->true_target()->Branch(equal); // 'undefined' => false. - __ Cmp(value.reg(), Factory::undefined_value()); + __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex); dest->false_target()->Branch(equal); // Smi => false iff zero. @@ -4501,10 +4489,9 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) { value, &slow)); if (potential_slot->var()->mode() == Variable::CONST) { - __ Cmp(value.reg(), Factory::the_hole_value()); + __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex); done.Branch(not_equal, &value); - __ movq(value.reg(), Factory::undefined_value(), - RelocInfo::EMBEDDED_OBJECT); + __ LoadRoot(value.reg(), Heap::kUndefinedValueRootIndex); } // There is always control flow to slow from // ContextSlotOperandCheckExtensions so we have to jump around @@ -4542,9 +4529,9 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) { Comment cmnt(masm_, "[ Load const"); JumpTarget exit; __ movq(rcx, SlotOperand(slot, rcx)); - __ Cmp(rcx, Factory::the_hole_value()); + __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex); exit.Branch(not_equal); - __ movq(rcx, Factory::undefined_value(), RelocInfo::EMBEDDED_OBJECT); + __ LoadRoot(rcx, Heap::kUndefinedValueRootIndex); exit.Bind(); frame_->EmitPush(rcx); @@ -4598,7 +4585,7 @@ void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot, // indicates that we haven't loaded the arguments object yet, we // need to do it now. 
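// Sketch (assumed enum, not V8's real values) of the hole-check pattern
// the hunks above convert to CompareRoot: "the hole" marks a const slot
// that has not been initialized yet, and a read must yield undefined
// rather than leak the sentinel:
enum Value { kTheHole, kUndefined, kFortyTwo };

Value LoadConstSlot(Value slot) {
  // Mirrors: CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
  //          exit.Branch(not_equal);
  //          LoadRoot(rcx, Heap::kUndefinedValueRootIndex);
  return slot == kTheHole ? kUndefined : slot;
}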
JumpTarget exit; - __ Cmp(value.reg(), Factory::the_hole_value()); + __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex); frame_->Push(&value); exit.Branch(not_equal); Result arguments = StoreArgumentsObject(false); @@ -4659,7 +4646,7 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) { VirtualFrame::SpilledScope spilled_scope; Comment cmnt(masm_, "[ Init const"); __ movq(rcx, SlotOperand(slot, rcx)); - __ Cmp(rcx, Factory::the_hole_value()); + __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex); exit.Branch(not_equal); } @@ -4743,7 +4730,7 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions( __ movq(tmp.reg(), context); } // Load map for comparison into register, outside loop. - __ Move(kScratchRegister, Factory::global_context_map()); + __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex); __ bind(&next); // Terminate at global context. __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset)); @@ -4847,7 +4834,7 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) { // been assigned a proper value. skip_arguments = !arguments.handle()->IsTheHole(); } else { - __ Cmp(arguments.reg(), Factory::the_hole_value()); + __ CompareRoot(arguments.reg(), Heap::kTheHoleValueRootIndex); arguments.Unuse(); done.Branch(not_equal); } @@ -4985,7 +4972,7 @@ void CodeGenerator::Comparison(Condition cc, right_side.Unuse(); left_side.Unuse(); operand.ToRegister(); - __ Cmp(operand.reg(), Factory::null_value()); + __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex); if (strict) { operand.Unuse(); dest->Split(equal); @@ -4993,7 +4980,7 @@ void CodeGenerator::Comparison(Condition cc, // The 'null' value is only equal to 'undefined' if using non-strict // comparisons. dest->true_target()->Branch(equal); - __ Cmp(operand.reg(), Factory::undefined_value()); + __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex); dest->true_target()->Branch(equal); __ testl(operand.reg(), Immediate(kSmiTagMask)); dest->false_target()->Branch(equal); @@ -6113,7 +6100,7 @@ void Reference::GetValue(TypeofState typeof_state) { FixedArray::kHeaderSize - kHeapObjectTag)); elements.Unuse(); index.Unuse(); - __ Cmp(value.reg(), Factory::the_hole_value()); + __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex); deferred->Branch(equal); __ IncrementCounter(&Counters::keyed_load_inline, 1); @@ -6322,7 +6309,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) { __ movq(rax, Operand(rsp, 1 * kPointerSize)); // 'null' => false. - __ Cmp(rax, Factory::null_value()); + __ CompareRoot(rax, Heap::kNullValueRootIndex); __ j(equal, &false_result); // Get the map and type of the heap object. @@ -6353,7 +6340,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) { __ bind(¬_string); // HeapNumber => false iff +0, -0, or NaN. // These three cases set C3 when compared to zero in the FPU. - __ Cmp(rdx, Factory::heap_number_map()); + __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex); __ j(not_equal, &true_result); // TODO(x64): Don't use fp stack, use MMX registers? __ fldz(); // Load zero onto fp stack @@ -6399,7 +6386,7 @@ bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) { if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) { // If the product is zero and the non-zero factor is negative, // the spec requires us to return floating point negative zero. 
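// The rewrite just below replaces (left >= 0 && right >= 0) with the
// cheaper (left + right) >= 0. The two are equivalent when the product
// is zero: one factor must be zero, so the sum's sign is the sign of
// the other factor, and a negative factor forces the -0.0 result the
// spec requires. Sketch:
#include <cstdio>

bool FoldsToSmiZero(int left, int right) {
  long long answer = static_cast<long long>(left) * right;
  return answer == 0 && (left + right) >= 0;
}

int main() {
  printf("%d\n", FoldsToSmiZero(0, 5));   // 1: +0 can stay a smi
  printf("%d\n", FoldsToSmiZero(-3, 0));  // 0: must produce -0.0 instead
  return 0;
}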
- if (answer != 0 || (left >= 0 && right >= 0)) { + if (answer != 0 || (left + right) >= 0) { answer_object = Smi::FromInt(static_cast<int>(answer)); } } @@ -6467,24 +6454,54 @@ bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) { void UnarySubStub::Generate(MacroAssembler* masm) { Label slow; Label done; - + Label try_float; + Label special; // Check whether the value is a smi. __ testl(rax, Immediate(kSmiTagMask)); - // TODO(X64): Add inline code that handles floats, as on ia32 platform. - __ j(not_zero, &slow); + __ j(not_zero, &try_float); + // Enter runtime system if the value of the smi is zero // to make sure that we switch between 0 and -0. // Also enter it if the value of the smi is Smi::kMinValue __ testl(rax, Immediate(0x7FFFFFFE)); - __ j(zero, &slow); + __ j(zero, &special); __ neg(rax); __ jmp(&done); + + __ bind(&special); + // Either zero or -0x4000000, neither of which become a smi when negated. + __ testl(rax, rax); + __ j(not_zero, &slow); + __ Move(rax, Factory::minus_zero_value()); + __ jmp(&done); + // Enter runtime system. __ bind(&slow); __ pop(rcx); // pop return address __ push(rax); __ push(rcx); // push return address __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION); + __ jmp(&done); + + // Try floating point case. + __ bind(&try_float); + __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset)); + __ Cmp(rdx, Factory::heap_number_map()); + __ j(not_equal, &slow); + // Operand is a float, negate its value by flipping sign bit. + __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset)); + __ movq(kScratchRegister, Immediate(0x01)); + __ shl(kScratchRegister, Immediate(63)); + __ xor_(rdx, kScratchRegister); // Flip sign. + // rdx is value to store. + if (overwrite_) { + __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx); + } else { + FloatingPointHelper::AllocateHeapNumber(masm, &slow, rbx, rcx); + // rcx: allocated 'empty' number + __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx); + __ movq(rax, rcx); + } __ bind(&done); __ StubReturn(1); @@ -6559,7 +6576,7 @@ void CompareStub::Generate(MacroAssembler* masm) { // One operand is a smi. // Check whether the non-smi is a heap number. - ASSERT_EQ(1, kSmiTagMask); + ASSERT_EQ(1, static_cast<int>(kSmiTagMask)); // rcx still holds rax & kSmiTag, which is either zero or one. __ decq(rcx); // If rax is a smi, all 1s, else all 0s. __ movq(rbx, rdx); @@ -6766,7 +6783,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) { // Loop through the prototype chain looking for the function prototype. Label loop, is_instance, is_not_instance; - __ Move(kScratchRegister, Factory::null_value()); + __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex); __ bind(&loop); __ cmpq(rcx, rbx); __ j(equal, &is_instance); @@ -6966,11 +6983,11 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, // Call C function. #ifdef _WIN64 // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9 - // Store Arguments object on stack - __ movq(Operand(rsp, 1 * kPointerSize), r14); // argc. - __ movq(Operand(rsp, 2 * kPointerSize), r15); // argv. + // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots. + __ movq(Operand(rsp, 4 * kPointerSize), r14); // argc. + __ movq(Operand(rsp, 5 * kPointerSize), r15); // argv. // Pass a pointer to the Arguments object as the first argument. - __ lea(rcx, Operand(rsp, 1 * kPointerSize)); + __ lea(rcx, Operand(rsp, 4 * kPointerSize)); #else // ! defined(_WIN64) // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9. __ movq(rdi, r14); // argc. 
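// Why argc/argv moved from slots 1-2 up to slots 4-5 in the hunk above:
// the Windows x64 ABI gives every callee a 32-byte "shadow" area at the
// caller's rsp in which the four register parameters may be spilled, so
// caller-owned data must live above it. Layout at the call site, with
// 8-byte slots:
//
//   rsp + 0*8 .. 3*8   shadow space for rcx/rdx/r8/r9
//                      (the callee is free to overwrite these)
//   rsp + 4*8          argc
//   rsp + 5*8          argv
//
// Passing rcx = rsp + 4*8 hands the callee a pointer to {argc, argv};
// the same requirement explains the 6 * kPointerSize reservation in
// EnterExitFrame() later in this patch.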
@@ -7020,7 +7037,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, // Special handling of termination exceptions which are uncatchable // by javascript code. - __ Cmp(rax, Factory::termination_exception()); + __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex); __ j(equal, throw_termination_exception); // Handle normal exception. @@ -7330,13 +7347,10 @@ void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm, scratch, no_reg, need_gc, - false); + TAG_OBJECT); // Set the map and tag the result. - __ addq(result, Immediate(kHeapObjectTag)); - __ movq(kScratchRegister, - Factory::heap_number_map(), - RelocInfo::EMBEDDED_OBJECT); + __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex); __ movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister); } @@ -7737,18 +7751,29 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) { __ fild_s(Operand(rsp, 0 * kPointerSize)); __ fucompp(); __ fnstsw_ax(); - __ sahf(); // TODO(X64): Not available. - __ j(not_zero, &operand_conversion_failure); - __ j(parity_even, &operand_conversion_failure); - + if (CpuFeatures::IsSupported(CpuFeatures::SAHF)) { + __ sahf(); + __ j(not_zero, &operand_conversion_failure); + __ j(parity_even, &operand_conversion_failure); + } else { + __ and_(rax, Immediate(0x4400)); + __ cmpl(rax, Immediate(0x4000)); + __ j(not_zero, &operand_conversion_failure); + } // Check if left operand is int32. __ fist_s(Operand(rsp, 1 * kPointerSize)); __ fild_s(Operand(rsp, 1 * kPointerSize)); __ fucompp(); __ fnstsw_ax(); - __ sahf(); // TODO(X64): Not available. Test bits in ax directly - __ j(not_zero, &operand_conversion_failure); - __ j(parity_even, &operand_conversion_failure); + if (CpuFeatures::IsSupported(CpuFeatures::SAHF)) { + __ sahf(); + __ j(not_zero, &operand_conversion_failure); + __ j(parity_even, &operand_conversion_failure); + } else { + __ and_(rax, Immediate(0x4400)); + __ cmpl(rax, Immediate(0x4000)); + __ j(not_zero, &operand_conversion_failure); + } } // Get int32 operands and perform bitop. diff --git a/V8Binding/v8/src/x64/ic-x64.cc b/V8Binding/v8/src/x64/ic-x64.cc index d41a56c..e2f7c30 100644 --- a/V8Binding/v8/src/x64/ic-x64.cc +++ b/V8Binding/v8/src/x64/ic-x64.cc @@ -339,7 +339,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ bind(&fast); __ movq(rax, Operand(rcx, rax, times_pointer_size, FixedArray::kHeaderSize - kHeapObjectTag)); - __ Cmp(rax, Factory::the_hole_value()); + __ CompareRoot(rax, Heap::kTheHoleValueRootIndex); // In case the loaded value is the_hole we have to consult GetProperty // to ensure the prototype chain is searched. __ j(equal, &slow); @@ -613,9 +613,9 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { // Check for boolean. __ bind(&non_string); - __ Cmp(rdx, Factory::true_value()); + __ CompareRoot(rdx, Heap::kTrueValueRootIndex); __ j(equal, &boolean); - __ Cmp(rdx, Factory::false_value()); + __ CompareRoot(rdx, Heap::kFalseValueRootIndex); __ j(not_equal, &miss); __ bind(&boolean); StubCompiler::GenerateLoadGlobalFunctionPrototype( @@ -849,7 +849,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) { // Check that the receiver is a valid JS object. __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx); - __ j(less, &miss); + __ j(below, &miss); // If this assert fails, we have to check upper bound too. 
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); diff --git a/V8Binding/v8/src/x64/macro-assembler-x64.cc b/V8Binding/v8/src/x64/macro-assembler-x64.cc index 10d4503..a42f628 100644 --- a/V8Binding/v8/src/x64/macro-assembler-x64.cc +++ b/V8Binding/v8/src/x64/macro-assembler-x64.cc @@ -46,6 +46,22 @@ MacroAssembler::MacroAssembler(void* buffer, int size) } +void MacroAssembler::LoadRoot(Register destination, + Heap::RootListIndex index) { + movq(destination, Operand(r13, index << kPointerSizeLog2)); +} + + +void MacroAssembler::PushRoot(Heap::RootListIndex index) { + push(Operand(r13, index << kPointerSizeLog2)); +} + + +void MacroAssembler::CompareRoot(Register with, + Heap::RootListIndex index) { + cmpq(with, Operand(r13, index << kPointerSizeLog2)); +} + static void RecordWriteHelper(MacroAssembler* masm, Register object, @@ -276,7 +292,7 @@ void MacroAssembler::IllegalOperation(int num_arguments) { if (num_arguments > 0) { addq(rsp, Immediate(num_arguments * kPointerSize)); } - movq(rax, Factory::undefined_value(), RelocInfo::EMBEDDED_OBJECT); + LoadRoot(rax, Heap::kUndefinedValueRootIndex); } @@ -584,8 +600,14 @@ void MacroAssembler::FCmp() { fcompp(); push(rax); fnstsw_ax(); - // TODO(X64): Check that sahf is safe to use, using CPUProbe. - sahf(); + if (CpuFeatures::IsSupported(CpuFeatures::SAHF)) { + sahf(); + } else { + shrl(rax, Immediate(8)); + and_(rax, Immediate(0xFF)); + push(rax); + popfq(); + } pop(rax); } @@ -628,7 +650,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function, // If the prototype or initial map is the hole, don't return it and // simply miss the cache instead. This will allow us to allocate a // prototype object on-demand in the runtime system. - Cmp(result, Factory::the_hole_value()); + CompareRoot(result, Heap::kTheHoleValueRootIndex); j(equal, miss); // If the function does not have an initial map, we're done. @@ -994,12 +1016,6 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) { } #endif - // Reserve space for the Arguments object. The Windows 64-bit ABI - // requires us to pass this structure as a pointer to its location on - // the stack. We also need backing space for the pointer, even though - // it is passed in a register. - subq(rsp, Immediate(3 * kPointerSize)); - // Get the required frame alignment for the OS. static const int kFrameAlignment = OS::ActivationFrameAlignment(); if (kFrameAlignment > 0) { @@ -1008,6 +1024,17 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) { and_(rsp, kScratchRegister); } +#ifdef _WIN64 + // Reserve space for the Arguments object. The Windows 64-bit ABI + // requires us to pass this structure as a pointer to its location on + // the stack. The structure contains 2 pointers. + // The structure on the stack must be 16-byte aligned. + // We also need backing space for 4 parameters, even though + // we only pass one parameter, and it is in a register. + subq(rsp, Immediate(6 * kPointerSize)); + ASSERT(kFrameAlignment == 2 * kPointerSize); // Change the padding if needed. +#endif + // Patch the saved entry sp. movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp); } @@ -1182,12 +1209,12 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, // Preserve original value of holder_reg. 
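// Background for the FCmp() fallback above and the 0x4400/0x4000 masks
// in the binary-op stub earlier: fnstsw_ax leaves the x87 status word
// in ax with C0 at bit 8, C2 at bit 10, and C3 at bit 14; sahf copies
// ah into CF/PF/ZF (hence FCmp's shift-by-8 and push/popfq when sahf is
// unavailable). Masking ax directly achieves the same test. Sketch:
#include <cstdint>
#include <cstdio>

const uint16_t kC0 = 1 << 8;   // -> CF after sahf
const uint16_t kC2 = 1 << 10;  // -> PF after sahf (set when unordered)
const uint16_t kC3 = 1 << 14;  // -> ZF after sahf (set when equal)

// "and rax, 0x4400; cmp rax, 0x4000": ordered-and-equal after fucompp.
bool OrderedEqual(uint16_t status_word) {
  return (status_word & (kC3 | kC2)) == kC3;
}

int main() {
  printf("%d\n", OrderedEqual(kC3));        // 1: equal
  printf("%d\n", OrderedEqual(kC3 | kC2));  // 0: unordered (NaN)
  printf("%d\n", OrderedEqual(kC0));        // 0: less than
  return 0;
}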
push(holder_reg); movq(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset)); - Cmp(holder_reg, Factory::null_value()); + CompareRoot(holder_reg, Heap::kNullValueRootIndex); Check(not_equal, "JSGlobalProxy::context() should not be null."); // Read the first word and compare to global_context_map(), movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset)); - Cmp(holder_reg, Factory::global_context_map()); + CompareRoot(holder_reg, Heap::kGlobalContextMapRootIndex); Check(equal, "JSGlobalObject::global_context should be a global context."); pop(holder_reg); } @@ -1204,18 +1231,23 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, } -void MacroAssembler::LoadAllocationTopHelper( - Register result, - Register result_end, - Register scratch, - bool result_contains_top_on_entry) { +void MacroAssembler::LoadAllocationTopHelper(Register result, + Register result_end, + Register scratch, + AllocationFlags flags) { ExternalReference new_space_allocation_top = ExternalReference::new_space_allocation_top_address(); // Just return if allocation top is already known. - if (result_contains_top_on_entry) { + if ((flags & RESULT_CONTAINS_TOP) != 0) { // No use of scratch if allocation top is provided. ASSERT(scratch.is(no_reg)); +#ifdef DEBUG + // Assert that result actually contains top on entry. + movq(kScratchRegister, new_space_allocation_top); + cmpq(result, Operand(kScratchRegister, 0)); + Check(equal, "Unexpected allocation top"); +#endif return; } @@ -1252,20 +1284,16 @@ void MacroAssembler::UpdateAllocationTopHelper(Register result_end, } -void MacroAssembler::AllocateObjectInNewSpace( - int object_size, - Register result, - Register result_end, - Register scratch, - Label* gc_required, - bool result_contains_top_on_entry) { +void MacroAssembler::AllocateObjectInNewSpace(int object_size, + Register result, + Register result_end, + Register scratch, + Label* gc_required, + AllocationFlags flags) { ASSERT(!result.is(result_end)); // Load address of new object into result. - LoadAllocationTopHelper(result, - result_end, - scratch, - result_contains_top_on_entry); + LoadAllocationTopHelper(result, result_end, scratch, flags); // Calculate new top and bail out if new space is exhausted. ExternalReference new_space_allocation_limit = @@ -1277,25 +1305,26 @@ void MacroAssembler::AllocateObjectInNewSpace( // Update allocation top. UpdateAllocationTopHelper(result_end, scratch); + + // Tag the result if requested. + if ((flags & TAG_OBJECT) != 0) { + addq(result, Immediate(kHeapObjectTag)); + } } -void MacroAssembler::AllocateObjectInNewSpace( - int header_size, - ScaleFactor element_size, - Register element_count, - Register result, - Register result_end, - Register scratch, - Label* gc_required, - bool result_contains_top_on_entry) { +void MacroAssembler::AllocateObjectInNewSpace(int header_size, + ScaleFactor element_size, + Register element_count, + Register result, + Register result_end, + Register scratch, + Label* gc_required, + AllocationFlags flags) { ASSERT(!result.is(result_end)); // Load address of new object into result. - LoadAllocationTopHelper(result, - result_end, - scratch, - result_contains_top_on_entry); + LoadAllocationTopHelper(result, result_end, scratch, flags); // Calculate new top and bail out if new space is exhausted. ExternalReference new_space_allocation_limit = @@ -1307,23 +1336,22 @@ void MacroAssembler::AllocateObjectInNewSpace( // Update allocation top. 
UpdateAllocationTopHelper(result_end, scratch); -} + // Tag the result if requested. + if ((flags & TAG_OBJECT) != 0) { + addq(result, Immediate(kHeapObjectTag)); + } +} -void MacroAssembler::AllocateObjectInNewSpace( - Register object_size, - Register result, - Register result_end, - Register scratch, - Label* gc_required, - bool result_contains_top_on_entry) { +void MacroAssembler::AllocateObjectInNewSpace(Register object_size, + Register result, + Register result_end, + Register scratch, + Label* gc_required, + AllocationFlags flags) { // Load address of new object into result. - LoadAllocationTopHelper(result, - result_end, - scratch, - result_contains_top_on_entry); - + LoadAllocationTopHelper(result, result_end, scratch, flags); // Calculate new top and bail out if new space is exhausted. ExternalReference new_space_allocation_limit = @@ -1338,6 +1366,11 @@ void MacroAssembler::AllocateObjectInNewSpace( // Update allocation top. UpdateAllocationTopHelper(result_end, scratch); + + // Tag the result if requested. + if ((flags & TAG_OBJECT) != 0) { + addq(result, Immediate(kHeapObjectTag)); + } } diff --git a/V8Binding/v8/src/x64/macro-assembler-x64.h b/V8Binding/v8/src/x64/macro-assembler-x64.h index 31135d9..8fc7a9c 100644 --- a/V8Binding/v8/src/x64/macro-assembler-x64.h +++ b/V8Binding/v8/src/x64/macro-assembler-x64.h @@ -1,4 +1,4 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Copyright 2009 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -42,30 +42,15 @@ static const Register kScratchRegister = r10; class JumpTarget; -// Helper types to make flags easier to read at call sites. -enum InvokeFlag { - CALL_FUNCTION, - JUMP_FUNCTION -}; - -enum CodeLocation { - IN_JAVASCRIPT, - IN_JS_ENTRY, - IN_C_ENTRY -}; - -enum HandlerType { - TRY_CATCH_HANDLER, - TRY_FINALLY_HANDLER, - JS_ENTRY_HANDLER -}; - - // MacroAssembler implements a collection of frequently used macros. class MacroAssembler: public Assembler { public: MacroAssembler(void* buffer, int size); + void LoadRoot(Register destination, Heap::RootListIndex index); + void CompareRoot(Register with, Heap::RootListIndex index); + void PushRoot(Heap::RootListIndex index); + // --------------------------------------------------------------------------- // GC Support @@ -240,7 +225,7 @@ class MacroAssembler: public Assembler { Register result_end, Register scratch, Label* gc_required, - bool result_contains_top_on_entry); + AllocationFlags flags); void AllocateObjectInNewSpace(int header_size, ScaleFactor element_size, @@ -249,14 +234,14 @@ class MacroAssembler: public Assembler { Register result_end, Register scratch, Label* gc_required, - bool result_contains_top_on_entry); + AllocationFlags flags); void AllocateObjectInNewSpace(Register object_size, Register result, Register result_end, Register scratch, Label* gc_required, - bool result_contains_top_on_entry); + AllocationFlags flags); // Undo allocation in new space. The object passed and objects allocated after // it will no longer be allocated. 
Make sure that no pointers are left to the @@ -388,7 +373,7 @@ class MacroAssembler: public Assembler { void LoadAllocationTopHelper(Register result, Register result_end, Register scratch, - bool result_contains_top_on_entry); + AllocationFlags flags); void UpdateAllocationTopHelper(Register result_end, Register scratch); }; diff --git a/V8Binding/v8/src/x64/regexp-macro-assembler-x64.cc b/V8Binding/v8/src/x64/regexp-macro-assembler-x64.cc index 4c6a84d..963d80e 100644 --- a/V8Binding/v8/src/x64/regexp-macro-assembler-x64.cc +++ b/V8Binding/v8/src/x64/regexp-macro-assembler-x64.cc @@ -39,6 +39,8 @@ namespace v8 { namespace internal { +#ifdef V8_NATIVE_REGEXP + /* * This assembler uses the following register assignment convention * - rdx : currently loaded character(s) as ASCII or UC16. Must be loaded using @@ -110,6 +112,7 @@ RegExpMacroAssemblerX64::RegExpMacroAssemblerX64( success_label_(), backtrack_label_(), exit_label_() { + ASSERT_EQ(0, registers_to_save % 2); __ jmp(&entry_label_); // We'll write the entry code when we know more. __ bind(&start_label_); // And then continue from here. } @@ -350,8 +353,9 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase( // Set byte_length. __ movq(rdx, rbx); #endif - Address function_address = FUNCTION_ADDR(&CaseInsensitiveCompareUC16); - CallCFunction(function_address, num_arguments); + ExternalReference compare = + ExternalReference::re_case_insensitive_compare_uc16(); + CallCFunction(compare, num_arguments); // Restore original values before reacting on result value. __ Move(code_object_pointer(), masm_->CodeObject()); @@ -608,7 +612,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) { // MSVC passes arguments in rcx, rdx, r8, r9, with backing stack slots. // Store register parameters in pre-allocated stack slots, __ movq(Operand(rbp, kInputString), rcx); - __ movq(Operand(rbp, kStartIndex), rdx); + __ movzxlq(Operand(rbp, kStartIndex), rdx); // Passed as int in eax. __ movq(Operand(rbp, kInputStart), r8); __ movq(Operand(rbp, kInputEnd), r9); // Callee-save on Win64. @@ -707,7 +711,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) { __ Move(code_object_pointer(), masm_->CodeObject()); // Load previous char as initial value of current-character. Label at_start; - __ cmpq(Operand(rbp, kAtStart), Immediate(0)); + __ cmpb(Operand(rbp, kAtStart), Immediate(0)); __ j(not_equal, &at_start); LoadCurrentCharacterUnchecked(-1, 1); // Load previous char. __ jmp(&start_label_); @@ -808,11 +812,12 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) { // First argument, backtrack stackpointer, is already in rcx. __ lea(rdx, Operand(rbp, kStackHighEnd)); // Second argument #else - // AMD64 ABI passes paremeters in rdi, rsi. + // AMD64 ABI passes parameters in rdi, rsi. __ movq(rdi, backtrack_stackpointer()); // First argument. __ lea(rsi, Operand(rbp, kStackHighEnd)); // Second argument. #endif - CallCFunction(FUNCTION_ADDR(&GrowStack), num_arguments); + ExternalReference grow_stack = ExternalReference::re_grow_stack(); + CallCFunction(grow_stack, num_arguments); // If return NULL, we have failed to grow the stack, and // must exit with a stack-overflow exception. __ testq(rax, rax); @@ -889,7 +894,9 @@ void RegExpMacroAssemblerX64::LoadCurrentCharacter(int cp_offset, int characters) { ASSERT(cp_offset >= -1); // ^ and \b can look behind one character. ASSERT(cp_offset < (1<<30)); // Be sane! 
(And ensure negation works) - CheckPosition(cp_offset + characters - 1, on_end_of_input); + if (check_bounds) { + CheckPosition(cp_offset + characters - 1, on_end_of_input); + } LoadCurrentCharacterUnchecked(cp_offset, characters); } @@ -997,7 +1004,9 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() { // return address). __ lea(rdi, Operand(rsp, -kPointerSize)); #endif - CallCFunction(FUNCTION_ADDR(&CheckStackGuardState), num_arguments); + ExternalReference stack_check = + ExternalReference::re_check_stack_guard_state(); + CallCFunction(stack_check, num_arguments); } @@ -1080,23 +1089,6 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address, } -Address RegExpMacroAssemblerX64::GrowStack(Address stack_pointer, - Address* stack_base) { - size_t size = RegExpStack::stack_capacity(); - Address old_stack_base = RegExpStack::stack_base(); - ASSERT(old_stack_base == *stack_base); - ASSERT(stack_pointer <= old_stack_base); - ASSERT(static_cast<size_t>(old_stack_base - stack_pointer) <= size); - Address new_stack_base = RegExpStack::EnsureCapacity(size * 2); - if (new_stack_base == NULL) { - return NULL; - } - *stack_base = new_stack_base; - intptr_t stack_content_size = old_stack_base - stack_pointer; - return new_stack_base - stack_content_size; -} - - Operand RegExpMacroAssemblerX64::register_location(int register_index) { ASSERT(register_index < (1<<30)); if (num_registers_ <= register_index) { @@ -1256,17 +1248,16 @@ void RegExpMacroAssemblerX64::FrameAlign(int num_arguments) { } -void RegExpMacroAssemblerX64::CallCFunction(Address function_address, +void RegExpMacroAssemblerX64::CallCFunction(ExternalReference function, int num_arguments) { - // Don't compile regexps with serialization enabled. The addresses of the C++ - // function being called isn't relocatable. - ASSERT(!Serializer::enabled()); - __ movq(rax, reinterpret_cast<intptr_t>(function_address), RelocInfo::NONE); + __ movq(rax, function); __ call(rax); ASSERT(OS::ActivationFrameAlignment() != 0); #ifdef _WIN64 __ movq(rsp, Operand(rsp, num_arguments * kPointerSize)); #else + // All arguments passed in registers. + ASSERT(num_arguments <= 6); __ pop(rsp); #endif } @@ -1297,5 +1288,12 @@ void RegExpMacroAssemblerX64::LoadCurrentCharacterUnchecked(int cp_offset, } +void RegExpCEntryStub::Generate(MacroAssembler* masm_) { + __ int3(); // Unused on x64. +} + #undef __ + +#endif // V8_NATIVE_REGEXP + }} // namespace v8::internal diff --git a/V8Binding/v8/src/x64/regexp-macro-assembler-x64.h b/V8Binding/v8/src/x64/regexp-macro-assembler-x64.h index a270bc1..3e6720d 100644 --- a/V8Binding/v8/src/x64/regexp-macro-assembler-x64.h +++ b/V8Binding/v8/src/x64/regexp-macro-assembler-x64.h @@ -31,6 +31,8 @@ namespace v8 { namespace internal { +#ifdef V8_NATIVE_REGEXP + class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler { public: RegExpMacroAssemblerX64(Mode mode, int registers_to_save); @@ -113,6 +115,13 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler { int* output, bool at_start); + // Called from RegExp if the stack-guard is triggered. + // If the code object is relocated, the return address is fixed before + // returning. + static int CheckStackGuardState(Address* return_address, + Code* re_code, + Address re_frame); + private: // Offsets from rbp of function parameters and stored registers. 
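// Why CallCFunction now takes an ExternalReference instead of a raw
// Address (hunk above): a C++ function's address is only meaningful in
// the process that compiled it, so generated code holding raw addresses
// could never be serialized. External references go through the id
// table populated in serialize.cc earlier in this patch (ids 18-20) and
// are re-resolved after deserialization. Minimal sketch of the id-table
// idea, with assumed names:
#include <cassert>
#include <cstdio>

static double Half(double x)  { return x / 2; }
static double Twice(double x) { return x * 2; }

typedef double (*MathFn)(double);
static MathFn reference_table[] = { Half, Twice };  // rebuilt per process

int Encode(MathFn fn) {  // serializer side: pointer -> stable id
  for (int i = 0; i < 2; i++)
    if (reference_table[i] == fn) return i;
  assert(false && "unregistered external reference");
  return -1;
}

MathFn Decode(int id) { return reference_table[id]; }  // deserializer side

int main() {
  printf("%.1f\n", Decode(Encode(Twice))(21.0));  // 42.0
  return 0;
}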
static const int kFramePointer = 0; @@ -120,16 +129,18 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler { static const int kReturn_eip = kFramePointer + kPointerSize; static const int kFrameAlign = kReturn_eip + kPointerSize; -#ifdef __MSVC__ +#ifdef _WIN64 // Parameters (first four passed as registers, but with room on stack). // In Microsoft 64-bit Calling Convention, there is room on the callers // stack (before the return address) to spill parameter registers. We // use this space to store the register passed parameters. static const int kInputString = kFrameAlign; + // StartIndex is passed as 32 bit int. static const int kStartIndex = kInputString + kPointerSize; static const int kInputStart = kStartIndex + kPointerSize; static const int kInputEnd = kInputStart + kPointerSize; static const int kRegisterOutput = kInputEnd + kPointerSize; + // AtStart is passed as 32 bit int (values 0 or 1). static const int kAtStart = kRegisterOutput + kPointerSize; static const int kStackHighEnd = kAtStart + kPointerSize; #else @@ -145,7 +156,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler { static const int kStackHighEnd = kFrameAlign; #endif -#ifdef __MSVC__ +#ifdef _WIN64 // Microsoft calling convention has three callee-saved registers // (that we are using). We push these after the frame pointer. static const int kBackup_rsi = kFramePointer - kPointerSize; @@ -181,23 +192,9 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler { // Check whether we are exceeding the stack limit on the backtrack stack. void CheckStackLimit(); - // Called from RegExp if the stack-guard is triggered. - // If the code object is relocated, the return address is fixed before - // returning. - static int CheckStackGuardState(Address* return_address, - Code* re_code, - Address re_frame); - // Generate a call to CheckStackGuardState. void CallCheckStackGuardState(); - // Called from RegExp if the backtrack stack limit is hit. - // Tries to expand the stack. Returns the new stack-pointer if - // successful, and updates the stack_top address, or returns 0 if unable - // to grow the stack. - // This function must not trigger a garbage collection. - static Address GrowStack(Address stack_pointer, Address* stack_top); - // The rbp-relative location of a regexp register. Operand register_location(int register_index); @@ -264,7 +261,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler { // by FrameAlign. The called function is not allowed to trigger a garbage // collection, since that might move the code and invalidate the return // address (unless this is somehow accounted for by the called function). - inline void CallCFunction(Address function_address, int num_arguments); + inline void CallCFunction(ExternalReference function, int num_arguments); MacroAssembler* masm_; @@ -290,6 +287,8 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler { Label stack_overflow_label_; }; +#endif // V8_NATIVE_REGEXP + }} // namespace v8::internal #endif // V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_ diff --git a/V8Binding/v8/src/x64/register-allocator-x64-inl.h b/V8Binding/v8/src/x64/register-allocator-x64-inl.h index 54729d6..d630b33 100644 --- a/V8Binding/v8/src/x64/register-allocator-x64-inl.h +++ b/V8Binding/v8/src/x64/register-allocator-x64-inl.h @@ -51,18 +51,18 @@ int RegisterAllocator::ToNumber(Register reg) { 2, // rcx 3, // rdx 1, // rbx - -1, // rsp - -1, // rbp - -1, // rsi + -1, // rsp Stack pointer. + -1, // rbp Frame pointer. + -1, // rsi Context. 
4, // rdi 5, // r8 6, // r9 - -1, // r10 - 7, // r11 - 11, // r12 - 10, // r13 - 8, // r14 - 9 // r15 + -1, // r10 Scratch register. + 9, // r11 + 10, // r12 + -1, // r13 Roots array. This is callee saved. + 7, // r14 + 8 // r15 }; return kNumbers[reg.code()]; } @@ -71,7 +71,7 @@ int RegisterAllocator::ToNumber(Register reg) { Register RegisterAllocator::ToRegister(int num) { ASSERT(num >= 0 && num < kNumRegisters); const Register kRegisters[] = - { rax, rbx, rcx, rdx, rdi, r8, r9, r11, r14, r15, r13, r12 }; + { rax, rbx, rcx, rdx, rdi, r8, r9, r14, r15, r11, r12 }; return kRegisters[num]; } diff --git a/V8Binding/v8/src/x64/register-allocator-x64.h b/V8Binding/v8/src/x64/register-allocator-x64.h index 8672796..8d666d2 100644 --- a/V8Binding/v8/src/x64/register-allocator-x64.h +++ b/V8Binding/v8/src/x64/register-allocator-x64.h @@ -33,9 +33,7 @@ namespace internal { class RegisterAllocatorConstants : public AllStatic { public: - // Register allocation is not yet implemented on x64, but C++ - // forbids 0-length arrays so we use 1 as the number of registers. - static const int kNumRegisters = 12; + static const int kNumRegisters = 11; static const int kInvalidRegister = -1; }; diff --git a/V8Binding/v8/src/x64/simulator-x64.h b/V8Binding/v8/src/x64/simulator-x64.h index 6b4d718..184c166 100644 --- a/V8Binding/v8/src/x64/simulator-x64.h +++ b/V8Binding/v8/src/x64/simulator-x64.h @@ -45,4 +45,9 @@ (reinterpret_cast<uintptr_t>(this) >= limit ? \ reinterpret_cast<uintptr_t>(this) - limit : 0) +// Call the generated regexp code directly. The entry function pointer should +// expect seven int/pointer sized arguments and return an int. +#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \ + entry(p0, p1, p2, p3, p4, p5, p6) + #endif // V8_X64_SIMULATOR_X64_H_ diff --git a/V8Binding/v8/src/x64/stub-cache-x64.cc b/V8Binding/v8/src/x64/stub-cache-x64.cc index 98975fb..1443b87 100644 --- a/V8Binding/v8/src/x64/stub-cache-x64.cc +++ b/V8Binding/v8/src/x64/stub-cache-x64.cc @@ -434,7 +434,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED { holder_obj); Label interceptor_failed; - __ Cmp(rax, Factory::no_interceptor_result_sentinel()); + __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex); __ j(equal, &interceptor_failed); __ LeaveInternalFrame(); __ ret(0); @@ -612,7 +612,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { __ pop(receiver); // restore holder __ LeaveInternalFrame(); - __ Cmp(rax, Factory::no_interceptor_result_sentinel()); + __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex); Label invoke; __ j(not_equal, &invoke); @@ -755,9 +755,9 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, case BOOLEAN_CHECK: { Label fast; // Check that the object is a boolean. - __ Cmp(rdx, Factory::true_value()); + __ CompareRoot(rdx, Heap::kTrueValueRootIndex); __ j(equal, &fast); - __ Cmp(rdx, Factory::false_value()); + __ CompareRoot(rdx, Heap::kFalseValueRootIndex); __ j(not_equal, &miss); __ bind(&fast); // Check that the maps starting from the prototype haven't changed. @@ -1125,10 +1125,10 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object, // Check for deleted property if property can actually be deleted. 
if (!is_dont_delete) { - __ Cmp(rax, Factory::the_hole_value()); + __ CompareRoot(rax, Heap::kTheHoleValueRootIndex); __ j(equal, &miss); } else if (FLAG_debug_code) { - __ Cmp(rax, Factory::the_hole_value()); + __ CompareRoot(rax, Heap::kTheHoleValueRootIndex); __ Check(not_equal, "DontDelete cells can't contain the hole"); } @@ -1738,9 +1738,129 @@ void StubCompiler::GenerateLoadConstant(JSObject* object, } +// Specialized stub for constructing objects from functions which have only +// simple assignments of the form this.x = ...; in their body. Object* ConstructStubCompiler::CompileConstructStub( SharedFunctionInfo* shared) { - // Not implemented yet - just jump to generic stub. + // ----------- S t a t e ------------- + // -- rax : argc + // -- rdi : constructor + // -- rsp[0] : return address + // -- rsp[4] : last argument + // ----------------------------------- + Label generic_stub_call; + + // Use r8 for holding undefined which is used in several places below. + __ Move(r8, Factory::undefined_value()); + +#ifdef ENABLE_DEBUGGER_SUPPORT + // Check to see whether there are any break points in the function code. If + // there are, jump to the generic constructor stub which calls the actual + // code for the function thereby hitting the break points. + __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); + __ movq(rbx, FieldOperand(rbx, SharedFunctionInfo::kDebugInfoOffset)); + __ cmpq(rbx, r8); + __ j(not_equal, &generic_stub_call); +#endif + + // Load the initial map and verify that it is in fact a map. + __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset)); + // Will both indicate a NULL and a Smi. + __ testq(rbx, Immediate(kSmiTagMask)); + __ j(zero, &generic_stub_call); + __ CmpObjectType(rbx, MAP_TYPE, rcx); + __ j(not_equal, &generic_stub_call); + +#ifdef DEBUG + // Cannot construct functions this way. + // rdi: constructor + // rbx: initial map + __ CmpInstanceType(rbx, JS_FUNCTION_TYPE); + __ Assert(not_equal, "Function constructed by construct stub."); +#endif + + // Now allocate the JSObject in new space. + // rdi: constructor + // rbx: initial map + __ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset)); + __ shl(rcx, Immediate(kPointerSizeLog2)); + __ AllocateObjectInNewSpace(rcx, + rdx, + rcx, + no_reg, + &generic_stub_call, + NO_ALLOCATION_FLAGS); + + // Allocated the JSObject, now initialize the fields and add the heap tag. + // rbx: initial map + // rdx: JSObject (untagged) + __ movq(Operand(rdx, JSObject::kMapOffset), rbx); + __ Move(rbx, Factory::empty_fixed_array()); + __ movq(Operand(rdx, JSObject::kPropertiesOffset), rbx); + __ movq(Operand(rdx, JSObject::kElementsOffset), rbx); + + // rax: argc + // rdx: JSObject (untagged) + // Load the address of the first in-object property into r9. + __ lea(r9, Operand(rdx, JSObject::kHeaderSize)); + // Calculate the location of the first argument. The stack contains only the + // return address on top of the argc arguments. + __ lea(rcx, Operand(rsp, rax, times_pointer_size, 0)); + + // rax: argc + // rcx: first argument + // rdx: JSObject (untagged) + // r8: undefined + // r9: first in-object property of the JSObject + // Fill the initialized properties with a constant value or a passed argument + // depending on the this.x = ...; assignment in the function. + for (int i = 0; i < shared->this_property_assignments_count(); i++) { + if (shared->IsThisPropertyAssignmentArgument(i)) { + Label not_passed; + // Set the property to undefined.
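+      // (If the corresponding argument turns out not to have been passed,
+      // the property simply keeps this undefined value.)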
+ __ movq(Operand(r9, i * kPointerSize), r8); + // Check if the argument assigned to the property is actually passed. + int arg_number = shared->GetThisPropertyAssignmentArgument(i); + __ cmpq(rax, Immediate(arg_number)); + __ j(below_equal, &not_passed); + // Argument passed - find it on the stack. + __ movq(rbx, Operand(rcx, arg_number * -kPointerSize)); + __ movq(Operand(r9, i * kPointerSize), rbx); + __ bind(&not_passed); + } else { + // Set the property to the constant value. + Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i)); + __ Move(Operand(r9, i * kPointerSize), constant); + } + } + + // Fill the unused in-object property fields with undefined. + for (int i = shared->this_property_assignments_count(); + i < shared->CalculateInObjectProperties(); + i++) { + __ movq(Operand(r9, i * kPointerSize), r8); + } + + // rax: argc + // rdx: JSObject (untagged) + // Move argc to rbx and the JSObject to return to rax and tag it. + __ movq(rbx, rax); + __ movq(rax, rdx); + __ or_(rax, Immediate(kHeapObjectTag)); + + // rax: JSObject + // rbx: argc + // Remove caller arguments and receiver from the stack and return. + __ pop(rcx); + __ lea(rsp, Operand(rsp, rbx, times_pointer_size, 1 * kPointerSize)); + __ push(rcx); + __ IncrementCounter(&Counters::constructed_objects, 1); + __ IncrementCounter(&Counters::constructed_objects_stub, 1); + __ ret(0); + + // Jump to the generic stub in case the specialized code cannot handle the + // construction. + __ bind(&generic_stub_call); Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric); Handle<Code> generic_construct_stub(code); __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); diff --git a/V8Binding/v8/src/x64/virtual-frame-x64.cc b/V8Binding/v8/src/x64/virtual-frame-x64.cc index fb911a1..c2866a7 100644 --- a/V8Binding/v8/src/x64/virtual-frame-x64.cc +++ b/V8Binding/v8/src/x64/virtual-frame-x64.cc @@ -205,6 +205,14 @@ void VirtualFrame::EmitPush(Handle<Object> value) { } +void VirtualFrame::EmitPush(Heap::RootListIndex index) { + ASSERT(stack_pointer_ == element_count() - 1); + elements_.Add(FrameElement::MemoryElement()); + stack_pointer_++; + __ PushRoot(index); +} + + void VirtualFrame::Drop(int count) { ASSERT(count >= 0); ASSERT(height() >= count); diff --git a/V8Binding/v8/src/x64/virtual-frame-x64.h b/V8Binding/v8/src/x64/virtual-frame-x64.h index 577a18b..006148d 100644 --- a/V8Binding/v8/src/x64/virtual-frame-x64.h +++ b/V8Binding/v8/src/x64/virtual-frame-x64.h @@ -375,6 +375,7 @@ class VirtualFrame : public ZoneObject { // corresponding push instruction. void EmitPush(Register reg); void EmitPush(const Operand& operand); + void EmitPush(Heap::RootListIndex index); void EmitPush(Immediate immediate); // Uses kScratchRegister, emits appropriate relocation info.
void EmitPush(Handle<Object> value); diff --git a/V8Binding/v8/test/cctest/test-api.cc b/V8Binding/v8/test/cctest/test-api.cc index d192ebb..80f91d3 100644 --- a/V8Binding/v8/test/cctest/test-api.cc +++ b/V8Binding/v8/test/cctest/test-api.cc @@ -2673,40 +2673,67 @@ THREADED_TEST(SimpleExtensions) { } -static const char* kEvalExtensionSource = - "function UseEval() {" +static const char* kEvalExtensionSource1 = + "function UseEval1() {" " var x = 42;" " return eval('x');" "}"; +static const char* kEvalExtensionSource2 = + "(function() {" + " var x = 42;" + " function e() {" + " return eval('x');" + " }" + " this.UseEval2 = e;" + "})()"; + + THREADED_TEST(UseEvalFromExtension) { v8::HandleScope handle_scope; - v8::RegisterExtension(new Extension("evaltest", kEvalExtensionSource)); - const char* extension_names[] = { "evaltest" }; - v8::ExtensionConfiguration extensions(1, extension_names); + v8::RegisterExtension(new Extension("evaltest1", kEvalExtensionSource1)); + v8::RegisterExtension(new Extension("evaltest2", kEvalExtensionSource2)); + const char* extension_names[] = { "evaltest1", "evaltest2" }; + v8::ExtensionConfiguration extensions(2, extension_names); v8::Handle<Context> context = Context::New(&extensions); Context::Scope lock(context); - v8::Handle<Value> result = Script::Compile(v8_str("UseEval()"))->Run(); + v8::Handle<Value> result = Script::Compile(v8_str("UseEval1()"))->Run(); + CHECK_EQ(result, v8::Integer::New(42)); + result = Script::Compile(v8_str("UseEval2()"))->Run(); CHECK_EQ(result, v8::Integer::New(42)); } -static const char* kWithExtensionSource = - "function UseWith() {" +static const char* kWithExtensionSource1 = + "function UseWith1() {" " var x = 42;" " with({x:87}) { return x; }" "}"; + +static const char* kWithExtensionSource2 = + "(function() {" + " var x = 42;" + " function e() {" + " with ({x:87}) { return x; }" + " }" + " this.UseWith2 = e;" + "})()"; + + THREADED_TEST(UseWithFromExtension) { v8::HandleScope handle_scope; - v8::RegisterExtension(new Extension("withtest", kWithExtensionSource)); - const char* extension_names[] = { "withtest" }; - v8::ExtensionConfiguration extensions(1, extension_names); + v8::RegisterExtension(new Extension("withtest1", kWithExtensionSource1)); + v8::RegisterExtension(new Extension("withtest2", kWithExtensionSource2)); + const char* extension_names[] = { "withtest1", "withtest2" }; + v8::ExtensionConfiguration extensions(2, extension_names); v8::Handle<Context> context = Context::New(&extensions); Context::Scope lock(context); - v8::Handle<Value> result = Script::Compile(v8_str("UseWith()"))->Run(); + v8::Handle<Value> result = Script::Compile(v8_str("UseWith1()"))->Run(); + CHECK_EQ(result, v8::Integer::New(87)); + result = Script::Compile(v8_str("UseWith2()"))->Run(); CHECK_EQ(result, v8::Integer::New(87)); } diff --git a/V8Binding/v8/test/cctest/test-assembler-arm.cc b/V8Binding/v8/test/cctest/test-assembler-arm.cc index fe1621c..34f1639 100644 --- a/V8Binding/v8/test/cctest/test-assembler-arm.cc +++ b/V8Binding/v8/test/cctest/test-assembler-arm.cc @@ -185,7 +185,7 @@ TEST(3) { Label L, C; __ mov(ip, Operand(sp)); - __ stm(db_w, sp, r4.bit() | fp.bit() | sp.bit() | lr.bit()); + __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit()); __ sub(fp, ip, Operand(4)); __ mov(r4, Operand(r0)); __ ldr(r0, MemOperand(r4, OFFSET_OF(T, i))); @@ -199,7 +199,7 @@ TEST(3) { __ add(r0, r2, Operand(r0)); __ mov(r2, Operand(r2, ASR, 3)); __ strh(r2, MemOperand(r4, OFFSET_OF(T, s))); - __ ldm(ia, sp, r4.bit() | fp.bit() | sp.bit() | pc.bit()); + 
__ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit()); CodeDesc desc; assm.GetCode(&desc); diff --git a/V8Binding/v8/test/cctest/test-debug.cc b/V8Binding/v8/test/cctest/test-debug.cc index bd09d0d..0cae26c 100644 --- a/V8Binding/v8/test/cctest/test-debug.cc +++ b/V8Binding/v8/test/cctest/test-debug.cc @@ -4503,14 +4503,16 @@ TEST(DebuggerHostDispatch) { TEST(DebuggerAgent) { - // Make sure this port is not used by other tests to allow tests to run in + // Make sure these ports are not used by other tests to allow tests to run in // parallel. - const int kPort = 5858; + const int kPort1 = 5858; + const int kPort2 = 5857; + const int kPort3 = 5856; - // Make a string with the port number. + // Make a string with the kPort2 number. const int kPortBufferLen = 6; - char port_str[kPortBufferLen]; - OS::SNPrintF(i::Vector<char>(port_str, kPortBufferLen), "%d", kPort); + char port2_str[kPortBufferLen]; + OS::SNPrintF(i::Vector<char>(port2_str, kPortBufferLen), "%d", kPort2); bool ok; @@ -4518,15 +4520,15 @@ TEST(DebuggerAgent) { i::Socket::Setup(); // Test starting and stopping the agent without any client connection. - i::Debugger::StartAgent("test", kPort); + i::Debugger::StartAgent("test", kPort1); i::Debugger::StopAgent(); // Test starting the agent, connecting a client and shutting down the agent // with the client connected. - ok = i::Debugger::StartAgent("test", kPort); + ok = i::Debugger::StartAgent("test", kPort2); CHECK(ok); i::Socket* client = i::OS::CreateSocket(); - ok = client->Connect("localhost", port_str); + ok = client->Connect("localhost", port2_str); CHECK(ok); i::Debugger::StopAgent(); delete client; @@ -4534,9 +4536,9 @@ TEST(DebuggerAgent) { // Test starting and stopping the agent with the required port already // occoupied. i::Socket* server = i::OS::CreateSocket(); - server->Bind(kPort); + server->Bind(kPort3); - i::Debugger::StartAgent("test", kPort); + i::Debugger::StartAgent("test", kPort3); i::Debugger::StopAgent(); delete server; diff --git a/V8Binding/v8/test/cctest/test-heap.cc b/V8Binding/v8/test/cctest/test-heap.cc index 37dbdd7..eb32b65 100644 --- a/V8Binding/v8/test/cctest/test-heap.cc +++ b/V8Binding/v8/test/cctest/test-heap.cc @@ -179,7 +179,7 @@ TEST(HeapObjects) { TEST(Tagging) { InitializeVM(); int request = 24; - ASSERT_EQ(request, OBJECT_SIZE_ALIGN(request)); + CHECK_EQ(request, static_cast<int>(OBJECT_SIZE_ALIGN(request))); CHECK(Smi::FromInt(42)->IsSmi()); CHECK(Failure::RetryAfterGC(request, NEW_SPACE)->IsFailure()); CHECK_EQ(request, Failure::RetryAfterGC(request, NEW_SPACE)->requested()); diff --git a/V8Binding/v8/test/cctest/test-log.cc b/V8Binding/v8/test/cctest/test-log.cc index 5884a41..dafd3aa 100644 --- a/V8Binding/v8/test/cctest/test-log.cc +++ b/V8Binding/v8/test/cctest/test-log.cc @@ -5,12 +5,15 @@ #ifdef ENABLE_LOGGING_AND_PROFILING #ifdef __linux__ +#include <math.h> +#include <pthread.h> #include <signal.h> #include <unistd.h> -#endif +#endif // __linux__ #include "v8.h" #include "log.h" +#include "v8threads.h" #include "cctest.h" using v8::internal::Address; @@ -155,9 +158,10 @@ static bool was_sigprof_received = true; #ifdef __linux__ struct sigaction old_sigprof_handler; +pthread_t our_thread; static void SigProfSignalHandler(int signal, siginfo_t* info, void* context) { - if (signal != SIGPROF) return; + if (signal != SIGPROF || !pthread_equal(pthread_self(), our_thread)) return; was_sigprof_received = true; old_sigprof_handler.sa_sigaction(signal, info, context); } @@ -185,6 +189,7 @@ static int CheckThatProfilerWorks(int log_pos) { //
Intercept SIGPROF handler to make sure that the test process // had received it. Under load, system can defer it causing test failure. // It is important to execute this after 'ResumeProfiler'. + our_thread = pthread_self(); was_sigprof_received = false; struct sigaction sa; sa.sa_sigaction = SigProfSignalHandler; @@ -280,6 +285,158 @@ TEST(ProfLazyMode) { } +// Profiling multiple threads that use V8 is currently only available on Linux. +#ifdef __linux__ + +namespace { + +class LoopingThread : public v8::internal::Thread { + public: + LoopingThread() + : v8::internal::Thread(), + semaphore_(v8::internal::OS::CreateSemaphore(0)), + run_(true) { + } + + virtual ~LoopingThread() { delete semaphore_; } + + void Run() { + self_ = pthread_self(); + RunLoop(); + } + + void SendSigProf() { pthread_kill(self_, SIGPROF); } + + void Stop() { run_ = false; } + + bool WaitForRunning() { return semaphore_->Wait(1000000); } + + protected: + bool IsRunning() { return run_; } + + virtual void RunLoop() = 0; + + void SetV8ThreadId() { + v8_thread_id_ = v8::V8::GetCurrentThreadId(); + } + + void SignalRunning() { semaphore_->Signal(); } + + private: + v8::internal::Semaphore* semaphore_; + bool run_; + pthread_t self_; + int v8_thread_id_; +}; + + +class LoopingJsThread : public LoopingThread { + public: + void RunLoop() { + { + v8::Locker locker; + CHECK(v8::internal::ThreadManager::HasId()); + SetV8ThreadId(); + } + while (IsRunning()) { + v8::Locker locker; + v8::HandleScope scope; + v8::Persistent<v8::Context> context = v8::Context::New(); + v8::Context::Scope context_scope(context); + SignalRunning(); + CompileAndRunScript( + "var j; for (var i=0; i<10000; ++i) { j = Math.sin(i); }"); + context.Dispose(); + i::OS::Sleep(1); + } + } +}; + + +class LoopingNonJsThread : public LoopingThread { + public: + void RunLoop() { + v8::Locker locker; + v8::Unlocker unlocker; + // Now thread has V8's id, but will not run VM code. + CHECK(v8::internal::ThreadManager::HasId()); + double i = 10; + SignalRunning(); + while (IsRunning()) { + i = sin(i); + i::OS::Sleep(1); + } + } +}; + + +class TestSampler : public v8::internal::Sampler { + public: + TestSampler() + : Sampler(0, true), + semaphore_(v8::internal::OS::CreateSemaphore(0)), + was_sample_stack_called_(false) { + } + + ~TestSampler() { delete semaphore_; } + + void SampleStack(v8::internal::TickSample*) { + was_sample_stack_called_ = true; + } + + void Tick(v8::internal::TickSample*) { semaphore_->Signal(); } + + bool WaitForTick() { return semaphore_->Wait(1000000); } + + void Reset() { was_sample_stack_called_ = false; } + + bool WasSampleStackCalled() { return was_sample_stack_called_; } + + private: + v8::internal::Semaphore* semaphore_; + bool was_sample_stack_called_; +}; + + +} // namespace + +TEST(ProfMultipleThreads) { + // V8 needs to be initialized before the first Locker + // instantiation. Otherwise, Top::Initialize will reset + // thread_id_ in ThreadTopLocal. 
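+  // Creating a handle scope and entering a context here performs that
+  // initialization on the main thread before any Locker is taken.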
+ v8::HandleScope scope; + v8::Handle<v8::Context> env = v8::Context::New(); + env->Enter(); + + LoopingJsThread jsThread; + jsThread.Start(); + LoopingNonJsThread nonJsThread; + nonJsThread.Start(); + + TestSampler sampler; + sampler.Start(); + CHECK(!sampler.WasSampleStackCalled()); + jsThread.WaitForRunning(); + jsThread.SendSigProf(); + CHECK(sampler.WaitForTick()); + CHECK(sampler.WasSampleStackCalled()); + sampler.Reset(); + CHECK(!sampler.WasSampleStackCalled()); + nonJsThread.WaitForRunning(); + nonJsThread.SendSigProf(); + CHECK(sampler.WaitForTick()); + CHECK(!sampler.WasSampleStackCalled()); + sampler.Stop(); + + jsThread.Stop(); + nonJsThread.Stop(); + jsThread.Join(); + nonJsThread.Join(); +} + +#endif // __linux__ + + static inline bool IsStringEqualTo(const char* r, const char* s) { return strncmp(r, s, strlen(r)) == 0; } diff --git a/V8Binding/v8/test/cctest/test-regexp.cc b/V8Binding/v8/test/cctest/test-regexp.cc index 89c7868..81c2205 100644 --- a/V8Binding/v8/test/cctest/test-regexp.cc +++ b/V8Binding/v8/test/cctest/test-regexp.cc @@ -40,6 +40,7 @@ #include "regexp-macro-assembler-irregexp.h" #ifdef V8_NATIVE_REGEXP #ifdef V8_TARGET_ARCH_ARM +#include "arm/macro-assembler-arm.h" #include "arm/regexp-macro-assembler-arm.h" #endif #ifdef V8_TARGET_ARCH_X64 @@ -605,11 +606,12 @@ TEST(DispatchTableConstruction) { #ifdef V8_NATIVE_REGEXP -#ifdef V8_TARGET_ARCH_IA32 +#if V8_TARGET_ARCH_IA32 typedef RegExpMacroAssemblerIA32 ArchRegExpMacroAssembler; -#endif -#ifdef V8_TARGET_ARCH_X64 +#elif V8_TARGET_ARCH_X64 typedef RegExpMacroAssemblerX64 ArchRegExpMacroAssembler; +#elif V8_TARGET_ARCH_ARM +typedef RegExpMacroAssemblerARM ArchRegExpMacroAssembler; #endif class ContextInitializer { @@ -845,7 +847,7 @@ TEST(MacroAssemblerNativeBackReferenceASCII) { v8::V8::Initialize(); ContextInitializer initializer; - ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 3); + ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 4); m.WriteCurrentPositionToRegister(0, 0); m.AdvanceCurrentPosition(2); @@ -870,7 +872,7 @@ TEST(MacroAssemblerNativeBackReferenceASCII) { Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input); Address start_adr = seq_input->GetCharsAddress(); - int output[3]; + int output[4]; NativeRegExpMacroAssembler::Result result = Execute(*code, *input, @@ -884,6 +886,7 @@ TEST(MacroAssemblerNativeBackReferenceASCII) { CHECK_EQ(0, output[0]); CHECK_EQ(2, output[1]); CHECK_EQ(6, output[2]); + CHECK_EQ(-1, output[3]); } @@ -891,7 +894,7 @@ TEST(MacroAssemblerNativeBackReferenceUC16) { v8::V8::Initialize(); ContextInitializer initializer; - ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::UC16, 3); + ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::UC16, 4); m.WriteCurrentPositionToRegister(0, 0); m.AdvanceCurrentPosition(2); @@ -918,7 +921,7 @@ TEST(MacroAssemblerNativeBackReferenceUC16) { Handle<SeqTwoByteString> seq_input = Handle<SeqTwoByteString>::cast(input); Address start_adr = seq_input->GetCharsAddress(); - int output[3]; + int output[4]; NativeRegExpMacroAssembler::Result result = Execute(*code, *input, @@ -932,6 +935,7 @@ TEST(MacroAssemblerNativeBackReferenceUC16) { CHECK_EQ(0, output[0]); CHECK_EQ(2, output[1]); CHECK_EQ(6, output[2]); + CHECK_EQ(-1, output[3]); } @@ -1055,12 +1059,12 @@ TEST(MacroAssemblerNativeRegisters) { v8::V8::Initialize(); ContextInitializer initializer; - ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 5); + ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 6); uc16 
foo_chars[3] = {'f', 'o', 'o'}; Vector<const uc16> foo(foo_chars, 3); - enum registers { out1, out2, out3, out4, out5, sp, loop_cnt }; + enum registers { out1, out2, out3, out4, out5, out6, sp, loop_cnt }; Label fail; Label backtrack; m.WriteCurrentPositionToRegister(out1, 0); // Output: [0] @@ -1114,7 +1118,7 @@ TEST(MacroAssemblerNativeRegisters) { m.GoTo(&loop3); m.Bind(&exit_loop3); m.PopCurrentPosition(); - m.WriteCurrentPositionToRegister(out5, 0); // [0,3,6,9,9] + m.WriteCurrentPositionToRegister(out5, 0); // [0,3,6,9,9,-1] m.Succeed(); @@ -1132,15 +1136,15 @@ TEST(MacroAssemblerNativeRegisters) { Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input); Address start_adr = seq_input->GetCharsAddress(); - int output[5]; + int output[6]; NativeRegExpMacroAssembler::Result result = Execute(*code, - *input, - 0, - start_adr, - start_adr + input->length(), - output, - true); + *input, + 0, + start_adr, + start_adr + input->length(), + output, + true); CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result); CHECK_EQ(0, output[0]); @@ -1148,6 +1152,7 @@ TEST(MacroAssemblerNativeRegisters) { CHECK_EQ(6, output[2]); CHECK_EQ(9, output[3]); CHECK_EQ(9, output[4]); + CHECK_EQ(-1, output[5]); } diff --git a/V8Binding/v8/test/cctest/test-thread-termination.cc b/V8Binding/v8/test/cctest/test-thread-termination.cc index 323be1d..552f49d 100644 --- a/V8Binding/v8/test/cctest/test-thread-termination.cc +++ b/V8Binding/v8/test/cctest/test-thread-termination.cc @@ -193,3 +193,63 @@ TEST(TerminateMultipleV8Threads) { delete semaphore; semaphore = NULL; } + + +int call_count = 0; + + +v8::Handle<v8::Value> TerminateOrReturnObject(const v8::Arguments& args) { + if (++call_count == 10) { + v8::V8::TerminateExecution(); + return v8::Undefined(); + } + v8::Local<v8::Object> result = v8::Object::New(); + result->Set(v8::String::New("x"), v8::Integer::New(42)); + return result; +} + + +v8::Handle<v8::Value> LoopGetProperty(const v8::Arguments& args) { + v8::TryCatch try_catch; + v8::Script::Compile(v8::String::New("function f() {" + " try {" + " while(true) {" + " terminate_or_return_object().x;" + " }" + " fail();" + " } catch(e) {" + " fail();" + " }" + "}" + "f()"))->Run(); + CHECK(try_catch.HasCaught()); + CHECK(try_catch.Exception()->IsNull()); + CHECK(try_catch.Message().IsEmpty()); + CHECK(!try_catch.CanContinue()); + return v8::Undefined(); +} + + +// Test that we correctly handle termination exceptions if they are +// triggered by the creation of error objects in connection with ICs. +TEST(TerminateLoadICException) { + v8::HandleScope scope; + v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New(); + global->Set(v8::String::New("terminate_or_return_object"), + v8::FunctionTemplate::New(TerminateOrReturnObject)); + global->Set(v8::String::New("fail"), v8::FunctionTemplate::New(Fail)); + global->Set(v8::String::New("loop"), + v8::FunctionTemplate::New(LoopGetProperty)); + + v8::Persistent<v8::Context> context = v8::Context::New(NULL, global); + v8::Context::Scope context_scope(context); + // Run a loop that will be infinite if thread termination does not work. + v8::Handle<v8::String> source = + v8::String::New("try { loop(); fail(); } catch(e) { fail(); }"); + call_count = 0; + v8::Script::Compile(source)->Run(); + // Test that we can run the code again after thread termination. 
+ call_count = 0; + v8::Script::Compile(source)->Run(); + context.Dispose(); +} diff --git a/V8Binding/v8/test/cctest/test-utils.cc b/V8Binding/v8/test/cctest/test-utils.cc index 23b3254..ffcaf8a 100644 --- a/V8Binding/v8/test/cctest/test-utils.cc +++ b/V8Binding/v8/test/cctest/test-utils.cc @@ -158,7 +158,7 @@ TEST(Utils1) { // int8_t and intptr_t signed integers. CHECK_EQ(-2, -8 >> 2); CHECK_EQ(-2, static_cast<int8_t>(-8) >> 2); - CHECK_EQ(-2, static_cast<intptr_t>(-8) >> 2); + CHECK_EQ(-2, static_cast<int>(static_cast<intptr_t>(-8) >> 2)); } diff --git a/V8Binding/v8/test/cctest/testcfg.py b/V8Binding/v8/test/cctest/testcfg.py index 75377db..c2427c8 100644 --- a/V8Binding/v8/test/cctest/testcfg.py +++ b/V8Binding/v8/test/cctest/testcfg.py @@ -31,7 +31,7 @@ from os.path import join, dirname, exists import platform import utils -DEBUG_FLAGS = ['--enable-slow-asserts', '--debug-code', '--verify-heap'] +CCTEST_DEBUG_FLAGS = ['--enable-slow-asserts', '--debug-code', '--verify-heap'] class CcTestCase(test.TestCase): @@ -55,7 +55,7 @@ class CcTestCase(test.TestCase): serialization_option = '--testing_serialization_file=' + serialization_file result = [ self.executable, name, serialization_option ] if self.mode == 'debug': - result += DEBUG_FLAGS + result += CCTEST_DEBUG_FLAGS return result def GetCommand(self): diff --git a/V8Binding/v8/test/mjsunit/debug-step-stub-callfunction.js b/V8Binding/v8/test/mjsunit/debug-step-stub-callfunction.js index fbb8078..50d095b 100644 --- a/V8Binding/v8/test/mjsunit/debug-step-stub-callfunction.js +++ b/V8Binding/v8/test/mjsunit/debug-step-stub-callfunction.js @@ -54,7 +54,7 @@ function f() { break_break_point_hit_count = 0; f(); -assertEquals(5, break_break_point_hit_count); +assertEquals(6, break_break_point_hit_count); // Use an inner function to ensure that the function call is through CodeStub // CallFunction see Ia32CodeGenerator::VisitCall and @@ -67,7 +67,21 @@ function g() { break_break_point_hit_count = 0; g(); -assertEquals(4, break_break_point_hit_count); +assertEquals(5, break_break_point_hit_count); + + +// Use an inner function to ensure that the function call is through CodeStub +// CallFunction. +function testCallInExpression() { + function h() {} + debugger; + var x = 's' + h(10, 20); +}; + +break_break_point_hit_count = 0; +testCallInExpression(); +assertEquals(5, break_break_point_hit_count); + // Get rid of the debug event listener. Debug.setListener(null); diff --git a/V8Binding/v8/test/mjsunit/debug-stepin-call-function-stub.js b/V8Binding/v8/test/mjsunit/debug-stepin-call-function-stub.js new file mode 100644 index 0000000..12f5142 --- /dev/null +++ b/V8Binding/v8/test/mjsunit/debug-stepin-call-function-stub.js @@ -0,0 +1,115 @@ +// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+var exception = null;
+var state = 0;
+var expected_function_name = null;
+var expected_source_line_text = null;
+var expected_caller_source_line = null;
+var step_in_count = 2;
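+// step_in_count is 2 when the break position is before the call and 1 when
+// the break position is on the call itself (see the two test loops below).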
+
+// Simple debug event handler which, the first time it is hit, causes a
+// 'step in' action to get into the call of g, and then checks that
+// execution is paused inside function 'g'.
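+// The listener is a small state machine: state 0 issues the step request,
+// state 2 verifies where execution paused, and state 3 signals success to
+// the asserts after each run.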
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ if (state == 0) {
+ // Step into f().
+ exec_state.prepareStep(Debug.StepAction.StepIn, step_in_count);
+ state = 2;
+ } else if (state == 2) {
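+      // The requested steps have completed; we should now be paused inside
+      // the callee on the expected line.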
+ assertEquals(expected_source_line_text,
+ event_data.sourceLineText());
+ assertEquals(expected_function_name, event_data.func().name());
+ state = 3;
+ }
+ }
+ } catch(e) {
+ exception = e;
+ }
+};
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+
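+// g is the callee we expect to step into. testFunction calls it through a
+// local variable, so the call goes through the CallFunction stub that this
+// test exercises.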
+function g() {
+ return "s"; // expected line
+}
+
+function testFunction() {
+ var f = g;
+ var s = 1 +f(10);
+}
+
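+// A second, structurally identical pair. g2 is not called until the second
+// test loop, so it is still lazily compiled when first stepped into.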
+function g2() {
+ return "s2"; // expected line
+}
+
+function testFunction2() {
+ var f = g2;
+ var s = 1 +f(10, 20);
+}
+
+// Run three times. The first time the function is compiled lazily; on the
+// following runs the cached version is used.
+for (var i = 0; i < 3; i++) {
+ state = 0;
+ expected_function_name = 'g';
+ expected_source_line_text = ' return "s"; // expected line';
+ step_in_count = 2;
+ // Set a break point and call to invoke the debug event listener.
+ Debug.setBreakPoint(testFunction, 1, 0);
+ testFunction();
+ assertNull(exception);
+ assertEquals(3, state);
+}
+
+// Test stepping into a function call when a breakpoint is set at the place
+// of the call. Use a different pair of functions so that g2 is compiled
+// lazily. Run three times: the first time g2 is compiled lazily, afterwards
+// the cached version is used.
+for (var i = 0; i < 3; i++) {
+ state = 0;
+ expected_function_name = 'g2';
+ expected_source_line_text = ' return "s2"; // expected line';
+ step_in_count = 1;
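+  // A single step suffices here because the breakpoint is placed on the
+  // call itself.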
+ // Set a break point and call to invoke the debug event listener.
+ Debug.setBreakPoint(testFunction2, 2, 0);
+ testFunction2();
+ assertNull(exception);
+ assertEquals(3, state);
+}
+
+
+// Get rid of the debug event listener.
+Debug.setListener(null);
diff --git a/V8Binding/v8/test/mjsunit/mjsunit.status b/V8Binding/v8/test/mjsunit/mjsunit.status index 6ac4938..839329d 100644 --- a/V8Binding/v8/test/mjsunit/mjsunit.status +++ b/V8Binding/v8/test/mjsunit/mjsunit.status @@ -60,6 +60,7 @@ debug-setbreakpoint: CRASH || FAIL || PASS debug-step-stub-callfunction: SKIP debug-stepin-accessor: CRASH || FAIL debug-stepin-builtin: CRASH || FAIL +debug-stepin-call-function-stub: CRASH || FAIL debug-stepin-constructor: CRASH, FAIL debug-stepin-function-call: CRASH || FAIL debug-step: SKIP diff --git a/V8Binding/v8/test/mjsunit/regress/regress-246.js b/V8Binding/v8/test/mjsunit/regress/regress-246.js index 4324b54..4324b54 100755..100644 --- a/V8Binding/v8/test/mjsunit/regress/regress-246.js +++ b/V8Binding/v8/test/mjsunit/regress/regress-246.js diff --git a/V8Binding/v8/test/mjsunit/regress/regress-254.js b/V8Binding/v8/test/mjsunit/regress/regress-254.js index ec4b40a..ec4b40a 100755..100644 --- a/V8Binding/v8/test/mjsunit/regress/regress-254.js +++ b/V8Binding/v8/test/mjsunit/regress/regress-254.js diff --git a/V8Binding/v8/test/mjsunit/regress/regress-crbug-18639.js b/V8Binding/v8/test/mjsunit/regress/regress-crbug-18639.js new file mode 100644 index 0000000..23e225a --- /dev/null +++ b/V8Binding/v8/test/mjsunit/regress/regress-crbug-18639.js @@ -0,0 +1,34 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +// See http://crbug.com/18639 + +toString = toString; +__defineGetter__("z", (0).toLocaleString); +z; +z; +((0).toLocaleString)(); diff --git a/V8Binding/v8/test/mjsunit/testcfg.py b/V8Binding/v8/test/mjsunit/testcfg.py index 96840f5..97924c8 100644 --- a/V8Binding/v8/test/mjsunit/testcfg.py +++ b/V8Binding/v8/test/mjsunit/testcfg.py @@ -31,7 +31,7 @@ from os.path import join, dirname, exists import re import tempfile - +MJSUNIT_DEBUG_FLAGS = ['--enable-slow-asserts', '--debug-code', '--verify-heap'] FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)") FILES_PATTERN = re.compile(r"//\s+Files:(.*)") SELF_SCRIPT_PATTERN = re.compile(r"//\s+Env: TEST_FILE_NAME") @@ -58,6 +58,8 @@ class MjsunitTestCase(test.TestCase): flags_match = FLAGS_PATTERN.search(source) if flags_match: result += flags_match.group(1).strip().split() + if self.mode == 'debug': + result += MJSUNIT_DEBUG_FLAGS additional_files = [] files_match = FILES_PATTERN.search(source); # Accept several lines of 'Files:' diff --git a/V8Binding/v8/test/mjsunit/transcendentals.js b/V8Binding/v8/test/mjsunit/transcendentals.js new file mode 100644 index 0000000..78e6c48 --- /dev/null +++ b/V8Binding/v8/test/mjsunit/transcendentals.js @@ -0,0 +1,49 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +// Two fp numbers that have the same hash value (see TranscendentalCache +// in heap.h). 
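+// If the cache keyed only on the hash, computing e.g. Math.sin(x) and then
+// Math.sin(y) could return the stale cached result for x; the inequality
+// checks below would then fail, since x and y are distinct inputs.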
+var x = 0x123456789ABCD / 0x2000000000000; +var y = 0x1134567899BCD / 0x2000000000000; + +assertTrue(Math.sin(x) != Math.sin(y)); + +assertTrue(Math.cos(x) != Math.cos(y)); + +assertTrue(Math.tan(x) != Math.tan(y)); + +assertTrue(Math.log(x) != Math.log(y)); + +assertTrue(Math.asin(x) != Math.asin(y)); + +assertTrue(Math.acos(x) != Math.acos(y)); + +assertTrue(Math.atan(x) != Math.atan(y)); + +assertTrue(Math.exp(x) != Math.exp(y)); + diff --git a/V8Binding/v8/test/mozilla/mozilla.status b/V8Binding/v8/test/mozilla/mozilla.status index 41395b3..9793dc8 100644 --- a/V8Binding/v8/test/mozilla/mozilla.status +++ b/V8Binding/v8/test/mozilla/mozilla.status @@ -124,6 +124,10 @@ ecma/Date/15.9.5.28-1: PASS || ($ARM && FAIL) ecma/Array/15.4.4.5-3: PASS || ($ARM && FAIL) ecma/Date/15.9.5.22-2: PASS || ($ARM && FAIL) +# Flaky test that fails due to what appears to be a bug in the test. +# Occurs depending on current time +ecma/Date/15.9.5.8: PASS || FAIL + # Severely brain-damaged test. Access to local variables must not # be more than 2.5 times faster than access to global variables? WTF? js1_5/Regress/regress-169559: PASS || FAIL diff --git a/V8Binding/v8/tools/gyp/v8.gyp b/V8Binding/v8/tools/gyp/v8.gyp index 037efa7..1222ea9 100644 --- a/V8Binding/v8/tools/gyp/v8.gyp +++ b/V8Binding/v8/tools/gyp/v8.gyp @@ -433,18 +433,14 @@ '../../src/ia32/jump-target-ia32.cc', '../../src/ia32/macro-assembler-ia32.cc', '../../src/ia32/macro-assembler-ia32.h', + '../../src/ia32/regexp-macro-assembler-ia32.cc', + '../../src/ia32/regexp-macro-assembler-ia32.h', '../../src/ia32/register-allocator-ia32.cc', '../../src/ia32/stub-cache-ia32.cc', '../../src/ia32/virtual-frame-ia32.cc', '../../src/ia32/virtual-frame-ia32.h', ], }], - ['target_arch=="ia32" and v8_regexp=="native"', { - 'sources': [ - '../../src/ia32/regexp-macro-assembler-ia32.cc', - '../../src/ia32/regexp-macro-assembler-ia32.h', - ], - }], ['target_arch=="x64"', { 'include_dirs+': [ '../../src/x64', @@ -466,18 +462,14 @@ '../../src/x64/jump-target-x64.cc', '../../src/x64/macro-assembler-x64.cc', '../../src/x64/macro-assembler-x64.h', + '../../src/x64/regexp-macro-assembler-x64.cc', + '../../src/x64/regexp-macro-assembler-x64.h', '../../src/x64/register-allocator-x64.cc', '../../src/x64/stub-cache-x64.cc', '../../src/x64/virtual-frame-x64.cc', '../../src/x64/virtual-frame-x64.h', ], }], - ['target_arch=="x64" and v8_regexp=="native"', { - 'sources': [ - '../../src/x64/regexp-macro-assembler-x64.cc', - '../../src/x64/regexp-macro-assembler-x64.h', - ], - }], ['OS=="linux"', { 'link_settings': { 'libraries': [ diff --git a/V8Binding/v8/tools/js2c.py b/V8Binding/v8/tools/js2c.py index 52fe35c..cae39e8 100755 --- a/V8Binding/v8/tools/js2c.py +++ b/V8Binding/v8/tools/js2c.py @@ -45,6 +45,13 @@ def ToCArray(lines): return ", ".join(result) +def RemoveCommentsAndTrailingWhitespace(lines): + lines = re.sub(r'//.*\n', '\n', lines) # end-of-line comments + lines = re.sub(re.compile(r'/\*.*?\*/', re.DOTALL), '', lines) # comments. + lines = re.sub(r'\s+\n+', '\n', lines) # trailing whitespace + return lines + + def CompressScript(lines, do_jsmin): # If we're not expecting this code to be user visible, we can run it through # a more aggressive minifier. @@ -55,9 +62,7 @@ def CompressScript(lines, do_jsmin): # people print the source code using Function.prototype.toString(). # Note that we could easily compress the scripts mode but don't # since we want it to remain readable. 
- lines = re.sub('//.*\n', '\n', lines) # end-of-line comments - lines = re.sub(re.compile(r'/\*.*?\*/', re.DOTALL), '', lines) # comments. - lines = re.sub('\s+\n+', '\n', lines) # trailing whitespace + lines = RemoveCommentsAndTrailingWhitespace(lines) return lines @@ -96,6 +101,22 @@ def ParseValue(string): return string +EVAL_PATTERN = re.compile(r'\beval\s*\('); +WITH_PATTERN = re.compile(r'\bwith\s*\('); + + +def Validate(lines, file): + lines = RemoveCommentsAndTrailingWhitespace(lines) + # Because of simplified context setup, eval and with are not + # allowed in the natives files. + eval_match = EVAL_PATTERN.search(lines) + if eval_match: + raise ("Eval disallowed in natives: %s" % file) + with_match = WITH_PATTERN.search(lines) + if with_match: + raise ("With statements disallowed in natives: %s" % file) + + def ExpandConstants(lines, constants): for key, value in constants.items(): lines = lines.replace(key, str(value)) @@ -155,9 +176,9 @@ class PythonMacro: args.append(mapping[arg]) return str(self.fun(*args)) -CONST_PATTERN = re.compile('^const\s+([a-zA-Z0-9_]+)\s*=\s*([^;]*);$') -MACRO_PATTERN = re.compile('^macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*=\s*([^;]*);$') -PYTHON_MACRO_PATTERN = re.compile('^python\s+macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*=\s*([^;]*);$') +CONST_PATTERN = re.compile(r'^const\s+([a-zA-Z0-9_]+)\s*=\s*([^;]*);$') +MACRO_PATTERN = re.compile(r'^macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*=\s*([^;]*);$') +PYTHON_MACRO_PATTERN = re.compile(r'^python\s+macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*=\s*([^;]*);$') def ReadMacros(lines): constants = { } @@ -275,15 +296,17 @@ def JS2C(source, target, env): # Build source code lines source_lines = [ ] source_lines_empty = [] - for s in modules: - delay = str(s).endswith('-delay.js') - lines = ReadFile(str(s)) + for module in modules: + filename = str(module) + delay = filename.endswith('-delay.js') + lines = ReadFile(filename) do_jsmin = lines.find('// jsminify this file, js2c: jsmin') != -1 lines = ExpandConstants(lines, consts) lines = ExpandMacros(lines, macros) + Validate(lines, filename) lines = CompressScript(lines, do_jsmin) data = ToCArray(lines) - id = (os.path.split(str(s))[1])[:-3] + id = (os.path.split(filename)[1])[:-3] if delay: id = id[:-6] if delay: delay_ids.append((id, len(lines))) @@ -291,7 +314,7 @@ def JS2C(source, target, env): ids.append((id, len(lines))) source_lines.append(SOURCE_DECLARATION % { 'id': id, 'data': data }) source_lines_empty.append(SOURCE_DECLARATION % { 'id': id, 'data': 0 }) - + # Build delay support functions get_index_cases = [ ] get_script_source_cases = [ ] diff --git a/V8Binding/v8/tools/tickprocessor.js b/V8Binding/v8/tools/tickprocessor.js index 72b3059..84f0eea 100644 --- a/V8Binding/v8/tools/tickprocessor.js +++ b/V8Binding/v8/tools/tickprocessor.js @@ -476,7 +476,7 @@ UnixCppEntriesProvider.prototype.parseNextLine = function() { function MacCppEntriesProvider(nmExec) { UnixCppEntriesProvider.call(this, nmExec); // Note an empty group. It is required, as UnixCppEntriesProvider expects 3 groups.
- this.FUNC_RE = /^([0-9a-fA-F]{8}) ()[iItT] (.*)$/; + this.FUNC_RE = /^([0-9a-fA-F]{8,16}) ()[iItT] (.*)$/; }; inherits(MacCppEntriesProvider, UnixCppEntriesProvider); diff --git a/V8Binding/v8/tools/visual_studio/arm.vsprops b/V8Binding/v8/tools/visual_studio/arm.vsprops index 3aa9374..0d6a888 100644 --- a/V8Binding/v8/tools/visual_studio/arm.vsprops +++ b/V8Binding/v8/tools/visual_studio/arm.vsprops @@ -2,11 +2,13 @@ <VisualStudioPropertySheet ProjectType="Visual C++" Version="8.00" + OutputDirectory="$(SolutionDir)$(ConfigurationName)Arm" + IntermediateDirectory="$(SolutionDir)$(ConfigurationName)Arm\obj\$(ProjectName)" Name="arm" > <Tool Name="VCCLCompilerTool" - PreprocessorDefinitions="V8_TARGET_ARCH_ARM" + PreprocessorDefinitions="_USE_32BIT_TIME_T;V8_TARGET_ARCH_ARM;V8_NATIVE_REGEXP" DisableSpecificWarnings="4996" /> </VisualStudioPropertySheet> diff --git a/V8Binding/v8/tools/visual_studio/common.vsprops b/V8Binding/v8/tools/visual_studio/common.vsprops index d23e4fc..238dd97 100644 --- a/V8Binding/v8/tools/visual_studio/common.vsprops +++ b/V8Binding/v8/tools/visual_studio/common.vsprops @@ -3,8 +3,6 @@ ProjectType="Visual C++" Version="8.00" Name="essential" - OutputDirectory="$(SolutionDir)$(ConfigurationName)" - IntermediateDirectory="$(SolutionDir)$(ConfigurationName)\obj\$(ProjectName)" CharacterSet="1" > <Tool diff --git a/V8Binding/v8/tools/visual_studio/d8_arm.vcproj b/V8Binding/v8/tools/visual_studio/d8_arm.vcproj new file mode 100644 index 0000000..fbebdb3 --- /dev/null +++ b/V8Binding/v8/tools/visual_studio/d8_arm.vcproj @@ -0,0 +1,199 @@ +<?xml version="1.0" encoding="Windows-1252"?> +<VisualStudioProject + ProjectType="Visual C++" + Version="8.00" + Name="d8" + ProjectGUID="{7E4C7D2D-A4B9-40B9-8192-22654E626F6C}" + RootNamespace="d8" + Keyword="Win32Proj" + > + <Platforms> + <Platform + Name="Win32" + /> + </Platforms> + <ToolFiles> + </ToolFiles> + <Configurations> + <Configuration + Name="Debug|Win32" + ConfigurationType="1" + InheritedPropertySheets=".\common.vsprops;.\arm.vsprops;.\debug.vsprops" + > + <Tool + Name="VCPreBuildEventTool" + /> + <Tool + Name="VCCustomBuildTool" + /> + <Tool + Name="VCXMLDataGeneratorTool" + /> + <Tool + Name="VCWebServiceProxyGeneratorTool" + /> + <Tool + Name="VCMIDLTool" + /> + <Tool + Name="VCCLCompilerTool" + /> + <Tool + Name="VCManagedResourceCompilerTool" + /> + <Tool + Name="VCResourceCompilerTool" + /> + <Tool + Name="VCPreLinkEventTool" + /> + <Tool + Name="VCLinkerTool" + AdditionalDependencies="winmm.lib Ws2_32.lib" + /> + <Tool + Name="VCALinkTool" + /> + <Tool + Name="VCManifestTool" + /> + <Tool + Name="VCXDCMakeTool" + /> + <Tool + Name="VCBscMakeTool" + /> + <Tool + Name="VCFxCopTool" + /> + <Tool + Name="VCAppVerifierTool" + /> + <Tool + Name="VCWebDeploymentTool" + /> + <Tool + Name="VCPostBuildEventTool" + /> + </Configuration> + <Configuration + Name="Release|Win32" + ConfigurationType="1" + InheritedPropertySheets=".\common.vsprops;.\arm.vsprops;.\release.vsprops" + > + <Tool + Name="VCPreBuildEventTool" + /> + <Tool + Name="VCCustomBuildTool" + /> + <Tool + Name="VCXMLDataGeneratorTool" + /> + <Tool + Name="VCWebServiceProxyGeneratorTool" + /> + <Tool + Name="VCMIDLTool" + /> + <Tool + Name="VCCLCompilerTool" + /> + <Tool + Name="VCManagedResourceCompilerTool" + /> + <Tool + Name="VCResourceCompilerTool" + /> + <Tool + Name="VCPreLinkEventTool" + /> + <Tool + Name="VCLinkerTool" + AdditionalDependencies="winmm.lib Ws2_32.lib" + /> + <Tool + Name="VCALinkTool" + /> + <Tool + Name="VCManifestTool" + /> + <Tool 
+ Name="VCXDCMakeTool" + /> + <Tool + Name="VCBscMakeTool" + /> + <Tool + Name="VCFxCopTool" + /> + <Tool + Name="VCAppVerifierTool" + /> + <Tool + Name="VCWebDeploymentTool" + /> + <Tool + Name="VCPostBuildEventTool" + /> + </Configuration> + </Configurations> + <References> + </References> + <Files> + <File + RelativePath="..\..\src\d8.cc" + > + </File> + <File + RelativePath="..\..\src\d8.h" + > + </File> + <File + RelativePath="..\..\src\d8-debug.cc" + > + </File> + <File + RelativePath="..\..\src\d8-debug.h" + > + </File> + <File + RelativePath="..\..\src\d8-windows.cc" + > + </File> + <File + RelativePath="..\..\src\d8.js" + > + <FileConfiguration + Name="Debug|Win32" + > + <Tool + Name="VCCustomBuildTool" + Description="Processing js files..." + CommandLine=".\d8js2c.cmd ..\..\src "$(IntDir)\DerivedSources"" + Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc" + /> + </FileConfiguration> + <FileConfiguration + Name="Release|Win32" + > + <Tool + Name="VCCustomBuildTool" + Description="Processing js files..." + CommandLine=".\d8js2c.cmd ..\..\src "$(IntDir)\DerivedSources"" + Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc" + /> + </FileConfiguration> + </File> + <Filter + Name="generated files" + > + <File + RelativePath="$(IntDir)\DerivedSources\natives.cc" + > + </File> + </Filter> + </Files> + <Globals> + </Globals> +</VisualStudioProject> diff --git a/V8Binding/v8/tools/visual_studio/ia32.vsprops b/V8Binding/v8/tools/visual_studio/ia32.vsprops index f48e808..0399bbb 100644 --- a/V8Binding/v8/tools/visual_studio/ia32.vsprops +++ b/V8Binding/v8/tools/visual_studio/ia32.vsprops @@ -2,6 +2,8 @@ <VisualStudioPropertySheet ProjectType="Visual C++" Version="8.00" + OutputDirectory="$(SolutionDir)$(ConfigurationName)" + IntermediateDirectory="$(SolutionDir)$(ConfigurationName)\obj\$(ProjectName)" Name="ia32" > <Tool diff --git a/V8Binding/v8/tools/visual_studio/v8_arm.sln b/V8Binding/v8/tools/visual_studio/v8_arm.sln index 2dc6cf5..069ff32 100644 --- a/V8Binding/v8/tools/visual_studio/v8_arm.sln +++ b/V8Binding/v8/tools/visual_studio/v8_arm.sln @@ -1,11 +1,11 @@ Microsoft Visual Studio Solution File, Format Version 9.00 # Visual Studio 2005 -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "v8", "v8.vcproj", "{21E22961-22BF-4493-BD3A-868F93DA5179}" +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "v8", "v8_arm.vcproj", "{21E22961-22BF-4493-BD3A-868F93DA5179}" ProjectSection(ProjectDependencies) = postProject {EC8B7909-62AF-470D-A75D-E1D89C837142} = {EC8B7909-62AF-470D-A75D-E1D89C837142} EndProjectSection EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "v8_shell_sample", "v8_shell_sample.vcproj", "{2DE20FFA-6F5E-48D9-84D8-09B044A5B119}" +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "v8_shell_sample", "v8_shell_sample_arm.vcproj", "{2DE20FFA-6F5E-48D9-84D8-09B044A5B119}" ProjectSection(ProjectDependencies) = postProject {EC8B7909-62AF-470D-A75D-E1D89C837142} = {EC8B7909-62AF-470D-A75D-E1D89C837142} {21E22961-22BF-4493-BD3A-868F93DA5179} = {21E22961-22BF-4493-BD3A-868F93DA5179} @@ -13,14 +13,14 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "v8_shell_sample", "v8_shell EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "samples", "samples", "{E131F77D-B713-48F3-B86D-097ECDCC4C3A}" EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "v8_process_sample", "v8_process_sample.vcproj", "{EF019874-D38A-40E3-B17C-DB5923F0A79C}" 
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "v8_process_sample", "v8_process_sample_arm.vcproj", "{EF019874-D38A-40E3-B17C-DB5923F0A79C}" ProjectSection(ProjectDependencies) = postProject {21E22961-22BF-4493-BD3A-868F93DA5179} = {21E22961-22BF-4493-BD3A-868F93DA5179} EndProjectSection EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "test", "test", "{AD933CE2-1303-448E-89C8-60B1FDD18EC3}" EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "d8", "d8.vcproj", "{7E4C7D2D-A4B9-40B9-8192-22654E626F6C}" +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "d8", "d8_arm.vcproj", "{7E4C7D2D-A4B9-40B9-8192-22654E626F6C}" ProjectSection(ProjectDependencies) = postProject {21E22961-22BF-4493-BD3A-868F93DA5179} = {21E22961-22BF-4493-BD3A-868F93DA5179} EndProjectSection diff --git a/V8Binding/v8/tools/visual_studio/v8_arm.vcproj b/V8Binding/v8/tools/visual_studio/v8_arm.vcproj new file mode 100644 index 0000000..f8cbcc4 --- /dev/null +++ b/V8Binding/v8/tools/visual_studio/v8_arm.vcproj @@ -0,0 +1,223 @@ +<?xml version="1.0" encoding="Windows-1252"?> +<VisualStudioProject + ProjectType="Visual C++" + Version="8.00" + Name="v8" + ProjectGUID="{21E22961-22BF-4493-BD3A-868F93DA5179}" + RootNamespace="v8" + Keyword="Win32Proj" + > + <Platforms> + <Platform + Name="Win32" + /> + </Platforms> + <ToolFiles> + </ToolFiles> + <Configurations> + <Configuration + Name="Debug|Win32" + ConfigurationType="4" + InheritedPropertySheets=".\common.vsprops;.\arm.vsprops;.\debug.vsprops" + > + <Tool + Name="VCPreBuildEventTool" + /> + <Tool + Name="VCCustomBuildTool" + /> + <Tool + Name="VCXMLDataGeneratorTool" + /> + <Tool + Name="VCWebServiceProxyGeneratorTool" + /> + <Tool + Name="VCMIDLTool" + /> + <Tool + Name="VCCLCompilerTool" + /> + <Tool + Name="VCManagedResourceCompilerTool" + /> + <Tool + Name="VCResourceCompilerTool" + /> + <Tool + Name="VCPreLinkEventTool" + /> + <Tool + Name="VCLibrarianTool" + LinkLibraryDependencies="true" + /> + <Tool + Name="VCALinkTool" + /> + <Tool + Name="VCXDCMakeTool" + /> + <Tool + Name="VCBscMakeTool" + /> + <Tool + Name="VCFxCopTool" + /> + <Tool + Name="VCPostBuildEventTool" + /> + </Configuration> + <Configuration + Name="Release|Win32" + ConfigurationType="4" + InheritedPropertySheets=".\common.vsprops;.\arm.vsprops;.\release.vsprops" + > + <Tool + Name="VCPreBuildEventTool" + /> + <Tool + Name="VCCustomBuildTool" + /> + <Tool + Name="VCXMLDataGeneratorTool" + /> + <Tool + Name="VCWebServiceProxyGeneratorTool" + /> + <Tool + Name="VCMIDLTool" + /> + <Tool + Name="VCCLCompilerTool" + /> + <Tool + Name="VCManagedResourceCompilerTool" + /> + <Tool + Name="VCResourceCompilerTool" + /> + <Tool + Name="VCPreLinkEventTool" + /> + <Tool + Name="VCLibrarianTool" + LinkLibraryDependencies="true" + /> + <Tool + Name="VCALinkTool" + /> + <Tool + Name="VCXDCMakeTool" + /> + <Tool + Name="VCBscMakeTool" + /> + <Tool + Name="VCFxCopTool" + /> + <Tool + Name="VCPostBuildEventTool" + /> + </Configuration> + </Configurations> + <References> + </References> + <Files> + <Filter + Name="js" + > + <File + RelativePath="..\..\src\apinatives.js" + > + </File> + <File + RelativePath="..\..\src\array.js" + > + </File> + <File + RelativePath="..\..\src\date-delay.js" + > + </File> + <File + RelativePath="..\..\src\debug-delay.js" + > + </File> + <File + RelativePath="..\..\src\macros.py" + > + </File> + <File + RelativePath="..\..\src\math.js" + > + </File> + <File + RelativePath="..\..\src\messages.js" + > + </File> + <File + RelativePath="..\..\src\mirror-delay.js" + 
> + </File> + <File + RelativePath="..\..\src\regexp-delay.js" + > + </File> + <File + RelativePath="..\..\src\json-delay.js" + > + </File> + <File + RelativePath="..\..\src\runtime.js" + > + </File> + <File + RelativePath="..\..\src\string.js" + > + </File> + <File + RelativePath="..\..\src\uri.js" + > + </File> + <File + RelativePath="..\..\src\v8natives.js" + > + <FileConfiguration + Name="Debug|Win32" + > + <Tool + Name="VCCustomBuildTool" + Description="Processing js files..." + CommandLine=".\js2c.cmd ..\..\src "$(IntDir)\DerivedSources"" + AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-delay.js;..\..\src\mirror-delay.js;..\..\src\date-delay.js;..\..\src\regexp-delay.js;..\..\src\json-delay.js" + Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc" + /> + </FileConfiguration> + <FileConfiguration + Name="Release|Win32" + > + <Tool + Name="VCCustomBuildTool" + Description="Processing js files..." + CommandLine=".\js2c.cmd ..\..\src "$(IntDir)\DerivedSources"" + AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-delay.js;..\..\src\mirror-delay.js;..\..\src\date-delay.js;..\..\src\regexp-delay.js;..\..\src\json-delay.js" + Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc" + /> + </FileConfiguration> + </File> + </Filter> + <Filter + Name="generated files" + > + <File + RelativePath="$(IntDir)\DerivedSources\natives.cc" + > + </File> + </Filter> + <File + RelativePath="..\..\src\snapshot-empty.cc" + > + </File> + </Files> + <Globals> + </Globals> +</VisualStudioProject> diff --git a/V8Binding/v8/tools/visual_studio/v8_cctest_arm.vcproj b/V8Binding/v8/tools/visual_studio/v8_cctest_arm.vcproj index a027a84..bd49f3b 100644 --- a/V8Binding/v8/tools/visual_studio/v8_cctest_arm.vcproj +++ b/V8Binding/v8/tools/visual_studio/v8_cctest_arm.vcproj @@ -49,6 +49,7 @@ /> <Tool Name="VCLinkerTool" + AdditionalDependencies="winmm.lib Ws2_32.lib" /> <Tool Name="VCALinkTool" @@ -109,6 +110,7 @@ /> <Tool Name="VCLinkerTool" + AdditionalDependencies="winmm.lib Ws2_32.lib" /> <Tool Name="VCALinkTool" diff --git a/V8Binding/v8/tools/visual_studio/v8_process_sample_arm.vcproj b/V8Binding/v8/tools/visual_studio/v8_process_sample_arm.vcproj new file mode 100644 index 0000000..7320231 --- /dev/null +++ b/V8Binding/v8/tools/visual_studio/v8_process_sample_arm.vcproj @@ -0,0 +1,151 @@ +<?xml version="1.0" encoding="Windows-1252"?> +<VisualStudioProject + ProjectType="Visual C++" + Version="8.00" + Name="v8_process_sample" + ProjectGUID="{EF019874-D38A-40E3-B17C-DB5923F0A79C}" + RootNamespace="v8_process_sample" + Keyword="Win32Proj" + > + <Platforms> + <Platform + Name="Win32" + /> + </Platforms> + <ToolFiles> + </ToolFiles> + <Configurations> + <Configuration + Name="Debug|Win32" + ConfigurationType="1" + InheritedPropertySheets=".\common.vsprops;.\arm.vsprops;.\debug.vsprops" + > + <Tool + Name="VCPreBuildEventTool" + /> + <Tool + Name="VCCustomBuildTool" + /> + <Tool + Name="VCXMLDataGeneratorTool" + /> + <Tool + Name="VCWebServiceProxyGeneratorTool" + /> + <Tool + Name="VCMIDLTool" + /> + <Tool + Name="VCCLCompilerTool" + /> + <Tool + Name="VCManagedResourceCompilerTool" + /> + <Tool + 
Name="VCResourceCompilerTool" + /> + <Tool + Name="VCPreLinkEventTool" + /> + <Tool + Name="VCLinkerTool" + AdditionalDependencies="winmm.lib Ws2_32.lib" + /> + <Tool + Name="VCALinkTool" + /> + <Tool + Name="VCManifestTool" + /> + <Tool + Name="VCXDCMakeTool" + /> + <Tool + Name="VCBscMakeTool" + /> + <Tool + Name="VCFxCopTool" + /> + <Tool + Name="VCAppVerifierTool" + /> + <Tool + Name="VCWebDeploymentTool" + /> + <Tool + Name="VCPostBuildEventTool" + /> + </Configuration> + <Configuration + Name="Release|Win32" + ConfigurationType="1" + InheritedPropertySheets=".\common.vsprops;.\arm.vsprops;.\release.vsprops" + > + <Tool + Name="VCPreBuildEventTool" + /> + <Tool + Name="VCCustomBuildTool" + /> + <Tool + Name="VCXMLDataGeneratorTool" + /> + <Tool + Name="VCWebServiceProxyGeneratorTool" + /> + <Tool + Name="VCMIDLTool" + /> + <Tool + Name="VCCLCompilerTool" + /> + <Tool + Name="VCManagedResourceCompilerTool" + /> + <Tool + Name="VCResourceCompilerTool" + /> + <Tool + Name="VCPreLinkEventTool" + /> + <Tool + Name="VCLinkerTool" + AdditionalDependencies="winmm.lib Ws2_32.lib" + /> + <Tool + Name="VCALinkTool" + /> + <Tool + Name="VCManifestTool" + /> + <Tool + Name="VCXDCMakeTool" + /> + <Tool + Name="VCBscMakeTool" + /> + <Tool + Name="VCFxCopTool" + /> + <Tool + Name="VCAppVerifierTool" + /> + <Tool + Name="VCWebDeploymentTool" + /> + <Tool + Name="VCPostBuildEventTool" + /> + </Configuration> + </Configurations> + <References> + </References> + <Files> + <File + RelativePath="..\..\samples\process.cc" + > + </File> + </Files> + <Globals> + </Globals> +</VisualStudioProject> diff --git a/V8Binding/v8/tools/visual_studio/v8_shell_sample_arm.vcproj b/V8Binding/v8/tools/visual_studio/v8_shell_sample_arm.vcproj new file mode 100644 index 0000000..ba7e0e0 --- /dev/null +++ b/V8Binding/v8/tools/visual_studio/v8_shell_sample_arm.vcproj @@ -0,0 +1,151 @@ +<?xml version="1.0" encoding="Windows-1252"?> +<VisualStudioProject + ProjectType="Visual C++" + Version="8.00" + Name="v8_shell_sample" + ProjectGUID="{2DE20FFA-6F5E-48D9-84D8-09B044A5B119}" + RootNamespace="v8_shell_sample" + Keyword="Win32Proj" + > + <Platforms> + <Platform + Name="Win32" + /> + </Platforms> + <ToolFiles> + </ToolFiles> + <Configurations> + <Configuration + Name="Debug|Win32" + ConfigurationType="1" + InheritedPropertySheets=".\common.vsprops;.\arm.vsprops;.\debug.vsprops" + > + <Tool + Name="VCPreBuildEventTool" + /> + <Tool + Name="VCCustomBuildTool" + /> + <Tool + Name="VCXMLDataGeneratorTool" + /> + <Tool + Name="VCWebServiceProxyGeneratorTool" + /> + <Tool + Name="VCMIDLTool" + /> + <Tool + Name="VCCLCompilerTool" + /> + <Tool + Name="VCManagedResourceCompilerTool" + /> + <Tool + Name="VCResourceCompilerTool" + /> + <Tool + Name="VCPreLinkEventTool" + /> + <Tool + Name="VCLinkerTool" + AdditionalDependencies="winmm.lib Ws2_32.lib" + /> + <Tool + Name="VCALinkTool" + /> + <Tool + Name="VCManifestTool" + /> + <Tool + Name="VCXDCMakeTool" + /> + <Tool + Name="VCBscMakeTool" + /> + <Tool + Name="VCFxCopTool" + /> + <Tool + Name="VCAppVerifierTool" + /> + <Tool + Name="VCWebDeploymentTool" + /> + <Tool + Name="VCPostBuildEventTool" + /> + </Configuration> + <Configuration + Name="Release|Win32" + ConfigurationType="1" + InheritedPropertySheets=".\common.vsprops;.\arm.vsprops;.\release.vsprops" + > + <Tool + Name="VCPreBuildEventTool" + /> + <Tool + Name="VCCustomBuildTool" + /> + <Tool + Name="VCXMLDataGeneratorTool" + /> + <Tool + Name="VCWebServiceProxyGeneratorTool" + /> + <Tool + Name="VCMIDLTool" + /> + <Tool + 
Name="VCCLCompilerTool" + /> + <Tool + Name="VCManagedResourceCompilerTool" + /> + <Tool + Name="VCResourceCompilerTool" + /> + <Tool + Name="VCPreLinkEventTool" + /> + <Tool + Name="VCLinkerTool" + AdditionalDependencies="winmm.lib Ws2_32.lib" + /> + <Tool + Name="VCALinkTool" + /> + <Tool + Name="VCManifestTool" + /> + <Tool + Name="VCXDCMakeTool" + /> + <Tool + Name="VCBscMakeTool" + /> + <Tool + Name="VCFxCopTool" + /> + <Tool + Name="VCAppVerifierTool" + /> + <Tool + Name="VCWebDeploymentTool" + /> + <Tool + Name="VCPostBuildEventTool" + /> + </Configuration> + </Configurations> + <References> + </References> + <Files> + <File + RelativePath="..\..\samples\shell.cc" + > + </File> + </Files> + <Globals> + </Globals> +</VisualStudioProject> diff --git a/V8Binding/v8/tools/visual_studio/x64.vsprops b/V8Binding/v8/tools/visual_studio/x64.vsprops index af0e47c..7587acf 100644 --- a/V8Binding/v8/tools/visual_studio/x64.vsprops +++ b/V8Binding/v8/tools/visual_studio/x64.vsprops @@ -2,6 +2,8 @@ <VisualStudioPropertySheet ProjectType="Visual C++" Version="8.00" + OutputDirectory="$(SolutionDir)$(ConfigurationName)64" + IntermediateDirectory="$(SolutionDir)$(ConfigurationName)64\obj\$(ProjectName)" Name="x64" > <Tool diff --git a/WEBKIT_MERGE_REVISION b/WEBKIT_MERGE_REVISION index 92b1f9c..b3ee734 100644 --- a/WEBKIT_MERGE_REVISION +++ b/WEBKIT_MERGE_REVISION @@ -1,5 +1,5 @@ We sync with Chromium release revision, which has both webkit revision and V8 revision. http://src.chromium.org/svn/branches/187/src@18043 - http://svn.webkit.org/repository/webkit/trunk@47029 - http://v8.googlecode.com/svn/branches/bleeding_edge@2780 +http://svn.webkit.org/repository/webkit/trunk@47029 +http://v8.googlecode.com/svn/branches/bleeding_edge@2842 |