author    Feng Qian <fqian@google.com>    2009-07-07 15:22:06 -0700
committer Feng Qian <fqian@google.com>    2009-07-07 15:22:06 -0700
commit    1d1c1cea36258166012386bf4d2f61fa3cf49680 (patch)
tree      099169e6b1d1c551d2d43c17a13a82b8f6ece18f /V8Binding
parent    32098f282b67ca44ab4d508df370d4c2bf81c2b4 (diff)
Update V8 to r2361, which is a trunk revision pushed to Chrome.
Also changed the compilation cache generation count to 1 on ARM; the same change has been made in the V8 trunk.
Diffstat (limited to 'V8Binding')
-rw-r--r--  V8Binding/v8/ChangeLog | 23
-rwxr-xr-x [-rw-r--r--]  V8Binding/v8/benchmarks/run.html | 0
-rwxr-xr-x [-rw-r--r--]  V8Binding/v8/benchmarks/style.css | 0
-rw-r--r--  V8Binding/v8/src/arm/assembler-arm.cc | 3
-rw-r--r--  V8Binding/v8/src/arm/codegen-arm.cc | 503
-rw-r--r--  V8Binding/v8/src/arm/codegen-arm.h | 6
-rw-r--r--  V8Binding/v8/src/arm/disasm-arm.cc | 23
-rw-r--r--  V8Binding/v8/src/arm/ic-arm.cc | 20
-rw-r--r--  V8Binding/v8/src/arm/simulator-arm.cc | 27
-rw-r--r--  V8Binding/v8/src/arm/stub-cache-arm.cc | 9
-rw-r--r--  V8Binding/v8/src/assembler.cc | 16
-rw-r--r--  V8Binding/v8/src/ast.h | 13
-rw-r--r--  V8Binding/v8/src/bootstrapper.cc | 33
-rw-r--r--  V8Binding/v8/src/code-stubs.cc | 17
-rw-r--r--  V8Binding/v8/src/compilation-cache.cc | 7
-rw-r--r--  V8Binding/v8/src/date-delay.js | 50
-rw-r--r--  V8Binding/v8/src/debug-delay.js | 1
-rw-r--r--  V8Binding/v8/src/factory.cc | 27
-rw-r--r--  V8Binding/v8/src/factory.h | 16
-rw-r--r--  V8Binding/v8/src/flag-definitions.h | 2
-rw-r--r--  V8Binding/v8/src/globals.h | 3
-rw-r--r--  V8Binding/v8/src/heap.cc | 48
-rw-r--r--  V8Binding/v8/src/heap.h | 14
-rw-r--r--  V8Binding/v8/src/ia32/ic-ia32.cc | 20
-rw-r--r--  V8Binding/v8/src/ia32/stub-cache-ia32.cc | 86
-rw-r--r--  V8Binding/v8/src/ic.cc | 82
-rw-r--r--  V8Binding/v8/src/interpreter-irregexp.cc | 80
-rw-r--r--  V8Binding/v8/src/log.cc | 5
-rw-r--r--  V8Binding/v8/src/math.js | 4
-rw-r--r--  V8Binding/v8/src/messages.js | 63
-rw-r--r--  V8Binding/v8/src/objects-debug.cc | 23
-rw-r--r--  V8Binding/v8/src/objects-inl.h | 44
-rw-r--r--  V8Binding/v8/src/objects.cc | 709
-rw-r--r--  V8Binding/v8/src/objects.h | 349
-rw-r--r--  V8Binding/v8/src/parser.cc | 286
-rw-r--r--  V8Binding/v8/src/platform-freebsd.cc | 1
-rw-r--r--  V8Binding/v8/src/platform-linux.cc | 1
-rw-r--r--  V8Binding/v8/src/platform-macos.cc | 171
-rw-r--r--  V8Binding/v8/src/platform-win32.cc | 30
-rw-r--r--  V8Binding/v8/src/platform.h | 7
-rw-r--r--  V8Binding/v8/src/property.h | 2
-rw-r--r--  V8Binding/v8/src/runtime.cc | 86
-rw-r--r--  V8Binding/v8/src/runtime.h | 2
-rw-r--r--  V8Binding/v8/src/runtime.js | 21
-rw-r--r--  V8Binding/v8/src/serialize.cc | 10
-rw-r--r--  V8Binding/v8/src/spaces-inl.h | 23
-rw-r--r--  V8Binding/v8/src/spaces.h | 23
-rw-r--r--  V8Binding/v8/src/string.js | 4
-rw-r--r--  V8Binding/v8/src/stub-cache.cc | 14
-rw-r--r--  V8Binding/v8/src/stub-cache.h | 16
-rw-r--r--  V8Binding/v8/src/version.cc | 4
-rw-r--r--  V8Binding/v8/src/x64/assembler-x64.cc | 18
-rw-r--r--  V8Binding/v8/src/x64/assembler-x64.h | 23
-rw-r--r--  V8Binding/v8/src/x64/codegen-x64.cc | 314
-rw-r--r--  V8Binding/v8/src/x64/macro-assembler-x64.cc | 50
-rw-r--r--  V8Binding/v8/src/x64/macro-assembler-x64.h | 8
-rw-r--r--  V8Binding/v8/src/x64/stub-cache-x64.cc | 6
-rw-r--r--  V8Binding/v8/test/cctest/test-api.cc | 17
-rw-r--r--  V8Binding/v8/test/cctest/test-decls.cc | 4
-rw-r--r--  V8Binding/v8/test/cctest/test-regexp.cc | 4
-rw-r--r--  V8Binding/v8/test/mjsunit/call-non-function.js | 11
-rw-r--r--  V8Binding/v8/test/mjsunit/debug-sourceinfo.js | 704
-rw-r--r--  V8Binding/v8/test/mjsunit/div-mod.js | 95
-rw-r--r--  V8Binding/v8/test/mjsunit/fuzz-natives.js | 4
-rw-r--r--  V8Binding/v8/test/mjsunit/regress/regress-394.js | 47
-rw-r--r--  V8Binding/v8/test/mjsunit/regress/regress-396.js | 39
-rw-r--r--  V8Binding/v8/test/mjsunit/smi-negative-zero.js | 84
-rw-r--r--  V8Binding/v8/test/mjsunit/stack-traces.js | 17
-rw-r--r--  V8Binding/v8/test/mozilla/mozilla.status | 3
69 files changed, 2851 insertions, 1624 deletions
diff --git a/V8Binding/v8/ChangeLog b/V8Binding/v8/ChangeLog
index 1306112..ac364f7 100644
--- a/V8Binding/v8/ChangeLog
+++ b/V8Binding/v8/ChangeLog
@@ -1,3 +1,26 @@
+2009-07-06: Version 1.2.12
+
+ Added stack traces collection to Error objects accessible through
+ the e.stack property.
+
+ Changed RegExp parser to use a recursive data structure instead of
+ stack-based recursion.
+
+ Optimized Date object construction and string concatenation.
+
+ Improved performance of div, mod, and mul on ARM platforms.
+
+
+2009-07-02: Version 1.2.11
+
+ Improved performance on IA-32 and ARM.
+
+ Fixed profiler sampler implementation on Mac OS X.
+
+ Changed the representation of global objects to improve
+ performance of adding a lot of new properties.
+
+
2009-06-29: Version 1.2.10
Improved debugger support.
diff --git a/V8Binding/v8/benchmarks/run.html b/V8Binding/v8/benchmarks/run.html
index 050764e..050764e 100644..100755
--- a/V8Binding/v8/benchmarks/run.html
+++ b/V8Binding/v8/benchmarks/run.html
diff --git a/V8Binding/v8/benchmarks/style.css b/V8Binding/v8/benchmarks/style.css
index 46320c1..46320c1 100644..100755
--- a/V8Binding/v8/benchmarks/style.css
+++ b/V8Binding/v8/benchmarks/style.css
diff --git a/V8Binding/v8/src/arm/assembler-arm.cc b/V8Binding/v8/src/arm/assembler-arm.cc
index d168577..a393ac0 100644
--- a/V8Binding/v8/src/arm/assembler-arm.cc
+++ b/V8Binding/v8/src/arm/assembler-arm.cc
@@ -837,6 +837,7 @@ void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
void Assembler::mul(Register dst, Register src1, Register src2,
SBit s, Condition cond) {
ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
+ // dst goes in bits 16-19 for this instruction!
emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -888,7 +889,7 @@ void Assembler::umull(Register dstL,
Condition cond) {
ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
ASSERT(!dstL.is(dstH));
- emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
+ emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
diff --git a/V8Binding/v8/src/arm/codegen-arm.cc b/V8Binding/v8/src/arm/codegen-arm.cc
index 989a09c..a3176c2 100644
--- a/V8Binding/v8/src/arm/codegen-arm.cc
+++ b/V8Binding/v8/src/arm/codegen-arm.cc
@@ -50,6 +50,11 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
bool strict);
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm);
+static void MultiplyByKnownInt(MacroAssembler* masm,
+ Register source,
+ Register destination,
+ int known_int);
+static bool IsEasyToMultiplyBy(int x);
@@ -695,33 +700,69 @@ void CodeGenerator::ToBoolean(JumpTarget* true_target,
class GenericBinaryOpStub : public CodeStub {
public:
GenericBinaryOpStub(Token::Value op,
- OverwriteMode mode)
- : op_(op), mode_(mode) { }
+ OverwriteMode mode,
+ int constant_rhs = CodeGenerator::kUnknownIntValue)
+ : op_(op),
+ mode_(mode),
+ constant_rhs_(constant_rhs),
+ specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)) { }
private:
Token::Value op_;
OverwriteMode mode_;
+ int constant_rhs_;
+ bool specialized_on_rhs_;
+
+ static const int kMaxKnownRhs = 0x40000000;
// Minor key encoding in 16 bits.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 14> {};
+ class OpBits: public BitField<Token::Value, 2, 6> {};
+ class KnownIntBits: public BitField<int, 8, 8> {};
Major MajorKey() { return GenericBinaryOp; }
int MinorKey() {
// Encode the parameters in a unique 16 bit value.
return OpBits::encode(op_)
- | ModeBits::encode(mode_);
+ | ModeBits::encode(mode_)
+ | KnownIntBits::encode(MinorKeyForKnownInt());
}
void Generate(MacroAssembler* masm);
void HandleNonSmiBitwiseOp(MacroAssembler* masm);
+ static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
+ if (constant_rhs == CodeGenerator::kUnknownIntValue) return false;
+ if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
+ if (op == Token::MOD) {
+ if (constant_rhs <= 1) return false;
+ if (constant_rhs <= 10) return true;
+ if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
+ return false;
+ }
+ return false;
+ }
+
+ int MinorKeyForKnownInt() {
+ if (!specialized_on_rhs_) return 0;
+ if (constant_rhs_ <= 10) return constant_rhs_ + 1;
+ ASSERT(IsPowerOf2(constant_rhs_));
+ int key = 12;
+ int d = constant_rhs_;
+ while ((d & 1) == 0) {
+ key++;
+ d >>= 1;
+ }
+ return key;
+ }
+
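As a rough standalone sketch (hypothetical helper names, not the BitField templates used above), the 16-bit minor key packs the overwrite mode, the token and the known-int code into disjoint bit ranges; constants 2 through 10 map to codes 3 through 11, and larger powers of two map to 12 plus their log2:

    #include <cassert>

    // Hypothetical model of the MinorKey layout above:
    // bits 0-1 mode, bits 2-7 op, bits 8-15 known-int code.
    static int EncodeMinorKey(int mode, int op, int known_int_code) {
      assert(mode < (1 << 2) && op < (1 << 6) && known_int_code < (1 << 8));
      return mode | (op << 2) | (known_int_code << 8);
    }

    // Same mapping as MinorKeyForKnownInt above; the input must be one of the
    // specialized constants (2..10, or a power of two up to kMaxKnownRhs).
    static int KnownIntCode(int constant_rhs) {
      if (constant_rhs <= 10) return constant_rhs + 1;
      int key = 12;
      while ((constant_rhs & 1) == 0) {
        key++;
        constant_rhs >>= 1;
      }
      return key;
    }

    int main() {
      assert(KnownIntCode(7) == 8);
      assert(KnownIntCode(16) == 16);           // 12 + log2(16)
      assert(KnownIntCode(0x40000000) == 42);   // kMaxKnownRhs still fits in 8 bits
      assert(EncodeMinorKey(1, 5, KnownIntCode(16)) == (1 | (5 << 2) | (16 << 8)));
      return 0;
    }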
const char* GetName() {
switch (op_) {
case Token::ADD: return "GenericBinaryOpStub_ADD";
case Token::SUB: return "GenericBinaryOpStub_SUB";
case Token::MUL: return "GenericBinaryOpStub_MUL";
case Token::DIV: return "GenericBinaryOpStub_DIV";
+ case Token::MOD: return "GenericBinaryOpStub_MOD";
case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
@@ -733,13 +774,22 @@ class GenericBinaryOpStub : public CodeStub {
}
#ifdef DEBUG
- void Print() { PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_)); }
+ void Print() {
+ if (!specialized_on_rhs_) {
+ PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
+ } else {
+ PrintF("GenericBinaryOpStub (%s by %d)\n",
+ Token::String(op_),
+ constant_rhs_);
+ }
+ }
#endif
};
void CodeGenerator::GenericBinaryOperation(Token::Value op,
- OverwriteMode overwrite_mode) {
+ OverwriteMode overwrite_mode,
+ int constant_rhs) {
VirtualFrame::SpilledScope spilled_scope;
// sp[0] : y
// sp[1] : x
@@ -750,6 +800,8 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
case Token::ADD: // fall through.
case Token::SUB: // fall through.
case Token::MUL:
+ case Token::DIV:
+ case Token::MOD:
case Token::BIT_OR:
case Token::BIT_AND:
case Token::BIT_XOR:
@@ -758,27 +810,11 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
case Token::SAR: {
frame_->EmitPop(r0); // r0 : y
frame_->EmitPop(r1); // r1 : x
- GenericBinaryOpStub stub(op, overwrite_mode);
+ GenericBinaryOpStub stub(op, overwrite_mode, constant_rhs);
frame_->CallStub(&stub, 0);
break;
}
- case Token::DIV: {
- Result arg_count = allocator_->Allocate(r0);
- ASSERT(arg_count.is_valid());
- __ mov(arg_count.reg(), Operand(1));
- frame_->InvokeBuiltin(Builtins::DIV, CALL_JS, &arg_count, 2);
- break;
- }
-
- case Token::MOD: {
- Result arg_count = allocator_->Allocate(r0);
- ASSERT(arg_count.is_valid());
- __ mov(arg_count.reg(), Operand(1));
- frame_->InvokeBuiltin(Builtins::MOD, CALL_JS, &arg_count, 2);
- break;
- }
-
case Token::COMMA:
frame_->EmitPop(r0);
// simply discard left value
@@ -842,6 +878,10 @@ void DeferredInlineSmiOperation::Generate() {
break;
}
+ // For these operations there is no optimistic operation that needs to be
+ // reverted.
+ case Token::MUL:
+ case Token::MOD:
case Token::BIT_OR:
case Token::BIT_XOR:
case Token::BIT_AND: {
@@ -872,11 +912,32 @@ void DeferredInlineSmiOperation::Generate() {
break;
}
- GenericBinaryOpStub stub(op_, overwrite_mode_);
+ GenericBinaryOpStub stub(op_, overwrite_mode_, value_);
__ CallStub(&stub);
}
+static bool PopCountLessThanEqual2(unsigned int x) {
+ x &= x - 1;
+ return (x & (x - 1)) == 0;
+}
+
+
+// Returns the index of the lowest bit set.
+static int BitPosition(unsigned x) {
+ int bit_posn = 0;
+ while ((x & 0xf) == 0) {
+ bit_posn += 4;
+ x >>= 4;
+ }
+ while ((x & 1) == 0) {
+ bit_posn++;
+ x >>= 1;
+ }
+ return bit_posn;
+}
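As a quick standalone check of the two helpers above (the functions are copied verbatim so the snippet compiles on its own; the asserts are illustrative additions):

    #include <cassert>

    // Clearing the lowest set bit twice leaves zero iff at most two bits were set.
    static bool PopCountLessThanEqual2(unsigned int x) {
      x &= x - 1;
      return (x & (x - 1)) == 0;
    }

    // Index of the lowest set bit; x must be nonzero.
    static int BitPosition(unsigned x) {
      int bit_posn = 0;
      while ((x & 0xf) == 0) {
        bit_posn += 4;
        x >>= 4;
      }
      while ((x & 1) == 0) {
        bit_posn++;
        x >>= 1;
      }
      return bit_posn;
    }

    int main() {
      assert(PopCountLessThanEqual2(6));    // 0b110: two bits set
      assert(!PopCountLessThanEqual2(7));   // 0b111: three bits set
      assert(BitPosition(40) == 3);         // 40 == 0b101000
      assert(BitPosition(1) == 0);
      return 0;
    }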
+
+
void CodeGenerator::SmiOperation(Token::Value op,
Handle<Object> value,
bool reversed,
@@ -896,6 +957,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
JumpTarget exit;
frame_->EmitPop(r0);
+ bool something_to_inline = true;
switch (op) {
case Token::ADD: {
DeferredCode* deferred =
@@ -925,6 +987,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
break;
}
+
case Token::BIT_OR:
case Token::BIT_XOR:
case Token::BIT_AND: {
@@ -946,70 +1009,114 @@ void CodeGenerator::SmiOperation(Token::Value op,
case Token::SHR:
case Token::SAR: {
if (reversed) {
- __ mov(ip, Operand(value));
- frame_->EmitPush(ip);
- frame_->EmitPush(r0);
- GenericBinaryOperation(op, mode);
-
- } else {
- int shift_value = int_value & 0x1f; // least significant 5 bits
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, shift_value, false, mode);
- __ tst(r0, Operand(kSmiTagMask));
- deferred->Branch(ne);
- __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // remove tags
- switch (op) {
- case Token::SHL: {
+ something_to_inline = false;
+ break;
+ }
+ int shift_value = int_value & 0x1f; // least significant 5 bits
+ DeferredCode* deferred =
+ new DeferredInlineSmiOperation(op, shift_value, false, mode);
+ __ tst(r0, Operand(kSmiTagMask));
+ deferred->Branch(ne);
+ __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // remove tags
+ switch (op) {
+ case Token::SHL: {
+ if (shift_value != 0) {
__ mov(r2, Operand(r2, LSL, shift_value));
- // check that the *unsigned* result fits in a smi
- __ add(r3, r2, Operand(0x40000000), SetCC);
- deferred->Branch(mi);
- break;
}
- case Token::SHR: {
- // LSR by immediate 0 means shifting 32 bits.
- if (shift_value != 0) {
- __ mov(r2, Operand(r2, LSR, shift_value));
- }
- // check that the *unsigned* result fits in a smi
- // neither of the two high-order bits can be set:
- // - 0x80000000: high bit would be lost when smi tagging
- // - 0x40000000: this number would convert to negative when
- // smi tagging these two cases can only happen with shifts
- // by 0 or 1 when handed a valid smi
- __ and_(r3, r2, Operand(0xc0000000), SetCC);
- deferred->Branch(ne);
- break;
+ // check that the *unsigned* result fits in a smi
+ __ add(r3, r2, Operand(0x40000000), SetCC);
+ deferred->Branch(mi);
+ break;
+ }
+ case Token::SHR: {
+ // LSR by immediate 0 means shifting 32 bits.
+ if (shift_value != 0) {
+ __ mov(r2, Operand(r2, LSR, shift_value));
}
- case Token::SAR: {
- if (shift_value != 0) {
- // ASR by immediate 0 means shifting 32 bits.
- __ mov(r2, Operand(r2, ASR, shift_value));
- }
- break;
+ // check that the *unsigned* result fits in a smi
+ // neither of the two high-order bits can be set:
+ // - 0x80000000: high bit would be lost when smi tagging
+ // - 0x40000000: this number would convert to negative when
+ // smi tagging these two cases can only happen with shifts
+ // by 0 or 1 when handed a valid smi
+ __ and_(r3, r2, Operand(0xc0000000), SetCC);
+ deferred->Branch(ne);
+ break;
+ }
+ case Token::SAR: {
+ if (shift_value != 0) {
+ // ASR by immediate 0 means shifting 32 bits.
+ __ mov(r2, Operand(r2, ASR, shift_value));
}
- default: UNREACHABLE();
+ break;
}
- __ mov(r0, Operand(r2, LSL, kSmiTagSize));
- deferred->BindExit();
+ default: UNREACHABLE();
}
+ __ mov(r0, Operand(r2, LSL, kSmiTagSize));
+ deferred->BindExit();
break;
}
- default:
- if (!reversed) {
- frame_->EmitPush(r0);
- __ mov(r0, Operand(value));
- frame_->EmitPush(r0);
- } else {
- __ mov(ip, Operand(value));
- frame_->EmitPush(ip);
- frame_->EmitPush(r0);
+ case Token::MOD: {
+ if (reversed || int_value < 2 || !IsPowerOf2(int_value)) {
+ something_to_inline = false;
+ break;
}
- GenericBinaryOperation(op, mode);
+ DeferredCode* deferred =
+ new DeferredInlineSmiOperation(op, int_value, reversed, mode);
+ unsigned mask = (0x80000000u | kSmiTagMask);
+ __ tst(r0, Operand(mask));
+ deferred->Branch(ne); // Go to deferred code on non-Smis and negative.
+ mask = (int_value << kSmiTagSize) - 1;
+ __ and_(r0, r0, Operand(mask));
+ deferred->BindExit();
+ break;
+ }
+
+ case Token::MUL: {
+ if (!IsEasyToMultiplyBy(int_value)) {
+ something_to_inline = false;
+ break;
+ }
+ DeferredCode* deferred =
+ new DeferredInlineSmiOperation(op, int_value, reversed, mode);
+ unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value;
+ max_smi_that_wont_overflow <<= kSmiTagSize;
+ unsigned mask = 0x80000000u;
+ while ((mask & max_smi_that_wont_overflow) == 0) {
+ mask |= mask >> 1;
+ }
+ mask |= kSmiTagMask;
+ // This does a single mask that checks for a too high value in a
+ // conservative way and for a non-Smi. It also filters out negative
+ // numbers, unfortunately, but since this code is inline we prefer
+ // brevity to comprehensiveness.
+ __ tst(r0, Operand(mask));
+ deferred->Branch(ne);
+ MultiplyByKnownInt(masm_, r0, r0, int_value);
+ deferred->BindExit();
+ break;
+ }
+
+ default:
+ something_to_inline = false;
break;
}
+ if (!something_to_inline) {
+ if (!reversed) {
+ frame_->EmitPush(r0);
+ __ mov(r0, Operand(value));
+ frame_->EmitPush(r0);
+ GenericBinaryOperation(op, mode, int_value);
+ } else {
+ __ mov(ip, Operand(value));
+ frame_->EmitPush(ip);
+ frame_->EmitPush(r0);
+ GenericBinaryOperation(op, mode, kUnknownIntValue);
+ }
+ }
+
exit.Bind();
}
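A hedged, untagged model of the two inline fast paths added above (Token::MOD by a power of two and Token::MUL by an "easy" constant). ModByPowerOf2 and OverflowGuardMask are illustrative names; the generated ARM code works on smi-tagged registers and also folds kSmiTagMask into the masks:

    #include <cassert>
    #include <cstdint>

    // MOD fast path: for x >= 0 and a power-of-two modulus m, x % m == x & (m - 1).
    static int32_t ModByPowerOf2(int32_t x, int32_t m) {
      assert(x >= 0 && m >= 2 && (m & (m - 1)) == 0);
      return x & (m - 1);
    }

    // MUL fast path guard: build a mask whose set bits start at the highest bit
    // of the largest value that cannot overflow; a nonzero (value & mask) sends
    // the operation to the deferred (slow) code. Conservative, as the comment
    // above notes: it also rejects some values that would in fact be safe.
    static uint32_t OverflowGuardMask(uint32_t max_smi_that_wont_overflow) {
      assert(max_smi_that_wont_overflow != 0);
      uint32_t mask = 0x80000000u;
      while ((mask & max_smi_that_wont_overflow) == 0) {
        mask |= mask >> 1;
      }
      return mask;
    }

    int main() {
      assert(ModByPowerOf2(29, 8) == 29 % 8);          // 29 & 7 == 5
      uint32_t mask = OverflowGuardMask(0x0fffffff);   // values below 2^27 pass the guard
      assert((0x07ffffffu & mask) == 0);               // small enough: inline path
      assert((0x10000000u & mask) != 0);               // too large: deferred path
      return 0;
    }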
@@ -3176,9 +3283,60 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 1);
- LoadAndSpill(args->at(0)); // Load the object.
- frame_->CallRuntime(Runtime::kClassOf, 1);
+ JumpTarget leave, null, function, non_function_constructor;
+
+ // Load the object into r0.
+ LoadAndSpill(args->at(0));
+ frame_->EmitPop(r0);
+
+ // If the object is a smi, we return null.
+ __ tst(r0, Operand(kSmiTagMask));
+ null.Branch(eq);
+
+ // Check that the object is a JS object but take special care of JS
+ // functions to make sure they have 'Function' as their class.
+ __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE);
+ null.Branch(lt);
+
+ // As long as JS_FUNCTION_TYPE is the last instance type and it is
+ // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+ // LAST_JS_OBJECT_TYPE.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+ __ cmp(r1, Operand(JS_FUNCTION_TYPE));
+ function.Branch(eq);
+
+ // Check if the constructor in the map is a function.
+ __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
+ __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
+ non_function_constructor.Branch(ne);
+
+ // The r0 register now contains the constructor function. Grab the
+ // instance class name from there.
+ __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kInstanceClassNameOffset));
frame_->EmitPush(r0);
+ leave.Jump();
+
+ // Functions have class 'Function'.
+ function.Bind();
+ __ mov(r0, Operand(Factory::function_class_symbol()));
+ frame_->EmitPush(r0);
+ leave.Jump();
+
+ // Objects with a non-function constructor have class 'Object'.
+ non_function_constructor.Bind();
+ __ mov(r0, Operand(Factory::Object_symbol()));
+ frame_->EmitPush(r0);
+ leave.Jump();
+
+ // Non-JS objects have class null.
+ null.Bind();
+ __ mov(r0, Operand(Factory::null_value()));
+ frame_->EmitPush(r0);
+
+ // All done.
+ leave.Bind();
}
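A hypothetical, heavily simplified model of the branch ladder that the new GenerateClassOf code emits. FakeObject and the string results are stand-ins (the real code walks heap objects and maps, and pushes Factory symbols or null_value onto the frame); only the decision structure is mirrored:

    #include <cassert>
    #include <string>

    struct FakeObject {                 // stand-in for a heap value, not a V8 type
      bool is_smi;
      bool is_js_object;                // instance type >= FIRST_JS_OBJECT_TYPE
      bool is_function;                 // instance type == JS_FUNCTION_TYPE
      bool constructor_is_function;
      std::string instance_class_name;  // from the shared function info
    };

    static std::string ClassOf(const FakeObject& obj) {
      if (obj.is_smi) return "null";                      // null.Branch(eq)
      if (!obj.is_js_object) return "null";               // null.Branch(lt)
      if (obj.is_function) return "Function";             // function.Branch(eq)
      if (!obj.constructor_is_function) return "Object";  // non_function_constructor
      return obj.instance_class_name;                     // kInstanceClassNameOffset
    }

    int main() {
      FakeObject smi = {true, false, false, false, ""};
      FakeObject date = {false, true, false, true, "Date"};
      assert(ClassOf(smi) == "null");
      assert(ClassOf(date) == "Date");
      return 0;
    }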
@@ -3257,7 +3415,7 @@ void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
LoadAndSpill(args->at(0));
frame_->EmitPop(r0);
- __ tst(r0, Operand(kSmiTagMask | 0x80000000));
+ __ tst(r0, Operand(kSmiTagMask | 0x80000000u));
cc_reg_ = eq;
}
@@ -4303,7 +4461,7 @@ static void CountLeadingZeros(
__ add(zeros, zeros, Operand(2), LeaveCC, eq);
__ mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
// Top bit.
- __ tst(scratch, Operand(0x80000000));
+ __ tst(scratch, Operand(0x80000000u));
__ add(zeros, zeros, Operand(1), LeaveCC, eq);
#endif
}
@@ -4455,7 +4613,7 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler *masm) {
// We test for the special value that has a different exponent. This test
// has the neat side effect of setting the flags according to the sign.
ASSERT(HeapNumber::kSignMask == 0x80000000u);
- __ cmp(the_int_, Operand(0x80000000));
+ __ cmp(the_int_, Operand(0x80000000u));
__ b(eq, &max_negative_int);
// Set up the correct exponent in scratch_. All non-Smi int32s have the same.
// A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
@@ -5269,6 +5427,85 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) {
}
+// Can we multiply by x with max two shifts and an add.
+// This answers yes to all integers from 2 to 10.
+static bool IsEasyToMultiplyBy(int x) {
+ if (x < 2) return false; // Avoid special cases.
+ if (x > (Smi::kMaxValue + 1) >> 2) return false; // Almost always overflows.
+ if (IsPowerOf2(x)) return true; // Simple shift.
+ if (PopCountLessThanEqual2(x)) return true; // Shift and add and shift.
+ if (IsPowerOf2(x + 1)) return true; // Patterns like 11111.
+ return false;
+}
+
+
+// Can multiply by anything that IsEasyToMultiplyBy returns true for.
+// Source and destination may be the same register. This routine does
+// not set carry and overflow the way a mul instruction would.
+static void MultiplyByKnownInt(MacroAssembler* masm,
+ Register source,
+ Register destination,
+ int known_int) {
+ if (IsPowerOf2(known_int)) {
+ __ mov(destination, Operand(source, LSL, BitPosition(known_int)));
+ } else if (PopCountLessThanEqual2(known_int)) {
+ int first_bit = BitPosition(known_int);
+ int second_bit = BitPosition(known_int ^ (1 << first_bit));
+ __ add(destination, source, Operand(source, LSL, second_bit - first_bit));
+ if (first_bit != 0) {
+ __ mov(destination, Operand(destination, LSL, first_bit));
+ }
+ } else {
+ ASSERT(IsPowerOf2(known_int + 1)); // Patterns like 1111.
+ int the_bit = BitPosition(known_int + 1);
+ __ rsb(destination, source, Operand(source, LSL, the_bit));
+ }
+}
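The shift-and-add decompositions chosen by IsEasyToMultiplyBy / MultiplyByKnownInt can be checked in plain C++. This is only an arithmetic model of the three cases above (power of two; at most two set bits; 2^k - 1 patterns), using C++ shifts in place of ARM mov/add/rsb with shifted operands; the constant must be one that IsEasyToMultiplyBy accepts:

    #include <cassert>
    #include <cstdint>

    static int LowestBit(uint32_t x) {     // x must be nonzero
      int n = 0;
      while ((x & 1) == 0) {
        n++;
        x >>= 1;
      }
      return n;
    }

    // Mirrors the three cases of MultiplyByKnownInt above.
    static int32_t MultiplyByKnownIntModel(int32_t source, int known_int) {
      if ((known_int & (known_int - 1)) == 0) {                    // power of two
        return source << LowestBit(known_int);
      }
      uint32_t stripped = known_int & (known_int - 1);
      if ((stripped & (stripped - 1)) == 0) {                      // two set bits
        int first_bit = LowestBit(known_int);
        int second_bit = LowestBit(known_int ^ (1 << first_bit));
        return (source + (source << (second_bit - first_bit))) << first_bit;
      }
      int the_bit = LowestBit(known_int + 1);                      // 0b111... patterns
      return (source << the_bit) - source;                         // the rsb case
    }

    int main() {
      for (int k = 2; k <= 10; k++) {
        assert(MultiplyByKnownIntModel(123, k) == 123 * k);
      }
      assert(MultiplyByKnownIntModel(7, 1024) == 7 * 1024);
      return 0;
    }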
+
+
+// This function (as opposed to MultiplyByKnownInt) takes the known int in
+// a register for the cases where it doesn't know a good trick, and may deliver
+// a result that needs shifting.
+static void MultiplyByKnownInt2(
+ MacroAssembler* masm,
+ Register result,
+ Register source,
+ Register known_int_register, // Smi tagged.
+ int known_int,
+ int* required_shift) { // Including Smi tag shift
+ switch (known_int) {
+ case 3:
+ __ add(result, source, Operand(source, LSL, 1));
+ *required_shift = 1;
+ break;
+ case 5:
+ __ add(result, source, Operand(source, LSL, 2));
+ *required_shift = 1;
+ break;
+ case 6:
+ __ add(result, source, Operand(source, LSL, 1));
+ *required_shift = 2;
+ break;
+ case 7:
+ __ rsb(result, source, Operand(source, LSL, 3));
+ *required_shift = 1;
+ break;
+ case 9:
+ __ add(result, source, Operand(source, LSL, 3));
+ *required_shift = 1;
+ break;
+ case 10:
+ __ add(result, source, Operand(source, LSL, 2));
+ *required_shift = 2;
+ break;
+ default:
+ ASSERT(!IsPowerOf2(known_int)); // That would be very inefficient.
+ __ mul(result, source, known_int_register);
+ *required_shift = 0;
+ }
+}
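Similarly, a small untagged model of MultiplyByKnownInt2: it produces a partial product plus a shift the caller still has to apply, and that shift also covers the smi tag, so (partial << required_shift) equals source * known_int << kSmiTagSize. PartialProduct is an illustrative name:

    #include <cassert>
    #include <cstdint>

    static int32_t PartialProduct(int32_t source, int known_int, int* required_shift) {
      switch (known_int) {
        case 3:  *required_shift = 1; return source + (source << 1);   // 3x
        case 5:  *required_shift = 1; return source + (source << 2);   // 5x
        case 6:  *required_shift = 2; return source + (source << 1);   // 3x, caller shifts to 12x
        case 7:  *required_shift = 1; return (source << 3) - source;   // 7x (the rsb case)
        case 9:  *required_shift = 1; return source + (source << 3);   // 9x
        case 10: *required_shift = 2; return source + (source << 2);   // 5x, caller shifts to 20x
        default: *required_shift = 0; return source * (known_int << 1);// mul by the tagged rhs
      }
    }

    int main() {
      const int kSmiTagSize = 1;
      const int candidates[] = {3, 5, 6, 7, 9, 10, 11};
      for (int known_int : candidates) {
        int shift = 0;
        int32_t partial = PartialProduct(41, known_int, &shift);
        assert((partial << shift) == (41 * known_int) << kSmiTagSize);
      }
      return 0;
    }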
+
+
void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// r1 : x
// r0 : y
@@ -5334,14 +5571,114 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
__ tst(r3, Operand(r3));
__ mov(r0, Operand(r3), LeaveCC, ne);
__ Ret(ne);
- // Slow case.
+ // We need -0 if we were multiplying a negative number with 0 to get 0.
+ // We know one of them was zero.
+ __ add(r2, r0, Operand(r1), SetCC);
+ __ mov(r0, Operand(Smi::FromInt(0)), LeaveCC, pl);
+ __ Ret(pl); // Return Smi 0 if the non-zero one was positive.
+ // Slow case. We fall through here if we multiplied a negative number
+ // with 0, because that would mean we should produce -0.
__ bind(&slow);
HandleBinaryOpSlowCases(masm,
&not_smi,
Builtins::MUL,
Token::MUL,
- mode_);
+ mode_);
+ break;
+ }
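The extra -0 handling above matters because smis cannot represent negative zero. A short reminder of the IEEE 754 behaviour the slow path has to reproduce, and of why the sign of (x + y) identifies the problem case when one factor is zero:

    #include <cassert>
    #include <cmath>

    int main() {
      double product = -5.0 * 0.0;                  // IEEE 754: negative zero
      assert(product == 0.0 && std::signbit(product));
      // When exactly one operand is zero, the sign of the sum is the sign of the
      // other operand, which is what the add/SetCC + 'pl' test above exploits:
      assert(std::signbit(-5.0 + 0.0));             // negative factor: must produce -0
      assert(!std::signbit(5.0 + 0.0));             // positive factor: plain smi 0 is fine
      return 0;
    }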
+
+ case Token::DIV:
+ case Token::MOD: {
+ Label not_smi;
+ if (specialized_on_rhs_) {
+ Label smi_is_unsuitable;
+ __ BranchOnNotSmi(r1, &not_smi);
+ if (IsPowerOf2(constant_rhs_)) {
+ if (op_ == Token::MOD) {
+ __ and_(r0,
+ r1,
+ Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)),
+ SetCC);
+ // We now have the answer, but if the input was negative we also
+ // have the sign bit. Our work is done if the result is
+ // positive or zero:
+ __ Ret(pl);
+ // A mod of a negative left hand side must return a negative number.
+ // Unfortunately if the answer is 0 then we must return -0. And we
+ // already optimistically trashed r0 so we may need to restore it.
+ __ eor(r0, r0, Operand(0x80000000u), SetCC);
+ // Next two instructions are conditional on the answer being -0.
+ __ mov(r0, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq);
+ __ b(eq, &smi_is_unsuitable);
+ // We need to subtract the divisor. E.g. -3 % 4 == -3.
+ __ sub(r0, r0, Operand(Smi::FromInt(constant_rhs_)));
+ } else {
+ ASSERT(op_ == Token::DIV);
+ __ tst(r1,
+ Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)));
+ __ b(ne, &smi_is_unsuitable); // Go slow on negative or remainder.
+ int shift = 0;
+ int d = constant_rhs_;
+ while ((d & 1) == 0) {
+ d >>= 1;
+ shift++;
+ }
+ __ mov(r0, Operand(r1, LSR, shift));
+ __ bic(r0, r0, Operand(kSmiTagMask));
+ }
+ } else {
+ // Not a power of 2.
+ __ tst(r1, Operand(0x80000000u));
+ __ b(ne, &smi_is_unsuitable);
+ // Find a fixed point reciprocal of the divisor so we can divide by
+ // multiplying.
+ double divisor = 1.0 / constant_rhs_;
+ int shift = 32;
+ double scale = 4294967296.0; // 1 << 32.
+ uint32_t mul;
+ // Maximise the precision of the fixed point reciprocal.
+ while (true) {
+ mul = static_cast<uint32_t>(scale * divisor);
+ if (mul >= 0x7fffffff) break;
+ scale *= 2.0;
+ shift++;
+ }
+ mul++;
+ __ mov(r2, Operand(mul));
+ __ umull(r3, r2, r2, r1);
+ __ mov(r2, Operand(r2, LSR, shift - 31));
+ // r2 is r1 / rhs. r2 is not Smi tagged.
+ // r0 is still the known rhs. r0 is Smi tagged.
+ // r1 is still the unknown lhs. r1 is Smi tagged.
+ int required_r4_shift = 0; // Including the Smi tag shift of 1.
+ // r4 = r2 * r0.
+ MultiplyByKnownInt2(masm,
+ r4,
+ r2,
+ r0,
+ constant_rhs_,
+ &required_r4_shift);
+ // r4 << required_r4_shift is now the Smi tagged rhs * (r1 / rhs).
+ if (op_ == Token::DIV) {
+ __ sub(r3, r1, Operand(r4, LSL, required_r4_shift), SetCC);
+ __ b(ne, &smi_is_unsuitable); // There was a remainder.
+ __ mov(r0, Operand(r2, LSL, kSmiTagSize));
+ } else {
+ ASSERT(op_ == Token::MOD);
+ __ sub(r0, r1, Operand(r4, LSL, required_r4_shift));
+ }
+ }
+ __ Ret();
+ __ bind(&smi_is_unsuitable);
+ } else {
+ __ jmp(&not_smi);
+ }
+ HandleBinaryOpSlowCases(masm,
+ &not_smi,
+ op_ == Token::MOD ? Builtins::MOD : Builtins::DIV,
+ op_,
+ mode_);
break;
}
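The fixed-point reciprocal used for the non-power-of-two DIV/MOD case can be sanity-checked outside the assembler. FindReciprocal mirrors the search loop above (same constants); the check assumes a non-negative dividend in the smi range, which is exactly what the preceding tst/branch guarantees:

    #include <cassert>
    #include <cstdint>

    // Find mul and shift such that (x * mul) >> shift == x / divisor.
    static void FindReciprocal(int divisor, uint32_t* mul, int* shift) {
      double recip = 1.0 / divisor;
      *shift = 32;
      double scale = 4294967296.0;  // 1 << 32
      while (true) {
        *mul = static_cast<uint32_t>(scale * recip);
        if (*mul >= 0x7fffffff) break;
        scale *= 2.0;
        (*shift)++;
      }
      (*mul)++;
    }

    int main() {
      const int kDivisor = 7;
      uint32_t mul = 0;
      int shift = 0;
      FindReciprocal(kDivisor, &mul, &shift);
      // In the generated code the left operand is still smi tagged (2 * x); umull
      // keeps the high 32 bits of the product (a shift by 32) and the extra LSR
      // by (shift - 31) brings the total to shift + 1, which cancels the smi tag
      // and leaves exactly (mul * x) >> shift, i.e. the untagged quotient.
      for (uint32_t x = 0; x <= 1000000; x += 997) {
        uint64_t product = static_cast<uint64_t>(mul) * x;
        assert(static_cast<uint32_t>(product >> shift) == x / kDivisor);
      }
      return 0;
    }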
@@ -5366,7 +5703,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
__ and_(r2, r2, Operand(0x1f));
__ mov(r0, Operand(r1, ASR, r2));
// Smi tag result.
- __ and_(r0, r0, Operand(~kSmiTagMask));
+ __ bic(r0, r0, Operand(kSmiTagMask));
break;
case Token::SHR:
// Remove tags from operands. We can't do this on a 31 bit number
diff --git a/V8Binding/v8/src/arm/codegen-arm.h b/V8Binding/v8/src/arm/codegen-arm.h
index 7760e47..6391a8e 100644
--- a/V8Binding/v8/src/arm/codegen-arm.h
+++ b/V8Binding/v8/src/arm/codegen-arm.h
@@ -186,6 +186,8 @@ class CodeGenerator: public AstVisitor {
bool in_spilled_code() const { return in_spilled_code_; }
void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
+ static const int kUnknownIntValue = -1;
+
private:
// Construction/Destruction
CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
@@ -291,7 +293,9 @@ class CodeGenerator: public AstVisitor {
void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);
- void GenericBinaryOperation(Token::Value op, OverwriteMode overwrite_mode);
+ void GenericBinaryOperation(Token::Value op,
+ OverwriteMode overwrite_mode,
+ int known_rhs = kUnknownIntValue);
void Comparison(Condition cc,
Expression* left,
Expression* right,
diff --git a/V8Binding/v8/src/arm/disasm-arm.cc b/V8Binding/v8/src/arm/disasm-arm.cc
index 588732b..c55a958 100644
--- a/V8Binding/v8/src/arm/disasm-arm.cc
+++ b/V8Binding/v8/src/arm/disasm-arm.cc
@@ -438,7 +438,19 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
return 6;
}
case 'u': { // 'u: signed or unsigned multiplies
- if (instr->Bit(22) == 1) {
+ // The manual gets the meaning of bit 22 backwards in the multiply
+ // instruction overview on page A3.16.2. The instructions that
+ // exist in u and s variants are the following:
+ // smull A4.1.87
+ // umull A4.1.129
+ // umlal A4.1.128
+ // smlal A4.1.76
+ // For these 0 means u and 1 means s. As can be seen on their individual
+ // pages. The other 18 mul instructions have the bit set or unset in
+ // arbitrary ways that are unrelated to the signedness of the instruction.
+ // None of these 18 instructions exist in both a 'u' and an 's' variant.
+
+ if (instr->Bit(22) == 0) {
Print("u");
} else {
Print("s");
@@ -494,11 +506,16 @@ void Decoder::DecodeType01(Instr* instr) {
// multiply instructions
if (instr->Bit(23) == 0) {
if (instr->Bit(21) == 0) {
- Format(instr, "mul'cond's 'rd, 'rm, 'rs");
+ // Mul calls it Rd. Everyone else calls it Rn.
+ Format(instr, "mul'cond's 'rn, 'rm, 'rs");
} else {
- Format(instr, "mla'cond's 'rd, 'rm, 'rs, 'rn");
+ // In the manual the order is rd, rm, rs, rn. But mla swaps the
+ // positions of rn and rd in the encoding.
+ Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd");
}
} else {
+ // In the manual the order is RdHi, RdLo, Rm, Rs.
+ // RdHi is what other instructions call Rn and RdLo is Rd.
Format(instr, "'um'al'cond's 'rn, 'rd, 'rm, 'rs");
}
} else {
diff --git a/V8Binding/v8/src/arm/ic-arm.cc b/V8Binding/v8/src/arm/ic-arm.cc
index 5519771..07c767e 100644
--- a/V8Binding/v8/src/arm/ic-arm.cc
+++ b/V8Binding/v8/src/arm/ic-arm.cc
@@ -72,9 +72,9 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// Jump to miss if the interceptor bit is set.
__ b(ne, miss);
- // Bail out if we have a JS global object.
+ // Bail out if we have a JS global proxy object.
__ ldrb(r3, FieldMemOperand(t0, Map::kInstanceTypeOffset));
- __ cmp(r3, Operand(JS_GLOBAL_OBJECT_TYPE));
+ __ cmp(r3, Operand(JS_GLOBAL_PROXY_TYPE));
__ b(eq, miss);
// Check that the properties array is a dictionary.
@@ -85,13 +85,13 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// Compute the capacity mask.
const int kCapacityOffset =
- Array::kHeaderSize + Dictionary::kCapacityIndex * kPointerSize;
+ Array::kHeaderSize + StringDictionary::kCapacityIndex * kPointerSize;
__ ldr(r3, FieldMemOperand(t0, kCapacityOffset));
__ mov(r3, Operand(r3, ASR, kSmiTagSize)); // convert smi to int
__ sub(r3, r3, Operand(1));
const int kElementsStartOffset =
- Array::kHeaderSize + Dictionary::kElementsStartIndex * kPointerSize;
+ Array::kHeaderSize + StringDictionary::kElementsStartIndex * kPointerSize;
// Generate an unrolled loop that performs a few probes before
// giving up. Measurements done on Gmail indicate that 2 probes
@@ -102,12 +102,12 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
__ ldr(t1, FieldMemOperand(r2, String::kLengthOffset));
__ mov(t1, Operand(t1, LSR, String::kHashShift));
if (i > 0) {
- __ add(t1, t1, Operand(Dictionary::GetProbeOffset(i)));
+ __ add(t1, t1, Operand(StringDictionary::GetProbeOffset(i)));
}
__ and_(t1, t1, Operand(r3));
// Scale the index by multiplying by the element size.
- ASSERT(Dictionary::kElementSize == 3);
+ ASSERT(StringDictionary::kEntrySize == 3);
__ add(t1, t1, Operand(t1, LSL, 1)); // t1 = t1 * 3
// Check if the key is identical to the name.
@@ -217,7 +217,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Probe the stub cache.
Code::Flags flags =
Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
- StubCache::GenerateProbe(masm, flags, r1, r2, r3);
+ StubCache::GenerateProbe(masm, flags, r1, r2, r3, no_reg);
// If the stub cache probing failed, the receiver might be a value.
// For value objects, we use the map of the prototype objects for
@@ -254,7 +254,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Probe the stub cache for the value object.
__ bind(&probe);
- StubCache::GenerateProbe(masm, flags, r1, r2, r3);
+ StubCache::GenerateProbe(masm, flags, r1, r2, r3, no_reg);
// Cache miss: Jump to runtime.
__ bind(&miss);
@@ -422,7 +422,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
NOT_IN_LOOP,
MONOMORPHIC);
- StubCache::GenerateProbe(masm, flags, r0, r2, r3);
+ StubCache::GenerateProbe(masm, flags, r0, r2, r3, no_reg);
// Cache miss: Jump to runtime.
Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
@@ -761,7 +761,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
NOT_IN_LOOP,
MONOMORPHIC);
- StubCache::GenerateProbe(masm, flags, r1, r2, r3);
+ StubCache::GenerateProbe(masm, flags, r1, r2, r3, no_reg);
// Cache miss: Jump to runtime.
Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
diff --git a/V8Binding/v8/src/arm/simulator-arm.cc b/V8Binding/v8/src/arm/simulator-arm.cc
index 53dbec9..1b42919 100644
--- a/V8Binding/v8/src/arm/simulator-arm.cc
+++ b/V8Binding/v8/src/arm/simulator-arm.cc
@@ -1080,41 +1080,44 @@ void Simulator::DecodeType01(Instr* instr) {
// multiply instruction or extra loads and stores
if (instr->Bits(7, 4) == 9) {
if (instr->Bit(24) == 0) {
- // multiply instructions
- int rd = instr->RdField();
+ // Multiply instructions have Rd in a funny place.
+ int rd = instr->RnField();
int rm = instr->RmField();
int rs = instr->RsField();
int32_t rs_val = get_register(rs);
int32_t rm_val = get_register(rm);
if (instr->Bit(23) == 0) {
if (instr->Bit(21) == 0) {
- // Format(instr, "mul'cond's 'rd, 'rm, 'rs");
+ // Format(instr, "mul'cond's 'rn, 'rm, 'rs");
int32_t alu_out = rm_val * rs_val;
set_register(rd, alu_out);
if (instr->HasS()) {
SetNZFlags(alu_out);
}
} else {
- Format(instr, "mla'cond's 'rd, 'rm, 'rs, 'rn");
+ UNIMPLEMENTED(); // mla is not used by V8.
}
} else {
// Format(instr, "'um'al'cond's 'rn, 'rd, 'rs, 'rm");
- int rn = instr->RnField();
+ int rd_lo = instr->RdField();
int32_t hi_res = 0;
int32_t lo_res = 0;
- if (instr->Bit(22) == 0) {
- // signed multiply
- UNIMPLEMENTED();
+ if (instr->Bit(22) == 1) {
+ int64_t left_op = static_cast<int32_t>(rm_val);
+ int64_t right_op = static_cast<int32_t>(rs_val);
+ uint64_t result = left_op * right_op;
+ hi_res = static_cast<int32_t>(result >> 32);
+ lo_res = static_cast<int32_t>(result & 0xffffffff);
} else {
// unsigned multiply
- uint64_t left_op = rm_val;
- uint64_t right_op = rs_val;
+ uint64_t left_op = static_cast<uint32_t>(rm_val);
+ uint64_t right_op = static_cast<uint32_t>(rs_val);
uint64_t result = left_op * right_op;
hi_res = static_cast<int32_t>(result >> 32);
lo_res = static_cast<int32_t>(result & 0xffffffff);
}
- set_register(rn, hi_res);
- set_register(rd, lo_res);
+ set_register(rd_lo, lo_res);
+ set_register(rd, hi_res);
if (instr->HasS()) {
UNIMPLEMENTED();
}
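A standalone sketch of what the corrected simulator path computes for smull versus umull: the same 32-bit register bits are widened as signed or as unsigned before the 64-bit multiply, and the result is split into hi/lo words. LongMultiply is an illustrative name:

    #include <cassert>
    #include <cstdint>

    static void LongMultiply(int32_t rm_val, int32_t rs_val, bool is_signed,
                             int32_t* hi_res, int32_t* lo_res) {
      uint64_t result;
      if (is_signed) {                                  // smull
        int64_t left_op = static_cast<int32_t>(rm_val);
        int64_t right_op = static_cast<int32_t>(rs_val);
        result = left_op * right_op;
      } else {                                          // umull
        uint64_t left_op = static_cast<uint32_t>(rm_val);
        uint64_t right_op = static_cast<uint32_t>(rs_val);
        result = left_op * right_op;
      }
      *hi_res = static_cast<int32_t>(result >> 32);
      *lo_res = static_cast<int32_t>(result & 0xffffffff);
    }

    int main() {
      int32_t hi = 0, lo = 0;
      // smull treats 0xffffffff as -1: -1 * 2 == -2, so hi:lo == 0xffffffff:0xfffffffe.
      LongMultiply(-1, 2, true, &hi, &lo);
      assert(hi == -1 && lo == -2);
      // umull treats the same bits as 4294967295: times 2 == 0x1fffffffe.
      LongMultiply(-1, 2, false, &hi, &lo);
      assert(hi == 1 && lo == -2);
      return 0;
    }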
diff --git a/V8Binding/v8/src/arm/stub-cache-arm.cc b/V8Binding/v8/src/arm/stub-cache-arm.cc
index 71f2225..e3e5502 100644
--- a/V8Binding/v8/src/arm/stub-cache-arm.cc
+++ b/V8Binding/v8/src/arm/stub-cache-arm.cc
@@ -85,7 +85,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
Code::Flags flags,
Register receiver,
Register name,
- Register scratch) {
+ Register scratch,
+ Register extra) {
Label miss;
// Make sure that code is valid. The shifting code relies on the
@@ -684,7 +685,7 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
}
-Object* CallStubCompiler::CompileCallGlobal(JSGlobalObject* object,
+Object* CallStubCompiler::CompileCallGlobal(GlobalObject* object,
JSGlobalPropertyCell* cell,
JSFunction* function,
String* name) {
@@ -879,7 +880,7 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
}
-Object* StoreStubCompiler::CompileStoreGlobal(JSGlobalObject* object,
+Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
JSGlobalPropertyCell* cell,
String* name) {
// ----------- S t a t e -------------
@@ -1012,7 +1013,7 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
}
-Object* LoadStubCompiler::CompileLoadGlobal(JSGlobalObject* object,
+Object* LoadStubCompiler::CompileLoadGlobal(GlobalObject* object,
JSGlobalPropertyCell* cell,
String* name,
bool is_dont_delete) {
diff --git a/V8Binding/v8/src/assembler.cc b/V8Binding/v8/src/assembler.cc
index 9497be8..5d0310d 100644
--- a/V8Binding/v8/src/assembler.cc
+++ b/V8Binding/v8/src/assembler.cc
@@ -608,6 +608,16 @@ static double mul_two_doubles(double x, double y) {
}
+static double div_two_doubles(double x, double y) {
+ return x / y;
+}
+
+
+static double mod_two_doubles(double x, double y) {
+ return fmod(x, y);
+}
+
+
static int native_compare_doubles(double x, double y) {
if (x == y) return 0;
return x < y ? 1 : -1;
@@ -628,6 +638,12 @@ ExternalReference ExternalReference::double_fp_operation(
case Token::MUL:
function = &mul_two_doubles;
break;
+ case Token::DIV:
+ function = &div_two_doubles;
+ break;
+ case Token::MOD:
+ function = &mod_two_doubles;
+ break;
default:
UNREACHABLE();
}
diff --git a/V8Binding/v8/src/ast.h b/V8Binding/v8/src/ast.h
index 15d762f..64d61cc 100644
--- a/V8Binding/v8/src/ast.h
+++ b/V8Binding/v8/src/ast.h
@@ -1575,16 +1575,10 @@ class RegExpQuantifier: public RegExpTree {
};
-enum CaptureAvailability {
- CAPTURE_AVAILABLE,
- CAPTURE_UNREACHABLE,
- CAPTURE_PERMANENTLY_UNREACHABLE
-};
-
class RegExpCapture: public RegExpTree {
public:
explicit RegExpCapture(RegExpTree* body, int index)
- : body_(body), index_(index), available_(CAPTURE_AVAILABLE) { }
+ : body_(body), index_(index) { }
virtual void* Accept(RegExpVisitor* visitor, void* data);
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
RegExpNode* on_success);
@@ -1600,16 +1594,11 @@ class RegExpCapture: public RegExpTree {
virtual int max_match() { return body_->max_match(); }
RegExpTree* body() { return body_; }
int index() { return index_; }
- inline CaptureAvailability available() { return available_; }
- inline void set_available(CaptureAvailability availability) {
- available_ = availability;
- }
static int StartRegister(int index) { return index * 2; }
static int EndRegister(int index) { return index * 2 + 1; }
private:
RegExpTree* body_;
int index_;
- CaptureAvailability available_;
};
diff --git a/V8Binding/v8/src/bootstrapper.cc b/V8Binding/v8/src/bootstrapper.cc
index ffd432a..8ef4956 100644
--- a/V8Binding/v8/src/bootstrapper.cc
+++ b/V8Binding/v8/src/bootstrapper.cc
@@ -539,7 +539,7 @@ void Genesis::CreateRoots(v8::Handle<v8::ObjectTemplate> global_template,
{ // --- G l o b a l ---
// Step 1: create a fresh inner JSGlobalObject
- Handle<JSGlobalObject> object;
+ Handle<GlobalObject> object;
{
Handle<JSFunction> js_global_function;
Handle<ObjectTemplateInfo> js_global_template;
@@ -579,8 +579,7 @@ void Genesis::CreateRoots(v8::Handle<v8::ObjectTemplate> global_template,
}
js_global_function->initial_map()->set_is_hidden_prototype();
- SetExpectedNofProperties(js_global_function, 100);
- object = Factory::NewJSGlobalObject(js_global_function);
+ object = Factory::NewGlobalObject(js_global_function);
}
// Set the global context for the global object.
@@ -962,12 +961,10 @@ bool Genesis::InstallNatives() {
Handle<String> name = Factory::LookupAsciiSymbol("builtins");
builtins_fun->shared()->set_instance_class_name(*name);
- SetExpectedNofProperties(builtins_fun, 100);
// Allocate the builtins object.
Handle<JSBuiltinsObject> builtins =
- Handle<JSBuiltinsObject>::cast(Factory::NewJSObject(builtins_fun,
- TENURED));
+ Handle<JSBuiltinsObject>::cast(Factory::NewGlobalObject(builtins_fun));
builtins->set_builtins(*builtins);
builtins->set_global_context(*global_context());
builtins->set_global_receiver(*builtins);
@@ -1190,10 +1187,6 @@ bool Genesis::InstallNatives() {
apply->shared()->set_length(2);
}
- // Make sure that the builtins object has fast properties.
- // If the ASSERT below fails, please increase the expected number of
- // properties for the builtins object.
- ASSERT(builtins->HasFastProperties());
#ifdef DEBUG
builtins->Verify();
#endif
@@ -1213,15 +1206,13 @@ bool Genesis::InstallSpecialObjects() {
Handle<JSObject>(js_global->builtins()), DONT_ENUM);
}
- if (FLAG_capture_stack_traces) {
- Handle<Object> Error = GetProperty(js_global, "Error");
- if (Error->IsJSObject()) {
- Handle<String> name = Factory::LookupAsciiSymbol("captureStackTraces");
- SetProperty(Handle<JSObject>::cast(Error),
- name,
- Factory::true_value(),
- NONE);
- }
+ Handle<Object> Error = GetProperty(js_global, "Error");
+ if (Error->IsJSObject()) {
+ Handle<String> name = Factory::LookupAsciiSymbol("stackTraceLimit");
+ SetProperty(Handle<JSObject>::cast(Error),
+ name,
+ Handle<Smi>(Smi::FromInt(FLAG_stack_trace_limit)),
+ NONE);
}
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -1441,8 +1432,8 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
}
}
} else {
- Handle<Dictionary> properties =
- Handle<Dictionary>(from->property_dictionary());
+ Handle<StringDictionary> properties =
+ Handle<StringDictionary>(from->property_dictionary());
int capacity = properties->Capacity();
for (int i = 0; i < capacity; i++) {
Object* raw_key(properties->KeyAt(i));
diff --git a/V8Binding/v8/src/code-stubs.cc b/V8Binding/v8/src/code-stubs.cc
index ee60332..37bc707 100644
--- a/V8Binding/v8/src/code-stubs.cc
+++ b/V8Binding/v8/src/code-stubs.cc
@@ -37,8 +37,8 @@ namespace internal {
Handle<Code> CodeStub::GetCode() {
uint32_t key = GetKey();
- int index = Heap::code_stubs()->FindNumberEntry(key);
- if (index == -1) {
+ int index = Heap::code_stubs()->FindEntry(key);
+ if (index == NumberDictionary::kNotFound) {
HandleScope scope;
// Update the static counter each time a new code stub is generated.
@@ -80,14 +80,15 @@ Handle<Code> CodeStub::GetCode() {
#endif
// Update the dictionary and the root in Heap.
- Handle<Dictionary> dict =
- Factory::DictionaryAtNumberPut(Handle<Dictionary>(Heap::code_stubs()),
- key,
- code);
+ Handle<NumberDictionary> dict =
+ Factory::DictionaryAtNumberPut(
+ Handle<NumberDictionary>(Heap::code_stubs()),
+ key,
+ code);
Heap::set_code_stubs(*dict);
- index = Heap::code_stubs()->FindNumberEntry(key);
+ index = Heap::code_stubs()->FindEntry(key);
}
- ASSERT(index != -1);
+ ASSERT(index != NumberDictionary::kNotFound);
return Handle<Code>(Code::cast(Heap::code_stubs()->ValueAt(index)));
}
diff --git a/V8Binding/v8/src/compilation-cache.cc b/V8Binding/v8/src/compilation-cache.cc
index fd706af..535b843 100644
--- a/V8Binding/v8/src/compilation-cache.cc
+++ b/V8Binding/v8/src/compilation-cache.cc
@@ -37,10 +37,17 @@ namespace internal {
static const int kSubCacheCount = 4;
// The number of generations for each sub cache.
+#if V8_TARGET_ARCH_ARM
+static const int kScriptGenerations = 1;
+static const int kEvalGlobalGenerations = 1;
+static const int kEvalContextualGenerations = 1;
+static const int kRegExpGenerations = 1;
+#else
static const int kScriptGenerations = 5;
static const int kEvalGlobalGenerations = 2;
static const int kEvalContextualGenerations = 2;
static const int kRegExpGenerations = 2;
+#endif
// Initial of each compilation cache table allocated.
static const int kInitialCacheSize = 64;
diff --git a/V8Binding/v8/src/date-delay.js b/V8Binding/v8/src/date-delay.js
index 5a109c6..3414cb9 100644
--- a/V8Binding/v8/src/date-delay.js
+++ b/V8Binding/v8/src/date-delay.js
@@ -47,7 +47,7 @@ function ThrowDateTypeError() {
// ECMA 262 - 15.9.1.2
function Day(time) {
- return FLOOR(time/msPerDay);
+ return FLOOR(time / msPerDay);
}
@@ -428,29 +428,33 @@ function TimeClip(time) {
%SetCode($Date, function(year, month, date, hours, minutes, seconds, ms) {
- if (%_IsConstructCall()) {
- // ECMA 262 - 15.9.3
- var argc = %_ArgumentsLength();
- if (argc == 0) {
- %_SetValueOf(this, %DateCurrentTime());
- return;
- }
- if (argc == 1) {
+ if (!%_IsConstructCall()) {
+ // ECMA 262 - 15.9.2
+ return (new $Date()).toString();
+ }
+
+ // ECMA 262 - 15.9.3
+ var argc = %_ArgumentsLength();
+ var value;
+ if (argc == 0) {
+ value = %DateCurrentTime();
+
+ } else if (argc == 1) {
+ if (IS_NUMBER(year)) {
+ value = TimeClip(year);
+ } else {
// According to ECMA 262, no hint should be given for this
- // conversion. However, ToPrimitive defaults to String Hint
- // for Date objects which will lose precision when the Date
+ // conversion. However, ToPrimitive defaults to STRING_HINT for
+ // Date objects which will lose precision when the Date
// constructor is called with another Date object as its
- // argument. We therefore use Number Hint for the conversion
- // (which is the default for everything else than Date
- // objects). This makes us behave like KJS and SpiderMonkey.
+ // argument. We therefore use NUMBER_HINT for the conversion,
+ // which is the default for everything else than Date objects.
+ // This makes us behave like KJS and SpiderMonkey.
var time = ToPrimitive(year, NUMBER_HINT);
- if (IS_STRING(time)) {
- %_SetValueOf(this, DateParse(time));
- } else {
- %_SetValueOf(this, TimeClip(ToNumber(time)));
- }
- return;
+ value = IS_STRING(time) ? DateParse(time) : TimeClip(ToNumber(time));
}
+
+ } else {
year = ToNumber(year);
month = ToNumber(month);
date = argc > 2 ? ToNumber(date) : 1;
@@ -462,11 +466,9 @@ function TimeClip(time) {
? 1900 + TO_INTEGER(year) : year;
var day = MakeDay(year, month, date);
var time = MakeTime(hours, minutes, seconds, ms);
- %_SetValueOf(this, TimeClip(UTC(MakeDate(day, time))));
- } else {
- // ECMA 262 - 15.9.2
- return (new $Date()).toString();
+ value = TimeClip(UTC(MakeDate(day, time)));
}
+ %_SetValueOf(this, value);
});
diff --git a/V8Binding/v8/src/debug-delay.js b/V8Binding/v8/src/debug-delay.js
index 423a118..857c554 100644
--- a/V8Binding/v8/src/debug-delay.js
+++ b/V8Binding/v8/src/debug-delay.js
@@ -991,7 +991,6 @@ CompileEvent.prototype.toJSONProtocol = function() {
}
o.body = {};
o.body.script = this.script_;
- o.setOption('includeSource', true);
return o.toJSONProtocol();
}
diff --git a/V8Binding/v8/src/factory.cc b/V8Binding/v8/src/factory.cc
index bc48ebf..4d7a957 100644
--- a/V8Binding/v8/src/factory.cc
+++ b/V8Binding/v8/src/factory.cc
@@ -49,9 +49,17 @@ Handle<FixedArray> Factory::NewFixedArrayWithHoles(int size) {
}
-Handle<Dictionary> Factory::NewDictionary(int at_least_space_for) {
+Handle<StringDictionary> Factory::NewStringDictionary(int at_least_space_for) {
ASSERT(0 <= at_least_space_for);
- CALL_HEAP_FUNCTION(Dictionary::Allocate(at_least_space_for), Dictionary);
+ CALL_HEAP_FUNCTION(StringDictionary::Allocate(at_least_space_for),
+ StringDictionary);
+}
+
+
+Handle<NumberDictionary> Factory::NewNumberDictionary(int at_least_space_for) {
+ ASSERT(0 <= at_least_space_for);
+ CALL_HEAP_FUNCTION(NumberDictionary::Allocate(at_least_space_for),
+ NumberDictionary);
}
@@ -619,10 +627,10 @@ Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
}
-Handle<JSGlobalObject> Factory::NewJSGlobalObject(
+Handle<GlobalObject> Factory::NewGlobalObject(
Handle<JSFunction> constructor) {
- CALL_HEAP_FUNCTION(Heap::AllocateJSGlobalObject(*constructor),
- JSGlobalObject);
+ CALL_HEAP_FUNCTION(Heap::AllocateGlobalObject(*constructor),
+ GlobalObject);
}
@@ -655,10 +663,11 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(Handle<String> name) {
}
-Handle<Dictionary> Factory::DictionaryAtNumberPut(Handle<Dictionary> dictionary,
- uint32_t key,
- Handle<Object> value) {
- CALL_HEAP_FUNCTION(dictionary->AtNumberPut(key, *value), Dictionary);
+Handle<NumberDictionary> Factory::DictionaryAtNumberPut(
+ Handle<NumberDictionary> dictionary,
+ uint32_t key,
+ Handle<Object> value) {
+ CALL_HEAP_FUNCTION(dictionary->AtNumberPut(key, *value), NumberDictionary);
}
diff --git a/V8Binding/v8/src/factory.h b/V8Binding/v8/src/factory.h
index 40cf578..90fb29c 100644
--- a/V8Binding/v8/src/factory.h
+++ b/V8Binding/v8/src/factory.h
@@ -47,7 +47,9 @@ class Factory : public AllStatic {
// Allocate a new fixed array with non-existing entries (the hole).
static Handle<FixedArray> NewFixedArrayWithHoles(int size);
- static Handle<Dictionary> NewDictionary(int at_least_space_for);
+ static Handle<NumberDictionary> NewNumberDictionary(int at_least_space_for);
+
+ static Handle<StringDictionary> NewStringDictionary(int at_least_space_for);
static Handle<DescriptorArray> NewDescriptorArray(int number_of_descriptors);
@@ -183,9 +185,8 @@ class Factory : public AllStatic {
static Handle<JSObject> NewJSObject(Handle<JSFunction> constructor,
PretenureFlag pretenure = NOT_TENURED);
- // JS global objects are pretenured.
- static Handle<JSGlobalObject> NewJSGlobalObject(
- Handle<JSFunction> constructor);
+ // Global objects are pretenured.
+ static Handle<GlobalObject> NewGlobalObject(Handle<JSFunction> constructor);
// JS objects are pretenured when allocated by the bootstrapper and
// runtime.
@@ -314,9 +315,10 @@ class Factory : public AllStatic {
static Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name);
- static Handle<Dictionary> DictionaryAtNumberPut(Handle<Dictionary>,
- uint32_t key,
- Handle<Object> value);
+ static Handle<NumberDictionary> DictionaryAtNumberPut(
+ Handle<NumberDictionary>,
+ uint32_t key,
+ Handle<Object> value);
#ifdef ENABLE_DEBUGGER_SUPPORT
static Handle<DebugInfo> NewDebugInfo(Handle<SharedFunctionInfo> shared);
diff --git a/V8Binding/v8/src/flag-definitions.h b/V8Binding/v8/src/flag-definitions.h
index 983fe22..814b2c4 100644
--- a/V8Binding/v8/src/flag-definitions.h
+++ b/V8Binding/v8/src/flag-definitions.h
@@ -110,7 +110,7 @@ DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
DEFINE_string(expose_debug_as, NULL, "expose debug in global object")
DEFINE_string(natives_file, NULL, "alternative natives file")
DEFINE_bool(expose_gc, false, "expose gc extension")
-DEFINE_bool(capture_stack_traces, false, "capture stack traces")
+DEFINE_int(stack_trace_limit, 10, "number of stack frames to capture")
// builtins-ia32.cc
DEFINE_bool(inline_new, true, "use fast inline allocation")
diff --git a/V8Binding/v8/src/globals.h b/V8Binding/v8/src/globals.h
index bf83d0d..8088331 100644
--- a/V8Binding/v8/src/globals.h
+++ b/V8Binding/v8/src/globals.h
@@ -198,7 +198,8 @@ class FixedArray;
class FunctionEntry;
class FunctionLiteral;
class FunctionTemplateInfo;
-class Dictionary;
+class NumberDictionary;
+class StringDictionary;
class FreeStoreAllocationPolicy;
template <typename T> class Handle;
class Heap;
diff --git a/V8Binding/v8/src/heap.cc b/V8Binding/v8/src/heap.cc
index a29340c..749013a 100644
--- a/V8Binding/v8/src/heap.cc
+++ b/V8Binding/v8/src/heap.cc
@@ -221,6 +221,7 @@ void Heap::ReportStatisticsAfterGC() {
// NewSpace statistics are logged exactly once when --log-gc is turned on.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
if (FLAG_heap_stats) {
+ new_space_.CollectStatistics();
ReportHeapStatistics("After GC");
} else if (FLAG_log_gc) {
new_space_.ReportStatistics();
@@ -428,22 +429,8 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
old_gen_allocation_limit_ =
old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
old_gen_exhausted_ = false;
-
- // If we have used the mark-compact collector to collect the new
- // space, and it has not compacted the new space, we force a
- // separate scavenge collection. This is a hack. It covers the
- // case where (1) a new space collection was requested, (2) the
- // collector selection policy selected the mark-compact collector,
- // and (3) the mark-compact collector policy selected not to
- // compact the new space. In that case, there is no more (usable)
- // free space in the new space after the collection compared to
- // before.
- if (space == NEW_SPACE && !MarkCompactCollector::HasCompacted()) {
- Scavenge();
- }
- } else {
- Scavenge();
}
+ Scavenge();
Counters::objs_since_last_young.Set(0);
PostGarbageCollectionProcessing();
@@ -1405,14 +1392,14 @@ bool Heap::CreateInitialObjects() {
prototype_accessors_ = Proxy::cast(obj);
// Allocate the code_stubs dictionary.
- obj = Dictionary::Allocate(4);
+ obj = NumberDictionary::Allocate(4);
if (obj->IsFailure()) return false;
- code_stubs_ = Dictionary::cast(obj);
+ code_stubs_ = NumberDictionary::cast(obj);
// Allocate the non_monomorphic_cache used in stub-cache.cc
- obj = Dictionary::Allocate(4);
+ obj = NumberDictionary::Allocate(4);
if (obj->IsFailure()) return false;
- non_monomorphic_cache_ = Dictionary::cast(obj);
+ non_monomorphic_cache_ = NumberDictionary::cast(obj);
CreateFixedStubs();
@@ -1558,7 +1545,7 @@ Object* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
Object* Heap::AllocateSharedFunctionInfo(Object* name) {
- Object* result = Allocate(shared_function_info_map(), NEW_SPACE);
+ Object* result = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
if (result->IsFailure()) return result;
SharedFunctionInfo* share = SharedFunctionInfo::cast(result);
@@ -2050,7 +2037,7 @@ Object* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
// Allocate the backing storage for the properties.
int prop_size = map->unused_property_fields() - map->inobject_properties();
- Object* properties = AllocateFixedArray(prop_size);
+ Object* properties = AllocateFixedArray(prop_size, pretenure);
if (properties->IsFailure()) return properties;
// Allocate the JSObject.
@@ -2080,19 +2067,24 @@ Object* Heap::AllocateJSObject(JSFunction* constructor,
// Allocate the object based on the constructors initial map.
Object* result =
AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
- // Make sure result is NOT a JS global object if valid.
- ASSERT(result->IsFailure() || !result->IsJSGlobalObject());
+ // Make sure result is NOT a global object if valid.
+ ASSERT(result->IsFailure() || !result->IsGlobalObject());
return result;
}
-Object* Heap::AllocateJSGlobalObject(JSFunction* constructor) {
+Object* Heap::AllocateGlobalObject(JSFunction* constructor) {
ASSERT(constructor->has_initial_map());
// Make sure no field properties are described in the initial map.
// This guarantees us that normalizing the properties does not
// require us to change property values to JSGlobalPropertyCells.
ASSERT(constructor->initial_map()->NextFreePropertyIndex() == 0);
+ // Make sure we don't have a ton of pre-allocated slots in the
+ // global objects. They will be unused once we normalize the object.
+ ASSERT(constructor->initial_map()->unused_property_fields() == 0);
+ ASSERT(constructor->initial_map()->inobject_properties() == 0);
+
// Allocate the object based on the constructors initial map.
Object* result = AllocateJSObjectFromMap(constructor->initial_map(), TENURED);
if (result->IsFailure()) return result;
@@ -2102,8 +2094,8 @@ Object* Heap::AllocateJSGlobalObject(JSFunction* constructor) {
result = global->NormalizeProperties(CLEAR_INOBJECT_PROPERTIES);
if (result->IsFailure()) return result;
- // Make sure result is a JS global object with properties in dictionary.
- ASSERT(global->IsJSGlobalObject());
+ // Make sure result is a global object with properties in dictionary.
+ ASSERT(global->IsGlobalObject());
ASSERT(!global->HasFastProperties());
return global;
}
@@ -2182,7 +2174,7 @@ Object* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
// Allocate the backing storage for the properties.
int prop_size = map->unused_property_fields() - map->inobject_properties();
- Object* properties = AllocateFixedArray(prop_size);
+ Object* properties = AllocateFixedArray(prop_size, TENURED);
if (properties->IsFailure()) return properties;
// Reset the map for the object.
@@ -2571,7 +2563,7 @@ Object* Heap::AllocateHashTable(int length) {
Object* result = Heap::AllocateFixedArray(length);
if (result->IsFailure()) return result;
reinterpret_cast<Array*>(result)->set_map(hash_table_map());
- ASSERT(result->IsDictionary());
+ ASSERT(result->IsHashTable());
return result;
}
diff --git a/V8Binding/v8/src/heap.h b/V8Binding/v8/src/heap.h
index 3667348..9f61ce2 100644
--- a/V8Binding/v8/src/heap.h
+++ b/V8Binding/v8/src/heap.h
@@ -118,8 +118,8 @@ namespace internal {
V(Map, neander_map) \
V(JSObject, message_listeners) \
V(Proxy, prototype_accessors) \
- V(Dictionary, code_stubs) \
- V(Dictionary, non_monomorphic_cache) \
+ V(NumberDictionary, code_stubs) \
+ V(NumberDictionary, non_monomorphic_cache) \
V(Code, js_entry_code) \
V(Code, js_construct_entry_code) \
V(Code, c_entry_code) \
@@ -289,11 +289,11 @@ class Heap : public AllStatic {
static Object* AllocateJSObject(JSFunction* constructor,
PretenureFlag pretenure = NOT_TENURED);
- // Allocates and initializes a new JS global object based on a constructor.
+ // Allocates and initializes a new global object based on a constructor.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateJSGlobalObject(JSFunction* constructor);
+ static Object* AllocateGlobalObject(JSFunction* constructor);
// Returns a deep copy of the JavaScript object.
// Properties and elements are copied too.
@@ -692,10 +692,10 @@ class Heap : public AllStatic {
static inline AllocationSpace TargetSpaceId(InstanceType type);
// Sets the stub_cache_ (only used when expanding the dictionary).
- static void set_code_stubs(Dictionary* value) { code_stubs_ = value; }
+ static void set_code_stubs(NumberDictionary* value) { code_stubs_ = value; }
// Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
- static void set_non_monomorphic_cache(Dictionary* value) {
+ static void set_non_monomorphic_cache(NumberDictionary* value) {
non_monomorphic_cache_ = value;
}
@@ -1063,9 +1063,11 @@ class VerifyPointersAndRSetVisitor: public ObjectVisitor {
HeapObject* object = HeapObject::cast(*current);
ASSERT(Heap::Contains(object));
ASSERT(object->map()->IsMap());
+#ifndef V8_TARGET_ARCH_X64
if (Heap::InNewSpace(object)) {
ASSERT(Page::IsRSetSet(reinterpret_cast<Address>(current), 0));
}
+#endif
}
}
}
diff --git a/V8Binding/v8/src/ia32/ic-ia32.cc b/V8Binding/v8/src/ia32/ic-ia32.cc
index 004dad2..97de4da 100644
--- a/V8Binding/v8/src/ia32/ic-ia32.cc
+++ b/V8Binding/v8/src/ia32/ic-ia32.cc
@@ -70,7 +70,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
// Jump to miss if the interceptor bit is set.
__ j(not_zero, miss_label, not_taken);
- // Bail out if we have a JS global object.
+ // Bail out if we have a JS global proxy object.
__ movzx_b(r0, FieldOperand(r0, Map::kInstanceTypeOffset));
__ cmp(r0, JS_GLOBAL_PROXY_TYPE);
__ j(equal, miss_label, not_taken);
@@ -83,7 +83,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
// Compute the capacity mask.
const int kCapacityOffset =
- Array::kHeaderSize + Dictionary::kCapacityIndex * kPointerSize;
+ Array::kHeaderSize + StringDictionary::kCapacityIndex * kPointerSize;
__ mov(r2, FieldOperand(r0, kCapacityOffset));
__ shr(r2, kSmiTagSize); // convert smi to int
__ dec(r2);
@@ -93,18 +93,18 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
// cover ~93% of loads from dictionaries.
static const int kProbes = 4;
const int kElementsStartOffset =
- Array::kHeaderSize + Dictionary::kElementsStartIndex * kPointerSize;
+ Array::kHeaderSize + StringDictionary::kElementsStartIndex * kPointerSize;
for (int i = 0; i < kProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
__ mov(r1, FieldOperand(name, String::kLengthOffset));
__ shr(r1, String::kHashShift);
if (i > 0) {
- __ add(Operand(r1), Immediate(Dictionary::GetProbeOffset(i)));
+ __ add(Operand(r1), Immediate(StringDictionary::GetProbeOffset(i)));
}
__ and_(r1, Operand(r2));
- // Scale the index by multiplying by the element size.
- ASSERT(Dictionary::kElementSize == 3);
+ // Scale the index by multiplying by the entry size.
+ ASSERT(StringDictionary::kEntrySize == 3);
__ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
// Check if the key is identical to the name.
@@ -437,7 +437,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Probe the stub cache.
Code::Flags flags =
Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
- StubCache::GenerateProbe(masm, flags, edx, ecx, ebx);
+ StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, eax);
// If the stub cache probing failed, the receiver might be a value.
// For value objects, we use the map of the prototype objects for
@@ -474,7 +474,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Probe the stub cache for the value object.
__ bind(&probe);
- StubCache::GenerateProbe(masm, flags, edx, ecx, ebx);
+ StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);
// Cache miss: Jump to runtime.
__ bind(&miss);
@@ -648,7 +648,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
NOT_IN_LOOP,
MONOMORPHIC);
- StubCache::GenerateProbe(masm, flags, eax, ecx, ebx);
+ StubCache::GenerateProbe(masm, flags, eax, ecx, ebx, edx);
// Cache miss: Jump to runtime.
Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
@@ -878,7 +878,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
NOT_IN_LOOP,
MONOMORPHIC);
- StubCache::GenerateProbe(masm, flags, edx, ecx, ebx);
+ StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);
// Cache miss: Jump to runtime.
Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
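The hand-written probe loop in GenerateDictionaryLoad above leans on the StringDictionary layout: a small header holding the capacity, followed by fixed-size entries of three words (key, value, details), probed at the masked offsets described in its comment. A minimal stand-alone sketch of that layout and lookup follows; the header indices and the probe-offset formula (taken from the comment in the stub) are illustrative assumptions, not copies of the real class definitions.

    #include <cstdint>
    #include <vector>

    // Illustrative dictionary layout: a flat word array with a small header
    // followed by kEntrySize-word entries of (key, value, details).
    struct DictionarySketch {
      static constexpr int kCapacityIndex = 0;       // assumed header slot
      static constexpr int kElementsStartIndex = 3;  // assumed header size
      static constexpr int kEntrySize = 3;           // key, value, details
      static constexpr intptr_t kNotFound = -1;

      std::vector<intptr_t> slots;  // header + capacity * kEntrySize words

      intptr_t capacity() const { return slots[kCapacityIndex]; }

      // Masked index per the comment in the stub: (hash + i + i * i) & mask.
      static uint32_t ProbeOffset(uint32_t i) { return i + i * i; }

      intptr_t FindEntry(uint32_t hash, intptr_t key) const {
        uint32_t mask = static_cast<uint32_t>(capacity()) - 1;
        for (uint32_t i = 0; i < 4; i++) {  // the stub gives up after 4 probes
          uint32_t index = (hash + ProbeOffset(i)) & mask;
          intptr_t base = kElementsStartIndex + index * kEntrySize;
          if (slots[base] == key) return base;  // base+1: value, base+2: details
        }
        return kNotFound;  // the generated code jumps to the miss label instead
      }
    };

The inline cache stops after four probes because, as the comment in the stub notes, that already covers roughly 93% of dictionary loads; anything rarer falls through to the runtime.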
diff --git a/V8Binding/v8/src/ia32/stub-cache-ia32.cc b/V8Binding/v8/src/ia32/stub-cache-ia32.cc
index 83beb65..ce4981d 100644
--- a/V8Binding/v8/src/ia32/stub-cache-ia32.cc
+++ b/V8Binding/v8/src/ia32/stub-cache-ia32.cc
@@ -41,39 +41,61 @@ static void ProbeTable(MacroAssembler* masm,
Code::Flags flags,
StubCache::Table table,
Register name,
- Register offset) {
+ Register offset,
+ Register extra) {
ExternalReference key_offset(SCTableReference::keyReference(table));
ExternalReference value_offset(SCTableReference::valueReference(table));
Label miss;
- // Save the offset on the stack.
- __ push(offset);
+ if (extra.is_valid()) {
+ // Get the code entry from the cache.
+ __ mov(extra, Operand::StaticArray(offset, times_2, value_offset));
- // Check that the key in the entry matches the name.
- __ cmp(name, Operand::StaticArray(offset, times_2, key_offset));
- __ j(not_equal, &miss, not_taken);
+ // Check that the key in the entry matches the name.
+ __ cmp(name, Operand::StaticArray(offset, times_2, key_offset));
+ __ j(not_equal, &miss, not_taken);
- // Get the code entry from the cache.
- __ mov(offset, Operand::StaticArray(offset, times_2, value_offset));
+ // Check that the flags match what we're looking for.
+ __ mov(offset, FieldOperand(extra, Code::kFlagsOffset));
+ __ and_(offset, ~Code::kFlagsNotUsedInLookup);
+ __ cmp(offset, flags);
+ __ j(not_equal, &miss);
- // Check that the flags match what we're looking for.
- __ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
- __ and_(offset, ~Code::kFlagsNotUsedInLookup);
- __ cmp(offset, flags);
- __ j(not_equal, &miss);
+ // Jump to the first instruction in the code stub.
+ __ add(Operand(extra), Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(Operand(extra));
- // Restore offset and re-load code entry from cache.
- __ pop(offset);
- __ mov(offset, Operand::StaticArray(offset, times_2, value_offset));
+ __ bind(&miss);
+ } else {
+ // Save the offset on the stack.
+ __ push(offset);
- // Jump to the first instruction in the code stub.
- __ add(Operand(offset), Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(Operand(offset));
+ // Check that the key in the entry matches the name.
+ __ cmp(name, Operand::StaticArray(offset, times_2, key_offset));
+ __ j(not_equal, &miss, not_taken);
- // Miss: Restore offset and fall through.
- __ bind(&miss);
- __ pop(offset);
+ // Get the code entry from the cache.
+ __ mov(offset, Operand::StaticArray(offset, times_2, value_offset));
+
+ // Check that the flags match what we're looking for.
+ __ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
+ __ and_(offset, ~Code::kFlagsNotUsedInLookup);
+ __ cmp(offset, flags);
+ __ j(not_equal, &miss);
+
+ // Restore offset and re-load code entry from cache.
+ __ pop(offset);
+ __ mov(offset, Operand::StaticArray(offset, times_2, value_offset));
+
+ // Jump to the first instruction in the code stub.
+ __ add(Operand(offset), Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(Operand(offset));
+
+ // Pop at miss.
+ __ bind(&miss);
+ __ pop(offset);
+ }
}
@@ -81,7 +103,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
Code::Flags flags,
Register receiver,
Register name,
- Register scratch) {
+ Register scratch,
+ Register extra) {
Label miss;
// Make sure that code is valid. The shifting code relies on the
@@ -94,6 +117,9 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
// Make sure that there are no register conflicts.
ASSERT(!scratch.is(receiver));
ASSERT(!scratch.is(name));
+ ASSERT(!extra.is(receiver));
+ ASSERT(!extra.is(name));
+ ASSERT(!extra.is(scratch));
// Check that the receiver isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
@@ -106,15 +132,19 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
__ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
// Probe the primary table.
- ProbeTable(masm, flags, kPrimary, name, scratch);
+ ProbeTable(masm, flags, kPrimary, name, scratch, extra);
// Primary miss: Compute hash for secondary probe.
+ __ mov(scratch, FieldOperand(name, String::kLengthOffset));
+ __ add(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ xor_(scratch, flags);
+ __ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
__ sub(scratch, Operand(name));
__ add(Operand(scratch), Immediate(flags));
__ and_(scratch, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
// Probe the secondary table.
- ProbeTable(masm, flags, kSecondary, name, scratch);
+ ProbeTable(masm, flags, kSecondary, name, scratch, extra);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
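Rewritten this way, ProbeTable has two paths: when a spare register is passed in, the code entry is loaded up front and jumped through directly, and when no register can be spared the original push/pop of the offset is kept. GenerateProbe itself is easier to follow against a scalar model of the megamorphic stub cache: two small direct-mapped tables of (key, code) entries, a primary hash, a derived secondary hash, and a flags check before the cached stub is reused. The sketch below only illustrates that structure; the table sizes and the hash mixing are placeholders, not the real computations.

    #include <cstdint>

    // Toy two-table stub cache: probe the primary slot, then the secondary,
    // and require both the name and the code flags to match before a hit.
    struct ToyStubCache {
      struct Entry { uint32_t name; uint32_t flags; const void* code; };

      static constexpr int kPrimarySize = 2048;   // assumed
      static constexpr int kSecondarySize = 512;  // assumed

      Entry primary[kPrimarySize] = {};
      Entry secondary[kSecondarySize] = {};

      // Placeholder mixing; the generated code combines the name hash, the
      // receiver map, and the code flags.
      static uint32_t PrimaryIndex(uint32_t name, uint32_t map, uint32_t flags) {
        return (name + map + flags) & (kPrimarySize - 1);
      }
      static uint32_t SecondaryIndex(uint32_t primary_index, uint32_t name,
                                     uint32_t flags) {
        return (primary_index - name + flags) & (kSecondarySize - 1);
      }

      const void* Probe(uint32_t name, uint32_t map, uint32_t flags) const {
        uint32_t p = PrimaryIndex(name, map, flags);
        if (primary[p].name == name && primary[p].flags == flags) {
          return primary[p].code;   // hit: the stub is entered directly
        }
        uint32_t s = SecondaryIndex(p, name, flags);
        if (secondary[s].name == name && secondary[s].flags == flags) {
          return secondary[s].code;
        }
        return nullptr;             // miss: fall through to the runtime
      }
    };

The extra-register path pays off because most callers updated in ic-ia32.cc can spare eax or edx; only the store and value-object probes, which cannot, still pass no_reg and take the slower push/pop sequence.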
@@ -715,7 +745,7 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
}
-Object* CallStubCompiler::CompileCallGlobal(JSGlobalObject* object,
+Object* CallStubCompiler::CompileCallGlobal(GlobalObject* object,
JSGlobalPropertyCell* cell,
JSFunction* function,
String* name) {
@@ -911,7 +941,7 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
}
-Object* StoreStubCompiler::CompileStoreGlobal(JSGlobalObject* object,
+Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
JSGlobalPropertyCell* cell,
String* name) {
// ----------- S t a t e -------------
@@ -1092,7 +1122,7 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
}
-Object* LoadStubCompiler::CompileLoadGlobal(JSGlobalObject* object,
+Object* LoadStubCompiler::CompileLoadGlobal(GlobalObject* object,
JSGlobalPropertyCell* cell,
String* name,
bool is_dont_delete) {

diff --git a/V8Binding/v8/src/ic.cc b/V8Binding/v8/src/ic.cc
index e062dd9..dfdf722 100644
--- a/V8Binding/v8/src/ic.cc
+++ b/V8Binding/v8/src/ic.cc
@@ -265,6 +265,39 @@ void KeyedStoreIC::Clear(Address address, Code* target) {
}
+static bool HasInterceptorGetter(JSObject* object) {
+ return !object->GetNamedInterceptor()->getter()->IsUndefined();
+}
+
+
+static void LookupForRead(Object* object,
+ String* name,
+ LookupResult* lookup) {
+ object->Lookup(name, lookup);
+ if (lookup->IsNotFound() || lookup->type() != INTERCEPTOR) {
+ return;
+ }
+
+ JSObject* holder = lookup->holder();
+ if (HasInterceptorGetter(holder)) {
+ return;
+ }
+
+  // There is no getter, so skip it and continue the lookup down the
+  // prototype chain.
+ holder->LocalLookupRealNamedProperty(name, lookup);
+ if (lookup->IsValid()) {
+ return;
+ }
+
+ Object* proto = holder->GetPrototype();
+ if (proto == Heap::null_value()) {
+ return;
+ }
+
+ LookupForRead(proto, name, lookup);
+}
+
+
Object* CallIC::TryCallAsFunction(Object* object) {
HandleScope scope;
Handle<Object> target(object);
@@ -312,7 +345,7 @@ Object* CallIC::LoadFunction(State state,
// Lookup the property in the object.
LookupResult lookup;
- object->Lookup(*name, &lookup);
+ LookupForRead(*object, *name, &lookup);
if (!lookup.IsValid()) {
// If the object does not have the requested property, check which
@@ -419,19 +452,18 @@ void CallIC::UpdateCaches(LookupResult* lookup,
}
case NORMAL: {
if (!object->IsJSObject()) return;
- if (object->IsJSGlobalObject()) {
+ if (object->IsGlobalObject()) {
// The stub generated for the global object picks the value directly
// from the property cell. So the property must be directly on the
// global object.
- Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(object);
+ Handle<GlobalObject> global = Handle<GlobalObject>::cast(object);
if (lookup->holder() != *global) return;
JSGlobalPropertyCell* cell =
JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
- if (cell->value()->IsJSFunction()) {
- JSFunction* function = JSFunction::cast(cell->value());
- code = StubCache::ComputeCallGlobal(argc, in_loop, *name, *global,
- cell, function);
- }
+ if (!cell->value()->IsJSFunction()) return;
+ JSFunction* function = JSFunction::cast(cell->value());
+ code = StubCache::ComputeCallGlobal(argc, in_loop, *name, *global,
+ cell, function);
} else {
// There is only one shared stub for calling normalized
// properties. It does not traverse the prototype chain, so the
@@ -444,6 +476,7 @@ void CallIC::UpdateCaches(LookupResult* lookup,
break;
}
case INTERCEPTOR: {
+ ASSERT(HasInterceptorGetter(lookup->holder()));
code = StubCache::ComputeCallInterceptor(argc, *name, *object,
lookup->holder());
break;
@@ -455,7 +488,7 @@ void CallIC::UpdateCaches(LookupResult* lookup,
// If we're unable to compute the stub (not enough memory left), we
// simply avoid updating the caches.
- if (code->IsFailure()) return;
+ if (code == NULL || code->IsFailure()) return;
// Patch the call site depending on the state of the cache.
if (state == UNINITIALIZED ||
@@ -530,7 +563,7 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
// Named lookup in the object.
LookupResult lookup;
- object->Lookup(*name, &lookup);
+ LookupForRead(*object, *name, &lookup);
// If lookup is invalid, check if we need to throw an exception.
if (!lookup.IsValid()) {
@@ -624,11 +657,11 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
break;
}
case NORMAL: {
- if (object->IsJSGlobalObject()) {
+ if (object->IsGlobalObject()) {
// The stub generated for the global object picks the value directly
// from the property cell. So the property must be directly on the
// global object.
- Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(object);
+ Handle<GlobalObject> global = Handle<GlobalObject>::cast(object);
if (lookup->holder() != *global) return;
JSGlobalPropertyCell* cell =
JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
@@ -654,6 +687,7 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
break;
}
case INTERCEPTOR: {
+ ASSERT(HasInterceptorGetter(lookup->holder()));
code = StubCache::ComputeLoadInterceptor(*name, *receiver,
lookup->holder());
break;
@@ -665,7 +699,7 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
// If we're unable to compute the stub (not enough memory left), we
// simply avoid updating the caches.
- if (code->IsFailure()) return;
+ if (code == NULL || code->IsFailure()) return;
// Patch the call site depending on the state of the cache.
if (state == UNINITIALIZED || state == PREMONOMORPHIC ||
@@ -745,7 +779,7 @@ Object* KeyedLoadIC::Load(State state,
// Named lookup.
LookupResult lookup;
- object->Lookup(*name, &lookup);
+ LookupForRead(*object, *name, &lookup);
// If lookup is invalid, check if we need to throw an exception.
if (!lookup.IsValid()) {
@@ -839,6 +873,7 @@ void KeyedLoadIC::UpdateCaches(LookupResult* lookup, State state,
break;
}
case INTERCEPTOR: {
+ ASSERT(HasInterceptorGetter(lookup->holder()));
code = StubCache::ComputeKeyedLoadInterceptor(*name, *receiver,
lookup->holder());
break;
@@ -854,7 +889,7 @@ void KeyedLoadIC::UpdateCaches(LookupResult* lookup, State state,
// If we're unable to compute the stub (not enough memory left), we
// simply avoid updating the caches.
- if (code->IsFailure()) return;
+ if (code == NULL || code->IsFailure()) return;
// Patch the call site depending on the state of the cache. Make
// sure to always rewrite from monomorphic to megamorphic.
@@ -885,9 +920,9 @@ static bool StoreICableLookup(LookupResult* lookup) {
}
-static bool LookupForStoreIC(JSObject* object,
- String* name,
- LookupResult* lookup) {
+static bool LookupForWrite(JSObject* object,
+ String* name,
+ LookupResult* lookup) {
object->LocalLookup(name, lookup);
if (!StoreICableLookup(lookup)) {
return false;
@@ -930,7 +965,7 @@ Object* StoreIC::Store(State state,
// Lookup the property locally in the receiver.
if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
LookupResult lookup;
- if (LookupForStoreIC(*receiver, *name, &lookup)) {
+ if (LookupForWrite(*receiver, *name, &lookup)) {
UpdateCaches(&lookup, state, receiver, name, value);
}
}
@@ -976,13 +1011,13 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
break;
}
case NORMAL: {
- if (!receiver->IsJSGlobalObject()) {
+ if (!receiver->IsGlobalObject()) {
return;
}
// The stub generated for the global object picks the value directly
// from the property cell. So the property must be directly on the
// global object.
- Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(receiver);
+ Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
JSGlobalPropertyCell* cell =
JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
code = StubCache::ComputeStoreGlobal(*name, *global, cell);
@@ -996,6 +1031,7 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
break;
}
case INTERCEPTOR: {
+ ASSERT(!receiver->GetNamedInterceptor()->setter()->IsUndefined());
code = StubCache::ComputeStoreInterceptor(*name, *receiver);
break;
}
@@ -1005,7 +1041,7 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
// If we're unable to compute the stub (not enough memory left), we
// simply avoid updating the caches.
- if (code->IsFailure()) return;
+ if (code == NULL || code->IsFailure()) return;
// Patch the call site depending on the state of the cache.
if (state == UNINITIALIZED || state == MONOMORPHIC_PROTOTYPE_FAILURE) {
@@ -1127,7 +1163,7 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
// If we're unable to compute the stub (not enough memory left), we
// simply avoid updating the caches.
- if (code->IsFailure()) return;
+ if (code == NULL || code->IsFailure()) return;
// Patch the call site depending on the state of the cache. Make
// sure to always rewrite from monomorphic to megamorphic.
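A recurring comment in the NORMAL cases above is that the generated stub "picks the value directly from the property cell", which is why the property must live directly on the global object. The point is that a normalized global object stores a cell in its dictionary rather than the value itself, so compiled code can embed the cell's address and stay valid across stores. A conceptual sketch, using stand-in types rather than the real JSGlobalPropertyCell:

    // The dictionary slot of a global object holds a cell, not the value, so
    // a compiled stub can cache the cell pointer and reread it on every hit.
    struct CellSketch {
      const void* value;
    };

    struct GlobalLoadStubSketch {
      const CellSketch* cell;  // baked into the stub when it is compiled

      const void* Load() const { return cell->value; }  // no lookup on a hit
    };

    // A store mutates the cell in place; stubs holding the cell stay valid.
    inline void StoreGlobal(CellSketch* cell, const void* new_value) {
      cell->value = new_value;
    }

This is also why the update paths above bail out unless lookup->holder() is the global object itself.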
diff --git a/V8Binding/v8/src/interpreter-irregexp.cc b/V8Binding/v8/src/interpreter-irregexp.cc
index 355fae4..0a8ae8c 100644
--- a/V8Binding/v8/src/interpreter-irregexp.cc
+++ b/V8Binding/v8/src/interpreter-irregexp.cc
@@ -115,17 +115,17 @@ static void TraceInterpreter(const byte* code_base,
}
-#define BYTECODE(name) \
- case BC_##name: \
- TraceInterpreter(code_base, \
- pc, \
- backtrack_sp - backtrack_stack, \
- current, \
- current_char, \
- BC_##name##_LENGTH, \
+#define BYTECODE(name) \
+ case BC_##name: \
+ TraceInterpreter(code_base, \
+ pc, \
+ backtrack_sp - backtrack_stack_base, \
+ current, \
+ current_char, \
+ BC_##name##_LENGTH, \
#name);
#else
-#define BYTECODE(name) \
+#define BYTECODE(name) \
case BC_##name:
#endif
@@ -142,6 +142,49 @@ static int32_t Load16Aligned(const byte* pc) {
}
+// A simple abstraction over the backtracking stack used by the interpreter.
+// This backtracking stack does not grow automatically, but it ensures that
+// the memory held by the stack is released or remembered in a cache when
+// matching terminates.
+class BacktrackStack {
+ public:
+ explicit BacktrackStack() {
+ if (cache_ != NULL) {
+ // If the cache is not empty reuse the previously allocated stack.
+ data_ = cache_;
+ cache_ = NULL;
+ } else {
+ // Cache was empty. Allocate a new backtrack stack.
+ data_ = NewArray<int>(kBacktrackStackSize);
+ }
+ }
+
+ ~BacktrackStack() {
+ if (cache_ == NULL) {
+ // The cache is empty. Keep this backtrack stack around.
+ cache_ = data_;
+ } else {
+ // A backtrack stack was already cached, just release this one.
+ DeleteArray(data_);
+ }
+ }
+
+ int* data() const { return data_; }
+
+ int max_size() const { return kBacktrackStackSize; }
+
+ private:
+ static const int kBacktrackStackSize = 10000;
+
+ int* data_;
+ static int* cache_;
+
+ DISALLOW_COPY_AND_ASSIGN(BacktrackStack);
+};
+
+int* BacktrackStack::cache_ = NULL;
+
+
template <typename Char>
static bool RawMatch(const byte* code_base,
Vector<const Char> subject,
@@ -149,10 +192,13 @@ static bool RawMatch(const byte* code_base,
int current,
uint32_t current_char) {
const byte* pc = code_base;
- static const int kBacktrackStackSize = 10000;
- int backtrack_stack[kBacktrackStackSize];
- int backtrack_stack_space = kBacktrackStackSize;
- int* backtrack_sp = backtrack_stack;
+  // BacktrackStack ensures that the memory allocated for the backtracking
+  // stack is cached for reuse if no stack is currently cached, and returned
+  // to the system otherwise.
+ BacktrackStack backtrack_stack;
+ int* backtrack_stack_base = backtrack_stack.data();
+ int* backtrack_sp = backtrack_stack_base;
+ int backtrack_stack_space = backtrack_stack.max_size();
#ifdef DEBUG
if (FLAG_trace_regexp_bytecodes) {
PrintF("\n\nStart bytecode interpreter\n\n");
@@ -202,13 +248,13 @@ static bool RawMatch(const byte* code_base,
pc += BC_SET_CP_TO_REGISTER_LENGTH;
break;
BYTECODE(SET_REGISTER_TO_SP)
- registers[insn >> BYTECODE_SHIFT] = backtrack_sp - backtrack_stack;
+ registers[insn >> BYTECODE_SHIFT] = backtrack_sp - backtrack_stack_base;
pc += BC_SET_REGISTER_TO_SP_LENGTH;
break;
BYTECODE(SET_SP_TO_REGISTER)
- backtrack_sp = backtrack_stack + registers[insn >> BYTECODE_SHIFT];
- backtrack_stack_space = kBacktrackStackSize -
- (backtrack_sp - backtrack_stack);
+ backtrack_sp = backtrack_stack_base + registers[insn >> BYTECODE_SHIFT];
+ backtrack_stack_space = backtrack_stack.max_size() -
+ (backtrack_sp - backtrack_stack_base);
pc += BC_SET_SP_TO_REGISTER_LENGTH;
break;
BYTECODE(POP_CP)
diff --git a/V8Binding/v8/src/log.cc b/V8Binding/v8/src/log.cc
index 0dba08d..2ca89dd 100644
--- a/V8Binding/v8/src/log.cc
+++ b/V8Binding/v8/src/log.cc
@@ -176,8 +176,11 @@ class Ticker: public Sampler {
~Ticker() { if (IsActive()) Stop(); }
+ void SampleStack(TickSample* sample) {
+ StackTracer::Trace(sample);
+ }
+
void Tick(TickSample* sample) {
- if (IsProfiling()) StackTracer::Trace(sample);
if (profiler_) profiler_->Insert(sample);
if (window_) window_->AddState(sample->state);
}
diff --git a/V8Binding/v8/src/math.js b/V8Binding/v8/src/math.js
index 1f5ce87..d12927e 100644
--- a/V8Binding/v8/src/math.js
+++ b/V8Binding/v8/src/math.js
@@ -95,7 +95,9 @@ function MathExp(x) {
// ECMA 262 - 15.8.2.9
function MathFloor(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
- if (0 < x && x <= 0x7FFFFFFF) {
+ // It's more common to call this with a positive number that's out
+ // of range than negative numbers; check the upper bound first.
+ if (x <= 0x7FFFFFFF && x > 0) {
// Numbers in the range [0, 2^31) can be floored by converting
// them to an unsigned 32-bit value using the shift operator.
// We avoid doing so for -0, because the result of Math.floor(-0)
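The reordered guard above also has a straightforward reading outside JavaScript: for a strictly positive value no larger than 0x7FFFFFFF, truncation to a 32-bit unsigned integer is exactly floor, and testing the upper bound first lets the more common out-of-range positive inputs fail after a single comparison. A small C++ rendering of the same fast path, offered only as an illustration of the arithmetic (std::floor stands in for the generic slow path):

    #include <cmath>
    #include <cstdint>

    // Fast floor for doubles in (0, 0x7FFFFFFF]: truncation toward zero
    // equals floor for non-negative values, and the range guard keeps the
    // conversion to uint32_t well defined. Everything else, including -0,
    // NaN, and negative inputs, takes the ordinary floor path.
    double FastFloor(double x) {
      if (x <= 0x7FFFFFFF && x > 0) {
        return static_cast<double>(static_cast<uint32_t>(x));
      }
      return std::floor(x);
    }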
diff --git a/V8Binding/v8/src/messages.js b/V8Binding/v8/src/messages.js
index 882fed5..6157874 100644
--- a/V8Binding/v8/src/messages.js
+++ b/V8Binding/v8/src/messages.js
@@ -609,7 +609,6 @@ CallSite.prototype.getTypeName = function () {
CallSite.prototype.isToplevel = function () {
if (this.receiver == null)
return true;
- var className = $Object.prototype.toString.call(this.receiver);
return IS_GLOBAL(this.receiver);
};
@@ -626,13 +625,34 @@ CallSite.prototype.getEvalOrigin = function () {
script.eval_from_position);
};
+CallSite.prototype.getFunction = function () {
+ return this.fun;
+};
+
CallSite.prototype.getFunctionName = function () {
// See if the function knows its own name
var name = this.fun.name;
- if (name)
+ if (name) {
return name;
+ } else {
+ return %FunctionGetInferredName(this.fun);
+ }
+ // Maybe this is an evaluation?
+ var script = %FunctionGetScript(this.fun);
+ if (script && script.compilation_type == 1)
+ return "eval";
+ return null;
+};
+
+CallSite.prototype.getMethodName = function () {
// See if we can find a unique property on the receiver that holds
// this function.
+ var ownName = this.fun.name;
+ if (ownName && this.receiver && this.receiver[ownName] === this.fun)
+ // To handle DontEnum properties we guess that the method has
+ // the same name as the function.
+ return ownName;
+ var name = null;
for (var prop in this.receiver) {
if (this.receiver[prop] === this.fun) {
// If we find more than one match bail out to avoid confusion
@@ -643,10 +663,6 @@ CallSite.prototype.getFunctionName = function () {
}
if (name)
return name;
- // Maybe this is an evaluation?
- var script = %FunctionGetScript(this.fun);
- if (script && script.compilation_type == 1)
- return "eval";
return null;
};
@@ -717,18 +733,31 @@ function FormatSourcePosition(frame) {
fileLocation = "unknown source";
}
var line = "";
- var functionName = frame.getFunctionName();
- if (functionName) {
- if (frame.isToplevel()) {
+ var functionName = frame.getFunction().name;
+ var methodName = frame.getMethodName();
+ var addPrefix = true;
+ var isConstructor = frame.isConstructor();
+ var isMethodCall = !(frame.isToplevel() || isConstructor);
+ if (isMethodCall) {
+ line += frame.getTypeName() + ".";
+ if (functionName) {
line += functionName;
- } else if (frame.isConstructor()) {
- line += "new " + functionName;
+ if (methodName && (methodName != functionName)) {
+ line += " [as " + methodName + "]";
+ }
} else {
- line += frame.getTypeName() + "." + functionName;
+ line += methodName || "<anonymous>";
}
- line += " (" + fileLocation + ")";
+ } else if (isConstructor) {
+ line += "new " + (functionName || "<anonymous>");
+ } else if (functionName) {
+ line += functionName;
} else {
line += fileLocation;
+ addPrefix = false;
+ }
+ if (addPrefix) {
+ line += " (" + fileLocation + ")";
}
return line;
}
@@ -812,8 +841,12 @@ function DefineError(f) {
} else if (!IS_UNDEFINED(m)) {
this.message = ToString(m);
}
- if ($Error.captureStackTraces) {
- var raw_stack = %CollectStackTrace(f);
+ var stackTraceLimit = $Error.stackTraceLimit;
+ if (stackTraceLimit) {
+ // Cap the limit to avoid extremely big traces
+ if (stackTraceLimit < 0 || stackTraceLimit > 10000)
+ stackTraceLimit = 10000;
+ var raw_stack = %CollectStackTrace(f, stackTraceLimit);
DefineOneShotAccessor(this, 'stack', function (obj) {
return FormatRawStackTrace(obj, raw_stack);
});
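The FormatSourcePosition rewrite earlier in this file chooses between four frame shapes: a method call ("Type.name [as method] (loc)"), a constructor call ("new name (loc)"), a named top-level call ("name (loc)"), and a bare location. Purely to make those output forms explicit, here is the same decision tree in C++, with stand-in fields instead of the CallSite accessors:

    #include <string>

    // Stand-ins for the data a CallSite-like frame exposes.
    struct FrameSketch {
      std::string type_name;      // receiver's type name
      std::string function_name;  // the function's own name, may be empty
      std::string method_name;    // receiver property holding it, may be empty
      std::string file_location;  // "file:line:column" or "unknown source"
      bool is_toplevel = false;
      bool is_constructor = false;
    };

    std::string FormatFrame(const FrameSketch& f) {
      std::string line;
      bool add_location = true;
      if (!f.is_toplevel && !f.is_constructor) {   // method call
        line += f.type_name + ".";
        if (!f.function_name.empty()) {
          line += f.function_name;
          if (!f.method_name.empty() && f.method_name != f.function_name) {
            line += " [as " + f.method_name + "]";
          }
        } else {
          line += f.method_name.empty() ? "<anonymous>" : f.method_name;
        }
      } else if (f.is_constructor) {
        line += "new " + (f.function_name.empty() ? std::string("<anonymous>")
                                                  : f.function_name);
      } else if (!f.function_name.empty()) {
        line += f.function_name;
      } else {
        line += f.file_location;
        add_location = false;                      // location is the whole line
      }
      if (add_location) line += " (" + f.file_location + ")";
      return line;
    }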
diff --git a/V8Binding/v8/src/objects-debug.cc b/V8Binding/v8/src/objects-debug.cc
index eec0be7..1f199e4 100644
--- a/V8Binding/v8/src/objects-debug.cc
+++ b/V8Binding/v8/src/objects-debug.cc
@@ -745,25 +745,6 @@ void Proxy::ProxyVerify() {
}
-void Dictionary::Print() {
- int capacity = Capacity();
- for (int i = 0; i < capacity; i++) {
- Object* k = KeyAt(i);
- if (IsKey(k)) {
- PrintF(" ");
- if (k->IsString()) {
- String::cast(k)->StringPrint();
- } else {
- k->ShortPrint();
- }
- PrintF(": ");
- ValueAt(i)->ShortPrint();
- PrintF("\n");
- }
- }
-}
-
-
void AccessorInfo::AccessorInfoVerify() {
CHECK(IsAccessorInfo());
VerifyPointer(getter());
@@ -1019,7 +1000,7 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
info->number_of_fast_used_fields_ += map()->NextFreePropertyIndex();
info->number_of_fast_unused_fields_ += map()->unused_property_fields();
} else {
- Dictionary* dict = property_dictionary();
+ StringDictionary* dict = property_dictionary();
info->number_of_slow_used_properties_ += dict->NumberOfElements();
info->number_of_slow_unused_properties_ +=
dict->Capacity() - dict->NumberOfElements();
@@ -1036,7 +1017,7 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
info->number_of_fast_used_elements_ += len - holes;
info->number_of_fast_unused_elements_ += holes;
} else {
- Dictionary* dict = element_dictionary();
+ NumberDictionary* dict = element_dictionary();
info->number_of_slow_used_elements_ += dict->NumberOfElements();
info->number_of_slow_unused_elements_ +=
dict->Capacity() - dict->NumberOfElements();
diff --git a/V8Binding/v8/src/objects-inl.h b/V8Binding/v8/src/objects-inl.h
index c360fd7..ff0f2e5 100644
--- a/V8Binding/v8/src/objects-inl.h
+++ b/V8Binding/v8/src/objects-inl.h
@@ -699,7 +699,7 @@ Object** HeapObject::RawField(HeapObject* obj, int byte_offset) {
int Smi::value() {
- return static_cast<int>(reinterpret_cast<intptr_t>(this) >> kSmiTagSize);
+ return static_cast<int>(reinterpret_cast<intptr_t>(this)) >> kSmiTagSize;
}
@@ -1370,15 +1370,14 @@ void DescriptorArray::Swap(int first, int second) {
}
-bool Dictionary::requires_slow_elements() {
+bool NumberDictionary::requires_slow_elements() {
Object* max_index_object = get(kMaxNumberKeyIndex);
if (!max_index_object->IsSmi()) return false;
return 0 !=
(Smi::cast(max_index_object)->value() & kRequiresSlowElementsMask);
}
-
-uint32_t Dictionary::max_number_key() {
+uint32_t NumberDictionary::max_number_key() {
ASSERT(!requires_slow_elements());
Object* max_index_object = get(kMaxNumberKeyIndex);
if (!max_index_object->IsSmi()) return 0;
@@ -1386,8 +1385,7 @@ uint32_t Dictionary::max_number_key() {
return value >> kRequiresSlowElementsTagSize;
}
-
-void Dictionary::set_requires_slow_elements() {
+void NumberDictionary::set_requires_slow_elements() {
set(kMaxNumberKeyIndex,
Smi::FromInt(kRequiresSlowElementsMask),
SKIP_WRITE_BARRIER);
@@ -1400,7 +1398,6 @@ void Dictionary::set_requires_slow_elements() {
CAST_ACCESSOR(FixedArray)
CAST_ACCESSOR(DescriptorArray)
-CAST_ACCESSOR(Dictionary)
CAST_ACCESSOR(SymbolTable)
CAST_ACCESSOR(CompilationCacheTable)
CAST_ACCESSOR(MapCache)
@@ -1439,9 +1436,9 @@ CAST_ACCESSOR(Struct)
STRUCT_LIST(MAKE_STRUCT_CAST)
#undef MAKE_STRUCT_CAST
-template <int prefix_size, int elem_size>
-HashTable<prefix_size, elem_size>* HashTable<prefix_size, elem_size>::cast(
- Object* obj) {
+
+template <typename Shape, typename Key>
+HashTable<Shape, Key>* HashTable<Shape, Key>::cast(Object* obj) {
ASSERT(obj->IsHashTable());
return reinterpret_cast<HashTable*>(obj);
}
@@ -2468,15 +2465,15 @@ bool JSObject::HasIndexedInterceptor() {
}
-Dictionary* JSObject::property_dictionary() {
+StringDictionary* JSObject::property_dictionary() {
ASSERT(!HasFastProperties());
- return Dictionary::cast(properties());
+ return StringDictionary::cast(properties());
}
-Dictionary* JSObject::element_dictionary() {
+NumberDictionary* JSObject::element_dictionary() {
ASSERT(!HasFastElements());
- return Dictionary::cast(elements());
+ return NumberDictionary::cast(elements());
}
@@ -2640,16 +2637,17 @@ void AccessorInfo::set_property_attributes(PropertyAttributes attributes) {
set_flag(Smi::FromInt(rest_value | AttributesField::encode(attributes)));
}
-void Dictionary::SetEntry(int entry,
- Object* key,
- Object* value,
- PropertyDetails details) {
+template<typename Shape, typename Key>
+void Dictionary<Shape, Key>::SetEntry(int entry,
+ Object* key,
+ Object* value,
+ PropertyDetails details) {
ASSERT(!key->IsString() || details.index() > 0);
- int index = EntryToIndex(entry);
- WriteBarrierMode mode = GetWriteBarrierMode();
- set(index, key, mode);
- set(index+1, value, mode);
- fast_set(this, index+2, details.AsSmi());
+ int index = HashTable<Shape, Key>::EntryToIndex(entry);
+ WriteBarrierMode mode = FixedArray::GetWriteBarrierMode();
+ FixedArray::set(index, key, mode);
+ FixedArray::set(index+1, value, mode);
+ FixedArray::fast_set(this, index+2, details.AsSmi());
}
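The renamings in this file all follow from one refactoring, visible in objects.cc below: HashTable's integer template parameters (prefix size, element size) are replaced by a "shape" policy type, and Dictionary<Shape, Key> is then instantiated as StringDictionary and NumberDictionary. A minimal stand-alone illustration of the policy idea; everything here is simplified, and the real shapes additionally provide HashForObject and AsObject so keys that are not yet heap objects can be compared and stored:

    #include <cstddef>
    #include <cstdint>
    #include <optional>
    #include <vector>

    // A shape bundles the key-specific operations; the generic table only
    // talks to its shape, never to a concrete key representation.
    struct IntShape {
      using Key = uint32_t;
      static bool IsMatch(Key key, Key stored) { return key == stored; }
      static uint32_t Hash(Key key) { return key * 2654435761u; }  // toy hash
    };

    template <typename Shape>
    class ToyHashTable {
     public:
      using Key = typename Shape::Key;

      explicit ToyHashTable(size_t capacity) : entries_(capacity) {}

      bool Insert(Key key) {
        for (size_t i = 0; i < entries_.size(); i++) {
          size_t slot = Probe(key, i);
          if (!entries_[slot]) { entries_[slot] = key; return true; }
        }
        return false;  // full; a real table grows via EnsureCapacity instead
      }

      std::optional<size_t> FindEntry(Key key) const {
        for (size_t i = 0; i < entries_.size(); i++) {
          size_t slot = Probe(key, i);
          if (!entries_[slot]) return std::nullopt;  // hit an empty slot
          if (Shape::IsMatch(key, *entries_[slot])) return slot;
        }
        return std::nullopt;
      }

     private:
      size_t Probe(Key key, size_t i) const {
        return (Shape::Hash(key) + i) % entries_.size();  // linear probing
      }
      std::vector<std::optional<Key>> entries_;
    };

    using ToyNumberTable = ToyHashTable<IntShape>;

The payoff, visible in the entry accessors above, is that StringDictionary and NumberDictionary share all of the table machinery while fixing the key type, entry size, and hashing policy at compile time.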
diff --git a/V8Binding/v8/src/objects.cc b/V8Binding/v8/src/objects.cc
index 2ba7d36..ee0ac2d 100644
--- a/V8Binding/v8/src/objects.cc
+++ b/V8Binding/v8/src/objects.cc
@@ -41,6 +41,7 @@
#include "disassembler.h"
#endif
+
namespace v8 {
namespace internal {
@@ -402,7 +403,7 @@ Object* JSObject::DeleteLazyProperty(LookupResult* result,
Object* JSObject::GetNormalizedProperty(LookupResult* result) {
ASSERT(!HasFastProperties());
Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
- if (IsJSGlobalObject()) {
+ if (IsGlobalObject()) {
value = JSGlobalPropertyCell::cast(value)->value();
}
ASSERT(!value->IsJSGlobalPropertyCell());
@@ -412,7 +413,7 @@ Object* JSObject::GetNormalizedProperty(LookupResult* result) {
Object* JSObject::SetNormalizedProperty(LookupResult* result, Object* value) {
ASSERT(!HasFastProperties());
- if (IsJSGlobalObject()) {
+ if (IsGlobalObject()) {
JSGlobalPropertyCell* cell =
JSGlobalPropertyCell::cast(
property_dictionary()->ValueAt(result->GetDictionaryEntry()));
@@ -428,31 +429,31 @@ Object* JSObject::SetNormalizedProperty(String* name,
Object* value,
PropertyDetails details) {
ASSERT(!HasFastProperties());
- int entry = property_dictionary()->FindStringEntry(name);
- if (entry == Dictionary::kNotFound) {
+ int entry = property_dictionary()->FindEntry(name);
+ if (entry == StringDictionary::kNotFound) {
Object* store_value = value;
- if (IsJSGlobalObject()) {
+ if (IsGlobalObject()) {
store_value = Heap::AllocateJSGlobalPropertyCell(value);
if (store_value->IsFailure()) return store_value;
}
Object* dict =
- property_dictionary()->AddStringEntry(name, store_value, details);
+ property_dictionary()->Add(name, store_value, details);
if (dict->IsFailure()) return dict;
- set_properties(Dictionary::cast(dict));
+ set_properties(StringDictionary::cast(dict));
return value;
}
// Preserve enumeration index.
details = PropertyDetails(details.attributes(),
details.type(),
property_dictionary()->DetailsAt(entry).index());
- if (IsJSGlobalObject()) {
+ if (IsGlobalObject()) {
JSGlobalPropertyCell* cell =
JSGlobalPropertyCell::cast(property_dictionary()->ValueAt(entry));
cell->set_value(value);
// Please note we have to update the property details.
property_dictionary()->DetailsAtPut(entry, details);
} else {
- property_dictionary()->SetStringEntry(entry, name, value, details);
+ property_dictionary()->SetEntry(entry, name, value, details);
}
return value;
}
@@ -460,11 +461,11 @@ Object* JSObject::SetNormalizedProperty(String* name,
Object* JSObject::DeleteNormalizedProperty(String* name, DeleteMode mode) {
ASSERT(!HasFastProperties());
- Dictionary* dictionary = property_dictionary();
- int entry = dictionary->FindStringEntry(name);
- if (entry != Dictionary::kNotFound) {
+ StringDictionary* dictionary = property_dictionary();
+ int entry = dictionary->FindEntry(name);
+ if (entry != StringDictionary::kNotFound) {
// If we have a global object set the cell to the hole.
- if (IsJSGlobalObject()) {
+ if (IsGlobalObject()) {
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.IsDontDelete() && mode != FORCE_DELETION) {
return Heap::false_value();
@@ -1340,16 +1341,20 @@ Object* JSObject::AddSlowProperty(String* name,
Object* value,
PropertyAttributes attributes) {
ASSERT(!HasFastProperties());
- Dictionary* dict = property_dictionary();
+ StringDictionary* dict = property_dictionary();
Object* store_value = value;
- if (IsJSGlobalObject()) {
+ if (IsGlobalObject()) {
// In case name is an orphaned property, reuse the cell.
- int entry = dict->FindStringEntry(name);
- if (entry != Dictionary::kNotFound) {
+ int entry = dict->FindEntry(name);
+ if (entry != StringDictionary::kNotFound) {
store_value = dict->ValueAt(entry);
JSGlobalPropertyCell::cast(store_value)->set_value(value);
- PropertyDetails details = PropertyDetails(attributes, NORMAL);
- dict->SetStringEntry(entry, name, store_value, details);
+ // Assign an enumeration index to the property and update
+ // SetNextEnumerationIndex.
+ int index = dict->NextEnumerationIndex();
+ PropertyDetails details = PropertyDetails(attributes, NORMAL, index);
+ dict->SetNextEnumerationIndex(index + 1);
+ dict->SetEntry(entry, name, store_value, details);
return value;
}
store_value = Heap::AllocateJSGlobalPropertyCell(value);
@@ -1357,9 +1362,9 @@ Object* JSObject::AddSlowProperty(String* name,
JSGlobalPropertyCell::cast(store_value)->set_value(value);
}
PropertyDetails details = PropertyDetails(attributes, NORMAL);
- Object* result = dict->AddStringEntry(name, store_value, details);
+ Object* result = dict->Add(name, store_value, details);
if (result->IsFailure()) return result;
- if (dict != result) set_properties(Dictionary::cast(result));
+ if (dict != result) set_properties(StringDictionary::cast(result));
return value;
}
@@ -1405,8 +1410,8 @@ Object* JSObject::SetPropertyPostInterceptor(String* name,
Object* JSObject::ReplaceSlowProperty(String* name,
Object* value,
PropertyAttributes attributes) {
- Dictionary* dictionary = property_dictionary();
- int old_index = dictionary->FindStringEntry(name);
+ StringDictionary* dictionary = property_dictionary();
+ int old_index = dictionary->FindEntry(name);
int new_enumeration_index = 0; // 0 means "Use the next available index."
if (old_index != -1) {
// All calls to ReplaceSlowProperty have had all transitions removed.
@@ -1646,9 +1651,9 @@ Object* JSObject::LookupCallbackSetterInPrototypes(uint32_t index) {
pt != Heap::null_value();
pt = pt->GetPrototype()) {
if (JSObject::cast(pt)->HasFastElements()) continue;
- Dictionary* dictionary = JSObject::cast(pt)->element_dictionary();
- int entry = dictionary->FindNumberEntry(index);
- if (entry != Dictionary::kNotFound) {
+ NumberDictionary* dictionary = JSObject::cast(pt)->element_dictionary();
+ int entry = dictionary->FindEntry(index);
+ if (entry != StringDictionary::kNotFound) {
Object* element = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS) {
@@ -1698,12 +1703,12 @@ void JSObject::LocalLookupRealNamedProperty(String* name,
return;
}
} else {
- int entry = property_dictionary()->FindStringEntry(name);
- if (entry != Dictionary::kNotFound) {
+ int entry = property_dictionary()->FindEntry(name);
+ if (entry != StringDictionary::kNotFound) {
// Make sure to disallow caching for uninitialized constants
// found in the dictionary-mode objects.
Object* value = property_dictionary()->ValueAt(entry);
- if (IsJSGlobalObject()) {
+ if (IsGlobalObject()) {
PropertyDetails d = property_dictionary()->DetailsAt(entry);
if (d.IsDeleted()) {
result->NotFound();
@@ -2101,9 +2106,9 @@ Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode) {
// Allocate new content
Object* obj =
- Dictionary::Allocate(map()->NumberOfDescribedProperties() * 2 + 4);
+ StringDictionary::Allocate(map()->NumberOfDescribedProperties() * 2 + 4);
if (obj->IsFailure()) return obj;
- Dictionary* dictionary = Dictionary::cast(obj);
+ StringDictionary* dictionary = StringDictionary::cast(obj);
for (DescriptorReader r(map()->instance_descriptors());
!r.eos();
@@ -2114,39 +2119,39 @@ Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode) {
PropertyDetails d =
PropertyDetails(details.attributes(), NORMAL, details.index());
Object* value = r.GetConstantFunction();
- if (IsJSGlobalObject()) {
+ if (IsGlobalObject()) {
value = Heap::AllocateJSGlobalPropertyCell(value);
if (value->IsFailure()) return value;
}
- Object* result = dictionary->AddStringEntry(r.GetKey(), value, d);
+ Object* result = dictionary->Add(r.GetKey(), value, d);
if (result->IsFailure()) return result;
- dictionary = Dictionary::cast(result);
+ dictionary = StringDictionary::cast(result);
break;
}
case FIELD: {
PropertyDetails d =
PropertyDetails(details.attributes(), NORMAL, details.index());
Object* value = FastPropertyAt(r.GetFieldIndex());
- if (IsJSGlobalObject()) {
+ if (IsGlobalObject()) {
value = Heap::AllocateJSGlobalPropertyCell(value);
if (value->IsFailure()) return value;
}
- Object* result = dictionary->AddStringEntry(r.GetKey(), value, d);
+ Object* result = dictionary->Add(r.GetKey(), value, d);
if (result->IsFailure()) return result;
- dictionary = Dictionary::cast(result);
+ dictionary = StringDictionary::cast(result);
break;
}
case CALLBACKS: {
PropertyDetails d =
PropertyDetails(details.attributes(), CALLBACKS, details.index());
Object* value = r.GetCallbacksObject();
- if (IsJSGlobalObject()) {
+ if (IsGlobalObject()) {
value = Heap::AllocateJSGlobalPropertyCell(value);
if (value->IsFailure()) return value;
}
- Object* result = dictionary->AddStringEntry(r.GetKey(), value, d);
+ Object* result = dictionary->Add(r.GetKey(), value, d);
if (result->IsFailure()) return result;
- dictionary = Dictionary::cast(result);
+ dictionary = StringDictionary::cast(result);
break;
}
case MAP_TRANSITION:
@@ -2203,9 +2208,9 @@ Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode) {
Object* JSObject::TransformToFastProperties(int unused_property_fields) {
if (HasFastProperties()) return this;
- ASSERT(!IsJSGlobalObject());
+ ASSERT(!IsGlobalObject());
return property_dictionary()->
- TransformPropertiesToFastFor(this, unused_property_fields);
+ TransformPropertiesToFastFor(this, unused_property_fields);
}
@@ -2219,9 +2224,9 @@ Object* JSObject::NormalizeElements() {
int length = IsJSArray() ?
Smi::cast(JSArray::cast(this)->length())->value() :
array->length();
- Object* obj = Dictionary::Allocate(length);
+ Object* obj = NumberDictionary::Allocate(length);
if (obj->IsFailure()) return obj;
- Dictionary* dictionary = Dictionary::cast(obj);
+ NumberDictionary* dictionary = NumberDictionary::cast(obj);
// Copy entries.
for (int i = 0; i < length; i++) {
Object* value = array->get(i);
@@ -2229,7 +2234,7 @@ Object* JSObject::NormalizeElements() {
PropertyDetails details = PropertyDetails(NONE, NORMAL);
Object* result = dictionary->AddNumberEntry(i, array->get(i), details);
if (result->IsFailure()) return result;
- dictionary = Dictionary::cast(result);
+ dictionary = NumberDictionary::cast(result);
}
}
// Switch to using the dictionary as the backing storage for elements.
@@ -2306,9 +2311,9 @@ Object* JSObject::DeleteElementPostInterceptor(uint32_t index,
return Heap::true_value();
}
ASSERT(!HasFastElements());
- Dictionary* dictionary = element_dictionary();
- int entry = dictionary->FindNumberEntry(index);
- if (entry != Dictionary::kNotFound) {
+ NumberDictionary* dictionary = element_dictionary();
+ int entry = dictionary->FindEntry(index);
+ if (entry != NumberDictionary::kNotFound) {
return dictionary->DeleteProperty(entry, mode);
}
return Heap::true_value();
@@ -2380,9 +2385,9 @@ Object* JSObject::DeleteElement(uint32_t index, DeleteMode mode) {
}
return Heap::true_value();
} else {
- Dictionary* dictionary = element_dictionary();
- int entry = dictionary->FindNumberEntry(index);
- if (entry != Dictionary::kNotFound) {
+ NumberDictionary* dictionary = element_dictionary();
+ int entry = dictionary->FindEntry(index);
+ if (entry != NumberDictionary::kNotFound) {
return dictionary->DeleteProperty(entry, mode);
}
}
@@ -2687,9 +2692,9 @@ Object* JSObject::DefineGetterSetter(String* name,
if (is_element) {
// Lookup the index.
if (!HasFastElements()) {
- Dictionary* dictionary = element_dictionary();
- int entry = dictionary->FindNumberEntry(index);
- if (entry != Dictionary::kNotFound) {
+ NumberDictionary* dictionary = element_dictionary();
+ int entry = dictionary->FindEntry(index);
+ if (entry != NumberDictionary::kNotFound) {
Object* result = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.IsReadOnly()) return Heap::undefined_value();
@@ -2725,19 +2730,27 @@ Object* JSObject::DefineGetterSetter(String* name,
// Update the dictionary with the new CALLBACKS property.
Object* dict =
- element_dictionary()->SetOrAddNumberEntry(index, structure, details);
+ element_dictionary()->Set(index, structure, details);
if (dict->IsFailure()) return dict;
// If name is an index we need to stay in slow case.
- Dictionary* elements = Dictionary::cast(dict);
+ NumberDictionary* elements = NumberDictionary::cast(dict);
elements->set_requires_slow_elements();
// Set the potential new dictionary on the object.
- set_elements(Dictionary::cast(dict));
+ set_elements(NumberDictionary::cast(dict));
} else {
// Normalize object to make this operation simple.
Object* ok = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES);
if (ok->IsFailure()) return ok;
+ // For the global object allocate a new map to invalidate the global inline
+ // caches which have a global property cell reference directly in the code.
+ if (IsGlobalObject()) {
+ Object* new_map = map()->CopyDropDescriptors();
+ if (new_map->IsFailure()) return new_map;
+ set_map(Map::cast(new_map));
+ }
+
// Update the dictionary with the new CALLBACKS property.
return SetNormalizedProperty(name, structure, details);
}
@@ -2791,9 +2804,9 @@ Object* JSObject::LookupAccessor(String* name, bool is_getter) {
obj = JSObject::cast(obj)->GetPrototype()) {
JSObject* jsObject = JSObject::cast(obj);
if (!jsObject->HasFastElements()) {
- Dictionary* dictionary = jsObject->element_dictionary();
- int entry = dictionary->FindNumberEntry(index);
- if (entry != Dictionary::kNotFound) {
+ NumberDictionary* dictionary = jsObject->element_dictionary();
+ int entry = dictionary->FindEntry(index);
+ if (entry != NumberDictionary::kNotFound) {
Object* element = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS) {
@@ -2996,7 +3009,7 @@ Object* FixedArray::AddKeysFromJSArray(JSArray* array) {
return UnionOfKeys(array->elements());
}
ASSERT(!array->HasFastElements());
- Dictionary* dict = array->element_dictionary();
+ NumberDictionary* dict = array->element_dictionary();
int size = dict->NumberOfElements();
// Allocate a temporary fixed array.
@@ -5067,7 +5080,7 @@ void JSObject::SetFastElements(FixedArray* elems) {
elems->set(i, old_elements->get(i), mode);
}
} else {
- Dictionary* dictionary = Dictionary::cast(elements());
+ NumberDictionary* dictionary = NumberDictionary::cast(elements());
for (int i = 0; i < dictionary->Capacity(); i++) {
Object* key = dictionary->KeyAt(i);
if (key->IsNumber()) {
@@ -5232,7 +5245,8 @@ bool JSObject::HasElementPostInterceptor(JSObject* receiver, uint32_t index) {
return true;
}
} else {
- if (element_dictionary()->FindNumberEntry(index) != Dictionary::kNotFound) {
+ if (element_dictionary()->FindEntry(index)
+ != NumberDictionary::kNotFound) {
return true;
}
}
@@ -5309,8 +5323,8 @@ bool JSObject::HasLocalElement(uint32_t index) {
return (index < length) &&
!FixedArray::cast(elements())->get(index)->IsTheHole();
} else {
- return element_dictionary()->FindNumberEntry(index)
- != Dictionary::kNotFound;
+ return element_dictionary()->FindEntry(index)
+ != NumberDictionary::kNotFound;
}
}
@@ -5336,7 +5350,8 @@ bool JSObject::HasElementWithReceiver(JSObject* receiver, uint32_t index) {
if ((index < length) &&
!FixedArray::cast(elements())->get(index)->IsTheHole()) return true;
} else {
- if (element_dictionary()->FindNumberEntry(index) != Dictionary::kNotFound) {
+ if (element_dictionary()->FindEntry(index)
+ != NumberDictionary::kNotFound) {
return true;
}
}
@@ -5471,10 +5486,10 @@ Object* JSObject::SetElementWithoutInterceptor(uint32_t index, Object* value) {
// Insert element in the dictionary.
FixedArray* elms = FixedArray::cast(elements());
- Dictionary* dictionary = Dictionary::cast(elms);
+ NumberDictionary* dictionary = NumberDictionary::cast(elms);
- int entry = dictionary->FindNumberEntry(index);
- if (entry != Dictionary::kNotFound) {
+ int entry = dictionary->FindEntry(index);
+ if (entry != NumberDictionary::kNotFound) {
Object* element = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS) {
@@ -5523,7 +5538,7 @@ Object* JSObject::SetElementWithoutInterceptor(uint32_t index, Object* value) {
CHECK(Array::IndexFromObject(JSArray::cast(this)->length(), &new_length));
JSArray::cast(this)->set_length(Smi::FromInt(new_length));
} else {
- new_length = Dictionary::cast(elements())->max_number_key() + 1;
+ new_length = NumberDictionary::cast(elements())->max_number_key() + 1;
}
Object* obj = Heap::AllocateFixedArrayWithHoles(new_length);
if (obj->IsFailure()) return obj;
@@ -5566,9 +5581,9 @@ Object* JSObject::GetElementPostInterceptor(JSObject* receiver,
if (!value->IsTheHole()) return value;
}
} else {
- Dictionary* dictionary = element_dictionary();
- int entry = dictionary->FindNumberEntry(index);
- if (entry != Dictionary::kNotFound) {
+ NumberDictionary* dictionary = element_dictionary();
+ int entry = dictionary->FindEntry(index);
+ if (entry != NumberDictionary::kNotFound) {
Object* element = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS) {
@@ -5650,9 +5665,9 @@ Object* JSObject::GetElementWithReceiver(JSObject* receiver, uint32_t index) {
if (!value->IsTheHole()) return value;
}
} else {
- Dictionary* dictionary = element_dictionary();
- int entry = dictionary->FindNumberEntry(index);
- if (entry != Dictionary::kNotFound) {
+ NumberDictionary* dictionary = element_dictionary();
+ int entry = dictionary->FindEntry(index);
+ if (entry != NumberDictionary::kNotFound) {
Object* element = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS) {
@@ -5688,7 +5703,7 @@ bool JSObject::HasDenseElements() {
if (!elms->get(i)->IsTheHole()) number_of_elements++;
}
} else {
- Dictionary* dictionary = Dictionary::cast(elements());
+ NumberDictionary* dictionary = NumberDictionary::cast(elements());
capacity = dictionary->Capacity();
number_of_elements = dictionary->NumberOfElements();
}
@@ -5710,7 +5725,7 @@ bool JSObject::ShouldConvertToSlowElements(int new_capacity) {
bool JSObject::ShouldConvertToFastElements() {
ASSERT(!HasFastElements());
- Dictionary* dictionary = Dictionary::cast(elements());
+ NumberDictionary* dictionary = NumberDictionary::cast(elements());
// If the elements are sparse, we should not go back to fast case.
if (!HasDenseElements()) return false;
// If an element has been added at a very high index in the elements
@@ -5729,17 +5744,47 @@ bool JSObject::ShouldConvertToFastElements() {
length = dictionary->max_number_key();
}
return static_cast<uint32_t>(dictionary->Capacity()) >=
- (length / (2 * Dictionary::kElementSize));
+ (length / (2 * NumberDictionary::kEntrySize));
+}
+
+
+// Certain compilers request function template instantiation when they
+// see the definition of the other template functions in the
+// class. This requires us to have the template functions put
+// together, so even though this function belongs in objects-debug.cc,
+// we keep it here instead to satisfy certain compilers.
+#ifdef DEBUG
+template<typename Shape, typename Key>
+void Dictionary<Shape, Key>::Print() {
+ int capacity = HashTable<Shape, Key>::Capacity();
+ for (int i = 0; i < capacity; i++) {
+ Object* k = HashTable<Shape, Key>::KeyAt(i);
+ if (HashTable<Shape, Key>::IsKey(k)) {
+ PrintF(" ");
+ if (k->IsString()) {
+ String::cast(k)->StringPrint();
+ } else {
+ k->ShortPrint();
+ }
+ PrintF(": ");
+ ValueAt(i)->ShortPrint();
+ PrintF("\n");
+ }
+ }
}
+#endif
-void Dictionary::CopyValuesTo(FixedArray* elements) {
+template<typename Shape, typename Key>
+void Dictionary<Shape, Key>::CopyValuesTo(FixedArray* elements) {
int pos = 0;
- int capacity = Capacity();
+ int capacity = HashTable<Shape, Key>::Capacity();
WriteBarrierMode mode = elements->GetWriteBarrierMode();
for (int i = 0; i < capacity; i++) {
- Object* k = KeyAt(i);
- if (IsKey(k)) elements->set(pos++, ValueAt(i), mode);
+ Object* k = Dictionary<Shape, Key>::KeyAt(i);
+ if (Dictionary<Shape, Key>::IsKey(k)) {
+ elements->set(pos++, ValueAt(i), mode);
+ }
}
ASSERT(pos == elements->length());
}
@@ -5945,8 +5990,8 @@ bool JSObject::HasRealElementProperty(uint32_t index) {
return (index < length) &&
!FixedArray::cast(elements())->get(index)->IsTheHole();
}
- return element_dictionary()->FindNumberEntry(index)
- != Dictionary::kNotFound;
+ return element_dictionary()->FindEntry(index)
+ != NumberDictionary::kNotFound;
}
@@ -6179,38 +6224,49 @@ int JSObject::GetEnumElementKeys(FixedArray* storage) {
}
-// The NumberKey uses carries the uint32_t as key.
-// This avoids allocation in HasProperty.
-class NumberKey : public HashTableKey {
- public:
- explicit NumberKey(uint32_t number) : number_(number) { }
+bool NumberDictionaryShape::IsMatch(uint32_t key, Object* other) {
+ ASSERT(other->IsNumber());
+ return key == static_cast<uint32_t>(other->Number());
+}
- bool IsMatch(Object* number) {
- return number_ == ToUint32(number);
- }
- uint32_t Hash() { return ComputeIntegerHash(number_); }
+uint32_t NumberDictionaryShape::Hash(uint32_t key) {
+ return ComputeIntegerHash(key);
+}
- HashFunction GetHashFunction() { return NumberHash; }
- Object* GetObject() {
- return Heap::NumberFromDouble(number_);
- }
+uint32_t NumberDictionaryShape::HashForObject(uint32_t key, Object* other) {
+ ASSERT(other->IsNumber());
+ return ComputeIntegerHash(static_cast<uint32_t>(other->Number()));
+}
- bool IsStringKey() { return false; }
- private:
- static uint32_t NumberHash(Object* obj) {
- return ComputeIntegerHash(ToUint32(obj));
- }
+Object* NumberDictionaryShape::AsObject(uint32_t key) {
+ return Heap::NumberFromUint32(key);
+}
- static uint32_t ToUint32(Object* obj) {
- ASSERT(obj->IsNumber());
- return static_cast<uint32_t>(obj->Number());
- }
- uint32_t number_;
-};
+bool StringDictionaryShape::IsMatch(String* key, Object* other) {
+ // We know that all entries in a hash table had their hash keys created.
+ // Use that knowledge to have fast failure.
+ if (key->Hash() != String::cast(other)->Hash()) return false;
+ return key->Equals(String::cast(other));
+}
+
+
+uint32_t StringDictionaryShape::Hash(String* key) {
+ return key->Hash();
+}
+
+
+uint32_t StringDictionaryShape::HashForObject(String* key, Object* other) {
+ return String::cast(other)->Hash();
+}
+
+
+Object* StringDictionaryShape::AsObject(String* key) {
+ return key;
+}
// StringKey simply carries a string object as key.
@@ -6218,12 +6274,12 @@ class StringKey : public HashTableKey {
public:
explicit StringKey(String* string) :
string_(string),
- hash_(StringHash(string)) { }
+ hash_(HashForObject(string)) { }
bool IsMatch(Object* string) {
// We know that all entries in a hash table had their hash keys created.
// Use that knowledge to have fast failure.
- if (hash_ != StringHash(string)) {
+ if (hash_ != HashForObject(string)) {
return false;
}
return string_->Equals(String::cast(string));
@@ -6231,15 +6287,9 @@ class StringKey : public HashTableKey {
uint32_t Hash() { return hash_; }
- HashFunction GetHashFunction() { return StringHash; }
-
- Object* GetObject() { return string_; }
-
- static uint32_t StringHash(Object* obj) {
- return String::cast(obj)->Hash();
- }
+ uint32_t HashForObject(Object* other) { return String::cast(other)->Hash(); }
- bool IsStringKey() { return true; }
+ Object* AsObject() { return string_; }
String* string_;
uint32_t hash_;
@@ -6261,10 +6311,6 @@ class StringSharedKey : public HashTableKey {
return source->Equals(source_);
}
- typedef uint32_t (*HashFunction)(Object* obj);
-
- virtual HashFunction GetHashFunction() { return StringSharedHash; }
-
static uint32_t StringSharedHashHelper(String* source,
SharedFunctionInfo* shared) {
uint32_t hash = source->Hash();
@@ -6281,18 +6327,18 @@ class StringSharedKey : public HashTableKey {
return hash;
}
- static uint32_t StringSharedHash(Object* obj) {
+ uint32_t Hash() {
+ return StringSharedHashHelper(source_, shared_);
+ }
+
+ uint32_t HashForObject(Object* obj) {
FixedArray* pair = FixedArray::cast(obj);
SharedFunctionInfo* shared = SharedFunctionInfo::cast(pair->get(0));
String* source = String::cast(pair->get(1));
return StringSharedHashHelper(source, shared);
}
- virtual uint32_t Hash() {
- return StringSharedHashHelper(source_, shared_);
- }
-
- virtual Object* GetObject() {
+ Object* AsObject() {
Object* obj = Heap::AllocateFixedArray(2);
if (obj->IsFailure()) return obj;
FixedArray* pair = FixedArray::cast(obj);
@@ -6301,8 +6347,6 @@ class StringSharedKey : public HashTableKey {
return pair;
}
- virtual bool IsStringKey() { return false; }
-
private:
String* source_;
SharedFunctionInfo* shared_;
@@ -6324,16 +6368,14 @@ class RegExpKey : public HashTableKey {
uint32_t Hash() { return RegExpHash(string_, flags_); }
- HashFunction GetHashFunction() { return RegExpObjectHash; }
-
- Object* GetObject() {
+ Object* AsObject() {
// Plain hash maps, which is where regexp keys are used, don't
// use this function.
UNREACHABLE();
return NULL;
}
- static uint32_t RegExpObjectHash(Object* obj) {
+ uint32_t HashForObject(Object* obj) {
FixedArray* val = FixedArray::cast(obj);
return RegExpHash(String::cast(val->get(JSRegExp::kSourceIndex)),
Smi::cast(val->get(JSRegExp::kFlagsIndex)));
@@ -6343,8 +6385,6 @@ class RegExpKey : public HashTableKey {
return string->Hash() + flags->value();
}
- bool IsStringKey() { return false; }
-
String* string_;
Smi* flags_;
};
@@ -6359,10 +6399,6 @@ class Utf8SymbolKey : public HashTableKey {
return String::cast(string)->IsEqualTo(string_);
}
- HashFunction GetHashFunction() {
- return StringHash;
- }
-
uint32_t Hash() {
if (length_field_ != 0) return length_field_ >> String::kHashShift;
unibrow::Utf8InputBuffer<> buffer(string_.start(),
@@ -6374,17 +6410,15 @@ class Utf8SymbolKey : public HashTableKey {
return result;
}
- Object* GetObject() {
- if (length_field_ == 0) Hash();
- return Heap::AllocateSymbol(string_, chars_, length_field_);
+ uint32_t HashForObject(Object* other) {
+ return String::cast(other)->Hash();
}
- static uint32_t StringHash(Object* obj) {
- return String::cast(obj)->Hash();
+ Object* AsObject() {
+ if (length_field_ == 0) Hash();
+ return Heap::AllocateSymbol(string_, chars_, length_field_);
}
- bool IsStringKey() { return true; }
-
Vector<const char> string_;
uint32_t length_field_;
int chars_; // Caches the number of characters when computing the hash code.
@@ -6396,17 +6430,17 @@ class SymbolKey : public HashTableKey {
public:
explicit SymbolKey(String* string) : string_(string) { }
- HashFunction GetHashFunction() {
- return StringHash;
- }
-
bool IsMatch(Object* string) {
return String::cast(string)->Equals(string_);
}
uint32_t Hash() { return string_->Hash(); }
- Object* GetObject() {
+ uint32_t HashForObject(Object* other) {
+ return String::cast(other)->Hash();
+ }
+
+ Object* AsObject() {
// If the string is a cons string, attempt to flatten it so that
// symbols will most often be flat strings.
if (StringShape(string_).IsCons()) {
@@ -6434,28 +6468,27 @@ class SymbolKey : public HashTableKey {
return String::cast(obj)->Hash();
}
- bool IsStringKey() { return true; }
-
String* string_;
};
-template<int prefix_size, int element_size>
-void HashTable<prefix_size, element_size>::IteratePrefix(ObjectVisitor* v) {
+template<typename Shape, typename Key>
+void HashTable<Shape, Key>::IteratePrefix(ObjectVisitor* v) {
IteratePointers(v, 0, kElementsStartOffset);
}
-template<int prefix_size, int element_size>
-void HashTable<prefix_size, element_size>::IterateElements(ObjectVisitor* v) {
+template<typename Shape, typename Key>
+void HashTable<Shape, Key>::IterateElements(ObjectVisitor* v) {
IteratePointers(v,
kElementsStartOffset,
kHeaderSize + length() * kPointerSize);
}
-template<int prefix_size, int element_size>
-Object* HashTable<prefix_size, element_size>::Allocate(int at_least_space_for) {
+template<typename Shape, typename Key>
+Object* HashTable<Shape, Key>::Allocate(
+ int at_least_space_for) {
int capacity = RoundUpToPowerOf2(at_least_space_for);
if (capacity < 4) capacity = 4; // Guarantee min capacity.
Object* obj = Heap::AllocateHashTable(EntryToIndex(capacity));
@@ -6467,27 +6500,28 @@ Object* HashTable<prefix_size, element_size>::Allocate(int at_least_space_for) {
}
+
// Find entry for key otherwise return -1.
-template <int prefix_size, int element_size>
-int HashTable<prefix_size, element_size>::FindEntry(HashTableKey* key) {
+template<typename Shape, typename Key>
+int HashTable<Shape, Key>::FindEntry(Key key) {
uint32_t nof = NumberOfElements();
if (nof == 0) return kNotFound; // Bail out if empty.
uint32_t capacity = Capacity();
- uint32_t hash = key->Hash();
+ uint32_t hash = Shape::Hash(key);
uint32_t entry = GetProbe(hash, 0, capacity);
Object* element = KeyAt(entry);
uint32_t passed_elements = 0;
if (!element->IsNull()) {
- if (!element->IsUndefined() && key->IsMatch(element)) return entry;
+ if (!element->IsUndefined() && Shape::IsMatch(key, element)) return entry;
if (++passed_elements == nof) return kNotFound;
}
for (uint32_t i = 1; !element->IsUndefined(); i++) {
entry = GetProbe(hash, i, capacity);
element = KeyAt(entry);
if (!element->IsNull()) {
- if (!element->IsUndefined() && key->IsMatch(element)) return entry;
+ if (!element->IsUndefined() && Shape::IsMatch(key, element)) return entry;
if (++passed_elements == nof) return kNotFound;
}
}
@@ -6495,9 +6529,8 @@ int HashTable<prefix_size, element_size>::FindEntry(HashTableKey* key) {
}
-template<int prefix_size, int element_size>
-Object* HashTable<prefix_size, element_size>::EnsureCapacity(
- int n, HashTableKey* key) {
+template<typename Shape, typename Key>
+Object* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
int capacity = Capacity();
int nof = NumberOfElements() + n;
// Make sure 50% is free
@@ -6509,18 +6542,20 @@ Object* HashTable<prefix_size, element_size>::EnsureCapacity(
WriteBarrierMode mode = table->GetWriteBarrierMode();
// Copy prefix to new array.
- for (int i = kPrefixStartIndex; i < kPrefixStartIndex + prefix_size; i++) {
+ for (int i = kPrefixStartIndex;
+ i < kPrefixStartIndex + Shape::kPrefixSize;
+ i++) {
table->set(i, get(i), mode);
}
// Rehash the elements.
- uint32_t (*Hash)(Object* key) = key->GetHashFunction();
for (int i = 0; i < capacity; i++) {
uint32_t from_index = EntryToIndex(i);
- Object* key = get(from_index);
- if (IsKey(key)) {
+ Object* k = get(from_index);
+ if (IsKey(k)) {
+ uint32_t hash = Shape::HashForObject(key, k);
uint32_t insertion_index =
- EntryToIndex(table->FindInsertionEntry(key, Hash(key)));
- for (int j = 0; j < element_size; j++) {
+ EntryToIndex(table->FindInsertionEntry(hash));
+ for (int j = 0; j < Shape::kEntrySize; j++) {
table->set(insertion_index + j, get(from_index + j), mode);
}
}
@@ -6530,10 +6565,8 @@ Object* HashTable<prefix_size, element_size>::EnsureCapacity(
}
-template<int prefix_size, int element_size>
-uint32_t HashTable<prefix_size, element_size>::FindInsertionEntry(
- Object* key,
- uint32_t hash) {
+template<typename Shape, typename Key>
+uint32_t HashTable<Shape, Key>::FindInsertionEntry(uint32_t hash) {
uint32_t capacity = Capacity();
uint32_t entry = GetProbe(hash, 0, capacity);
Object* element = KeyAt(entry);
@@ -6546,18 +6579,80 @@ uint32_t HashTable<prefix_size, element_size>::FindInsertionEntry(
return entry;
}
+// Force instantiation of template instance classes.
+// Please note this list is compiler-dependent.
+
+template class HashTable<SymbolTableShape, HashTableKey*>;
+
+template class HashTable<CompilationCacheShape, HashTableKey*>;
+
+template class HashTable<MapCacheShape, HashTableKey*>;
+
+template class Dictionary<StringDictionaryShape, String*>;
+
+template class Dictionary<NumberDictionaryShape, uint32_t>;
+
+template Object* Dictionary<NumberDictionaryShape, uint32_t>::Allocate(
+ int);
+
+template Object* Dictionary<StringDictionaryShape, String*>::Allocate(
+ int);
+
+template Object* Dictionary<NumberDictionaryShape, uint32_t>::AtPut(
+ uint32_t, Object*);
+
+template Object* Dictionary<NumberDictionaryShape, uint32_t>::SlowReverseLookup(
+ Object*);
+
+template Object* Dictionary<StringDictionaryShape, String*>::SlowReverseLookup(
+ Object*);
+
+template void Dictionary<NumberDictionaryShape, uint32_t>::CopyKeysTo(
+ FixedArray*, PropertyAttributes);
+
+template Object* Dictionary<StringDictionaryShape, String*>::DeleteProperty(
+ int, JSObject::DeleteMode);
+
+template Object* Dictionary<NumberDictionaryShape, uint32_t>::DeleteProperty(
+ int, JSObject::DeleteMode);
+
+template void Dictionary<StringDictionaryShape, String*>::CopyKeysTo(
+ FixedArray*);
-// Force instantiation of SymbolTable's base class
-template class HashTable<0, 1>;
+template int
+Dictionary<StringDictionaryShape, String*>::NumberOfElementsFilterAttributes(
+ PropertyAttributes);
+template Object* Dictionary<StringDictionaryShape, String*>::Add(
+ String*, Object*, PropertyDetails);
-// Force instantiation of Dictionary's base class
-template class HashTable<2, 3>;
+template Object*
+Dictionary<StringDictionaryShape, String*>::GenerateNewEnumerationIndices();
+template int
+Dictionary<NumberDictionaryShape, uint32_t>::NumberOfElementsFilterAttributes(
+ PropertyAttributes);
-// Force instantiation of EvalCache's base class
-template class HashTable<0, 2>;
+template Object* Dictionary<NumberDictionaryShape, uint32_t>::Add(
+ uint32_t, Object*, PropertyDetails);
+template Object* Dictionary<NumberDictionaryShape, uint32_t>::EnsureCapacity(
+ int, uint32_t);
+
+template Object* Dictionary<StringDictionaryShape, String*>::EnsureCapacity(
+ int, String*);
+
+template Object* Dictionary<NumberDictionaryShape, uint32_t>::AddEntry(
+ uint32_t, Object*, PropertyDetails, uint32_t);
+
+template Object* Dictionary<StringDictionaryShape, String*>::AddEntry(
+ String*, Object*, PropertyDetails, uint32_t);
+
+template
+int Dictionary<NumberDictionaryShape, uint32_t>::NumberOfEnumElements();
+
+template
+int Dictionary<StringDictionaryShape, String*>::NumberOfEnumElements();
// Collates undefined and nonexistent elements below limit from position
// zero of the elements. The object stays in Dictionary mode.
@@ -6566,7 +6661,7 @@ Object* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
// Must stay in dictionary mode, either because of requires_slow_elements,
// or because we are not going to sort (and therefore compact) all of the
// elements.
- Dictionary* dict = element_dictionary();
+ NumberDictionary* dict = element_dictionary();
HeapNumber* result_double = NULL;
if (limit > static_cast<uint32_t>(Smi::kMaxValue)) {
// Allocate space for result before we start mutating the object.
@@ -6576,9 +6671,9 @@ Object* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
}
int capacity = dict->Capacity();
- Object* obj = Dictionary::Allocate(dict->Capacity());
+ Object* obj = NumberDictionary::Allocate(dict->Capacity());
if (obj->IsFailure()) return obj;
- Dictionary* new_dict = Dictionary::cast(obj);
+ NumberDictionary* new_dict = NumberDictionary::cast(obj);
AssertNoAllocation no_alloc;
@@ -6639,7 +6734,7 @@ Object* JSObject::PrepareElementsForSort(uint32_t limit) {
if (!HasFastElements()) {
// Convert to fast elements containing only the existing properties.
// Ordering is irrelevant, since we are going to sort anyway.
- Dictionary* dict = element_dictionary();
+ NumberDictionary* dict = element_dictionary();
if (IsJSArray() || dict->requires_slow_elements() ||
dict->max_number_key() >= limit) {
return PrepareSlowElementsForSort(limit);
@@ -6731,7 +6826,7 @@ Object* JSObject::PrepareElementsForSort(uint32_t limit) {
}
-Object* JSGlobalObject::GetPropertyCell(LookupResult* result) {
+Object* GlobalObject::GetPropertyCell(LookupResult* result) {
ASSERT(!HasFastProperties());
Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
ASSERT(value->IsJSGlobalPropertyCell());
@@ -6779,7 +6874,7 @@ Object* SymbolTable::LookupKey(HashTableKey* key, Object** s) {
if (obj->IsFailure()) return obj;
// Create symbol object.
- Object* symbol = key->GetObject();
+ Object* symbol = key->AsObject();
if (symbol->IsFailure()) return symbol;
// If the symbol table grew as part of EnsureCapacity, obj is not
@@ -6788,7 +6883,7 @@ Object* SymbolTable::LookupKey(HashTableKey* key, Object** s) {
SymbolTable* table = reinterpret_cast<SymbolTable*>(obj);
// Add the new symbol and return it along with the symbol table.
- entry = table->FindInsertionEntry(symbol, key->Hash());
+ entry = table->FindInsertionEntry(key->Hash());
table->set(EntryToIndex(entry), symbol);
table->ElementAdded();
*s = symbol;
@@ -6828,7 +6923,7 @@ Object* CompilationCacheTable::Put(String* src, Object* value) {
CompilationCacheTable* cache =
reinterpret_cast<CompilationCacheTable*>(obj);
- int entry = cache->FindInsertionEntry(src, key.Hash());
+ int entry = cache->FindInsertionEntry(key.Hash());
cache->set(EntryToIndex(entry), src);
cache->set(EntryToIndex(entry) + 1, value);
cache->ElementAdded();
@@ -6845,9 +6940,9 @@ Object* CompilationCacheTable::PutEval(String* src,
CompilationCacheTable* cache =
reinterpret_cast<CompilationCacheTable*>(obj);
- int entry = cache->FindInsertionEntry(src, key.Hash());
+ int entry = cache->FindInsertionEntry(key.Hash());
- Object* k = key.GetObject();
+ Object* k = key.AsObject();
if (k->IsFailure()) return k;
cache->set(EntryToIndex(entry), k);
@@ -6866,7 +6961,7 @@ Object* CompilationCacheTable::PutRegExp(String* src,
CompilationCacheTable* cache =
reinterpret_cast<CompilationCacheTable*>(obj);
- int entry = cache->FindInsertionEntry(value, key.Hash());
+ int entry = cache->FindInsertionEntry(key.Hash());
cache->set(EntryToIndex(entry), value);
cache->set(EntryToIndex(entry) + 1, value);
cache->ElementAdded();
@@ -6889,13 +6984,9 @@ class SymbolsKey : public HashTableKey {
return true;
}
- uint32_t Hash() { return SymbolsHash(symbols_); }
+ uint32_t Hash() { return HashForObject(symbols_); }
- HashFunction GetHashFunction() { return SymbolsHash; }
-
- Object* GetObject() { return symbols_; }
-
- static uint32_t SymbolsHash(Object* obj) {
+ uint32_t HashForObject(Object* obj) {
FixedArray* symbols = FixedArray::cast(obj);
int len = symbols->length();
uint32_t hash = 0;
@@ -6905,7 +6996,7 @@ class SymbolsKey : public HashTableKey {
return hash;
}
- bool IsStringKey() { return false; }
+ Object* AsObject() { return symbols_; }
private:
FixedArray* symbols_;
@@ -6926,7 +7017,7 @@ Object* MapCache::Put(FixedArray* array, Map* value) {
if (obj->IsFailure()) return obj;
MapCache* cache = reinterpret_cast<MapCache*>(obj);
- int entry = cache->FindInsertionEntry(array, key.Hash());
+ int entry = cache->FindInsertionEntry(key.Hash());
cache->set(EntryToIndex(entry), array);
cache->set(EntryToIndex(entry) + 1, value);
cache->ElementAdded();
@@ -6934,19 +7025,21 @@ Object* MapCache::Put(FixedArray* array, Map* value) {
}
-Object* Dictionary::Allocate(int at_least_space_for) {
- Object* obj = DictionaryBase::Allocate(at_least_space_for);
+template<typename Shape, typename Key>
+Object* Dictionary<Shape, Key>::Allocate(int at_least_space_for) {
+ Object* obj = HashTable<Shape, Key>::Allocate(at_least_space_for);
// Initialize the next enumeration index.
if (!obj->IsFailure()) {
- Dictionary::cast(obj)->
+ Dictionary<Shape, Key>::cast(obj)->
SetNextEnumerationIndex(PropertyDetails::kInitialIndex);
}
return obj;
}
-Object* Dictionary::GenerateNewEnumerationIndices() {
- int length = NumberOfElements();
+template<typename Shape, typename Key>
+Object* Dictionary<Shape, Key>::GenerateNewEnumerationIndices() {
+ int length = HashTable<Shape, Key>::NumberOfElements();
// Allocate and initialize iteration order array.
Object* obj = Heap::AllocateFixedArray(length);
@@ -6962,10 +7055,10 @@ Object* Dictionary::GenerateNewEnumerationIndices() {
FixedArray* enumeration_order = FixedArray::cast(obj);
// Fill the enumeration order array with property details.
- int capacity = Capacity();
+ int capacity = HashTable<Shape, Key>::Capacity();
int pos = 0;
for (int i = 0; i < capacity; i++) {
- if (IsKey(KeyAt(i))) {
+ if (Dictionary<Shape, Key>::IsKey(Dictionary<Shape, Key>::KeyAt(i))) {
enumeration_order->set(pos++,
Smi::FromInt(DetailsAt(i).index()),
SKIP_WRITE_BARRIER);
@@ -6985,10 +7078,10 @@ Object* Dictionary::GenerateNewEnumerationIndices() {
}
// Update the dictionary with new indices.
- capacity = Capacity();
+ capacity = HashTable<Shape, Key>::Capacity();
pos = 0;
for (int i = 0; i < capacity; i++) {
- if (IsKey(KeyAt(i))) {
+ if (Dictionary<Shape, Key>::IsKey(Dictionary<Shape, Key>::KeyAt(i))) {
int enum_index = Smi::cast(enumeration_order->get(pos++))->value();
PropertyDetails details = DetailsAt(i);
PropertyDetails new_details =
@@ -7002,20 +7095,20 @@ Object* Dictionary::GenerateNewEnumerationIndices() {
return this;
}
-
-Object* Dictionary::EnsureCapacity(int n, HashTableKey* key) {
+template<typename Shape, typename Key>
+Object* Dictionary<Shape, Key>::EnsureCapacity(int n, Key key) {
// Check whether there are enough enumeration indices to add n elements.
- if (key->IsStringKey() &&
+ if (Shape::kIsEnumerable &&
!PropertyDetails::IsValidIndex(NextEnumerationIndex() + n)) {
// If not, we generate new indices for the properties.
Object* result = GenerateNewEnumerationIndices();
if (result->IsFailure()) return result;
}
- return DictionaryBase::EnsureCapacity(n, key);
+ return HashTable<Shape, Key>::EnsureCapacity(n, key);
}
-void Dictionary::RemoveNumberEntries(uint32_t from, uint32_t to) {
+void NumberDictionary::RemoveNumberEntries(uint32_t from, uint32_t to) {
// Do nothing if the interval [from, to) is empty.
if (from >= to) return;
@@ -7038,35 +7131,26 @@ void Dictionary::RemoveNumberEntries(uint32_t from, uint32_t to) {
}
-Object* Dictionary::DeleteProperty(int entry, JSObject::DeleteMode mode) {
+template<typename Shape, typename Key>
+Object* Dictionary<Shape, Key>::DeleteProperty(int entry,
+ JSObject::DeleteMode mode) {
PropertyDetails details = DetailsAt(entry);
// Ignore attributes if forcing a deletion.
if (details.IsDontDelete() && mode == JSObject::NORMAL_DELETION) {
return Heap::false_value();
}
SetEntry(entry, Heap::null_value(), Heap::null_value(), Smi::FromInt(0));
- ElementRemoved();
+ HashTable<Shape, Key>::ElementRemoved();
return Heap::true_value();
}
-int Dictionary::FindStringEntry(String* key) {
- StringKey k(key);
- return FindEntry(&k);
-}
-
-
-int Dictionary::FindNumberEntry(uint32_t index) {
- NumberKey k(index);
- return FindEntry(&k);
-}
-
-
-Object* Dictionary::AtPut(HashTableKey* key, Object* value) {
+template<typename Shape, typename Key>
+Object* Dictionary<Shape, Key>::AtPut(Key key, Object* value) {
int entry = FindEntry(key);
// If the entry is present set the value;
- if (entry != kNotFound) {
+ if (entry != Dictionary<Shape, Key>::kNotFound) {
ValueAtPut(entry, value);
return this;
}
@@ -7074,48 +7158,57 @@ Object* Dictionary::AtPut(HashTableKey* key, Object* value) {
// Check whether the dictionary should be extended.
Object* obj = EnsureCapacity(1, key);
if (obj->IsFailure()) return obj;
- Object* k = key->GetObject();
+
+ Object* k = Shape::AsObject(key);
if (k->IsFailure()) return k;
PropertyDetails details = PropertyDetails(NONE, NORMAL);
- Dictionary::cast(obj)->AddEntry(k, value, details, key->Hash());
- return obj;
+ return Dictionary<Shape, Key>::cast(obj)->
+ AddEntry(key, value, details, Shape::Hash(key));
}
-Object* Dictionary::Add(HashTableKey* key, Object* value,
- PropertyDetails details) {
+template<typename Shape, typename Key>
+Object* Dictionary<Shape, Key>::Add(Key key,
+ Object* value,
+ PropertyDetails details) {
+ // Validate key is absent.
+ SLOW_ASSERT((FindEntry(key) == Dictionary<Shape, Key>::kNotFound));
// Check whether the dictionary should be extended.
Object* obj = EnsureCapacity(1, key);
if (obj->IsFailure()) return obj;
- // Compute the key object.
- Object* k = key->GetObject();
- if (k->IsFailure()) return k;
- Dictionary::cast(obj)->AddEntry(k, value, details, key->Hash());
- return obj;
+ return Dictionary<Shape, Key>::cast(obj)->
+ AddEntry(key, value, details, Shape::Hash(key));
}
// Add a key, value pair to the dictionary.
-void Dictionary::AddEntry(Object* key,
- Object* value,
- PropertyDetails details,
- uint32_t hash) {
- uint32_t entry = FindInsertionEntry(key, hash);
+template<typename Shape, typename Key>
+Object* Dictionary<Shape, Key>::AddEntry(Key key,
+ Object* value,
+ PropertyDetails details,
+ uint32_t hash) {
+ // Compute the key object.
+ Object* k = Shape::AsObject(key);
+ if (k->IsFailure()) return k;
+
+ uint32_t entry = Dictionary<Shape, Key>::FindInsertionEntry(hash);
// Insert element at empty or deleted entry
- if (details.index() == 0 && key->IsString()) {
+ if (details.index() == 0 && Shape::kIsEnumerable) {
// Assign an enumeration index to the property and update
// SetNextEnumerationIndex.
int index = NextEnumerationIndex();
details = PropertyDetails(details.attributes(), details.type(), index);
SetNextEnumerationIndex(index + 1);
}
- SetEntry(entry, key, value, details);
- ASSERT(KeyAt(entry)->IsNumber() || KeyAt(entry)->IsString());
- ElementAdded();
+ SetEntry(entry, k, value, details);
+ ASSERT((Dictionary<Shape, Key>::KeyAt(entry)->IsNumber()
+ || Dictionary<Shape, Key>::KeyAt(entry)->IsString()));
+ HashTable<Shape, Key>::ElementAdded();
+ return this;
}
-void Dictionary::UpdateMaxNumberKey(uint32_t key) {
+void NumberDictionary::UpdateMaxNumberKey(uint32_t key) {
// If the dictionary requires slow elements an element has already
// been added at a high index.
if (requires_slow_elements()) return;
@@ -7128,73 +7221,51 @@ void Dictionary::UpdateMaxNumberKey(uint32_t key) {
// Update max key value.
Object* max_index_object = get(kMaxNumberKeyIndex);
if (!max_index_object->IsSmi() || max_number_key() < key) {
- set(kMaxNumberKeyIndex,
- Smi::FromInt(key << kRequiresSlowElementsTagSize),
- SKIP_WRITE_BARRIER);
+ FixedArray::set(kMaxNumberKeyIndex,
+ Smi::FromInt(key << kRequiresSlowElementsTagSize),
+ SKIP_WRITE_BARRIER);
}
}
-Object* Dictionary::AddStringEntry(String* key,
- Object* value,
- PropertyDetails details) {
- StringKey k(key);
- SLOW_ASSERT(FindEntry(&k) == kNotFound);
- return Add(&k, value, details);
-}
-
-
-Object* Dictionary::AddNumberEntry(uint32_t key,
- Object* value,
- PropertyDetails details) {
- NumberKey k(key);
+Object* NumberDictionary::AddNumberEntry(uint32_t key,
+ Object* value,
+ PropertyDetails details) {
UpdateMaxNumberKey(key);
- SLOW_ASSERT(FindEntry(&k) == kNotFound);
- return Add(&k, value, details);
+ SLOW_ASSERT(FindEntry(key) == kNotFound);
+ return Add(key, value, details);
}
-Object* Dictionary::AtNumberPut(uint32_t key, Object* value) {
- NumberKey k(key);
+Object* NumberDictionary::AtNumberPut(uint32_t key, Object* value) {
UpdateMaxNumberKey(key);
- return AtPut(&k, value);
+ return AtPut(key, value);
}
-Object* Dictionary::SetStringEntry(int entry,
- String* key,
- Object* value,
- PropertyDetails details) {
+Object* NumberDictionary::Set(uint32_t key,
+ Object* value,
+ PropertyDetails details) {
+ int entry = FindEntry(key);
+ if (entry == kNotFound) return AddNumberEntry(key, value, details);
// Preserve enumeration index.
details = PropertyDetails(details.attributes(),
details.type(),
DetailsAt(entry).index());
- SetEntry(entry, key, value, details);
+ SetEntry(entry, NumberDictionaryShape::AsObject(key), value, details);
return this;
}
-Object* Dictionary::SetOrAddNumberEntry(uint32_t key,
- Object* value,
- PropertyDetails details) {
- NumberKey k(key);
- int entry = FindEntry(&k);
- if (entry == -1) return AddNumberEntry(key, value, details);
- // Preserve enumeration index.
- details = PropertyDetails(details.attributes(),
- details.type(),
- DetailsAt(entry).index());
- SetEntry(entry, k.GetObject(), value, details);
- return this;
-}
-
-int Dictionary::NumberOfElementsFilterAttributes(PropertyAttributes filter) {
- int capacity = Capacity();
+template<typename Shape, typename Key>
+int Dictionary<Shape, Key>::NumberOfElementsFilterAttributes(
+ PropertyAttributes filter) {
+ int capacity = HashTable<Shape, Key>::Capacity();
int result = 0;
for (int i = 0; i < capacity; i++) {
- Object* k = KeyAt(i);
- if (IsKey(k)) {
+ Object* k = HashTable<Shape, Key>::KeyAt(i);
+ if (HashTable<Shape, Key>::IsKey(k)) {
PropertyAttributes attr = DetailsAt(i).attributes();
if ((attr & filter) == 0) result++;
}
@@ -7203,19 +7274,22 @@ int Dictionary::NumberOfElementsFilterAttributes(PropertyAttributes filter) {
}
-int Dictionary::NumberOfEnumElements() {
+template<typename Shape, typename Key>
+int Dictionary<Shape, Key>::NumberOfEnumElements() {
return NumberOfElementsFilterAttributes(
static_cast<PropertyAttributes>(DONT_ENUM));
}
-void Dictionary::CopyKeysTo(FixedArray* storage, PropertyAttributes filter) {
+template<typename Shape, typename Key>
+void Dictionary<Shape, Key>::CopyKeysTo(FixedArray* storage,
+ PropertyAttributes filter) {
ASSERT(storage->length() >= NumberOfEnumElements());
- int capacity = Capacity();
+ int capacity = HashTable<Shape, Key>::Capacity();
int index = 0;
for (int i = 0; i < capacity; i++) {
- Object* k = KeyAt(i);
- if (IsKey(k)) {
+ Object* k = HashTable<Shape, Key>::KeyAt(i);
+ if (HashTable<Shape, Key>::IsKey(k)) {
PropertyAttributes attr = DetailsAt(i).attributes();
if ((attr & filter) == 0) storage->set(index++, k);
}
@@ -7225,7 +7299,8 @@ void Dictionary::CopyKeysTo(FixedArray* storage, PropertyAttributes filter) {
}
-void Dictionary::CopyEnumKeysTo(FixedArray* storage, FixedArray* sort_array) {
+void StringDictionary::CopyEnumKeysTo(FixedArray* storage,
+ FixedArray* sort_array) {
ASSERT(storage->length() >= NumberOfEnumElements());
int capacity = Capacity();
int index = 0;
@@ -7247,14 +7322,15 @@ void Dictionary::CopyEnumKeysTo(FixedArray* storage, FixedArray* sort_array) {
}
-void Dictionary::CopyKeysTo(FixedArray* storage) {
+template<typename Shape, typename Key>
+void Dictionary<Shape, Key>::CopyKeysTo(FixedArray* storage) {
ASSERT(storage->length() >= NumberOfElementsFilterAttributes(
static_cast<PropertyAttributes>(NONE)));
- int capacity = Capacity();
+ int capacity = HashTable<Shape, Key>::Capacity();
int index = 0;
for (int i = 0; i < capacity; i++) {
- Object* k = KeyAt(i);
- if (IsKey(k)) {
+ Object* k = HashTable<Shape, Key>::KeyAt(i);
+ if (HashTable<Shape, Key>::IsKey(k)) {
storage->set(index++, k);
}
}
@@ -7263,11 +7339,12 @@ void Dictionary::CopyKeysTo(FixedArray* storage) {
// Backwards lookup (slow).
-Object* Dictionary::SlowReverseLookup(Object* value) {
- int capacity = Capacity();
+template<typename Shape, typename Key>
+Object* Dictionary<Shape, Key>::SlowReverseLookup(Object* value) {
+ int capacity = HashTable<Shape, Key>::Capacity();
for (int i = 0; i < capacity; i++) {
- Object* k = KeyAt(i);
- if (IsKey(k)) {
+ Object* k = HashTable<Shape, Key>::KeyAt(i);
+ if (Dictionary<Shape, Key>::IsKey(k)) {
Object* e = ValueAt(i);
if (e->IsJSGlobalPropertyCell()) {
e = JSGlobalPropertyCell::cast(e)->value();
@@ -7279,8 +7356,8 @@ Object* Dictionary::SlowReverseLookup(Object* value) {
}
-Object* Dictionary::TransformPropertiesToFastFor(JSObject* obj,
- int unused_property_fields) {
+Object* StringDictionary::TransformPropertiesToFastFor(
+ JSObject* obj, int unused_property_fields) {
// Make sure we preserve dictionary representation if there are too many
// descriptors.
if (NumberOfElements() > DescriptorArray::kMaxNumberOfDescriptors) return obj;
@@ -7288,7 +7365,8 @@ Object* Dictionary::TransformPropertiesToFastFor(JSObject* obj,
// Figure out if it is necessary to generate new enumeration indices.
int max_enumeration_index =
NextEnumerationIndex() +
- (DescriptorArray::kMaxNumberOfDescriptors - NumberOfElements());
+ (DescriptorArray::kMaxNumberOfDescriptors -
+ NumberOfElements());
if (!PropertyDetails::IsValidIndex(max_enumeration_index)) {
Object* result = GenerateNewEnumerationIndices();
if (result->IsFailure()) return result;
@@ -7637,4 +7715,5 @@ int BreakPointInfo::GetBreakPointCount() {
}
#endif
+
} } // namespace v8::internal
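
Aside on the explicit instantiation list earlier in this objects.cc diff: because the HashTable<Shape, Key> and Dictionary<Shape, Key> member definitions stay in objects.cc, the commit forces the compiler to emit code here for every specialization other translation units link against, which is why the list is flagged as compiler-dependent. A minimal single-file sketch of the idiom follows; Table, IntShape and the toy FindEntry body are hypothetical illustrations, not V8 code.

// Sketch: explicit instantiation keeps template member definitions out of the
// header while still giving other files something to link against.
#include <cstdio>

// --- What would live in table.h: declaration only. ---
template <typename Shape, typename Key>
class Table {
 public:
  int FindEntry(Key key);  // Defined below ("in table.cc"), not in the header.
};

// --- What would live in table.cc: definition plus explicit instantiation. ---
template <typename Shape, typename Key>
int Table<Shape, Key>::FindEntry(Key key) {
  return static_cast<int>(Shape::Hash(key) % 8);  // Toy body; real logic elided.
}

struct IntShape {
  static unsigned Hash(int key) { return static_cast<unsigned>(key) * 2654435761u; }
};

// Force code generation for this specialization here, mirroring the
// "template class HashTable<SymbolTableShape, HashTableKey*>;" lines above.
template class Table<IntShape, int>;

// --- A user that only sees the declaration still links. ---
int main() {
  Table<IntShape, int> table;
  std::printf("%d\n", table.FindEntry(42));
  return 0;
}
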
diff --git a/V8Binding/v8/src/objects.h b/V8Binding/v8/src/objects.h
index 775b6c7..ebd0bb4 100644
--- a/V8Binding/v8/src/objects.h
+++ b/V8Binding/v8/src/objects.h
@@ -1207,7 +1207,7 @@ class JSObject: public HeapObject {
DECL_ACCESSORS(properties, FixedArray) // Get and set fast properties.
inline void initialize_properties();
inline bool HasFastProperties();
- inline Dictionary* property_dictionary(); // Gets slow properties.
+ inline StringDictionary* property_dictionary(); // Gets slow properties.
// [elements]: The elements (properties with names that are integers).
// elements is a FixedArray in the fast case, and a Dictionary in the slow
@@ -1215,7 +1215,7 @@ class JSObject: public HeapObject {
DECL_ACCESSORS(elements, FixedArray) // Get and set fast elements.
inline void initialize_elements();
inline bool HasFastElements();
- inline Dictionary* element_dictionary(); // Gets slow elements.
+ inline NumberDictionary* element_dictionary(); // Gets slow elements.
// Collects elements starting at index 0.
// Undefined values are placed after non-undefined values.
@@ -1875,32 +1875,29 @@ class DescriptorArray: public FixedArray {
// - Elements with key == undefined have not been used yet.
// - Elements with key == null have been deleted.
//
-// The hash table class is parameterized with a prefix size and with
-// the size, including the key size, of the elements held in the hash
+// The hash table class is parameterized with a Shape and a Key.
+// Shape must be a class with the following interface:
+// class ExampleShape {
+// public:
+// // Tells whether key matches other.
+// static bool IsMatch(Key key, Object* other);
+// // Returns the hash value for key.
+// static uint32_t Hash(Key key);
+// // Returns the hash value for object.
+// static uint32_t HashForObject(Key key, Object* object);
+// // Convert key to an object.
+// static inline Object* AsObject(Key key);
+// // The prefix size indicates the number of elements at the beginning
+// // of the backing storage.
+// static const int kPrefixSize = ..;
+// // The entry size indicates the number of elements per entry.
+// static const int kEntrySize = ..;
+// };
// table. The prefix size indicates an amount of memory in the
// beginning of the backing storage that can be used for non-element
// information by subclasses.
-// HashTableKey is an abstract superclass keys.
-class HashTableKey {
- public:
- // Returns whether the other object matches this key.
- virtual bool IsMatch(Object* other) = 0;
- typedef uint32_t (*HashFunction)(Object* obj);
- // Returns the hash function used for this key.
- virtual HashFunction GetHashFunction() = 0;
- // Returns the hash value for this key.
- virtual uint32_t Hash() = 0;
- // Returns the key object for storing into the dictionary.
- // If allocations fails a failure object is returned.
- virtual Object* GetObject() = 0;
- virtual bool IsStringKey() = 0;
- // Required.
- virtual ~HashTableKey() {}
-};
-
-
-template<int prefix_size, int element_size>
+template<typename Shape, typename Key>
class HashTable: public FixedArray {
public:
// Returns the number of elements in the dictionary.
@@ -1949,25 +1946,27 @@ class HashTable: public FixedArray {
static const int kNumberOfElementsIndex = 0;
static const int kCapacityIndex = 1;
static const int kPrefixStartIndex = 2;
- static const int kElementsStartIndex = kPrefixStartIndex + prefix_size;
- static const int kElementSize = element_size;
+ static const int kElementsStartIndex =
+ kPrefixStartIndex + Shape::kPrefixSize;
+ static const int kEntrySize = Shape::kEntrySize;
static const int kElementsStartOffset =
kHeaderSize + kElementsStartIndex * kPointerSize;
// Constant used for denoting an absent entry.
static const int kNotFound = -1;
- protected:
// Find entry for key otherwise return -1.
- int FindEntry(HashTableKey* key);
+ int FindEntry(Key key);
+
+ protected:
// Find the entry at which to insert element with the given key that
// has the given hash value.
- uint32_t FindInsertionEntry(Object* key, uint32_t hash);
+ uint32_t FindInsertionEntry(uint32_t hash);
// Returns the index for an entry (of the key)
static inline int EntryToIndex(int entry) {
- return (entry * kElementSize) + kElementsStartIndex;
+ return (entry * kEntrySize) + kElementsStartIndex;
}
// Update the number of elements in the dictionary.
@@ -1992,15 +1991,51 @@ class HashTable: public FixedArray {
}
// Ensure enough space for n additional elements.
- Object* EnsureCapacity(int n, HashTableKey* key);
+ Object* EnsureCapacity(int n, Key key);
+};
+
+
+
+// HashTableKey is an abstract superclass for virtual key behavior.
+class HashTableKey {
+ public:
+ // Returns whether the other object matches this key.
+ virtual bool IsMatch(Object* other) = 0;
+ // Returns the hash value for this key.
+ virtual uint32_t Hash() = 0;
+ // Returns the hash value for object.
+ virtual uint32_t HashForObject(Object* key) = 0;
+ // Returns the key object for storing into the dictionary.
+ // If allocation fails, a failure object is returned.
+ virtual Object* AsObject() = 0;
+ // Required.
+ virtual ~HashTableKey() {}
};
+class SymbolTableShape {
+ public:
+ static bool IsMatch(HashTableKey* key, Object* value) {
+ return key->IsMatch(value);
+ }
+ static uint32_t Hash(HashTableKey* key) {
+ return key->Hash();
+ }
+ static uint32_t HashForObject(HashTableKey* key, Object* object) {
+ return key->HashForObject(object);
+ }
+ static Object* AsObject(HashTableKey* key) {
+ return key->AsObject();
+ }
+
+ static const int kPrefixSize = 0;
+ static const int kEntrySize = 1;
+};
// SymbolTable.
//
// No special elements in the prefix and the element size is 1
// because only the symbol itself (the key) needs to be stored.
-class SymbolTable: public HashTable<0, 1> {
+class SymbolTable: public HashTable<SymbolTableShape, HashTableKey*> {
public:
// Find symbol in the symbol table. If it is not there yet, it is
// added. The return value is the symbol table which might have
@@ -2024,11 +2059,33 @@ class SymbolTable: public HashTable<0, 1> {
};
+class MapCacheShape {
+ public:
+ static bool IsMatch(HashTableKey* key, Object* value) {
+ return key->IsMatch(value);
+ }
+ static uint32_t Hash(HashTableKey* key) {
+ return key->Hash();
+ }
+
+ static uint32_t HashForObject(HashTableKey* key, Object* object) {
+ return key->HashForObject(object);
+ }
+
+ static Object* AsObject(HashTableKey* key) {
+ return key->AsObject();
+ }
+
+ static const int kPrefixSize = 0;
+ static const int kEntrySize = 2;
+};
+
+
// MapCache.
//
// Maps keys that are a fixed array of symbols to a map.
// Used for canonicalize maps for object literals.
-class MapCache: public HashTable<0, 2> {
+class MapCache: public HashTable<MapCacheShape, HashTableKey*> {
public:
// Find cached value for a string key, otherwise return null.
Object* Lookup(FixedArray* key);
@@ -2040,74 +2097,42 @@ class MapCache: public HashTable<0, 2> {
};
-// Dictionary for keeping properties and elements in slow case.
-//
-// One element in the prefix is used for storing non-element
-// information about the dictionary.
-//
-// The rest of the array embeds triples of (key, value, details).
-// if key == undefined the triple is empty.
-// if key == null the triple has been deleted.
-// otherwise key contains the name of a property.
-class DictionaryBase: public HashTable<2, 3> {};
-
-class Dictionary: public DictionaryBase {
+template <typename Shape, typename Key>
+class Dictionary: public HashTable<Shape, Key> {
public:
+
+ static inline Dictionary<Shape, Key>* cast(Object* obj) {
+ return reinterpret_cast<Dictionary<Shape, Key>*>(obj);
+ }
+
// Returns the value at entry.
Object* ValueAt(int entry) {
- return get(EntryToIndex(entry)+1);
+ return get(HashTable<Shape, Key>::EntryToIndex(entry)+1);
}
// Set the value for entry.
void ValueAtPut(int entry, Object* value) {
- set(EntryToIndex(entry)+1, value);
+ set(HashTable<Shape, Key>::EntryToIndex(entry)+1, value);
}
// Returns the property details for the property at entry.
PropertyDetails DetailsAt(int entry) {
ASSERT(entry >= 0); // Not found is -1, which is not caught by get().
- return PropertyDetails(Smi::cast(get(EntryToIndex(entry) + 2)));
+ return PropertyDetails(
+ Smi::cast(get(HashTable<Shape, Key>::EntryToIndex(entry) + 2)));
}
// Set the details for entry.
void DetailsAtPut(int entry, PropertyDetails value) {
- set(EntryToIndex(entry) + 2, value.AsSmi());
+ set(HashTable<Shape, Key>::EntryToIndex(entry) + 2, value.AsSmi());
}
- // Remove all entries were key is a number and (from <= key && key < to).
- void RemoveNumberEntries(uint32_t from, uint32_t to);
-
// Sorting support
void CopyValuesTo(FixedArray* elements);
- // Casting.
- static inline Dictionary* cast(Object* obj);
-
- // Find entry for string key otherwise return -1.
- int FindStringEntry(String* key);
-
- // Find entry for number key otherwise return -1.
- int FindNumberEntry(uint32_t index);
-
// Delete a property from the dictionary.
Object* DeleteProperty(int entry, JSObject::DeleteMode mode);
- // Type specific at put (default NONE attributes is used when adding).
- Object* AtNumberPut(uint32_t key, Object* value);
-
- Object* AddStringEntry(String* key, Object* value, PropertyDetails details);
- Object* AddNumberEntry(uint32_t key, Object* value, PropertyDetails details);
-
- // Set an existing entry or add a new one if needed.
- Object* SetStringEntry(int entry,
- String* key,
- Object* value,
- PropertyDetails details);
-
- Object* SetOrAddNumberEntry(uint32_t key,
- Object* value,
- PropertyDetails details);
-
// Returns the number of elements in the dictionary filtering out properties
// with the specified attributes.
int NumberOfElementsFilterAttributes(PropertyAttributes filter);
@@ -2117,42 +2142,23 @@ class Dictionary: public DictionaryBase {
// Copies keys to preallocated fixed array.
void CopyKeysTo(FixedArray* storage, PropertyAttributes filter);
- // Copies enumerable keys to preallocated fixed array.
- void CopyEnumKeysTo(FixedArray* storage, FixedArray* sort_array);
// Fill in details for properties into storage.
void CopyKeysTo(FixedArray* storage);
- // For transforming properties of a JSObject.
- Object* TransformPropertiesToFastFor(JSObject* obj,
- int unused_property_fields);
-
- // If slow elements are required we will never go back to fast-case
- // for the elements kept in this dictionary. We require slow
- // elements if an element has been added at an index larger than
- // kRequiresSlowElementsLimit or set_requires_slow_elements() has been called
- // when defining a getter or setter with a number key.
- inline bool requires_slow_elements();
- inline void set_requires_slow_elements();
-
- // Get the value of the max number key that has been added to this
- // dictionary. max_number_key can only be called if
- // requires_slow_elements returns false.
- inline uint32_t max_number_key();
-
// Accessors for next enumeration index.
void SetNextEnumerationIndex(int index) {
fast_set(this, kNextEnumerationIndexIndex, Smi::FromInt(index));
}
int NextEnumerationIndex() {
- return Smi::cast(get(kNextEnumerationIndexIndex))->value();
+ return Smi::cast(FixedArray::get(kNextEnumerationIndexIndex))->value();
}
// Returns a new array for dictionary usage. Might return Failure.
static Object* Allocate(int at_least_space_for);
// Ensure enough space for n additional elements.
- Object* EnsureCapacity(int n, HashTableKey* key);
+ Object* EnsureCapacity(int n, Key key);
#ifdef DEBUG
void Print();
@@ -2160,38 +2166,110 @@ class Dictionary: public DictionaryBase {
// Returns the key (slow).
Object* SlowReverseLookup(Object* value);
- // Bit masks.
- static const int kRequiresSlowElementsMask = 1;
- static const int kRequiresSlowElementsTagSize = 1;
- static const uint32_t kRequiresSlowElementsLimit = (1 << 29) - 1;
-
- void UpdateMaxNumberKey(uint32_t key);
-
- private:
- // Generic at put operation.
- Object* AtPut(HashTableKey* key, Object* value);
-
- Object* Add(HashTableKey* key, Object* value, PropertyDetails details);
-
- // Add entry to dictionary.
- void AddEntry(Object* key,
- Object* value,
- PropertyDetails details,
- uint32_t hash);
-
// Sets the entry to (key, value) pair.
inline void SetEntry(int entry,
Object* key,
Object* value,
PropertyDetails details);
+ Object* Add(Key key, Object* value, PropertyDetails details);
+
+ protected:
+ // Generic at put operation.
+ Object* AtPut(Key key, Object* value);
+
+ // Add entry to dictionary.
+ Object* AddEntry(Key key,
+ Object* value,
+ PropertyDetails details,
+ uint32_t hash);
+
// Generate new enumeration indices to avoid enumeration index overflow.
Object* GenerateNewEnumerationIndices();
-
- static const int kMaxNumberKeyIndex = kPrefixStartIndex;
+ static const int kMaxNumberKeyIndex =
+ HashTable<Shape, Key>::kPrefixStartIndex;
static const int kNextEnumerationIndexIndex = kMaxNumberKeyIndex + 1;
+};
+
+
+class StringDictionaryShape {
+ public:
+ static inline bool IsMatch(String* key, Object* other);
+ static inline uint32_t Hash(String* key);
+ static inline uint32_t HashForObject(String* key, Object* object);
+ static inline Object* AsObject(String* key);
+ static const int kPrefixSize = 2;
+ static const int kEntrySize = 3;
+ static const bool kIsEnumerable = true;
+};
+
+
+class StringDictionary: public Dictionary<StringDictionaryShape, String*> {
+ public:
+ static inline StringDictionary* cast(Object* obj) {
+ ASSERT(obj->IsDictionary());
+ return reinterpret_cast<StringDictionary*>(obj);
+ }
+
+ // Copies enumerable keys to preallocated fixed array.
+ void CopyEnumKeysTo(FixedArray* storage, FixedArray* sort_array);
+
+ // For transforming properties of a JSObject.
+ Object* TransformPropertiesToFastFor(JSObject* obj,
+ int unused_property_fields);
+};
+
+
+class NumberDictionaryShape {
+ public:
+ static inline bool IsMatch(uint32_t key, Object* other);
+ static inline uint32_t Hash(uint32_t key);
+ static inline uint32_t HashForObject(uint32_t key, Object* object);
+ static inline Object* AsObject(uint32_t key);
+ static const int kPrefixSize = 2;
+ static const int kEntrySize = 3;
+ static const bool kIsEnumerable = false;
+};
+
+
+class NumberDictionary: public Dictionary<NumberDictionaryShape, uint32_t> {
+ public:
+ static NumberDictionary* cast(Object* obj) {
+ ASSERT(obj->IsDictionary());
+ return reinterpret_cast<NumberDictionary*>(obj);
+ }
+
+ // Type specific at put (default NONE attributes is used when adding).
+ Object* AtNumberPut(uint32_t key, Object* value);
+ Object* AddNumberEntry(uint32_t key,
+ Object* value,
+ PropertyDetails details);
+
+ // Set an existing entry or add a new one if needed.
+ Object* Set(uint32_t key, Object* value, PropertyDetails details);
- DISALLOW_IMPLICIT_CONSTRUCTORS(Dictionary);
+ void UpdateMaxNumberKey(uint32_t key);
+
+ // If slow elements are required we will never go back to fast-case
+ // for the elements kept in this dictionary. We require slow
+ // elements if an element has been added at an index larger than
+ // kRequiresSlowElementsLimit or set_requires_slow_elements() has been called
+ // when defining a getter or setter with a number key.
+ inline bool requires_slow_elements();
+ inline void set_requires_slow_elements();
+
+ // Get the value of the max number key that has been added to this
+ // dictionary. max_number_key can only be called if
+ // requires_slow_elements returns false.
+ inline uint32_t max_number_key();
+
+ // Remove all entries where key is a number and (from <= key && key < to).
+ void RemoveNumberEntries(uint32_t from, uint32_t to);
+
+ // Bit masks.
+ static const int kRequiresSlowElementsMask = 1;
+ static const int kRequiresSlowElementsTagSize = 1;
+ static const uint32_t kRequiresSlowElementsLimit = (1 << 29) - 1;
};
@@ -3059,6 +3137,9 @@ class GlobalObject: public JSObject {
// [global receiver]: the global receiver object of the context
DECL_ACCESSORS(global_receiver, JSObject)
+ // Retrieve the property cell used to store a property.
+ Object* GetPropertyCell(LookupResult* result);
+
// Casting.
static inline GlobalObject* cast(Object* obj);
@@ -3079,9 +3160,6 @@ class GlobalObject: public JSObject {
class JSGlobalObject: public GlobalObject {
public:
- // Retrieve the property cell used to store a property.
- Object* GetPropertyCell(LookupResult* result);
-
// Casting.
static inline JSGlobalObject* cast(Object* obj);
@@ -3231,7 +3309,30 @@ class JSRegExp: public JSObject {
};
-class CompilationCacheTable: public HashTable<0, 2> {
+class CompilationCacheShape {
+ public:
+ static inline bool IsMatch(HashTableKey* key, Object* value) {
+ return key->IsMatch(value);
+ }
+
+ static inline uint32_t Hash(HashTableKey* key) {
+ return key->Hash();
+ }
+
+ static inline uint32_t HashForObject(HashTableKey* key, Object* object) {
+ return key->HashForObject(object);
+ }
+
+ static Object* AsObject(HashTableKey* key) {
+ return key->AsObject();
+ }
+
+ static const int kPrefixSize = 0;
+ static const int kEntrySize = 2;
+};
+
+class CompilationCacheTable: public HashTable<CompilationCacheShape,
+ HashTableKey*> {
public:
// Find cached value for a string key, otherwise return null.
Object* Lookup(String* src);
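
The objects.h hunks above replace the old HashTable<prefix_size, element_size> parameters and the purely virtual HashTableKey interface with a compile-time Shape policy: each concrete table now names a small class (SymbolTableShape, MapCacheShape, CompilationCacheShape, StringDictionaryShape, NumberDictionaryShape) whose static IsMatch, Hash, HashForObject and AsObject functions, plus kPrefixSize/kEntrySize constants, are called directly by HashTable<Shape, Key>. The standalone sketch below shows the pattern with only part of the interface (no HashForObject, prefix or entry size); SmallSet and CStringShape are hypothetical names, not V8 code.

// Sketch of the Shape policy pattern: the policy supplies hashing and matching
// as static functions, so lookups need no virtual dispatch per probe.
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

template <typename Shape, typename Key>
class SmallSet {
 public:
  explicit SmallSet(std::size_t capacity) : slots_(capacity, nullptr) {}

  // Linear probing, assuming the table is never completely full.
  bool Contains(Key key) const {
    std::size_t start = Shape::Hash(key) % slots_.size();
    for (std::size_t n = 0; n < slots_.size(); n++) {
      const void* slot = slots_[(start + n) % slots_.size()];
      if (slot == nullptr) return false;
      if (Shape::IsMatch(key, slot)) return true;
    }
    return false;
  }

  void Insert(Key key) {
    std::size_t i = Shape::Hash(key) % slots_.size();
    while (slots_[i] != nullptr) i = (i + 1) % slots_.size();
    slots_[i] = Shape::AsObject(key);
  }

 private:
  std::vector<const void*> slots_;
};

// Plays the role of StringDictionaryShape: all static, no state.
struct CStringShape {
  static uint32_t Hash(const char* key) {
    uint32_t hash = 2166136261u;  // FNV-1a over the bytes of the key.
    for (const char* p = key; *p != '\0'; p++) {
      hash = (hash ^ static_cast<uint8_t>(*p)) * 16777619u;
    }
    return hash;
  }
  static bool IsMatch(const char* key, const void* stored) {
    return std::strcmp(key, static_cast<const char*>(stored)) == 0;
  }
  static const void* AsObject(const char* key) { return key; }
};

int main() {
  SmallSet<CStringShape, const char*> set(8);
  set.Insert("max_number_key");
  return set.Contains("max_number_key") ? 0 : 1;
}
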
diff --git a/V8Binding/v8/src/parser.cc b/V8Binding/v8/src/parser.cc
index 2b4be79..e1d9b71 100644
--- a/V8Binding/v8/src/parser.cc
+++ b/V8Binding/v8/src/parser.cc
@@ -361,7 +361,7 @@ class BufferedZoneList {
};
// Accumulates RegExp atoms and assertions into lists of terms and alternatives.
-class RegExpBuilder {
+class RegExpBuilder: public ZoneObject {
public:
RegExpBuilder();
void AddCharacter(uc16 character);
@@ -392,7 +392,10 @@ class RegExpBuilder {
RegExpBuilder::RegExpBuilder()
- : pending_empty_(false), characters_(NULL), terms_(), alternatives_()
+ : pending_empty_(false),
+ characters_(NULL),
+ terms_(),
+ alternatives_()
#ifdef DEBUG
, last_added_(ADD_NONE)
#endif
@@ -594,6 +597,44 @@ class RegExpParser {
static const int kMaxCaptures = 1 << 16;
static const uc32 kEndMarker = (1 << 21);
private:
+ enum SubexpressionType {
+ INITIAL,
+ CAPTURE, // All positive values represent captures.
+ POSITIVE_LOOKAHEAD,
+ NEGATIVE_LOOKAHEAD,
+ GROUPING
+ };
+
+ class RegExpParserState : public ZoneObject {
+ public:
+ RegExpParserState(RegExpParserState* previous_state,
+ SubexpressionType group_type,
+ int disjunction_capture_index)
+ : previous_state_(previous_state),
+ builder_(new RegExpBuilder()),
+ group_type_(group_type),
+ disjunction_capture_index_(disjunction_capture_index) {}
+ // Parser state of containing expression, if any.
+ RegExpParserState* previous_state() { return previous_state_; }
+ bool IsSubexpression() { return previous_state_ != NULL; }
+ // RegExpBuilder building this regexp's AST.
+ RegExpBuilder* builder() { return builder_; }
+ // Type of regexp being parsed (parenthesized group or entire regexp).
+ SubexpressionType group_type() { return group_type_; }
+ // Index in captures array of first capture in this sub-expression, if any.
+ // Also the capture index of this sub-expression itself, if group_type
+ // is CAPTURE.
+ int capture_index() { return disjunction_capture_index_; }
+ private:
+ // Linked list implementation of stack of states.
+ RegExpParserState* previous_state_;
+ // Builder for the stored disjunction.
+ RegExpBuilder* builder_;
+ // Stored disjunction type (capture, look-ahead or grouping), if any.
+ SubexpressionType group_type_;
+ // Stored disjunction's capture index (if any).
+ int disjunction_capture_index_;
+ };
uc32 current() { return current_; }
bool has_more() { return has_more_; }
@@ -601,7 +642,6 @@ class RegExpParser {
uc32 Next();
FlatStringReader* in() { return in_; }
void ScanForCaptures();
- bool CaptureAvailable(int index);
uc32 current_;
bool has_more_;
bool multiline_;
@@ -3808,9 +3848,7 @@ RegExpTree* RegExpParser::ReportError(Vector<const char> message) {
// Disjunction
RegExpTree* RegExpParser::ParsePattern() {
RegExpTree* result = ParseDisjunction(CHECK_FAILED);
- if (has_more()) {
- ReportError(CStrVector("Unmatched ')'") CHECK_FAILED);
- }
+ ASSERT(!has_more());
// If the result of parsing is a literal string atom, and it has the
// same length as the input, then the atom is identical to the input.
if (result->IsAtom() && result->AsAtom()->length() == in()->length()) {
@@ -3820,14 +3858,6 @@ RegExpTree* RegExpParser::ParsePattern() {
}
-bool RegExpParser::CaptureAvailable(int index) {
- if (captures_ == NULL) return false;
- if (index >= captures_->length()) return false;
- RegExpCapture* capture = captures_->at(index);
- return capture != NULL && capture->available() == CAPTURE_AVAILABLE;
-}
-
-
// Disjunction ::
// Alternative
// Alternative | Disjunction
@@ -3839,24 +3869,60 @@ bool RegExpParser::CaptureAvailable(int index) {
// Atom
// Atom Quantifier
RegExpTree* RegExpParser::ParseDisjunction() {
- RegExpBuilder builder;
- int capture_start_index = captures_started();
+ // Used to store current state while parsing subexpressions.
+ RegExpParserState initial_state(NULL, INITIAL, 0);
+ RegExpParserState* stored_state = &initial_state;
+ // Cache the builder in a local variable for quick access.
+ RegExpBuilder* builder = initial_state.builder();
while (true) {
switch (current()) {
case kEndMarker:
- case ')':
- return builder.ToRegExp();
- case '|': {
+ if (stored_state->IsSubexpression()) {
+ // Inside a parenthesized group when hitting end of input.
+ ReportError(CStrVector("Unterminated group") CHECK_FAILED);
+ }
+ ASSERT_EQ(INITIAL, stored_state->group_type());
+ // Parsing completed successfully.
+ return builder->ToRegExp();
+ case ')': {
+ if (!stored_state->IsSubexpression()) {
+ ReportError(CStrVector("Unmatched ')'") CHECK_FAILED);
+ }
+ ASSERT_NE(INITIAL, stored_state->group_type());
+
Advance();
- builder.NewAlternative();
- int capture_new_alt_start_index = captures_started();
- for (int i = capture_start_index; i < capture_new_alt_start_index; i++) {
- RegExpCapture* capture = captures_->at(i);
- if (capture->available() == CAPTURE_AVAILABLE) {
- capture->set_available(CAPTURE_UNREACHABLE);
- }
+ // End disjunction parsing and convert builder content to new single
+ // regexp atom.
+ RegExpTree* body = builder->ToRegExp();
+
+ int end_capture_index = captures_started();
+
+ int capture_index = stored_state->capture_index();
+ SubexpressionType type = stored_state->group_type();
+
+ // Restore previous state.
+ stored_state = stored_state->previous_state();
+ builder = stored_state->builder();
+
+ // Build result of subexpression.
+ if (type == CAPTURE) {
+ RegExpCapture* capture = new RegExpCapture(body, capture_index);
+ captures_->at(capture_index - 1) = capture;
+ body = capture;
+ } else if (type != GROUPING) {
+ ASSERT(type == POSITIVE_LOOKAHEAD || type == NEGATIVE_LOOKAHEAD);
+ bool is_positive = (type == POSITIVE_LOOKAHEAD);
+ body = new RegExpLookahead(body,
+ is_positive,
+ end_capture_index - capture_index,
+ capture_index);
}
- capture_start_index = capture_new_alt_start_index;
+ builder->AddAtom(body);
+ break;
+ }
+ case '|': {
+ Advance();
+ builder->NewAlternative();
continue;
}
case '*':
@@ -3866,10 +3932,10 @@ RegExpTree* RegExpParser::ParseDisjunction() {
case '^': {
Advance();
if (multiline_) {
- builder.AddAssertion(
+ builder->AddAssertion(
new RegExpAssertion(RegExpAssertion::START_OF_LINE));
} else {
- builder.AddAssertion(
+ builder->AddAssertion(
new RegExpAssertion(RegExpAssertion::START_OF_INPUT));
set_contains_anchor();
}
@@ -3880,7 +3946,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
RegExpAssertion::Type type =
multiline_ ? RegExpAssertion::END_OF_LINE :
RegExpAssertion::END_OF_INPUT;
- builder.AddAssertion(new RegExpAssertion(type));
+ builder->AddAssertion(new RegExpAssertion(type));
continue;
}
case '.': {
@@ -3889,17 +3955,47 @@ RegExpTree* RegExpParser::ParseDisjunction() {
ZoneList<CharacterRange>* ranges = new ZoneList<CharacterRange>(2);
CharacterRange::AddClassEscape('.', ranges);
RegExpTree* atom = new RegExpCharacterClass(ranges, false);
- builder.AddAtom(atom);
+ builder->AddAtom(atom);
break;
}
case '(': {
- RegExpTree* atom = ParseGroup(CHECK_FAILED);
- builder.AddAtom(atom);
+ SubexpressionType type = CAPTURE;
+ Advance();
+ if (current() == '?') {
+ switch (Next()) {
+ case ':':
+ type = GROUPING;
+ break;
+ case '=':
+ type = POSITIVE_LOOKAHEAD;
+ break;
+ case '!':
+ type = NEGATIVE_LOOKAHEAD;
+ break;
+ default:
+ ReportError(CStrVector("Invalid group") CHECK_FAILED);
+ break;
+ }
+ Advance(2);
+ } else {
+ if (captures_ == NULL) {
+ captures_ = new ZoneList<RegExpCapture*>(2);
+ }
+ if (captures_started() >= kMaxCaptures) {
+ ReportError(CStrVector("Too many captures") CHECK_FAILED);
+ }
+ captures_->Add(NULL);
+ }
+ // Store current state and begin new disjunction parsing.
+ stored_state = new RegExpParserState(stored_state,
+ type,
+ captures_started());
+ builder = stored_state->builder();
break;
}
case '[': {
RegExpTree* atom = ParseCharacterClass(CHECK_FAILED);
- builder.AddAtom(atom);
+ builder->AddAtom(atom);
break;
}
// Atom ::
@@ -3910,12 +4006,12 @@ RegExpTree* RegExpParser::ParseDisjunction() {
ReportError(CStrVector("\\ at end of pattern") CHECK_FAILED);
case 'b':
Advance(2);
- builder.AddAssertion(
+ builder->AddAssertion(
new RegExpAssertion(RegExpAssertion::BOUNDARY));
continue;
case 'B':
Advance(2);
- builder.AddAssertion(
+ builder->AddAssertion(
new RegExpAssertion(RegExpAssertion::NON_BOUNDARY));
continue;
// AtomEscape ::
@@ -3929,27 +4025,29 @@ RegExpTree* RegExpParser::ParseDisjunction() {
ZoneList<CharacterRange>* ranges = new ZoneList<CharacterRange>(2);
CharacterRange::AddClassEscape(c, ranges);
RegExpTree* atom = new RegExpCharacterClass(ranges, false);
- builder.AddAtom(atom);
- goto has_read_atom; // Avoid setting has_character_escapes_.
+ builder->AddAtom(atom);
+ break;
}
case '1': case '2': case '3': case '4': case '5': case '6':
case '7': case '8': case '9': {
int index = 0;
if (ParseBackReferenceIndex(&index)) {
- if (!CaptureAvailable(index - 1)) {
- // Prepare to ignore a following quantifier
- builder.AddEmpty();
- goto has_read_atom;
+ RegExpCapture* capture = NULL;
+ if (captures_ != NULL && index <= captures_->length()) {
+ capture = captures_->at(index - 1);
+ }
+ if (capture == NULL) {
+ builder->AddEmpty();
+ break;
}
- RegExpCapture* capture = captures_->at(index - 1);
RegExpTree* atom = new RegExpBackReference(capture);
- builder.AddAtom(atom);
- goto has_read_atom; // Avoid setting has_character_escapes_.
+ builder->AddAtom(atom);
+ break;
}
uc32 first_digit = Next();
if (first_digit == '8' || first_digit == '9') {
// Treat as identity escape
- builder.AddCharacter(first_digit);
+ builder->AddCharacter(first_digit);
Advance(2);
break;
}
@@ -3958,44 +4056,44 @@ RegExpTree* RegExpParser::ParseDisjunction() {
case '0': {
Advance();
uc32 octal = ParseOctalLiteral();
- builder.AddCharacter(octal);
+ builder->AddCharacter(octal);
break;
}
// ControlEscape :: one of
// f n r t v
case 'f':
Advance(2);
- builder.AddCharacter('\f');
+ builder->AddCharacter('\f');
break;
case 'n':
Advance(2);
- builder.AddCharacter('\n');
+ builder->AddCharacter('\n');
break;
case 'r':
Advance(2);
- builder.AddCharacter('\r');
+ builder->AddCharacter('\r');
break;
case 't':
Advance(2);
- builder.AddCharacter('\t');
+ builder->AddCharacter('\t');
break;
case 'v':
Advance(2);
- builder.AddCharacter('\v');
+ builder->AddCharacter('\v');
break;
case 'c': {
Advance(2);
uc32 control = ParseControlLetterEscape();
- builder.AddCharacter(control);
+ builder->AddCharacter(control);
break;
}
case 'x': {
Advance(2);
uc32 value;
if (ParseHexEscape(2, &value)) {
- builder.AddCharacter(value);
+ builder->AddCharacter(value);
} else {
- builder.AddCharacter('x');
+ builder->AddCharacter('x');
}
break;
}
@@ -4003,15 +4101,15 @@ RegExpTree* RegExpParser::ParseDisjunction() {
Advance(2);
uc32 value;
if (ParseHexEscape(4, &value)) {
- builder.AddCharacter(value);
+ builder->AddCharacter(value);
} else {
- builder.AddCharacter('u');
+ builder->AddCharacter('u');
}
break;
}
default:
// Identity escape.
- builder.AddCharacter(Next());
+ builder->AddCharacter(Next());
Advance(2);
break;
}
@@ -4024,12 +4122,11 @@ RegExpTree* RegExpParser::ParseDisjunction() {
// fallthrough
}
default:
- builder.AddCharacter(current());
+ builder->AddCharacter(current());
Advance();
break;
} // end switch(current())
- has_read_atom:
int min;
int max;
switch (current()) {
@@ -4071,7 +4168,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
is_greedy = false;
Advance();
}
- builder.AddQuantifierToAtom(min, max, is_greedy);
+ builder->AddQuantifierToAtom(min, max, is_greedy);
}
}
@@ -4382,73 +4479,6 @@ uc32 RegExpParser::ParseClassCharacterEscape() {
}
-RegExpTree* RegExpParser::ParseGroup() {
- ASSERT_EQ(current(), '(');
- char type = '(';
- Advance();
- if (current() == '?') {
- switch (Next()) {
- case ':': case '=': case '!':
- type = Next();
- Advance(2);
- break;
- default:
- ReportError(CStrVector("Invalid group") CHECK_FAILED);
- break;
- }
- } else {
- if (captures_ == NULL) {
- captures_ = new ZoneList<RegExpCapture*>(2);
- }
- if (captures_started() >= kMaxCaptures) {
- ReportError(CStrVector("Too many captures") CHECK_FAILED);
- }
- captures_->Add(NULL);
- }
- int capture_index = captures_started();
- RegExpTree* body = ParseDisjunction(CHECK_FAILED);
- if (current() != ')') {
- ReportError(CStrVector("Unterminated group") CHECK_FAILED);
- }
- Advance();
-
- int end_capture_index = captures_started();
- if (type == '!') {
- // Captures inside a negative lookahead are never available outside it.
- for (int i = capture_index; i < end_capture_index; i++) {
- RegExpCapture* capture = captures_->at(i);
- ASSERT(capture != NULL);
- capture->set_available(CAPTURE_PERMANENTLY_UNREACHABLE);
- }
- } else {
- // Captures temporarily unavailable because they are in different
- // alternatives are all available after the disjunction.
- for (int i = capture_index; i < end_capture_index; i++) {
- RegExpCapture* capture = captures_->at(i);
- ASSERT(capture != NULL);
- if (capture->available() == CAPTURE_UNREACHABLE) {
- capture->set_available(CAPTURE_AVAILABLE);
- }
- }
- }
-
- if (type == '(') {
- RegExpCapture* capture = new RegExpCapture(body, capture_index);
- captures_->at(capture_index - 1) = capture;
- return capture;
- } else if (type == ':') {
- return body;
- } else {
- ASSERT(type == '=' || type == '!');
- bool is_positive = (type == '=');
- return new RegExpLookahead(body,
- is_positive,
- end_capture_index - capture_index,
- capture_index);
- }
-}
-
-
CharacterRange RegExpParser::ParseClassAtom(uc16* char_class) {
ASSERT_EQ(0, *char_class);
uc32 first = current();
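
The parser.cc hunk above removes ParseGroup() and has ParseDisjunction() track grouping itself: '(' allocates a RegExpParserState chained to the previous one and switches to its builder, ')' folds the finished body back into the enclosing builder as a capture, lookahead or plain group, and unbalanced parentheses are reported where they are detected rather than via the old capture-availability bookkeeping. The toy single-file sketch below shows the same recursion-to-explicit-state transformation on plain strings instead of RegExp AST nodes; RewriteGroups and State are hypothetical names, not V8 code.

// Toy illustration of replacing a recursive ParseGroup() helper with an
// explicit chain of parser states managed inside one parsing loop.
#include <cstdio>
#include <memory>
#include <string>
#include <vector>

struct State {
  State* previous;   // Enclosing group, or nullptr at top level.
  std::string body;  // "Builder" for the text accumulated in this group.
};

// Rewrites "a(b(c)d)e" as "a[b[c]d]e"; returns false on unbalanced input.
bool RewriteGroups(const std::string& input, std::string* out) {
  std::vector<std::unique_ptr<State>> owned;  // Keeps pushed states alive.
  State top{nullptr, ""};
  State* current = &top;
  for (char c : input) {
    if (c == '(') {
      owned.push_back(std::unique_ptr<State>(new State{current, ""}));
      current = owned.back().get();                    // "Push": start a new group.
    } else if (c == ')') {
      if (current->previous == nullptr) return false;  // Unmatched ')'.
      std::string body = "[" + current->body + "]";
      current = current->previous;                     // "Pop": restore outer state.
      current->body += body;                           // Fold group into its parent.
    } else {
      current->body += c;
    }
  }
  if (current != &top) return false;                   // Unterminated group.
  *out = top.body;
  return true;
}

int main() {
  std::string out;
  bool ok = RewriteGroups("a(b(c)d)e", &out);
  std::printf("%d %s\n", ok ? 1 : 0, out.c_str());  // Prints: 1 a[b[c]d]e
  return ok && out == "a[b[c]d]e" ? 0 : 1;
}
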
diff --git a/V8Binding/v8/src/platform-freebsd.cc b/V8Binding/v8/src/platform-freebsd.cc
index acef74c..92d72f8 100644
--- a/V8Binding/v8/src/platform-freebsd.cc
+++ b/V8Binding/v8/src/platform-freebsd.cc
@@ -561,6 +561,7 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
sample.sp = mcontext.mc_esp;
sample.fp = mcontext.mc_ebp;
#endif
+ active_sampler_->SampleStack(&sample);
}
// We always sample the VM state.
diff --git a/V8Binding/v8/src/platform-linux.cc b/V8Binding/v8/src/platform-linux.cc
index 39495ab..bccf9e6 100644
--- a/V8Binding/v8/src/platform-linux.cc
+++ b/V8Binding/v8/src/platform-linux.cc
@@ -639,6 +639,7 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
sample.fp = mcontext.arm_fp;
#endif
#endif
+ active_sampler_->SampleStack(&sample);
}
// We always sample the VM state.
diff --git a/V8Binding/v8/src/platform-macos.cc b/V8Binding/v8/src/platform-macos.cc
index f5b6458..880931e 100644
--- a/V8Binding/v8/src/platform-macos.cc
+++ b/V8Binding/v8/src/platform-macos.cc
@@ -38,6 +38,7 @@
#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
+#include <mach/mach.h>
#include <mach/semaphore.h>
#include <mach/task.h>
#include <sys/time.h>
@@ -475,63 +476,94 @@ Semaphore* OS::CreateSemaphore(int count) {
#ifdef ENABLE_LOGGING_AND_PROFILING
-static Sampler* active_sampler_ = NULL;
-
-static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
- USE(info);
- if (signal != SIGPROF) return;
- if (active_sampler_ == NULL) return;
-
- TickSample sample;
+class Sampler::PlatformData : public Malloced {
+ public:
+ explicit PlatformData(Sampler* sampler)
+ : sampler_(sampler),
+ task_self_(mach_task_self()),
+ profiled_thread_(0),
+ sampler_thread_(0) {
+ }
- // If profiling, we extract the current pc and sp.
- if (active_sampler_->IsProfiling()) {
- // Extracting the sample from the context is extremely machine dependent.
- ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
- mcontext_t& mcontext = ucontext->uc_mcontext;
+ Sampler* sampler_;
+ // Note: for profiled_thread_, Mach primitives are used instead of pthreads
+ // because the latter do not provide the required thread manipulation primitives.
+ // For details, consult the "Mac OS X Internals" book, Section 7.3.
+ mach_port_t task_self_;
+ thread_act_t profiled_thread_;
+ pthread_t sampler_thread_;
+
+ // Sampler thread handler.
+ void Runner() {
+ // Loop until the sampler is disengaged.
+ while (sampler_->IsActive()) {
+ TickSample sample;
+
+ // If profiling, we record the pc and sp of the profiled thread.
+ if (sampler_->IsProfiling()
+ && KERN_SUCCESS == thread_suspend(profiled_thread_)) {
#if V8_HOST_ARCH_X64
- UNIMPLEMENTED();
- USE(mcontext);
- sample.pc = 0;
- sample.sp = 0;
- sample.fp = 0;
+ thread_state_flavor_t flavor = x86_THREAD_STATE64;
+ x86_thread_state64_t state;
+ mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
+#elif V8_HOST_ARCH_IA32
+ thread_state_flavor_t flavor = i386_THREAD_STATE;
+ i386_thread_state_t state;
+ mach_msg_type_number_t count = i386_THREAD_STATE_COUNT;
+#else
+#error Unsupported Mac OS X host architecture.
+#endif // V8_HOST_ARCH_IA32
+ if (thread_get_state(profiled_thread_,
+ flavor,
+ reinterpret_cast<natural_t*>(&state),
+ &count) == KERN_SUCCESS) {
+#if V8_HOST_ARCH_X64
+ UNIMPLEMENTED();
+ sample.pc = 0;
+ sample.sp = 0;
+ sample.fp = 0;
#elif V8_HOST_ARCH_IA32
#if __DARWIN_UNIX03
- sample.pc = mcontext->__ss.__eip;
- sample.sp = mcontext->__ss.__esp;
- sample.fp = mcontext->__ss.__ebp;
+ sample.pc = state.__eip;
+ sample.sp = state.__esp;
+ sample.fp = state.__ebp;
#else // !__DARWIN_UNIX03
- sample.pc = mcontext->ss.eip;
- sample.sp = mcontext->ss.esp;
- sample.fp = mcontext->ss.ebp;
+ sample.pc = state.eip;
+ sample.sp = state.esp;
+ sample.fp = state.ebp;
#endif // __DARWIN_UNIX03
#else
#error Unsupported Mac OS X host architecture.
#endif // V8_HOST_ARCH_IA32
+ sampler_->SampleStack(&sample);
+ }
+ thread_resume(profiled_thread_);
+ }
+
+ // We always sample the VM state.
+ sample.state = Logger::state();
+ // Invoke tick handler with program counter and stack pointer.
+ sampler_->Tick(&sample);
+
+ // Wait until next sampling.
+ usleep(sampler_->interval_ * 1000);
+ }
}
+};
- // We always sample the VM state.
- sample.state = Logger::state();
- active_sampler_->Tick(&sample);
+// Entry point for sampler thread.
+static void* SamplerEntry(void* arg) {
+ Sampler::PlatformData* data =
+ reinterpret_cast<Sampler::PlatformData*>(arg);
+ data->Runner();
+ return 0;
}
-class Sampler::PlatformData : public Malloced {
- public:
- PlatformData() {
- signal_handler_installed_ = false;
- }
-
- bool signal_handler_installed_;
- struct sigaction old_signal_handler_;
- struct itimerval old_timer_value_;
-};
-
-
Sampler::Sampler(int interval, bool profiling)
: interval_(interval), profiling_(profiling), active_(false) {
- data_ = new PlatformData();
+ data_ = new PlatformData(this);
}
@@ -541,43 +573,40 @@ Sampler::~Sampler() {
void Sampler::Start() {
- // There can only be one active sampler at the time on POSIX
- // platforms.
- if (active_sampler_ != NULL) return;
-
- // Request profiling signals.
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO;
- if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
- data_->signal_handler_installed_ = true;
-
- // Set the itimer to generate a tick for each interval.
- itimerval itimer;
- itimer.it_interval.tv_sec = interval_ / 1000;
- itimer.it_interval.tv_usec = (interval_ % 1000) * 1000;
- itimer.it_value.tv_sec = itimer.it_interval.tv_sec;
- itimer.it_value.tv_usec = itimer.it_interval.tv_usec;
- setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_);
-
- // Set this sampler as the active sampler.
- active_sampler_ = this;
+ // If we are profiling, we need to be able to access the calling
+ // thread.
+ if (IsProfiling()) {
+ data_->profiled_thread_ = mach_thread_self();
+ }
+
+ // Create sampler thread with high priority.
+ // According to POSIX spec, when SCHED_FIFO policy is used, a thread
+ // runs until it exits or blocks.
+ pthread_attr_t sched_attr;
+ sched_param fifo_param;
+ pthread_attr_init(&sched_attr);
+ pthread_attr_setinheritsched(&sched_attr, PTHREAD_EXPLICIT_SCHED);
+ pthread_attr_setschedpolicy(&sched_attr, SCHED_FIFO);
+ fifo_param.sched_priority = sched_get_priority_max(SCHED_FIFO);
+ pthread_attr_setschedparam(&sched_attr, &fifo_param);
+
active_ = true;
+ pthread_create(&data_->sampler_thread_, &sched_attr, SamplerEntry, data_);
}
void Sampler::Stop() {
- // Restore old signal handler
- if (data_->signal_handler_installed_) {
- setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL);
- sigaction(SIGPROF, &data_->old_signal_handler_, 0);
- data_->signal_handler_installed_ = false;
- }
-
- // This sampler is no longer the active sampler.
- active_sampler_ = NULL;
+ // Setting active to false triggers termination of the sampler
+ // thread.
active_ = false;
+
+ // Wait for sampler thread to terminate.
+ pthread_join(data_->sampler_thread_, NULL);
+
+ // Deallocate Mach port for thread.
+ if (IsProfiling()) {
+ mach_port_deallocate(data_->task_self_, data_->profiled_thread_);
+ }
}
#endif // ENABLE_LOGGING_AND_PROFILING
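
The patched macOS sampler replaces the SIGPROF handler with a dedicated thread that suspends the profiled thread, reads its register state with thread_get_state, and resumes it. A minimal standalone sketch of that suspend–sample–resume step (assuming an x86_64 Mac with __DARWIN_UNIX03 register names; Sample, SampleOnce and collect are illustrative names, not V8's):

// Hedged sketch only: suspend the profiled thread, read its register state,
// hand a sample to a caller-supplied callback, then resume it. Error paths
// simply skip the sample, mirroring the patched sampler loop.
#include <mach/mach.h>
#include <cstdint>

struct Sample { uintptr_t pc, sp, fp; };

static void SampleOnce(thread_act_t thread, void (*collect)(const Sample&)) {
  if (thread_suspend(thread) != KERN_SUCCESS) return;
#if defined(__x86_64__)
  x86_thread_state64_t state;
  mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
  if (thread_get_state(thread, x86_THREAD_STATE64,
                       reinterpret_cast<natural_t*>(&state),
                       &count) == KERN_SUCCESS) {
    Sample s = { static_cast<uintptr_t>(state.__rip),
                 static_cast<uintptr_t>(state.__rsp),
                 static_cast<uintptr_t>(state.__rbp) };
    collect(s);
  }
#endif
  thread_resume(thread);
}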
diff --git a/V8Binding/v8/src/platform-win32.cc b/V8Binding/v8/src/platform-win32.cc
index 1b0f9b2..a8a6243 100644
--- a/V8Binding/v8/src/platform-win32.cc
+++ b/V8Binding/v8/src/platform-win32.cc
@@ -1776,32 +1776,30 @@ class Sampler::PlatformData : public Malloced {
TickSample sample;
// If profiling, we record the pc and sp of the profiled thread.
- if (sampler_->IsProfiling()) {
- // Pause the profiled thread and get its context.
- SuspendThread(profiled_thread_);
+ if (sampler_->IsProfiling()
+ && SuspendThread(profiled_thread_) != (DWORD)-1) {
context.ContextFlags = CONTEXT_FULL;
- GetThreadContext(profiled_thread_, &context);
- // Invoke tick handler with program counter and stack pointer.
+ if (GetThreadContext(profiled_thread_, &context) != 0) {
#if V8_HOST_ARCH_X64
- UNIMPLEMENTED();
- sample.pc = context.Rip;
- sample.sp = context.Rsp;
- sample.fp = context.Rbp;
+ UNIMPLEMENTED();
+ sample.pc = context.Rip;
+ sample.sp = context.Rsp;
+ sample.fp = context.Rbp;
#else
- sample.pc = context.Eip;
- sample.sp = context.Esp;
- sample.fp = context.Ebp;
+ sample.pc = context.Eip;
+ sample.sp = context.Esp;
+ sample.fp = context.Ebp;
#endif
+ sampler_->SampleStack(&sample);
+ }
+ ResumeThread(profiled_thread_);
}
// We always sample the VM state.
sample.state = Logger::state();
+ // Invoke tick handler with program counter and stack pointer.
sampler_->Tick(&sample);
- if (sampler_->IsProfiling()) {
- ResumeThread(profiled_thread_);
- }
-
// Wait until next sampling.
Sleep(sampler_->interval_);
}
diff --git a/V8Binding/v8/src/platform.h b/V8Binding/v8/src/platform.h
index b5123c5..11a1e79 100644
--- a/V8Binding/v8/src/platform.h
+++ b/V8Binding/v8/src/platform.h
@@ -510,6 +510,9 @@ class Sampler {
explicit Sampler(int interval, bool profiling);
virtual ~Sampler();
+ // Performs stack sampling.
+ virtual void SampleStack(TickSample* sample) = 0;
+
// This method is called for each sampling period with the current
// program counter.
virtual void Tick(TickSample* sample) = 0;
@@ -527,8 +530,8 @@ class Sampler {
class PlatformData;
private:
- int interval_;
- bool profiling_;
+ const int interval_;
+ const bool profiling_;
bool active_;
PlatformData* data_; // Platform specific data.
DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
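
With SampleStack now a pure virtual on Sampler, a concrete sampler supplies both the stack capture and the tick handling. A hypothetical subclass (illustrative only; it assumes nothing beyond the Sampler and TickSample declarations shown in this header):

// Hypothetical sampler: the platform thread fills in pc/sp/fp, calls
// SampleStack() while the profiled thread is suspended, then calls Tick().
class CountingSampler : public Sampler {
 public:
  CountingSampler(int interval, bool profiling)
      : Sampler(interval, profiling), samples_(0) {}

  virtual void SampleStack(TickSample* sample) {
    // A real profiler would walk the stack via sample->fp/sp here.
    samples_++;
  }

  virtual void Tick(TickSample* sample) {
    // Record sample->state, sample->pc, etc.
  }

 private:
  int samples_;
};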
diff --git a/V8Binding/v8/src/property.h b/V8Binding/v8/src/property.h
index 851bae2..69e5640 100644
--- a/V8Binding/v8/src/property.h
+++ b/V8Binding/v8/src/property.h
@@ -260,7 +260,7 @@ class LookupResult BASE_EMBEDDED {
case NORMAL: {
Object* value;
value = holder()->property_dictionary()->ValueAt(GetDictionaryEntry());
- if (holder()->IsJSGlobalObject()) {
+ if (holder()->IsGlobalObject()) {
value = JSGlobalPropertyCell::cast(value)->value();
}
return value;
diff --git a/V8Binding/v8/src/runtime.cc b/V8Binding/v8/src/runtime.cc
index 28254f7..aeda068 100644
--- a/V8Binding/v8/src/runtime.cc
+++ b/V8Binding/v8/src/runtime.cc
@@ -168,7 +168,7 @@ static Object* DeepCopyBoilerplate(JSObject* boilerplate) {
}
}
} else {
- Dictionary* element_dictionary = copy->element_dictionary();
+ NumberDictionary* element_dictionary = copy->element_dictionary();
int capacity = element_dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
Object* k = element_dictionary->KeyAt(i);
@@ -769,17 +769,23 @@ static Object* Runtime_InitializeVarGlobal(Arguments args) {
PropertyAttributes attributes = DONT_DELETE;
// Lookup the property locally in the global object. If it isn't
- // there, we add the property and take special precautions to always
- // add it as a local property even in case of callbacks in the
- // prototype chain (this rules out using SetProperty).
- // We have IgnoreAttributesAndSetLocalProperty for this.
+ // there, there is a property with this name in the prototype chain.
+ // We follow Safari and Firefox behavior and only set the property
+ // locally if there is an explicit initialization value that we have
+ // to assign to the property. When adding the property we take
+ // special precautions to always add it as a local property even in
+ // case of callbacks in the prototype chain (this rules out using
+ // SetProperty). We have IgnoreAttributesAndSetLocalProperty for
+ // this.
LookupResult lookup;
global->LocalLookup(*name, &lookup);
if (!lookup.IsProperty()) {
- Object* value = (assign) ? args[1] : Heap::undefined_value();
- return global->IgnoreAttributesAndSetLocalProperty(*name,
- value,
- attributes);
+ if (assign) {
+ return global->IgnoreAttributesAndSetLocalProperty(*name,
+ args[1],
+ attributes);
+ }
+ return Heap::undefined_value();
}
// Determine if this is a redeclaration of something read-only.
@@ -2604,12 +2610,12 @@ static Object* Runtime_KeyedGetProperty(Arguments args) {
}
} else {
// Attempt dictionary lookup.
- Dictionary* dictionary = receiver->property_dictionary();
- int entry = dictionary->FindStringEntry(key);
- if ((entry != Dictionary::kNotFound) &&
+ StringDictionary* dictionary = receiver->property_dictionary();
+ int entry = dictionary->FindEntry(key);
+ if ((entry != StringDictionary::kNotFound) &&
(dictionary->DetailsAt(entry).type() == NORMAL)) {
Object* value = dictionary->ValueAt(entry);
- if (receiver->IsJSGlobalObject()) {
+ if (receiver->IsGlobalObject()) {
value = JSGlobalPropertyCell::cast(value)->value();
}
return value;
@@ -5130,8 +5136,8 @@ class ArrayConcatVisitor {
storage_->set(index, *elm);
} else {
- Handle<Dictionary> dict = Handle<Dictionary>::cast(storage_);
- Handle<Dictionary> result =
+ Handle<NumberDictionary> dict = Handle<NumberDictionary>::cast(storage_);
+ Handle<NumberDictionary> result =
Factory::DictionaryAtNumberPut(dict, index, elm);
if (!result.is_identical_to(dict))
storage_ = result;
@@ -5179,7 +5185,7 @@ static uint32_t IterateElements(Handle<JSObject> receiver,
}
} else {
- Handle<Dictionary> dict(receiver->element_dictionary());
+ Handle<NumberDictionary> dict(receiver->element_dictionary());
uint32_t capacity = dict->Capacity();
for (uint32_t j = 0; j < capacity; j++) {
Handle<Object> k(dict->KeyAt(j));
@@ -5333,7 +5339,7 @@ static Object* Runtime_ArrayConcat(Arguments args) {
uint32_t at_least_space_for = estimate_nof_elements +
(estimate_nof_elements >> 2);
storage = Handle<FixedArray>::cast(
- Factory::NewDictionary(at_least_space_for));
+ Factory::NewNumberDictionary(at_least_space_for));
}
Handle<Object> len = Factory::NewNumber(static_cast<double>(result_length));
@@ -5396,7 +5402,7 @@ static Object* Runtime_EstimateNumberOfElements(Arguments args) {
CONVERT_CHECKED(JSArray, array, args[0]);
HeapObject* elements = array->elements();
if (elements->IsDictionary()) {
- return Smi::FromInt(Dictionary::cast(elements)->NumberOfElements());
+ return Smi::FromInt(NumberDictionary::cast(elements)->NumberOfElements());
} else {
return array->length();
}
@@ -7414,32 +7420,46 @@ static bool ShowFrameInStackTrace(StackFrame* raw_frame, Object* caller,
// element segments each containing a receiver, function and native
// code offset.
static Object* Runtime_CollectStackTrace(Arguments args) {
- ASSERT_EQ(args.length(), 1);
+ ASSERT_EQ(args.length(), 2);
Object* caller = args[0];
+ CONVERT_NUMBER_CHECKED(int32_t, limit, Int32, args[1]);
+
+ HandleScope scope;
+
+ int initial_size = limit < 10 ? limit : 10;
+ Handle<JSArray> result = Factory::NewJSArray(initial_size * 3);
StackFrameIterator iter;
- int frame_count = 0;
bool seen_caller = false;
- while (!iter.done()) {
- if (ShowFrameInStackTrace(iter.frame(), caller, &seen_caller))
- frame_count++;
- iter.Advance();
- }
- HandleScope scope;
- Handle<JSArray> result = Factory::NewJSArray(frame_count * 3);
- int i = 0;
- seen_caller = false;
- for (iter.Reset(); !iter.done(); iter.Advance()) {
+ int cursor = 0;
+ int frames_seen = 0;
+ while (!iter.done() && frames_seen < limit) {
StackFrame* raw_frame = iter.frame();
if (ShowFrameInStackTrace(raw_frame, caller, &seen_caller)) {
+ frames_seen++;
JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
- result->SetElement(i++, frame->receiver());
- result->SetElement(i++, frame->function());
+ Object* recv = frame->receiver();
+ Object* fun = frame->function();
Address pc = frame->pc();
Address start = frame->code()->address();
- result->SetElement(i++, Smi::FromInt(pc - start));
+ Smi* offset = Smi::FromInt(pc - start);
+ FixedArray* elements = result->elements();
+ if (cursor + 2 < elements->length()) {
+ elements->set(cursor++, recv);
+ elements->set(cursor++, fun);
+ elements->set(cursor++, offset, SKIP_WRITE_BARRIER);
+ } else {
+ HandleScope scope;
+ SetElement(result, cursor++, Handle<Object>(recv));
+ SetElement(result, cursor++, Handle<Object>(fun));
+ SetElement(result, cursor++, Handle<Smi>(offset));
+ }
}
+ iter.Advance();
}
+
+ result->set_length(Smi::FromInt(cursor), SKIP_WRITE_BARRIER);
+
return *result;
}
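
The rewritten Runtime_CollectStackTrace now walks the stack once, stops at the caller-supplied limit, and appends (receiver, function, code offset) triples to an array it grows on demand instead of pre-counting frames in a first pass. Stripped of V8's heap types, the shape of that loop is roughly as follows (illustrative sketch; the ShowFrameInStackTrace filtering is omitted and a plain vector stands in for the frame iterator):

#include <cstdint>
#include <vector>

struct Frame { const void* receiver; const void* function; uintptr_t pc_offset; };

// Collect at most 'limit' frames in a single pass, growing the result on
// demand rather than walking the stack twice.
std::vector<Frame> CollectStackTrace(const std::vector<Frame>& frames, int limit) {
  std::vector<Frame> trace;
  if (limit <= 0) return trace;
  trace.reserve(limit < 10 ? limit : 10);  // small initial guess, grown on demand
  for (size_t i = 0; i < frames.size() && static_cast<int>(trace.size()) < limit; i++) {
    trace.push_back(frames[i]);
  }
  return trace;
}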
diff --git a/V8Binding/v8/src/runtime.h b/V8Binding/v8/src/runtime.h
index 36e274a..cdf21dc 100644
--- a/V8Binding/v8/src/runtime.h
+++ b/V8Binding/v8/src/runtime.h
@@ -172,7 +172,7 @@ namespace internal {
F(FunctionGetPositionForOffset, 2) \
F(FunctionIsAPIFunction, 1) \
F(GetScript, 1) \
- F(CollectStackTrace, 1) \
+ F(CollectStackTrace, 2) \
\
F(ClassOf, 1) \
F(SetCode, 2) \
diff --git a/V8Binding/v8/src/runtime.js b/V8Binding/v8/src/runtime.js
index 25cc5ba..789bfdb 100644
--- a/V8Binding/v8/src/runtime.js
+++ b/V8Binding/v8/src/runtime.js
@@ -161,14 +161,31 @@ function ADD(x) {
// Left operand (this) is already a string.
function STRING_ADD_LEFT(y) {
- if (!IS_STRING(y)) y = %ToString(%ToPrimitive(y, NO_HINT));
+ if (!IS_STRING(y)) {
+ if (IS_STRING_WRAPPER(y)) {
+ y = %_ValueOf(y);
+ } else {
+ y = IS_NUMBER(y)
+ ? %NumberToString(y)
+ : %ToString(%ToPrimitive(y, NO_HINT));
+ }
+ }
return %StringAdd(this, y);
}
// Right operand (y) is already a string.
function STRING_ADD_RIGHT(y) {
- var x = IS_STRING(this) ? this : %ToString(%ToPrimitive(this, NO_HINT));
+ var x = this;
+ if (!IS_STRING(x)) {
+ if (IS_STRING_WRAPPER(x)) {
+ x = %_ValueOf(x);
+ } else {
+ x = IS_NUMBER(x)
+ ? %NumberToString(x)
+ : %ToString(%ToPrimitive(x, NO_HINT));
+ }
+ }
return %StringAdd(x, y);
}
diff --git a/V8Binding/v8/src/serialize.cc b/V8Binding/v8/src/serialize.cc
index 7e38494..f633b06 100644
--- a/V8Binding/v8/src/serialize.cc
+++ b/V8Binding/v8/src/serialize.cc
@@ -712,9 +712,17 @@ void ExternalReferenceTable::PopulateTable() {
UNCLASSIFIED,
13,
"mul_two_doubles");
- Add(ExternalReference::compare_doubles().address(),
+ Add(ExternalReference::double_fp_operation(Token::DIV).address(),
UNCLASSIFIED,
14,
+ "div_two_doubles");
+ Add(ExternalReference::double_fp_operation(Token::MOD).address(),
+ UNCLASSIFIED,
+ 15,
+ "mod_two_doubles");
+ Add(ExternalReference::compare_doubles().address(),
+ UNCLASSIFIED,
+ 16,
"compare_doubles");
}
diff --git a/V8Binding/v8/src/spaces-inl.h b/V8Binding/v8/src/spaces-inl.h
index 2f01164..8b2eab0 100644
--- a/V8Binding/v8/src/spaces-inl.h
+++ b/V8Binding/v8/src/spaces-inl.h
@@ -93,17 +93,21 @@ Address Page::AllocationTop() {
void Page::ClearRSet() {
-#ifndef V8_HOST_ARCH_64_BIT
// This method can be called in all rset states.
memset(RSetStart(), 0, kRSetEndOffset - kRSetStartOffset);
-#endif
}
-// Give an address a (32-bits):
+// Given a 32-bit address, separate its bits into:
// | page address | words (6) | bit offset (5) | pointer alignment (2) |
-// The rset address is computed as:
+// The address of the rset word containing the bit for this word is computed as:
// page_address + words * 4
+// For a 64-bit address, the bits separate into:
+// | page address | quadwords (5) | bit offset (5) | pointer alignment (3) |
+// The address of the rset word containing the bit for this word is computed as:
+// page_address + quadwords * 4 + kRSetOffset.
+// The rset is accessed as 32-bit words, with bit offsets within a 32-bit word,
+// even on the X64 architecture.
Address Page::ComputeRSetBitPosition(Address address, int offset,
uint32_t* bitmask) {
@@ -115,7 +119,7 @@ Address Page::ComputeRSetBitPosition(Address address, int offset,
*bitmask = 1 << (bit_offset % kBitsPerInt);
Address rset_address =
- page->address() + (bit_offset / kBitsPerInt) * kIntSize;
+ page->address() + kRSetOffset + (bit_offset / kBitsPerInt) * kIntSize;
// The remembered set address is either in the normal remembered set range
// of a page or else we have a large object page.
ASSERT((page->RSetStart() <= rset_address && rset_address < page->RSetEnd())
@@ -131,8 +135,10 @@ Address Page::ComputeRSetBitPosition(Address address, int offset,
// of the object:
// (rset_address - page->ObjectAreaStart()).
// Ie, we can just add the object size.
+ // In the X64 architecture, the remembered set ends before the object start,
+ // so we need to add an additional offset, from the rset end to the object start.
ASSERT(HeapObject::FromAddress(address)->IsFixedArray());
- rset_address +=
+ rset_address += kObjectStartOffset - kRSetEndOffset +
FixedArray::SizeFor(Memory::int_at(page->ObjectAreaStart()
+ Array::kLengthOffset));
}
@@ -160,14 +166,9 @@ void Page::UnsetRSet(Address address, int offset) {
bool Page::IsRSetSet(Address address, int offset) {
-#ifdef V8_HOST_ARCH_64_BIT
- // TODO(X64): Reenable when RSet works.
- return true;
-#else // V8_HOST_ARCH_64_BIT
uint32_t bitmask = 0;
Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
return (Memory::uint32_at(rset_address) & bitmask) != 0;
-#endif // V8_HOST_ARCH_64_BIT
}
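
As a concrete illustration of the bit/word arithmetic described in the comment above (values are for an 8K page with 32-bit pointers; the constants mirror the ones in this patch, nothing here is V8 code):

#include <cstdint>
#include <cstdio>

// Worked example of the rset addressing: one bit per pointer-sized word,
// packed into 32-bit rset words starting at page + kRSetOffset.
int main() {
  const uintptr_t kPageSize = 8 * 1024;
  const uintptr_t kPageMask = kPageSize - 1;
  const int kPointerSizeLog2 = 2;      // log2(4-byte pointers)
  const int kBitsPerInt = 32;
  const uintptr_t kRSetOffset = 0;     // 4 * kPointerSize on x64 in this patch

  uintptr_t page = 0x40000000;         // page-aligned address
  uintptr_t slot = page + 0x1234;      // some pointer slot inside the page

  uintptr_t bit_offset = (slot & kPageMask) >> kPointerSizeLog2;  // one bit per word
  uintptr_t rset_word = page + kRSetOffset + (bit_offset / kBitsPerInt) * 4;
  unsigned bitmask = 1u << (bit_offset % kBitsPerInt);

  // Prints: rset word at offset 144, mask 0x2000
  std::printf("rset word at offset %lu, mask %#x\n",
              static_cast<unsigned long>(rset_word - page), bitmask);
  return 0;
}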
diff --git a/V8Binding/v8/src/spaces.h b/V8Binding/v8/src/spaces.h
index 8ce807f..676652b 100644
--- a/V8Binding/v8/src/spaces.h
+++ b/V8Binding/v8/src/spaces.h
@@ -93,13 +93,14 @@ class AllocationInfo;
// bytes are used as remembered set, and the rest of the page is the object
// area.
//
-// Pointers are aligned to the pointer size (4 bytes), only 1 bit is needed
+// Pointers are aligned to the pointer size (4), only 1 bit is needed
// for a pointer in the remembered set. Given an address, its remembered set
// bit position (offset from the start of the page) is calculated by dividing
// its page offset by 32. Therefore, the object area in a page starts at the
// 256th byte (8K/32). Bytes 0 to 255 do not need the remembered set, so that
// the first two words (64 bits) in a page can be used for other purposes.
// TODO(X64): This description only represents the 32-bit layout.
+// On the 64-bit platform, we add an offset to the start of the remembered set.
//
// The mark-compact collector transforms a map pointer into a page index and a
// page offset. The map space can have up to 1024 pages, and 8M bytes (1024 *
@@ -217,15 +218,25 @@ class Page {
// Page size mask.
static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
+ // The offset of the remembered set in a page, in addition to the empty words
+ // formed as the remembered bits of the remembered set itself.
+#ifdef V8_TARGET_ARCH_X64
+ static const int kRSetOffset = 4 * kPointerSize; // Room for four pointers.
+#else
+ static const int kRSetOffset = 0;
+#endif
// The end offset of the remembered set in a page
// (heaps are aligned to pointer size).
- static const int kRSetEndOffset= kPageSize / kBitsPerPointer;
-
- // The start offset of the remembered set in a page.
- static const int kRSetStartOffset = kRSetEndOffset / kBitsPerPointer;
+ static const int kRSetEndOffset = kRSetOffset + kPageSize / kBitsPerPointer;
// The start offset of the object area in a page.
- static const int kObjectStartOffset = kRSetEndOffset;
+ // This needs to be at least (bits per uint32_t) * kBitsPerPointer,
+ // to align start of rset to a uint32_t address.
+ static const int kObjectStartOffset = 256;
+
+ // The start offset of the remembered set in a page.
+ static const int kRSetStartOffset = kRSetOffset +
+ kObjectStartOffset / kBitsPerPointer;
// Object area size in bytes.
static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
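
Plugging these definitions into numbers makes the 32-bit/64-bit difference visible; a small check program (a sketch based only on the constants above) prints the resulting offsets:

#include <cstdio>

// Evaluate the Page layout constants for a 32-bit and a 64-bit target, to
// show where the rset and the object area land in an 8K page.
static void Layout(int pointer_size) {
  const int kPageSize = 8 * 1024;
  const int kBitsPerPointer = 8 * pointer_size;
  const int kRSetOffset = (pointer_size == 8) ? 4 * pointer_size : 0;
  const int kRSetEndOffset = kRSetOffset + kPageSize / kBitsPerPointer;
  const int kObjectStartOffset = 256;
  const int kRSetStartOffset = kRSetOffset + kObjectStartOffset / kBitsPerPointer;
  std::printf("%d-byte ptrs: kRSetOffset=%d kRSetStartOffset=%d "
              "kRSetEndOffset=%d kObjectStartOffset=%d\n",
              pointer_size, kRSetOffset, kRSetStartOffset,
              kRSetEndOffset, kObjectStartOffset);
}

int main() {
  Layout(4);  // ia32: 0, 8, 256, 256
  Layout(8);  // x64: 32, 36, 160, 256
  return 0;
}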
diff --git a/V8Binding/v8/src/string.js b/V8Binding/v8/src/string.js
index 6164eb8..263fac5 100644
--- a/V8Binding/v8/src/string.js
+++ b/V8Binding/v8/src/string.js
@@ -433,7 +433,7 @@ function ApplyReplacementFunction(replace, lastMatchInfo, subject) {
if (m == 1) {
var s = CaptureString(subject, lastMatchInfo, 0);
// Don't call directly to avoid exposing the built-in global object.
- return ToString(replace.call(null, s, index, subject));
+ return replace.call(null, s, index, subject);
}
var parameters = $Array(m + 2);
for (var j = 0; j < m; j++) {
@@ -441,7 +441,7 @@ function ApplyReplacementFunction(replace, lastMatchInfo, subject) {
}
parameters[j] = index;
parameters[j + 1] = subject;
- return ToString(replace.apply(null, parameters));
+ return replace.apply(null, parameters);
}
diff --git a/V8Binding/v8/src/stub-cache.cc b/V8Binding/v8/src/stub-cache.cc
index 1999d13..49b20e2 100644
--- a/V8Binding/v8/src/stub-cache.cc
+++ b/V8Binding/v8/src/stub-cache.cc
@@ -173,7 +173,7 @@ Object* StubCache::ComputeLoadNormal(String* name, JSObject* receiver) {
Object* StubCache::ComputeLoadGlobal(String* name,
- JSGlobalObject* receiver,
+ GlobalObject* receiver,
JSGlobalPropertyCell* cell,
bool is_dont_delete) {
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, NORMAL);
@@ -336,7 +336,7 @@ Object* StubCache::ComputeStoreField(String* name,
Object* StubCache::ComputeStoreGlobal(String* name,
- JSGlobalObject* receiver,
+ GlobalObject* receiver,
JSGlobalPropertyCell* cell) {
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC, NORMAL);
Object* code = receiver->map()->FindInCodeCache(name, flags);
@@ -537,7 +537,7 @@ Object* StubCache::ComputeCallNormal(int argc,
Object* StubCache::ComputeCallGlobal(int argc,
InLoopFlag in_loop,
String* name,
- JSGlobalObject* receiver,
+ GlobalObject* receiver,
JSGlobalPropertyCell* cell,
JSFunction* function) {
Code::Flags flags =
@@ -562,8 +562,8 @@ Object* StubCache::ComputeCallGlobal(int argc,
static Object* GetProbeValue(Code::Flags flags) {
- Dictionary* dictionary = Heap::non_monomorphic_cache();
- int entry = dictionary->FindNumberEntry(flags);
+ NumberDictionary* dictionary = Heap::non_monomorphic_cache();
+ int entry = dictionary->FindEntry(flags);
if (entry != -1) return dictionary->ValueAt(entry);
return Heap::undefined_value();
}
@@ -579,7 +579,7 @@ static Object* ProbeCache(Code::Flags flags) {
Heap::non_monomorphic_cache()->AtNumberPut(flags,
Heap::undefined_value());
if (result->IsFailure()) return result;
- Heap::set_non_monomorphic_cache(Dictionary::cast(result));
+ Heap::set_non_monomorphic_cache(NumberDictionary::cast(result));
return probe;
}
@@ -587,7 +587,7 @@ static Object* ProbeCache(Code::Flags flags) {
static Object* FillCache(Object* code) {
if (code->IsCode()) {
int entry =
- Heap::non_monomorphic_cache()->FindNumberEntry(
+ Heap::non_monomorphic_cache()->FindEntry(
Code::cast(code)->flags());
// The entry must be present see comment in ProbeCache.
ASSERT(entry != -1);
diff --git a/V8Binding/v8/src/stub-cache.h b/V8Binding/v8/src/stub-cache.h
index 577e04b..9abf370 100644
--- a/V8Binding/v8/src/stub-cache.h
+++ b/V8Binding/v8/src/stub-cache.h
@@ -79,7 +79,7 @@ class StubCache : public AllStatic {
static Object* ComputeLoadGlobal(String* name,
- JSGlobalObject* receiver,
+ GlobalObject* receiver,
JSGlobalPropertyCell* cell,
bool is_dont_delete);
@@ -119,7 +119,7 @@ class StubCache : public AllStatic {
Map* transition = NULL);
static Object* ComputeStoreGlobal(String* name,
- JSGlobalObject* receiver,
+ GlobalObject* receiver,
JSGlobalPropertyCell* cell);
static Object* ComputeStoreCallback(String* name,
@@ -164,7 +164,7 @@ class StubCache : public AllStatic {
static Object* ComputeCallGlobal(int argc,
InLoopFlag in_loop,
String* name,
- JSGlobalObject* receiver,
+ GlobalObject* receiver,
JSGlobalPropertyCell* cell,
JSFunction* function);
@@ -197,11 +197,13 @@ class StubCache : public AllStatic {
static void GenerateMiss(MacroAssembler* masm);
// Generate code for probing the stub cache table.
+ // If extra != no_reg, it might be used as an extra scratch register.
static void GenerateProbe(MacroAssembler* masm,
Code::Flags flags,
Register receiver,
Register name,
- Register scratch);
+ Register scratch,
+ Register extra);
enum Table {
kPrimary,
@@ -433,7 +435,7 @@ class LoadStubCompiler: public StubCompiler {
JSObject* holder,
String* name);
- Object* CompileLoadGlobal(JSGlobalObject* object,
+ Object* CompileLoadGlobal(GlobalObject* object,
JSGlobalPropertyCell* holder,
String* name,
bool is_dont_delete);
@@ -479,7 +481,7 @@ class StoreStubCompiler: public StubCompiler {
AccessorInfo* callbacks,
String* name);
Object* CompileStoreInterceptor(JSObject* object, String* name);
- Object* CompileStoreGlobal(JSGlobalObject* object,
+ Object* CompileStoreGlobal(GlobalObject* object,
JSGlobalPropertyCell* holder,
String* name);
@@ -517,7 +519,7 @@ class CallStubCompiler: public StubCompiler {
Object* CompileCallInterceptor(Object* object,
JSObject* holder,
String* name);
- Object* CompileCallGlobal(JSGlobalObject* object,
+ Object* CompileCallGlobal(GlobalObject* object,
JSGlobalPropertyCell* cell,
JSFunction* function,
String* name);
diff --git a/V8Binding/v8/src/version.cc b/V8Binding/v8/src/version.cc
index fd585dc..7e009fb 100644
--- a/V8Binding/v8/src/version.cc
+++ b/V8Binding/v8/src/version.cc
@@ -34,9 +34,9 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 1
#define MINOR_VERSION 2
-#define BUILD_NUMBER 11
+#define BUILD_NUMBER 12
#define PATCH_LEVEL 0
-#define CANDIDATE_VERSION true
+#define CANDIDATE_VERSION false
// Define SONAME to have the SCons build put a specific SONAME into the
// shared library instead of the generic SONAME generated from the V8 version
diff --git a/V8Binding/v8/src/x64/assembler-x64.cc b/V8Binding/v8/src/x64/assembler-x64.cc
index ced7577..2ccfd15 100644
--- a/V8Binding/v8/src/x64/assembler-x64.cc
+++ b/V8Binding/v8/src/x64/assembler-x64.cc
@@ -273,6 +273,7 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
+ ASSERT(desc->instr_size > 0); // Zero-size code objects upset the system.
desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
desc->origin = this;
@@ -470,8 +471,8 @@ void Assembler::immediate_arithmetic_op_32(byte subcode,
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(dst);
- emit(0x83);
if (is_int8(src.value_)) {
+ emit(0x83);
emit_modrm(subcode, dst);
emit(src.value_);
} else if (dst.is(rax)) {
@@ -1560,6 +1561,7 @@ void Assembler::fldz() {
void Assembler::fld_s(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
+ emit_optional_rex_32(adr);
emit(0xD9);
emit_operand(0, adr);
}
@@ -1568,6 +1570,7 @@ void Assembler::fld_s(const Operand& adr) {
void Assembler::fld_d(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
+ emit_optional_rex_32(adr);
emit(0xDD);
emit_operand(0, adr);
}
@@ -1576,6 +1579,7 @@ void Assembler::fld_d(const Operand& adr) {
void Assembler::fstp_s(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
+ emit_optional_rex_32(adr);
emit(0xD9);
emit_operand(3, adr);
}
@@ -1584,6 +1588,7 @@ void Assembler::fstp_s(const Operand& adr) {
void Assembler::fstp_d(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
+ emit_optional_rex_32(adr);
emit(0xDD);
emit_operand(3, adr);
}
@@ -1592,6 +1597,7 @@ void Assembler::fstp_d(const Operand& adr) {
void Assembler::fild_s(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
+ emit_optional_rex_32(adr);
emit(0xDB);
emit_operand(0, adr);
}
@@ -1600,6 +1606,7 @@ void Assembler::fild_s(const Operand& adr) {
void Assembler::fild_d(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
+ emit_optional_rex_32(adr);
emit(0xDF);
emit_operand(5, adr);
}
@@ -1608,6 +1615,7 @@ void Assembler::fild_d(const Operand& adr) {
void Assembler::fistp_s(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
+ emit_optional_rex_32(adr);
emit(0xDB);
emit_operand(3, adr);
}
@@ -1617,6 +1625,7 @@ void Assembler::fisttp_s(const Operand& adr) {
ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE3));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
+ emit_optional_rex_32(adr);
emit(0xDB);
emit_operand(1, adr);
}
@@ -1625,6 +1634,7 @@ void Assembler::fisttp_s(const Operand& adr) {
void Assembler::fist_s(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
+ emit_optional_rex_32(adr);
emit(0xDB);
emit_operand(2, adr);
}
@@ -1633,6 +1643,7 @@ void Assembler::fist_s(const Operand& adr) {
void Assembler::fistp_d(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
+ emit_optional_rex_32(adr);
emit(0xDF);
emit_operand(8, adr);
}
@@ -1687,6 +1698,7 @@ void Assembler::fsub(int i) {
void Assembler::fisub_s(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
+ emit_optional_rex_32(adr);
emit(0xDA);
emit_operand(4, adr);
}
@@ -2010,11 +2022,11 @@ void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
- emit(0xC0 | (dst.code() << 3) | src.code());
+ emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
}
void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
- emit(0xC0 | (dst.code() << 3) | src.code());
+ emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
}
diff --git a/V8Binding/v8/src/x64/assembler-x64.h b/V8Binding/v8/src/x64/assembler-x64.h
index 4e1eeff..d99401b 100644
--- a/V8Binding/v8/src/x64/assembler-x64.h
+++ b/V8Binding/v8/src/x64/assembler-x64.h
@@ -160,6 +160,17 @@ struct XMMRegister {
return code_;
}
+ // Return the high bit of the register code as a 0 or 1. Used often
+ // when constructing the REX prefix byte.
+ int high_bit() const {
+ return code_ >> 3;
+ }
+ // Return the 3 low bits of the register code. Used when encoding registers
+ // in modR/M, SIB, and opcode bytes.
+ int low_bits() const {
+ return code_ & 0x7;
+ }
+
int code_;
};
@@ -522,6 +533,10 @@ class Assembler : public Malloced {
immediate_arithmetic_op_32(0x0, dst, src);
}
+ void addl(const Operand& dst, Immediate src) {
+ immediate_arithmetic_op_32(0x0, dst, src);
+ }
+
void addq(Register dst, const Operand& src) {
arithmetic_op(0x03, dst, src);
}
@@ -539,10 +554,6 @@ class Assembler : public Malloced {
immediate_arithmetic_op(0x0, dst, src);
}
- void addl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x0, dst, src);
- }
-
void cmpb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x7, dst, src);
}
@@ -723,6 +734,10 @@ class Assembler : public Malloced {
immediate_arithmetic_op_32(0x5, dst, src);
}
+ void subl(Register dst, Immediate src) {
+ immediate_arithmetic_op_32(0x5, dst, src);
+ }
+
void testb(Register reg, Immediate mask);
void testb(const Operand& op, Immediate mask);
void testl(Register reg, Immediate mask);
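
high_bit() and low_bits() split a 4-bit x64 register code across the REX prefix and the ModR/M byte; a hypothetical standalone encoder (illustrative only, not V8's emitter) would combine them like this:

#include <cstdint>

// Sketch of how a 4-bit x64 register code is split for encoding: the high bit
// goes into the REX prefix (REX.R / REX.B) and the low three bits into the
// ModR/M byte. Registers 8..15 (r8..r15, xmm8..xmm15) need the REX bits.
struct Reg {
  int code;
  int high_bit() const { return code >> 3; }
  int low_bits() const { return code & 0x7; }
};

// REX prefix 0100WRXB for a register-to-register operation (no index byte).
inline uint8_t RexPrefix(Reg modrm_reg, Reg modrm_rm, bool wide) {
  return 0x40 | (wide ? 0x08 : 0x00)
              | (modrm_reg.high_bit() << 2)
              | modrm_rm.high_bit();
}

// ModR/M byte with mod = 11 (register-direct), built from the low bits only,
// which is what emit_sse_operand now does with low_bits() instead of code().
inline uint8_t ModRM(Reg modrm_reg, Reg modrm_rm) {
  return 0xC0 | (modrm_reg.low_bits() << 3) | modrm_rm.low_bits();
}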
diff --git a/V8Binding/v8/src/x64/codegen-x64.cc b/V8Binding/v8/src/x64/codegen-x64.cc
index 1854aaa..54138a2 100644
--- a/V8Binding/v8/src/x64/codegen-x64.cc
+++ b/V8Binding/v8/src/x64/codegen-x64.cc
@@ -154,8 +154,7 @@ void CodeGenerator::GenCode(FunctionLiteral* function) {
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- // fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- false) {
+ function->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
frame_->SpillAll();
__ int3();
}
@@ -3336,22 +3335,98 @@ void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
+ frame_->SpillAll();
-void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* a) {
- UNIMPLEMENTED();
+ // Make sure the frame is aligned like the OS expects.
+ static const int kFrameAlignment = OS::ActivationFrameAlignment();
+ if (kFrameAlignment > 0) {
+ ASSERT(IsPowerOf2(kFrameAlignment));
+ __ movq(rbx, rsp); // Save in AMD-64 abi callee-saved register.
+ __ and_(rsp, Immediate(-kFrameAlignment));
+ }
+
+ // Call V8::RandomPositiveSmi().
+ __ Call(FUNCTION_ADDR(V8::RandomPositiveSmi), RelocInfo::RUNTIME_ENTRY);
+
+ // Restore stack pointer from callee-saved register rbx.
+ if (kFrameAlignment > 0) {
+ __ movq(rsp, rbx);
+ }
+
+ Result result = allocator_->Allocate(rax);
+ frame_->Push(&result);
}
+
void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
UNIMPLEMENTED();
}
void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
- // TODO(X64): Optimize this like it's done on IA-32.
ASSERT(args->length() == 1);
+ JumpTarget leave, null, function, non_function_constructor;
Load(args->at(0)); // Load the object.
- Result result = frame_->CallRuntime(Runtime::kClassOf, 1);
- frame_->Push(&result);
+ Result obj = frame_->Pop();
+ obj.ToRegister();
+ frame_->Spill(obj.reg());
+
+ // If the object is a smi, we return null.
+ __ testl(obj.reg(), Immediate(kSmiTagMask));
+ null.Branch(zero);
+
+ // Check that the object is a JS object but take special care of JS
+ // functions to make sure they have 'Function' as their class.
+ { Result tmp = allocator()->Allocate();
+ __ movq(obj.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
+ __ movb(tmp.reg(), FieldOperand(obj.reg(), Map::kInstanceTypeOffset));
+ __ cmpb(tmp.reg(), Immediate(FIRST_JS_OBJECT_TYPE));
+ null.Branch(less);
+
+ // As long as JS_FUNCTION_TYPE is the last instance type and it is
+ // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+ // LAST_JS_OBJECT_TYPE.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+ __ cmpb(tmp.reg(), Immediate(JS_FUNCTION_TYPE));
+ function.Branch(equal);
+ }
+
+ // Check if the constructor in the map is a function.
+ { Result tmp = allocator()->Allocate();
+ __ movq(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
+ __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, tmp.reg());
+ non_function_constructor.Branch(not_equal);
+ }
+
+ // The obj register now contains the constructor function. Grab the
+ // instance class name from there.
+ __ movq(obj.reg(),
+ FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
+ __ movq(obj.reg(),
+ FieldOperand(obj.reg(),
+ SharedFunctionInfo::kInstanceClassNameOffset));
+ frame_->Push(&obj);
+ leave.Jump();
+
+ // Functions have class 'Function'.
+ function.Bind();
+ frame_->Push(Factory::function_class_symbol());
+ leave.Jump();
+
+ // Objects with a non-function constructor have class 'Object'.
+ non_function_constructor.Bind();
+ frame_->Push(Factory::Object_symbol());
+ leave.Jump();
+
+ // Non-JS objects have class null.
+ null.Bind();
+ frame_->Push(Factory::null_value());
+
+ // All done.
+ leave.Bind();
}
@@ -4311,8 +4386,8 @@ void CodeGenerator::Comparison(Condition cc,
Register left_reg = left_side.reg();
Register right_reg = right_side.reg();
- __ movq(kScratchRegister, left_side.reg());
- __ or_(kScratchRegister, right_side.reg());
+ __ movq(kScratchRegister, left_reg);
+ __ or_(kScratchRegister, right_reg);
__ testl(kScratchRegister, Immediate(kSmiTagMask));
is_smi.Branch(zero, taken);
// When non-smi, call out to the compare stub.
@@ -4651,8 +4726,6 @@ class DeferredInlineSmiAdd: public DeferredCode {
void DeferredInlineSmiAdd::Generate() {
- // Undo the optimistic add operation and call the shared stub.
- __ subq(dst_, Immediate(value_));
__ push(dst_);
__ push(Immediate(value_));
GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
@@ -4683,8 +4756,6 @@ class DeferredInlineSmiAddReversed: public DeferredCode {
void DeferredInlineSmiAddReversed::Generate() {
- // Undo the optimistic add operation and call the shared stub.
- __ subq(dst_, Immediate(value_));
__ push(Immediate(value_));
__ push(dst_);
GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
@@ -4716,8 +4787,6 @@ class DeferredInlineSmiSub: public DeferredCode {
void DeferredInlineSmiSub::Generate() {
- // Undo the optimistic sub operation and call the shared stub.
- __ addq(dst_, Immediate(value_));
__ push(dst_);
__ push(Immediate(value_));
GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
@@ -4759,9 +4828,6 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
case Token::ADD: {
operand->ToRegister();
frame_->Spill(operand->reg());
-
- // Optimistically add. Call the specialized add stub if the
- // result is not a smi or overflows.
DeferredCode* deferred = NULL;
if (reversed) {
deferred = new DeferredInlineSmiAddReversed(operand->reg(),
@@ -4772,11 +4838,17 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
smi_value,
overwrite_mode);
}
- __ movq(kScratchRegister, value, RelocInfo::NONE);
- __ addl(operand->reg(), kScratchRegister);
- deferred->Branch(overflow);
__ testl(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
+ // A smi currently fits in a 32-bit Immediate.
+ __ addl(operand->reg(), Immediate(smi_value));
+ Label add_success;
+ __ j(no_overflow, &add_success);
+ __ subl(operand->reg(), Immediate(smi_value));
+ __ movsxlq(operand->reg(), operand->reg());
+ deferred->Jump();
+ __ bind(&add_success);
+ __ movsxlq(operand->reg(), operand->reg());
deferred->BindExit();
frame_->Push(operand);
break;
@@ -5062,12 +5134,12 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
__ movq(answer.reg(), left->reg());
switch (op) {
case Token::ADD:
- __ addl(answer.reg(), right->reg()); // Add optimistically.
+ __ addl(answer.reg(), right->reg());
deferred->Branch(overflow);
break;
case Token::SUB:
- __ subl(answer.reg(), right->reg()); // Subtract optimistically.
+ __ subl(answer.reg(), right->reg());
deferred->Branch(overflow);
break;
@@ -5488,27 +5560,18 @@ bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
void UnarySubStub::Generate(MacroAssembler* masm) {
Label slow;
Label done;
- Label try_float;
// Check whether the value is a smi.
__ testl(rax, Immediate(kSmiTagMask));
// TODO(X64): Add inline code that handles floats, as on ia32 platform.
__ j(not_zero, &slow);
-
- // Enter runtime system if the value of the expression is zero
+ // Enter runtime system if the value of the smi is zero
// to make sure that we switch between 0 and -0.
- __ testq(rax, rax);
+ // Also enter it if the value of the smi is Smi::kMinValue
+ __ testl(rax, Immediate(0x7FFFFFFE));
__ j(zero, &slow);
-
- // The value of the expression is a smi that is not zero. Try
- // optimistic subtraction '0 - value'.
- __ movq(rdx, rax);
- __ xor_(rax, rax);
- __ subl(rax, rdx);
- __ j(no_overflow, &done);
- // Restore rax and enter runtime system.
- __ movq(rax, rdx);
-
+ __ neg(rax);
+ __ jmp(&done);
// Enter runtime system.
__ bind(&slow);
__ pop(rcx); // pop return address
@@ -5760,6 +5823,62 @@ void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
void InstanceofStub::Generate(MacroAssembler* masm) {
+ // Implements "value instanceof function" operator.
+ // Expected input state:
+ // rsp[0] : return address
+ // rsp[1] : function pointer
+ // rsp[2] : value
+
+ // Get the object - go slow case if it's a smi.
+ Label slow;
+ __ movq(rax, Operand(rsp, 2 * kPointerSize));
+ __ testl(rax, Immediate(kSmiTagMask));
+ __ j(zero, &slow);
+
+ // Check that the left hand is a JS object. Leave its map in rax.
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
+ __ j(below, &slow);
+ __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE);
+ __ j(above, &slow);
+
+ // Get the prototype of the function.
+ __ movq(rdx, Operand(rsp, 1 * kPointerSize));
+ __ TryGetFunctionPrototype(rdx, rbx, &slow);
+
+ // Check that the function prototype is a JS object.
+ __ testl(rbx, Immediate(kSmiTagMask));
+ __ j(zero, &slow);
+ __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
+ __ j(below, &slow);
+ __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
+ __ j(above, &slow);
+
+ // Register mapping: rax is object map and rbx is function prototype.
+ __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
+
+ // Loop through the prototype chain looking for the function prototype.
+ Label loop, is_instance, is_not_instance;
+ __ Move(kScratchRegister, Factory::null_value());
+ __ bind(&loop);
+ __ cmpq(rcx, rbx);
+ __ j(equal, &is_instance);
+ __ cmpq(rcx, kScratchRegister);
+ __ j(equal, &is_not_instance);
+ __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
+ __ jmp(&loop);
+
+ __ bind(&is_instance);
+ __ xor_(rax, rax);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&is_not_instance);
+ __ movq(rax, Immediate(Smi::FromInt(1)));
+ __ ret(2 * kPointerSize);
+
+ // Slow-case: Go through the JavaScript implementation.
+ __ bind(&slow);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
}
@@ -5910,13 +6029,18 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// rbx: pointer to C function (C callee-saved).
// rbp: frame pointer (restored after C call).
// rsp: stack pointer (restored after C call).
- // rdi: number of arguments including receiver.
+ // r14: number of arguments including receiver (C callee-saved).
// r15: pointer to the first argument (C callee-saved).
// This pointer is reused in LeaveExitFrame(), so it is stored in a
// callee-saved register.
if (do_gc) {
- __ movq(Operand(rsp, 0), rax); // Result.
+ // Pass failure code returned from last attempt as first argument to GC.
+#ifdef __MSVC__
+ __ movq(rcx, rax); // Failure code as first argument.
+#else // ! defined(__MSVC__)
+ __ movq(rdi, rax); // Failure code as first argument.
+#endif
__ movq(kScratchRegister,
FUNCTION_ADDR(Runtime::PerformGC),
RelocInfo::RUNTIME_ENTRY);
@@ -5933,11 +6057,11 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Call C function.
#ifdef __MSVC__
// MSVC passes arguments in rcx, rdx, r8, r9
- __ movq(rcx, rdi); // argc.
+ __ movq(rcx, r14); // argc.
__ movq(rdx, r15); // argv.
#else // ! defined(__MSVC__)
// GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
- // First argument is already in rdi.
+ __ movq(rdi, r14); // argc.
__ movq(rsi, r15); // argv.
#endif
__ call(rbx);
@@ -6079,10 +6203,9 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
// rax: number of arguments including receiver
// rbx: pointer to C function (C callee-saved)
- // rbp: frame pointer (restored after C call)
+ // rbp: frame pointer of calling JS frame (restored after C call)
// rsp: stack pointer (restored after C call)
- // rsi: current context (C callee-saved)
- // rdi: caller's parameter pointer pp (C callee-saved)
+ // rsi: current context (restored)
// NOTE: Invocations of builtins may return failure objects
// instead of a proper result. The builtin entry handles
@@ -6096,16 +6219,16 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
// Enter the exit frame that transitions from JavaScript to C++.
__ EnterExitFrame(frame_type);
- // rax: result parameter for PerformGC, if any (setup below).
- // Holds the result of a previous call to GenerateCore that
- // returned a failure. On next call, it's used as parameter
- // to Runtime::PerformGC.
+ // rax: Holds the context at this point, but should not be used.
+ // On entry to code generated by GenerateCore, it must hold
+ // a failure result if the collect_garbage argument to GenerateCore
+ // is true. This failure result can be the result of code
+ // generated by a previous call to GenerateCore. The value
+ // of rax is then passed to Runtime::PerformGC.
// rbx: pointer to builtin function (C callee-saved).
- // rbp: frame pointer (restored after C call).
- // rsp: stack pointer (restored after C call).
- // rdi: number of arguments including receiver (destroyed by C call).
- // The rdi register is not callee-save in Unix 64-bit ABI, so
- // we must treat it as volatile.
+ // rbp: frame pointer of exit frame (restored after C call).
+ // rsp: stack pointer (restored after C call).
+ // r14: number of arguments including receiver (C callee-saved).
// r15: argv pointer (C callee-saved).
Label throw_out_of_memory_exception;
@@ -6370,17 +6493,18 @@ void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
__ bind(&load_smi_lhs);
ASSERT(kSmiTagSize == 1);
ASSERT(kSmiTag == 0);
- __ lea(kScratchRegister, Operand(lhs, lhs, times_1, 0));
+ __ movsxlq(kScratchRegister, lhs);
+ __ sar(kScratchRegister, Immediate(kSmiTagSize));
__ push(kScratchRegister);
- __ fild_s(Operand(rsp, 0));
+ __ fild_d(Operand(rsp, 0));
__ pop(kScratchRegister);
__ jmp(&done_load_lhs);
__ bind(&load_smi_rhs);
- __ movq(kScratchRegister, rhs);
+ __ movsxlq(kScratchRegister, rhs);
__ sar(kScratchRegister, Immediate(kSmiTagSize));
__ push(kScratchRegister);
- __ fild_s(Operand(rsp, 0));
+ __ fild_d(Operand(rsp, 0));
__ pop(kScratchRegister);
__ bind(&done);
@@ -6389,24 +6513,18 @@ void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
Label* non_float) {
Label test_other, done;
- // Test if both operands are floats or smi -> scratch=k_is_float;
- // Otherwise scratch = k_not_float.
+ // Test if both operands are numbers (heap_numbers or smis).
+ // If not, jump to label non_float.
__ testl(rdx, Immediate(kSmiTagMask));
__ j(zero, &test_other); // argument in rdx is OK
- __ movq(kScratchRegister,
- Factory::heap_number_map(),
- RelocInfo::EMBEDDED_OBJECT);
- __ cmpq(kScratchRegister, FieldOperand(rdx, HeapObject::kMapOffset));
- __ j(not_equal, non_float); // argument in rdx is not a number -> NaN
+ __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), Factory::heap_number_map());
+ __ j(not_equal, non_float); // The argument in rdx is not a number.
__ bind(&test_other);
__ testl(rax, Immediate(kSmiTagMask));
__ j(zero, &done); // argument in rax is OK
- __ movq(kScratchRegister,
- Factory::heap_number_map(),
- RelocInfo::EMBEDDED_OBJECT);
- __ cmpq(kScratchRegister, FieldOperand(rax, HeapObject::kMapOffset));
- __ j(not_equal, non_float); // argument in rax is not a number -> NaN
+ __ Cmp(FieldOperand(rax, HeapObject::kMapOffset), Factory::heap_number_map());
+ __ j(not_equal, non_float); // The argument in rax is not a number.
// Fall-through: Both operands are numbers.
__ bind(&done);
@@ -6433,49 +6551,26 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
// Perform fast-case smi code for the operation (rax <op> rbx) and
// leave result in register rax.
- // Prepare the smi check of both operands by or'ing them together
- // before checking against the smi mask.
+ // Smi check both operands.
__ movq(rcx, rbx);
__ or_(rcx, rax);
+ __ testl(rcx, Immediate(kSmiTagMask));
+ __ j(not_zero, slow);
switch (op_) {
- case Token::ADD:
- __ addl(rax, rbx); // add optimistically
- __ j(overflow, slow);
+ case Token::ADD: {
+ __ addl(rax, rbx);
+ __ j(overflow, slow); // The slow case rereads operands from the stack.
__ movsxlq(rax, rax); // Sign extend eax into rax.
break;
+ }
- case Token::SUB:
- __ subl(rax, rbx); // subtract optimistically
- __ j(overflow, slow);
+ case Token::SUB: {
+ __ subl(rax, rbx);
+ __ j(overflow, slow); // The slow case rereads operands from the stack.
__ movsxlq(rax, rax); // Sign extend eax into rax.
break;
-
- case Token::DIV:
- case Token::MOD:
- // Sign extend rax into rdx:rax
- // (also sign extends eax into edx if eax is Smi).
- __ cqo();
- // Check for 0 divisor.
- __ testq(rbx, rbx);
- __ j(zero, slow);
- break;
-
- default:
- // Fall-through to smi check.
- break;
- }
-
- // Perform the actual smi check.
- ASSERT(kSmiTag == 0); // adjust zero check if not the case
- __ testl(rcx, Immediate(kSmiTagMask));
- __ j(not_zero, slow);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- // Do nothing here.
- break;
+ }
case Token::MUL:
// If the smi tag is 0 we can just leave the tag on one operand.
@@ -6492,6 +6587,12 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
break;
case Token::DIV:
+ // Sign extend rax into rdx:rax
+ // (also sign extends eax into edx if eax is Smi).
+ __ cqo();
+ // Check for 0 divisor.
+ __ testq(rbx, rbx);
+ __ j(zero, slow);
// Divide rdx:rax by rbx (where rdx:rax is equivalent to the smi in eax).
__ idiv(rbx);
// Check that the remainder is zero.
@@ -6513,6 +6614,12 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
break;
case Token::MOD:
+ // Sign extend rax into rdx:rax
+ // (also sign extends eax into edx if eax is Smi).
+ __ cqo();
+ // Check for 0 divisor.
+ __ testq(rbx, rbx);
+ __ j(zero, slow);
// Divide rdx:rax by rbx.
__ idiv(rbx);
// Check for negative zero result.
@@ -6680,6 +6787,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
__ testl(rax, Immediate(1));
__ j(not_zero, &operand_conversion_failure);
} else {
+ // TODO(X64): Verify that SSE3 is always supported, drop this code.
// Check if right operand is int32.
__ fist_s(Operand(rsp, 0 * kPointerSize));
__ fild_s(Operand(rsp, 0 * kPointerSize));
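
The new InstanceofStub encodes the standard prototype-chain walk: start from the value's map prototype and follow maps until the function's prototype object or null is reached (the stub itself returns Smi 0 for "is an instance" and 1 otherwise, and the smi/type checks route to the slow case). In plain C++ terms, with hypothetical Object/Map structs rather than V8's object layout:

// Illustrative sketch of the loop InstanceofStub::Generate emits.
struct Map;
struct Object { Map* map; };
struct Map { Object* prototype; };

static bool IsInstanceOf(Object* value, Object* function_prototype) {
  Object* current = value->map->prototype;
  while (current != 0) {  // 0 stands in for V8's null_value sentinel
    if (current == function_prototype) return true;
    current = current->map->prototype;
  }
  return false;
}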
diff --git a/V8Binding/v8/src/x64/macro-assembler-x64.cc b/V8Binding/v8/src/x64/macro-assembler-x64.cc
index 780fcdb..1a0b119 100644
--- a/V8Binding/v8/src/x64/macro-assembler-x64.cc
+++ b/V8Binding/v8/src/x64/macro-assembler-x64.cc
@@ -399,6 +399,51 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
}
+void MacroAssembler::TryGetFunctionPrototype(Register function,
+ Register result,
+ Label* miss) {
+ // Check that the receiver isn't a smi.
+ testl(function, Immediate(kSmiTagMask));
+ j(zero, miss);
+
+ // Check that the function really is a function.
+ CmpObjectType(function, JS_FUNCTION_TYPE, result);
+ j(not_equal, miss);
+
+ // Make sure that the function has an instance prototype.
+ Label non_instance;
+ testb(FieldOperand(result, Map::kBitFieldOffset),
+ Immediate(1 << Map::kHasNonInstancePrototype));
+ j(not_zero, &non_instance);
+
+ // Get the prototype or initial map from the function.
+ movq(result,
+ FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // If the prototype or initial map is the hole, don't return it and
+ // simply miss the cache instead. This will allow us to allocate a
+ // prototype object on-demand in the runtime system.
+ Cmp(result, Factory::the_hole_value());
+ j(equal, miss);
+
+ // If the function does not have an initial map, we're done.
+ Label done;
+ CmpObjectType(result, MAP_TYPE, kScratchRegister);
+ j(not_equal, &done);
+
+ // Get the prototype from the initial map.
+ movq(result, FieldOperand(result, Map::kPrototypeOffset));
+ jmp(&done);
+
+ // Non-instance prototype: Fetch prototype from constructor field
+ // in initial map.
+ bind(&non_instance);
+ movq(result, FieldOperand(result, Map::kConstructorOffset));
+
+ // All done.
+ bind(&done);
+}
+
void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
if (FLAG_native_code_counters && counter->Enabled()) {
@@ -704,6 +749,7 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
// Setup the frame structure on the stack.
+ // All constants are relative to the frame pointer of the exit frame.
ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
@@ -718,7 +764,7 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
// Save the frame pointer and the context in top.
ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
ExternalReference context_address(Top::k_context_address);
- movq(rdi, rax); // Backup rax before we use it.
+ movq(r14, rax); // Backup rax before we use it.
movq(rax, rbp);
store_rax(c_entry_fp_address);
@@ -728,7 +774,7 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
// Setup argv in callee-saved register r15. It is reused in LeaveExitFrame,
// so it must be retained across the C-call.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
- lea(r15, Operand(rbp, rdi, times_pointer_size, offset));
+ lea(r15, Operand(rbp, r14, times_pointer_size, offset));
#ifdef ENABLE_DEBUGGER_SUPPORT
// Save the state of all registers to the stack from the memory
diff --git a/V8Binding/v8/src/x64/macro-assembler-x64.h b/V8Binding/v8/src/x64/macro-assembler-x64.h
index c298a25..f13a7ad 100644
--- a/V8Binding/v8/src/x64/macro-assembler-x64.h
+++ b/V8Binding/v8/src/x64/macro-assembler-x64.h
@@ -176,6 +176,7 @@ class MacroAssembler: public Assembler {
// Compare object type for heap object.
// Incoming register is heap_object and outgoing register is map.
+ // They may be the same register, and may be kScratchRegister.
void CmpObjectType(Register heap_object, InstanceType type, Register map);
// Compare instance type for map.
@@ -237,11 +238,10 @@ class MacroAssembler: public Assembler {
// Try to get function prototype of a function and puts the value in
// the result register. Checks that the function really is a
// function and jumps to the miss label if the fast checks fail. The
- // function register will be untouched; the other registers may be
+ // function register will be untouched; the other register may be
// clobbered.
void TryGetFunctionPrototype(Register function,
Register result,
- Register scratch,
Label* miss);
// Generates code for reporting that an illegal operation has
@@ -384,12 +384,12 @@ extern void LogGeneratedCodeCoverage(const char* file_line);
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) { \
- byte* x64_coverage_function = \
+ byte* x64_coverage_function = \
reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
masm->pushfd(); \
masm->pushad(); \
masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
- masm->call(x64_coverage_function, RelocInfo::RUNTIME_ENTRY); \
+ masm->call(x64_coverage_function, RelocInfo::RUNTIME_ENTRY); \
masm->pop(rax); \
masm->popad(); \
masm->popfd(); \
diff --git a/V8Binding/v8/src/x64/stub-cache-x64.cc b/V8Binding/v8/src/x64/stub-cache-x64.cc
index f2e0e19..1a24694 100644
--- a/V8Binding/v8/src/x64/stub-cache-x64.cc
+++ b/V8Binding/v8/src/x64/stub-cache-x64.cc
@@ -65,7 +65,7 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* a,
-Object* CallStubCompiler::CompileCallGlobal(JSGlobalObject* object,
+Object* CallStubCompiler::CompileCallGlobal(GlobalObject* object,
JSGlobalPropertyCell* cell,
JSFunction* function,
String* name) {
@@ -109,7 +109,7 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* a,
}
-Object* LoadStubCompiler::CompileLoadGlobal(JSGlobalObject* object,
+Object* LoadStubCompiler::CompileLoadGlobal(GlobalObject* object,
JSGlobalPropertyCell* cell,
String* name,
bool is_dont_delete) {
@@ -141,7 +141,7 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* a, String* b) {
}
-Object* StoreStubCompiler::CompileStoreGlobal(JSGlobalObject* object,
+Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
JSGlobalPropertyCell* cell,
String* name) {
UNIMPLEMENTED();
diff --git a/V8Binding/v8/test/cctest/test-api.cc b/V8Binding/v8/test/cctest/test-api.cc
index 426b720..e77238b 100644
--- a/V8Binding/v8/test/cctest/test-api.cc
+++ b/V8Binding/v8/test/cctest/test-api.cc
@@ -7024,3 +7024,20 @@ THREADED_TEST(GetCallingContext) {
calling_context1.Clear();
calling_context2.Clear();
}
+
+
+// Check that a variable declaration with no explicit initialization
+// value does not shadow an existing property in the prototype chain.
+//
+// This is consistent with Firefox and Safari.
+//
+// See http://crbug.com/12548.
+THREADED_TEST(InitGlobalVarInProtoChain) {
+ v8::HandleScope scope;
+ LocalContext context;
+ // Introduce a variable in the prototype chain.
+ CompileRun("__proto__.x = 42");
+ v8::Handle<v8::Value> result = CompileRun("var x; x");
+ CHECK(!result->IsUndefined());
+ CHECK_EQ(42, result->Int32Value());
+}
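The JavaScript semantics the new test pins down, reduced to a plain-script sketch (illustrative only; it assumes a shell where the global object's prototype is reachable through __proto__, as in the CompileRun calls above):

// A var declaration with no initializer must not shadow a property
// inherited through the global object's prototype chain.
this.__proto__.x = 42;  // the property lives on the prototype, not the global
var x;                  // declaration only, no assignment
// Reading x should now yield 42, not undefined.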
diff --git a/V8Binding/v8/test/cctest/test-decls.cc b/V8Binding/v8/test/cctest/test-decls.cc
index ecdad2e..6c48f64 100644
--- a/V8Binding/v8/test/cctest/test-decls.cc
+++ b/V8Binding/v8/test/cctest/test-decls.cc
@@ -534,10 +534,10 @@ TEST(ExistsInPrototype) {
{ ExistsInPrototypeContext context;
context.Check("var x; x",
- 0,
+ 1, // get
0,
1, // declaration
- EXPECT_RESULT, Undefined());
+ EXPECT_EXCEPTION);
}
{ ExistsInPrototypeContext context;
diff --git a/V8Binding/v8/test/cctest/test-regexp.cc b/V8Binding/v8/test/cctest/test-regexp.cc
index 8761cf5..62597fb 100644
--- a/V8Binding/v8/test/cctest/test-regexp.cc
+++ b/V8Binding/v8/test/cctest/test-regexp.cc
@@ -204,8 +204,8 @@ TEST(Parser) {
CHECK_PARSE_EQ("(?=a){9,10}a", "(: (-> + 'a') 'a')");
CHECK_PARSE_EQ("(?!a)?a", "'a'");
CHECK_PARSE_EQ("\\1(a)", "(^ 'a')");
- CHECK_PARSE_EQ("(?!(a))\\1", "(-> - (^ 'a'))");
- CHECK_PARSE_EQ("(?!\\1(a\\1)\\1)\\1", "(-> - (: (^ 'a') (<- 1)))");
+ CHECK_PARSE_EQ("(?!(a))\\1", "(: (-> - (^ 'a')) (<- 1))");
+ CHECK_PARSE_EQ("(?!\\1(a\\1)\\1)\\1", "(: (-> - (: (^ 'a') (<- 1))) (<- 1))");
CHECK_PARSE_EQ("[\\0]", "[\\x00]");
CHECK_PARSE_EQ("[\\11]", "[\\x09]");
CHECK_PARSE_EQ("[\\11a]", "[\\x09 a]");
diff --git a/V8Binding/v8/test/mjsunit/call-non-function.js b/V8Binding/v8/test/mjsunit/call-non-function.js
index 8ed5ccb..9fe3b0f 100644
--- a/V8Binding/v8/test/mjsunit/call-non-function.js
+++ b/V8Binding/v8/test/mjsunit/call-non-function.js
@@ -51,4 +51,13 @@ TryCall(1234);
TryCall("hest");
-
+// Make sure that calling a non-function global doesn't crash the
+// system while building the IC for it.
+var NonFunction = 42;
+function WillThrow() {
+ NonFunction();
+}
+assertThrows(WillThrow);
+assertThrows(WillThrow);
+assertThrows(WillThrow);
+assertThrows(WillThrow);
diff --git a/V8Binding/v8/test/mjsunit/debug-sourceinfo.js b/V8Binding/v8/test/mjsunit/debug-sourceinfo.js
index 0235796..ddf80dc 100644
--- a/V8Binding/v8/test/mjsunit/debug-sourceinfo.js
+++ b/V8Binding/v8/test/mjsunit/debug-sourceinfo.js
@@ -1,352 +1,352 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-debug-as debug
-// For this test to work this file MUST have CR LF line endings.
-function a() { b(); };
-function b() {
- c(true);
-};
- function c(x) {
- if (x) {
- return 1;
- } else {
- return 1;
- }
- };
-function d(x) {
- x = 1 ;
- x = 2 ;
- x = 3 ;
- x = 4 ;
- x = 5 ;
- x = 6 ;
- x = 7 ;
- x = 8 ;
- x = 9 ;
- x = 10;
- x = 11;
- x = 12;
- x = 13;
- x = 14;
- x = 15;
-}
-
-// Get the Debug object exposed from the debug context global object.
-Debug = debug.Debug
-
-// This is the number of comment lines above the first test function.
-var comment_lines = 29;
-
-// This is the last position in the entire file (note: this equals
-// file size of <debug-sourceinfo.js> - 1, since starting at 0).
-var last_position = 14312;
-// This is the last line of entire file (note: starting at 0).
-var last_line = 351;
-// This is the last column of last line (note: starting at 0 and +2, due
-// to trailing <CR><LF>).
-var last_column = 2;
-
-// This magic number is the length or the first line comment (actually number
-// of characters before 'function a(...'.
-var comment_line_length = 1726;
-var start_a = 10 + comment_line_length;
-var start_b = 37 + comment_line_length;
-var start_c = 71 + comment_line_length;
-var start_d = 163 + comment_line_length;
-
-// The position of the first line of d(), i.e. "x = 1 ;".
-var start_code_d = start_d + 7;
-// The line # of the first line of d() (note: starting at 0).
-var start_line_d = 41;
-var line_length_d = 11;
-var num_lines_d = 15;
-
-assertEquals(start_a, Debug.sourcePosition(a));
-assertEquals(start_b, Debug.sourcePosition(b));
-assertEquals(start_c, Debug.sourcePosition(c));
-assertEquals(start_d, Debug.sourcePosition(d));
-
-var script = Debug.findScript(a);
-assertTrue(script.data === Debug.findScript(b).data);
-assertTrue(script.data === Debug.findScript(c).data);
-assertTrue(script.data === Debug.findScript(d).data);
-assertTrue(script.source === Debug.findScript(b).source);
-assertTrue(script.source === Debug.findScript(c).source);
-assertTrue(script.source === Debug.findScript(d).source);
-
-// Test that when running through source positions the position, line and
-// column progresses as expected.
-var position;
-var line;
-var column;
-for (var p = 0; p < 100; p++) {
- var location = script.locationFromPosition(p);
- if (p > 0) {
- assertEquals(position + 1, location.position);
- if (line == location.line) {
- assertEquals(column + 1, location.column);
- } else {
- assertEquals(line + 1, location.line);
- assertEquals(0, location.column);
- }
- } else {
- assertEquals(0, location.position);
- assertEquals(0, location.line);
- assertEquals(0, location.column);
- }
-
- // Remember the location.
- position = location.position;
- line = location.line;
- column = location.column;
-}
-
-// Every line of d() is the same length. Verify we can loop through all
-// positions and find the right line # for each.
-var p = start_code_d;
-for (line = 0; line < num_lines_d; line++) {
- for (column = 0; column < line_length_d; column++) {
- var location = script.locationFromPosition(p);
- assertEquals(p, location.position);
- assertEquals(start_line_d + line, location.line);
- assertEquals(column, location.column);
- p++;
- }
-}
-
-// Test first position.
-assertEquals(0, script.locationFromPosition(0).position);
-assertEquals(0, script.locationFromPosition(0).line);
-assertEquals(0, script.locationFromPosition(0).column);
-
-// Test second position.
-assertEquals(1, script.locationFromPosition(1).position);
-assertEquals(0, script.locationFromPosition(1).line);
-assertEquals(1, script.locationFromPosition(1).column);
-
-// Test first position in function a().
-assertEquals(start_a, script.locationFromPosition(start_a).position);
-assertEquals(0, script.locationFromPosition(start_a).line - comment_lines);
-assertEquals(10, script.locationFromPosition(start_a).column);
-
-// Test first position in function b().
-assertEquals(start_b, script.locationFromPosition(start_b).position);
-assertEquals(1, script.locationFromPosition(start_b).line - comment_lines);
-assertEquals(13, script.locationFromPosition(start_b).column);
-
-// Test first position in function c().
-assertEquals(start_c, script.locationFromPosition(start_c).position);
-assertEquals(4, script.locationFromPosition(start_c).line - comment_lines);
-assertEquals(12, script.locationFromPosition(start_c).column);
-
-// Test first position in function d().
-assertEquals(start_d, script.locationFromPosition(start_d).position);
-assertEquals(11, script.locationFromPosition(start_d).line - comment_lines);
-assertEquals(10, script.locationFromPosition(start_d).column);
-
-// Test first line.
-assertEquals(0, script.locationFromLine().position);
-assertEquals(0, script.locationFromLine().line);
-assertEquals(0, script.locationFromLine().column);
-assertEquals(0, script.locationFromLine(0).position);
-assertEquals(0, script.locationFromLine(0).line);
-assertEquals(0, script.locationFromLine(0).column);
-
-// Test first line column 1.
-assertEquals(1, script.locationFromLine(0, 1).position);
-assertEquals(0, script.locationFromLine(0, 1).line);
-assertEquals(1, script.locationFromLine(0, 1).column);
-
-// Test first line offset 1.
-assertEquals(1, script.locationFromLine(0, 0, 1).position);
-assertEquals(0, script.locationFromLine(0, 0, 1).line);
-assertEquals(1, script.locationFromLine(0, 0, 1).column);
-
-// Test offset function a().
-assertEquals(start_a, script.locationFromLine(void 0, void 0, start_a).position);
-assertEquals(0, script.locationFromLine(void 0, void 0, start_a).line - comment_lines);
-assertEquals(10, script.locationFromLine(void 0, void 0, start_a).column);
-assertEquals(start_a, script.locationFromLine(0, void 0, start_a).position);
-assertEquals(0, script.locationFromLine(0, void 0, start_a).line - comment_lines);
-assertEquals(10, script.locationFromLine(0, void 0, start_a).column);
-assertEquals(start_a, script.locationFromLine(0, 0, start_a).position);
-assertEquals(0, script.locationFromLine(0, 0, start_a).line - comment_lines);
-assertEquals(10, script.locationFromLine(0, 0, start_a).column);
-
-// Test second line offset function a().
-assertEquals(start_a + 14, script.locationFromLine(1, 0, start_a).position);
-assertEquals(1, script.locationFromLine(1, 0, start_a).line - comment_lines);
-assertEquals(0, script.locationFromLine(1, 0, start_a).column);
-
-// Test second line column 2 offset function a().
-assertEquals(start_a + 14 + 2, script.locationFromLine(1, 2, start_a).position);
-assertEquals(1, script.locationFromLine(1, 2, start_a).line - comment_lines);
-assertEquals(2, script.locationFromLine(1, 2, start_a).column);
-
-// Test offset function b().
-assertEquals(start_b, script.locationFromLine(0, 0, start_b).position);
-assertEquals(1, script.locationFromLine(0, 0, start_b).line - comment_lines);
-assertEquals(13, script.locationFromLine(0, 0, start_b).column);
-
-// Test second line offset function b().
-assertEquals(start_b + 6, script.locationFromLine(1, 0, start_b).position);
-assertEquals(2, script.locationFromLine(1, 0, start_b).line - comment_lines);
-assertEquals(0, script.locationFromLine(1, 0, start_b).column);
-
-// Test second line column 11 offset function b().
-assertEquals(start_b + 6 + 11, script.locationFromLine(1, 11, start_b).position);
-assertEquals(2, script.locationFromLine(1, 11, start_b).line - comment_lines);
-assertEquals(11, script.locationFromLine(1, 11, start_b).column);
-
-// Test second line column 12 offset function b. Second line in b is 11 long
-// using column 12 wraps to next line.
-assertEquals(start_b + 6 + 12, script.locationFromLine(1, 12, start_b).position);
-assertEquals(3, script.locationFromLine(1, 12, start_b).line - comment_lines);
-assertEquals(0, script.locationFromLine(1, 12, start_b).column);
-
-// Test the Debug.findSourcePosition which wraps SourceManager.
-assertEquals(0 + start_a, Debug.findFunctionSourceLocation(a, 0, 0).position);
-assertEquals(0 + start_b, Debug.findFunctionSourceLocation(b, 0, 0).position);
-assertEquals(6 + start_b, Debug.findFunctionSourceLocation(b, 1, 0).position);
-assertEquals(8 + start_b, Debug.findFunctionSourceLocation(b, 1, 2).position);
-assertEquals(18 + start_b, Debug.findFunctionSourceLocation(b, 2, 0).position);
-assertEquals(0 + start_c, Debug.findFunctionSourceLocation(c, 0, 0).position);
-assertEquals(7 + start_c, Debug.findFunctionSourceLocation(c, 1, 0).position);
-assertEquals(21 + start_c, Debug.findFunctionSourceLocation(c, 2, 0).position);
-assertEquals(38 + start_c, Debug.findFunctionSourceLocation(c, 3, 0).position);
-assertEquals(52 + start_c, Debug.findFunctionSourceLocation(c, 4, 0).position);
-assertEquals(69 + start_c, Debug.findFunctionSourceLocation(c, 5, 0).position);
-assertEquals(76 + start_c, Debug.findFunctionSourceLocation(c, 6, 0).position);
-assertEquals(0 + start_d, Debug.findFunctionSourceLocation(d, 0, 0).position);
-assertEquals(7 + start_d, Debug.findFunctionSourceLocation(d, 1, 0).position);
-for (i = 1; i <= num_lines_d; i++) {
- assertEquals(7 + (i * line_length_d) + start_d, Debug.findFunctionSourceLocation(d, (i + 1), 0).position);
-}
-assertEquals(175 + start_d, Debug.findFunctionSourceLocation(d, 17, 0).position);
-
-// Make sure invalid inputs work properly.
-assertEquals(0, script.locationFromPosition(-1).line);
-assertEquals(null, script.locationFromPosition(last_position + 1));
-
-// Test last position.
-assertEquals(last_position, script.locationFromPosition(last_position).position);
-assertEquals(last_line, script.locationFromPosition(last_position).line);
-assertEquals(last_column, script.locationFromPosition(last_position).column);
-
-// Test source line and restriction. All the following tests start from line 1
-// column 2 in function b, which is the call to c.
-// c(true);
-// ^
-
-var location;
-
-location = script.locationFromLine(1, 0, start_b);
-assertEquals(' c(true);', location.sourceText());
-
-result = ['c', ' c', ' c(', ' c(', ' c(t']
-for (var i = 1; i <= 5; i++) {
- location = script.locationFromLine(1, 2, start_b);
- location.restrict(i);
- assertEquals(result[i - 1], location.sourceText());
-}
-
-location = script.locationFromLine(1, 2, start_b);
-location.restrict(1, 0);
-assertEquals('c', location.sourceText());
-
-location = script.locationFromLine(1, 2, start_b);
-location.restrict(2, 0);
-assertEquals('c(', location.sourceText());
-
-location = script.locationFromLine(1, 2, start_b);
-location.restrict(2, 1);
-assertEquals(' c', location.sourceText());
-
-location = script.locationFromLine(1, 2, start_b);
-location.restrict(2, 2);
-assertEquals(' c', location.sourceText());
-
-location = script.locationFromLine(1, 2, start_b);
-location.restrict(2, 3);
-assertEquals(' c', location.sourceText());
-
-location = script.locationFromLine(1, 2, start_b);
-location.restrict(3, 1);
-assertEquals(' c(', location.sourceText());
-
-location = script.locationFromLine(1, 2, start_b);
-location.restrict(5, 0);
-assertEquals('c(tru', location.sourceText());
-
-location = script.locationFromLine(1, 2, start_b);
-location.restrict(5, 2);
-assertEquals(' c(t', location.sourceText());
-
-location = script.locationFromLine(1, 2, start_b);
-location.restrict(5, 4);
-assertEquals(' c(t', location.sourceText());
-
-// All the following tests start from line 1 column 10 in function b, which is
-// the final character.
-// c(true);
-// ^
-
-location = script.locationFromLine(1, 10, start_b);
-location.restrict(5, 0);
-assertEquals('rue);', location.sourceText());
-
-location = script.locationFromLine(1, 10, start_b);
-location.restrict(7, 0);
-assertEquals('(true);', location.sourceText());
-
-// All the following tests start from line 1 column 0 in function b, which is
-// the first character.
-// c(true);
-//^
-
-location = script.locationFromLine(1, 0, start_b);
-location.restrict(5, 0);
-assertEquals(' c(t', location.sourceText());
-
-location = script.locationFromLine(1, 0, start_b);
-location.restrict(5, 4);
-assertEquals(' c(t', location.sourceText());
-
-location = script.locationFromLine(1, 0, start_b);
-location.restrict(7, 0);
-assertEquals(' c(tru', location.sourceText());
-
-location = script.locationFromLine(1, 0, start_b);
-location.restrict(7, 6);
-assertEquals(' c(tru', location.sourceText());
-
-// Test that script.sourceLine(line) works.
-for (line = 0; line < num_lines_d; line++) {
- var line_content_regexp = new RegExp(" x = " + (line + 1));
- assertTrue(line_content_regexp.test(script.sourceLine(start_line_d + line)));
-}
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// For this test to work this file MUST have CR LF line endings.
+function a() { b(); };
+function b() {
+ c(true);
+};
+ function c(x) {
+ if (x) {
+ return 1;
+ } else {
+ return 1;
+ }
+ };
+function d(x) {
+ x = 1 ;
+ x = 2 ;
+ x = 3 ;
+ x = 4 ;
+ x = 5 ;
+ x = 6 ;
+ x = 7 ;
+ x = 8 ;
+ x = 9 ;
+ x = 10;
+ x = 11;
+ x = 12;
+ x = 13;
+ x = 14;
+ x = 15;
+}
+
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+// This is the number of comment lines above the first test function.
+var comment_lines = 29;
+
+// This is the last position in the entire file (note: this equals
+// file size of <debug-sourceinfo.js> - 1, since starting at 0).
+var last_position = 14312;
+// This is the last line of entire file (note: starting at 0).
+var last_line = 351;
+// This is the last column of last line (note: starting at 0 and +2, due
+// to trailing <CR><LF>).
+var last_column = 2;
+
+// This magic number is the length or the first line comment (actually number
+// of characters before 'function a(...'.
+var comment_line_length = 1726;
+var start_a = 10 + comment_line_length;
+var start_b = 37 + comment_line_length;
+var start_c = 71 + comment_line_length;
+var start_d = 163 + comment_line_length;
+
+// The position of the first line of d(), i.e. "x = 1 ;".
+var start_code_d = start_d + 7;
+// The line # of the first line of d() (note: starting at 0).
+var start_line_d = 41;
+var line_length_d = 11;
+var num_lines_d = 15;
+
+assertEquals(start_a, Debug.sourcePosition(a));
+assertEquals(start_b, Debug.sourcePosition(b));
+assertEquals(start_c, Debug.sourcePosition(c));
+assertEquals(start_d, Debug.sourcePosition(d));
+
+var script = Debug.findScript(a);
+assertTrue(script.data === Debug.findScript(b).data);
+assertTrue(script.data === Debug.findScript(c).data);
+assertTrue(script.data === Debug.findScript(d).data);
+assertTrue(script.source === Debug.findScript(b).source);
+assertTrue(script.source === Debug.findScript(c).source);
+assertTrue(script.source === Debug.findScript(d).source);
+
+// Test that when running through source positions the position, line and
+// column progresses as expected.
+var position;
+var line;
+var column;
+for (var p = 0; p < 100; p++) {
+ var location = script.locationFromPosition(p);
+ if (p > 0) {
+ assertEquals(position + 1, location.position);
+ if (line == location.line) {
+ assertEquals(column + 1, location.column);
+ } else {
+ assertEquals(line + 1, location.line);
+ assertEquals(0, location.column);
+ }
+ } else {
+ assertEquals(0, location.position);
+ assertEquals(0, location.line);
+ assertEquals(0, location.column);
+ }
+
+ // Remember the location.
+ position = location.position;
+ line = location.line;
+ column = location.column;
+}
+
+// Every line of d() is the same length. Verify we can loop through all
+// positions and find the right line # for each.
+var p = start_code_d;
+for (line = 0; line < num_lines_d; line++) {
+ for (column = 0; column < line_length_d; column++) {
+ var location = script.locationFromPosition(p);
+ assertEquals(p, location.position);
+ assertEquals(start_line_d + line, location.line);
+ assertEquals(column, location.column);
+ p++;
+ }
+}
+
+// Test first position.
+assertEquals(0, script.locationFromPosition(0).position);
+assertEquals(0, script.locationFromPosition(0).line);
+assertEquals(0, script.locationFromPosition(0).column);
+
+// Test second position.
+assertEquals(1, script.locationFromPosition(1).position);
+assertEquals(0, script.locationFromPosition(1).line);
+assertEquals(1, script.locationFromPosition(1).column);
+
+// Test first position in function a().
+assertEquals(start_a, script.locationFromPosition(start_a).position);
+assertEquals(0, script.locationFromPosition(start_a).line - comment_lines);
+assertEquals(10, script.locationFromPosition(start_a).column);
+
+// Test first position in function b().
+assertEquals(start_b, script.locationFromPosition(start_b).position);
+assertEquals(1, script.locationFromPosition(start_b).line - comment_lines);
+assertEquals(13, script.locationFromPosition(start_b).column);
+
+// Test first position in function c().
+assertEquals(start_c, script.locationFromPosition(start_c).position);
+assertEquals(4, script.locationFromPosition(start_c).line - comment_lines);
+assertEquals(12, script.locationFromPosition(start_c).column);
+
+// Test first position in function d().
+assertEquals(start_d, script.locationFromPosition(start_d).position);
+assertEquals(11, script.locationFromPosition(start_d).line - comment_lines);
+assertEquals(10, script.locationFromPosition(start_d).column);
+
+// Test first line.
+assertEquals(0, script.locationFromLine().position);
+assertEquals(0, script.locationFromLine().line);
+assertEquals(0, script.locationFromLine().column);
+assertEquals(0, script.locationFromLine(0).position);
+assertEquals(0, script.locationFromLine(0).line);
+assertEquals(0, script.locationFromLine(0).column);
+
+// Test first line column 1.
+assertEquals(1, script.locationFromLine(0, 1).position);
+assertEquals(0, script.locationFromLine(0, 1).line);
+assertEquals(1, script.locationFromLine(0, 1).column);
+
+// Test first line offset 1.
+assertEquals(1, script.locationFromLine(0, 0, 1).position);
+assertEquals(0, script.locationFromLine(0, 0, 1).line);
+assertEquals(1, script.locationFromLine(0, 0, 1).column);
+
+// Test offset function a().
+assertEquals(start_a, script.locationFromLine(void 0, void 0, start_a).position);
+assertEquals(0, script.locationFromLine(void 0, void 0, start_a).line - comment_lines);
+assertEquals(10, script.locationFromLine(void 0, void 0, start_a).column);
+assertEquals(start_a, script.locationFromLine(0, void 0, start_a).position);
+assertEquals(0, script.locationFromLine(0, void 0, start_a).line - comment_lines);
+assertEquals(10, script.locationFromLine(0, void 0, start_a).column);
+assertEquals(start_a, script.locationFromLine(0, 0, start_a).position);
+assertEquals(0, script.locationFromLine(0, 0, start_a).line - comment_lines);
+assertEquals(10, script.locationFromLine(0, 0, start_a).column);
+
+// Test second line offset function a().
+assertEquals(start_a + 14, script.locationFromLine(1, 0, start_a).position);
+assertEquals(1, script.locationFromLine(1, 0, start_a).line - comment_lines);
+assertEquals(0, script.locationFromLine(1, 0, start_a).column);
+
+// Test second line column 2 offset function a().
+assertEquals(start_a + 14 + 2, script.locationFromLine(1, 2, start_a).position);
+assertEquals(1, script.locationFromLine(1, 2, start_a).line - comment_lines);
+assertEquals(2, script.locationFromLine(1, 2, start_a).column);
+
+// Test offset function b().
+assertEquals(start_b, script.locationFromLine(0, 0, start_b).position);
+assertEquals(1, script.locationFromLine(0, 0, start_b).line - comment_lines);
+assertEquals(13, script.locationFromLine(0, 0, start_b).column);
+
+// Test second line offset function b().
+assertEquals(start_b + 6, script.locationFromLine(1, 0, start_b).position);
+assertEquals(2, script.locationFromLine(1, 0, start_b).line - comment_lines);
+assertEquals(0, script.locationFromLine(1, 0, start_b).column);
+
+// Test second line column 11 offset function b().
+assertEquals(start_b + 6 + 11, script.locationFromLine(1, 11, start_b).position);
+assertEquals(2, script.locationFromLine(1, 11, start_b).line - comment_lines);
+assertEquals(11, script.locationFromLine(1, 11, start_b).column);
+
+// Test second line column 12 offset function b. Second line in b is 11 long
+// using column 12 wraps to next line.
+assertEquals(start_b + 6 + 12, script.locationFromLine(1, 12, start_b).position);
+assertEquals(3, script.locationFromLine(1, 12, start_b).line - comment_lines);
+assertEquals(0, script.locationFromLine(1, 12, start_b).column);
+
+// Test the Debug.findSourcePosition which wraps SourceManager.
+assertEquals(0 + start_a, Debug.findFunctionSourceLocation(a, 0, 0).position);
+assertEquals(0 + start_b, Debug.findFunctionSourceLocation(b, 0, 0).position);
+assertEquals(6 + start_b, Debug.findFunctionSourceLocation(b, 1, 0).position);
+assertEquals(8 + start_b, Debug.findFunctionSourceLocation(b, 1, 2).position);
+assertEquals(18 + start_b, Debug.findFunctionSourceLocation(b, 2, 0).position);
+assertEquals(0 + start_c, Debug.findFunctionSourceLocation(c, 0, 0).position);
+assertEquals(7 + start_c, Debug.findFunctionSourceLocation(c, 1, 0).position);
+assertEquals(21 + start_c, Debug.findFunctionSourceLocation(c, 2, 0).position);
+assertEquals(38 + start_c, Debug.findFunctionSourceLocation(c, 3, 0).position);
+assertEquals(52 + start_c, Debug.findFunctionSourceLocation(c, 4, 0).position);
+assertEquals(69 + start_c, Debug.findFunctionSourceLocation(c, 5, 0).position);
+assertEquals(76 + start_c, Debug.findFunctionSourceLocation(c, 6, 0).position);
+assertEquals(0 + start_d, Debug.findFunctionSourceLocation(d, 0, 0).position);
+assertEquals(7 + start_d, Debug.findFunctionSourceLocation(d, 1, 0).position);
+for (i = 1; i <= num_lines_d; i++) {
+ assertEquals(7 + (i * line_length_d) + start_d, Debug.findFunctionSourceLocation(d, (i + 1), 0).position);
+}
+assertEquals(175 + start_d, Debug.findFunctionSourceLocation(d, 17, 0).position);
+
+// Make sure invalid inputs work properly.
+assertEquals(0, script.locationFromPosition(-1).line);
+assertEquals(null, script.locationFromPosition(last_position + 1));
+
+// Test last position.
+assertEquals(last_position, script.locationFromPosition(last_position).position);
+assertEquals(last_line, script.locationFromPosition(last_position).line);
+assertEquals(last_column, script.locationFromPosition(last_position).column);
+
+// Test source line and restriction. All the following tests start from line 1
+// column 2 in function b, which is the call to c.
+// c(true);
+// ^
+
+var location;
+
+location = script.locationFromLine(1, 0, start_b);
+assertEquals(' c(true);', location.sourceText());
+
+result = ['c', ' c', ' c(', ' c(', ' c(t']
+for (var i = 1; i <= 5; i++) {
+ location = script.locationFromLine(1, 2, start_b);
+ location.restrict(i);
+ assertEquals(result[i - 1], location.sourceText());
+}
+
+location = script.locationFromLine(1, 2, start_b);
+location.restrict(1, 0);
+assertEquals('c', location.sourceText());
+
+location = script.locationFromLine(1, 2, start_b);
+location.restrict(2, 0);
+assertEquals('c(', location.sourceText());
+
+location = script.locationFromLine(1, 2, start_b);
+location.restrict(2, 1);
+assertEquals(' c', location.sourceText());
+
+location = script.locationFromLine(1, 2, start_b);
+location.restrict(2, 2);
+assertEquals(' c', location.sourceText());
+
+location = script.locationFromLine(1, 2, start_b);
+location.restrict(2, 3);
+assertEquals(' c', location.sourceText());
+
+location = script.locationFromLine(1, 2, start_b);
+location.restrict(3, 1);
+assertEquals(' c(', location.sourceText());
+
+location = script.locationFromLine(1, 2, start_b);
+location.restrict(5, 0);
+assertEquals('c(tru', location.sourceText());
+
+location = script.locationFromLine(1, 2, start_b);
+location.restrict(5, 2);
+assertEquals(' c(t', location.sourceText());
+
+location = script.locationFromLine(1, 2, start_b);
+location.restrict(5, 4);
+assertEquals(' c(t', location.sourceText());
+
+// All the following tests start from line 1 column 10 in function b, which is
+// the final character.
+// c(true);
+// ^
+
+location = script.locationFromLine(1, 10, start_b);
+location.restrict(5, 0);
+assertEquals('rue);', location.sourceText());
+
+location = script.locationFromLine(1, 10, start_b);
+location.restrict(7, 0);
+assertEquals('(true);', location.sourceText());
+
+// All the following tests start from line 1 column 0 in function b, which is
+// the first character.
+// c(true);
+//^
+
+location = script.locationFromLine(1, 0, start_b);
+location.restrict(5, 0);
+assertEquals(' c(t', location.sourceText());
+
+location = script.locationFromLine(1, 0, start_b);
+location.restrict(5, 4);
+assertEquals(' c(t', location.sourceText());
+
+location = script.locationFromLine(1, 0, start_b);
+location.restrict(7, 0);
+assertEquals(' c(tru', location.sourceText());
+
+location = script.locationFromLine(1, 0, start_b);
+location.restrict(7, 6);
+assertEquals(' c(tru', location.sourceText());
+
+// Test that script.sourceLine(line) works.
+for (line = 0; line < num_lines_d; line++) {
+ var line_content_regexp = new RegExp(" x = " + (line + 1));
+ assertTrue(line_content_regexp.test(script.sourceLine(start_line_d + line)));
+}
diff --git a/V8Binding/v8/test/mjsunit/div-mod.js b/V8Binding/v8/test/mjsunit/div-mod.js
new file mode 100644
index 0000000..39fab27
--- /dev/null
+++ b/V8Binding/v8/test/mjsunit/div-mod.js
@@ -0,0 +1,95 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test fast div and mod.
+
+function divmod(div_func, mod_func, x, y) {
+ var div_answer = (div_func)(x);
+ assertEquals(x / y, div_answer, x + "/" + y);
+ var mod_answer = (mod_func)(x);
+ assertEquals(x % y, mod_answer, x + "%" + y);
+ var minus_div_answer = (div_func)(-x);
+ assertEquals(-x / y, minus_div_answer, "-" + x + "/" + y);
+ var minus_mod_answer = (mod_func)(-x);
+ assertEquals(-x % y, minus_mod_answer, "-" + x + "%" + y);
+}
+
+
+function run_tests_for(divisor) {
+ print("(function(left) { return left / " + divisor + "; })");
+ var div_func = this.eval("(function(left) { return left / " + divisor + "; })");
+ var mod_func = this.eval("(function(left) { return left % " + divisor + "; })");
+ var exp;
+ // Strange number test.
+ divmod(div_func, mod_func, 0, divisor);
+ divmod(div_func, mod_func, 1 / 0, divisor);
+ // Floating point number test.
+ for (exp = -1024; exp <= 1024; exp += 4) {
+ divmod(div_func, mod_func, Math.pow(2, exp), divisor);
+ divmod(div_func, mod_func, 0.9999999 * Math.pow(2, exp), divisor);
+ divmod(div_func, mod_func, 1.0000001 * Math.pow(2, exp), divisor);
+ }
+ // Integer number test.
+ for (exp = 0; exp <= 32; exp++) {
+ divmod(div_func, mod_func, 1 << exp, divisor);
+ divmod(div_func, mod_func, (1 << exp) + 1, divisor);
+ divmod(div_func, mod_func, (1 << exp) - 1, divisor);
+ }
+ divmod(div_func, mod_func, Math.floor(0x1fffffff / 3), divisor);
+ divmod(div_func, mod_func, Math.floor(-0x20000000 / 3), divisor);
+}
+
+
+var divisors = [
+ 0,
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7,
+ 8,
+ 9,
+ 10,
+ // These ones in the middle don't add much apart from slowness to the test.
+ 0x1000000,
+ 0x2000000,
+ 0x4000000,
+ 0x8000000,
+ 0x10000000,
+ 0x20000000,
+ 0x40000000,
+ 12,
+ 60,
+ 100,
+ 1000 * 60 * 60 * 24];
+
+for (var i = 0; i < divisors.length; i++) {
+ run_tests_for(divisors[i]);
+}
+
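The eval'd functions above bake each divisor in as a literal, which lets a compiler specialize division by a known constant, typically by replacing the division with a multiply by a precomputed reciprocal plus a shift; the test then checks the result against the generic x / y path. A self-checking 16-bit sketch of that reciprocal trick (illustrative only, not code from this patch):

// floor(x / 3) via reciprocal multiplication, valid for 0 <= x < 2^16:
// 0xAAAB == ceil(2^17 / 3), so (x * 0xAAAB) >>> 17 == floor(x / 3).
for (var x = 0; x < 0x10000; x++) {
  var fast = (x * 0xAAAB) >>> 17;
  if (fast !== Math.floor(x / 3)) throw new Error("mismatch at x = " + x);
}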
diff --git a/V8Binding/v8/test/mjsunit/fuzz-natives.js b/V8Binding/v8/test/mjsunit/fuzz-natives.js
index debcc9a..c653b18 100644
--- a/V8Binding/v8/test/mjsunit/fuzz-natives.js
+++ b/V8Binding/v8/test/mjsunit/fuzz-natives.js
@@ -126,7 +126,9 @@ var knownProblems = {
"CreateArrayLiteralBoilerplate": true,
"IS_VAR": true,
"ResolvePossiblyDirectEval": true,
- "Log": true
+ "Log": true,
+
+ "CollectStackTrace": true
};
var currentlyUncallable = {
diff --git a/V8Binding/v8/test/mjsunit/regress/regress-394.js b/V8Binding/v8/test/mjsunit/regress/regress-394.js
new file mode 100644
index 0000000..7b98205
--- /dev/null
+++ b/V8Binding/v8/test/mjsunit/regress/regress-394.js
@@ -0,0 +1,47 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// See <URL:http://code.google.com/p/v8/issues/detail?id=394>
+
+function setx(){
+ x=1;
+}
+
+function getx(){
+ return x;
+}
+
+setx()
+setx()
+__defineSetter__('x',function(){});
+__defineGetter__('x',function(){return 2;});
+setx()
+assertEquals(2, x);
+
+assertEquals(2, getx());
+assertEquals(2, getx());
+assertEquals(2, getx());
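A plain-object analogue of what this regression test asserts (a sketch; the test itself targets the global object): once an accessor is defined over an existing data property, every subsequent read must go through the getter.

var o = {};
o.x = 1;                                             // starts as a data property
o.__defineGetter__('x', function () { return 2; });  // redefined as an accessor
// From here on, o.x must evaluate to 2 on every read.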
diff --git a/V8Binding/v8/test/mjsunit/regress/regress-396.js b/V8Binding/v8/test/mjsunit/regress/regress-396.js
new file mode 100644
index 0000000..e6f2ce3
--- /dev/null
+++ b/V8Binding/v8/test/mjsunit/regress/regress-396.js
@@ -0,0 +1,39 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// http://code.google.com/p/v8/issues/detail?id=396
+
+function DateYear(date) {
+ var string = date.getYear() + '';
+ if (string.length < 4) {
+ string = '' + (string - 0 + 1900);
+ }
+ return string;
+}
+
+assertEquals('1995', DateYear(new Date('Dec 25, 1995')));
+assertEquals('2005', DateYear(new Date('Dec 25, 2005')));
diff --git a/V8Binding/v8/test/mjsunit/smi-negative-zero.js b/V8Binding/v8/test/mjsunit/smi-negative-zero.js
index 99ddc97..afeb6de 100644
--- a/V8Binding/v8/test/mjsunit/smi-negative-zero.js
+++ b/V8Binding/v8/test/mjsunit/smi-negative-zero.js
@@ -37,64 +37,64 @@ var minus_four = -4;
// variable op variable
-assertEquals(one / (-zero), -Infinity);
+assertEquals(one / (-zero), -Infinity, "one / -0 I");
-assertEquals(one / (zero * minus_one), -Infinity);
-assertEquals(one / (minus_one * zero), -Infinity);
-assertEquals(one / (zero * zero), Infinity);
-assertEquals(one / (minus_one * minus_one), 1);
+assertEquals(one / (zero * minus_one), -Infinity, "one / -1");
+assertEquals(one / (minus_one * zero), -Infinity, "one / -0 II");
+assertEquals(one / (zero * zero), Infinity, "one / 0 I");
+assertEquals(one / (minus_one * minus_one), 1, "one / 1");
-assertEquals(one / (zero / minus_one), -Infinity);
-assertEquals(one / (zero / one), Infinity);
+assertEquals(one / (zero / minus_one), -Infinity, "one / -0 III");
+assertEquals(one / (zero / one), Infinity, "one / 0 II");
-assertEquals(one / (minus_four % two), -Infinity);
-assertEquals(one / (minus_four % minus_two), -Infinity);
-assertEquals(one / (four % two), Infinity);
-assertEquals(one / (four % minus_two), Infinity);
+assertEquals(one / (minus_four % two), -Infinity, "foo");
+assertEquals(one / (minus_four % minus_two), -Infinity, "foo");
+assertEquals(one / (four % two), Infinity, "foo");
+assertEquals(one / (four % minus_two), Infinity, "foo");
// literal op variable
-assertEquals(one / (0 * minus_one), -Infinity);
-assertEquals(one / (-1 * zero), -Infinity);
-assertEquals(one / (0 * zero), Infinity);
-assertEquals(one / (-1 * minus_one), 1);
+assertEquals(one / (0 * minus_one), -Infinity, "bar");
+assertEquals(one / (-1 * zero), -Infinity, "bar");
+assertEquals(one / (0 * zero), Infinity, "bar");
+assertEquals(one / (-1 * minus_one), 1, "bar");
-assertEquals(one / (0 / minus_one), -Infinity);
-assertEquals(one / (0 / one), Infinity);
+assertEquals(one / (0 / minus_one), -Infinity, "baz");
+assertEquals(one / (0 / one), Infinity, "baz");
-assertEquals(one / (-4 % two), -Infinity);
-assertEquals(one / (-4 % minus_two), -Infinity);
-assertEquals(one / (4 % two), Infinity);
-assertEquals(one / (4 % minus_two), Infinity);
+assertEquals(one / (-4 % two), -Infinity, "baz");
+assertEquals(one / (-4 % minus_two), -Infinity, "baz");
+assertEquals(one / (4 % two), Infinity, "baz");
+assertEquals(one / (4 % minus_two), Infinity, "baz");
// variable op literal
-assertEquals(one / (zero * -1), -Infinity);
-assertEquals(one / (minus_one * 0), -Infinity);
-assertEquals(one / (zero * 0), Infinity);
-assertEquals(one / (minus_one * -1), 1);
+assertEquals(one / (zero * -1), -Infinity, "fizz");
+assertEquals(one / (minus_one * 0), -Infinity, "fizz");
+assertEquals(one / (zero * 0), Infinity, "fizz");
+assertEquals(one / (minus_one * -1), 1, "fizz");
-assertEquals(one / (zero / -1), -Infinity);
-assertEquals(one / (zero / 1), Infinity);
+assertEquals(one / (zero / -1), -Infinity, "buzz");
+assertEquals(one / (zero / 1), Infinity, "buzz");
-assertEquals(one / (minus_four % 2), -Infinity);
-assertEquals(one / (minus_four % -2), -Infinity);
-assertEquals(one / (four % 2), Infinity);
-assertEquals(one / (four % -2), Infinity);
+assertEquals(one / (minus_four % 2), -Infinity, "buzz");
+assertEquals(one / (minus_four % -2), -Infinity, "buzz");
+assertEquals(one / (four % 2), Infinity, "buzz");
+assertEquals(one / (four % -2), Infinity, "buzz");
// literal op literal
-assertEquals(one / (-0), -Infinity);
+assertEquals(one / (-0), -Infinity, "fisk1");
-assertEquals(one / (0 * -1), -Infinity);
-assertEquals(one / (-1 * 0), -Infinity);
-assertEquals(one / (0 * 0), Infinity);
-assertEquals(one / (-1 * -1), 1);
+assertEquals(one / (0 * -1), -Infinity, "fisk2");
+assertEquals(one / (-1 * 0), -Infinity, "fisk3");
+assertEquals(one / (0 * 0), Infinity, "fisk4");
+assertEquals(one / (-1 * -1), 1, "fisk5");
-assertEquals(one / (0 / -1), -Infinity);
-assertEquals(one / (0 / 1), Infinity);
+assertEquals(one / (0 / -1), -Infinity, "hest");
+assertEquals(one / (0 / 1), Infinity, "hest");
-assertEquals(one / (-4 % 2), -Infinity);
-assertEquals(one / (-4 % -2), -Infinity);
-assertEquals(one / (4 % 2), Infinity);
-assertEquals(one / (4 % -2), Infinity);
+assertEquals(one / (-4 % 2), -Infinity, "fiskhest");
+assertEquals(one / (-4 % -2), -Infinity, "fiskhest");
+assertEquals(one / (4 % 2), Infinity, "fiskhest");
+assertEquals(one / (4 % -2), Infinity, "fiskhest");
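A note on the idiom used throughout this file (sketch): -0 and 0 compare equal under ===, so dividing one by the result is the standard way to observe which zero an expression produced; that is why every assertion above wraps the expression in "one / (...)".

var negZero = -1 * 0;
var isNegativeZero = (negZero === 0) && (1 / negZero === -Infinity);
// isNegativeZero is true; a direct negZero === -0 check cannot tell the
// zeros apart, because -0 === 0 evaluates to true.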
diff --git a/V8Binding/v8/test/mjsunit/stack-traces.js b/V8Binding/v8/test/mjsunit/stack-traces.js
index 6ac8b0a..e457ece 100644
--- a/V8Binding/v8/test/mjsunit/stack-traces.js
+++ b/V8Binding/v8/test/mjsunit/stack-traces.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-Error.captureStackTraces = true;
-
function testMethodNameInference() {
function Foo() { }
Foo.prototype.bar = function () { FAIL; };
@@ -75,6 +73,17 @@ function testConstructor() {
new Plonk();
}
+function testRenamedMethod() {
+ function a$b$c$d() { return FAIL; }
+ function Wookie() { }
+ Wookie.prototype.d = a$b$c$d;
+ (new Wookie).d();
+}
+
+function testAnonymousMethod() {
+ (function () { FAIL }).call([1, 2, 3]);
+}
+
// Utility function for testing that the expected strings occur
// in the stack trace produced when running the given function.
function testTrace(fun, expected) {
@@ -151,9 +160,11 @@ testTrace(testNested, ["at one", "at two", "at three"]);
testTrace(testMethodNameInference, ["at Foo.bar"]);
testTrace(testImplicitConversion, ["at Nirk.valueOf"]);
testTrace(testEval, ["at Doo (eval at testEval"]);
-testTrace(testNestedEval, ["at eval (eval at Inner (eval at Outer"]);
+testTrace(testNestedEval, ["eval at Inner (eval at Outer"]);
testTrace(testValue, ["at Number.causeError"]);
testTrace(testConstructor, ["new Plonk"]);
+testTrace(testRenamedMethod, ["Wookie.a$b$c$d [as d]"]);
+testTrace(testAnonymousMethod, ["Array.<anonymous>"]);
testCallerCensorship();
testUnintendedCallerCensorship();
diff --git a/V8Binding/v8/test/mozilla/mozilla.status b/V8Binding/v8/test/mozilla/mozilla.status
index 760ed41..13ae29c 100644
--- a/V8Binding/v8/test/mozilla/mozilla.status
+++ b/V8Binding/v8/test/mozilla/mozilla.status
@@ -476,12 +476,11 @@ js1_2/Array/array_split_1: FAIL_OK
js1_5/Array/regress-313153: FAIL_OK
-# Properties stack, fileName, and lineNumber of Error instances are
+# Properties fileName, and lineNumber of Error instances are
# not supported. Mozilla specific extension.
js1_5/Exceptions/errstack-001: FAIL_OK
js1_5/Exceptions/regress-257751: FAIL_OK
js1_5/Regress/regress-119719: FAIL_OK
-js1_5/Regress/regress-139316: FAIL_OK
js1_5/Regress/regress-167328: FAIL_OK
js1_5/Regress/regress-243869: FAIL_OK